{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n \"\"\")\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"schema\")\n parser.add_argument('--only', action='append')\n parser.add_argument('--redirect', action='append')\n parser.add_argument('--brand')\n parser.add_argument('--brandlink')\n parser.add_argument('--primtype', default=\"#PrimitiveType\")\n\n args = parser.parse_args()\n\n s = []\n a = args.schema\n with open(a) as f:\n if a.endswith(\"md\"):\n s.append({\"name\": os.path.splitext(os.path.basename(a))[0],\n \"type\": \"documentation\",\n \"doc\": f.read().decode(\"utf-8\")\n })\n else:\n uri = \"file://\" + os.path.abspath(a)\n _, _, metaschema_loader = schema.get_metaschema()\n j, 
schema_metadata = metaschema_loader.resolve_ref(uri, \"\")\n if isinstance(j, list):\n s.extend(j)\n else:\n s.append(j)\n\n primitiveType = args.primtype\n\n redirect = {r.split(\"=\")[0]:r.split(\"=\")[1] for r in args.redirect} if args.redirect else {}\n renderlist = args.only if args.only else []\n avrold_doc(s, sys.stdout, renderlist, redirect, args.brand, args.brandlink)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475116,"cells":{"repo_name":{"kind":"string","value":"nwjs/chromium.src"},"path":{"kind":"string","value":"mojo/public/tools/bindings/pylib/mojom/parse/lexer.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"6258"},"content":{"kind":"string","value":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport imp\nimport os.path\nimport sys\n\ndef _GetDirAbove(dirname):\n \"\"\"Returns the directory \"above\" this file containing |dirname| (which must\n also be \"above\" this file).\"\"\"\n path = os.path.abspath(__file__)\n while True:\n path, tail = os.path.split(path)\n assert tail\n if tail == dirname:\n return path\n\ntry:\n imp.find_module(\"ply\")\nexcept ImportError:\n sys.path.append(os.path.join(_GetDirAbove(\"mojo\"), \"third_party\"))\nfrom ply.lex import TOKEN\n\nfrom ..error import Error\n\n\nclass LexError(Error):\n \"\"\"Class for errors from the lexer.\"\"\"\n\n def __init__(self, filename, message, lineno):\n Error.__init__(self, filename, message, lineno=lineno)\n\n\n# We have methods which look like they could be functions:\n# pylint: disable=R0201\nclass Lexer(object):\n\n def __init__(self, filename):\n self.filename = filename\n\n ######################-- PRIVATE --######################\n\n ##\n ## Internal auxiliary methods\n ##\n def _error(self, msg, token):\n raise LexError(self.filename, msg, token.lineno)\n\n ##\n ## Reserved keywords\n ##\n keywords = (\n 
'HANDLE',\n\n 'IMPORT',\n 'MODULE',\n 'STRUCT',\n 'UNION',\n 'INTERFACE',\n 'ENUM',\n 'CONST',\n 'TRUE',\n 'FALSE',\n 'DEFAULT',\n 'ARRAY',\n 'MAP',\n 'ASSOCIATED',\n 'PENDING_REMOTE',\n 'PENDING_RECEIVER',\n 'PENDING_ASSOCIATED_REMOTE',\n 'PENDING_ASSOCIATED_RECEIVER',\n )\n\n keyword_map = {}\n for keyword in keywords:\n keyword_map[keyword.lower()] = keyword\n\n ##\n ## All the tokens recognized by the lexer\n ##\n tokens = keywords + (\n # Identifiers\n 'NAME',\n\n # Constants\n 'ORDINAL',\n 'INT_CONST_DEC', 'INT_CONST_HEX',\n 'FLOAT_CONST',\n\n # String literals\n 'STRING_LITERAL',\n\n # Operators\n 'MINUS',\n 'PLUS',\n 'AMP',\n 'QSTN',\n\n # Assignment\n 'EQUALS',\n\n # Request / response\n 'RESPONSE',\n\n # Delimiters\n 'LPAREN', 'RPAREN', # ( )\n 'LBRACKET', 'RBRACKET', # [ ]\n 'LBRACE', 'RBRACE', # { }\n 'LANGLE', 'RANGLE', # < >\n 'SEMI', # ;\n 'COMMA', 'DOT' # , .\n )\n\n ##\n ## Regexes for use in tokens\n ##\n\n # valid C identifiers (K&R2: A.2.3)\n identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'\n\n hex_prefix = '0[xX]'\n hex_digits = '[0-9a-fA-F]+'\n\n # integer constants (K&R2: A.2.5.1)\n decimal_constant = '0|([1-9][0-9]*)'\n hex_constant = hex_prefix+hex_digits\n # Don't allow octal constants (even invalid octal).\n octal_constant_disallowed = '0[0-9]+'\n\n # character constants (K&R2: A.2.5.2)\n # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line\n # directives with Windows paths as filenames (..\\..\\dir\\file)\n # For the same reason, decimal_escape allows all digit sequences. 
We want to\n # parse all correct code, even if it means to sometimes parse incorrect\n # code.\n #\n simple_escape = r\"\"\"([a-zA-Z._~!=&\\^\\-\\\\?'\"])\"\"\"\n decimal_escape = r\"\"\"(\\d+)\"\"\"\n hex_escape = r\"\"\"(x[0-9a-fA-F]+)\"\"\"\n bad_escape = r\"\"\"([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?'\"x0-7])\"\"\"\n\n escape_sequence = \\\n r\"\"\"(\\\\(\"\"\"+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'\n\n # string literals (K&R2: A.2.6)\n string_char = r\"\"\"([^\"\\\\\\n]|\"\"\"+escape_sequence+')'\n string_literal = '\"'+string_char+'*\"'\n bad_string_literal = '\"'+string_char+'*'+bad_escape+string_char+'*\"'\n\n # floating constants (K&R2: A.2.5.3)\n exponent_part = r\"\"\"([eE][-+]?[0-9]+)\"\"\"\n fractional_constant = r\"\"\"([0-9]*\\.[0-9]+)|([0-9]+\\.)\"\"\"\n floating_constant = \\\n '(((('+fractional_constant+')'+ \\\n exponent_part+'?)|([0-9]+'+exponent_part+')))'\n\n # Ordinals\n ordinal = r'@[0-9]+'\n missing_ordinal_value = r'@'\n # Don't allow ordinal values in octal (even invalid octal, like 09) or\n # hexadecimal.\n octal_or_hex_ordinal_disallowed = r'@((0[0-9]+)|('+hex_prefix+hex_digits+'))'\n\n ##\n ## Rules for the normal state\n ##\n t_ignore = ' \\t\\r'\n\n # Newlines\n def t_NEWLINE(self, t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n # Operators\n t_MINUS = r'-'\n t_PLUS = r'\\+'\n t_AMP = r'&'\n t_QSTN = r'\\?'\n\n # =\n t_EQUALS = r'='\n\n # =>\n t_RESPONSE = r'=>'\n\n # Delimiters\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n t_LBRACKET = r'\\['\n t_RBRACKET = r'\\]'\n t_LBRACE = r'\\{'\n t_RBRACE = r'\\}'\n t_LANGLE = r'<'\n t_RANGLE = r'>'\n t_COMMA = r','\n t_DOT = r'\\.'\n t_SEMI = r';'\n\n t_STRING_LITERAL = string_literal\n\n # The following floating and integer constants are defined as\n # functions to impose a strict order (otherwise, decimal\n # is placed before the others because its regex is longer,\n # and this is bad)\n #\n @TOKEN(floating_constant)\n def t_FLOAT_CONST(self, t):\n return t\n\n @TOKEN(hex_constant)\n 
def t_INT_CONST_HEX(self, t):\n return t\n\n @TOKEN(octal_constant_disallowed)\n def t_OCTAL_CONSTANT_DISALLOWED(self, t):\n msg = \"Octal values not allowed\"\n self._error(msg, t)\n\n @TOKEN(decimal_constant)\n def t_INT_CONST_DEC(self, t):\n return t\n\n # unmatched string literals are caught by the preprocessor\n\n @TOKEN(bad_string_literal)\n def t_BAD_STRING_LITERAL(self, t):\n msg = \"String contains invalid escape code\"\n self._error(msg, t)\n\n # Handle ordinal-related tokens in the right order:\n @TOKEN(octal_or_hex_ordinal_disallowed)\n def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):\n msg = \"Octal and hexadecimal ordinal values not allowed\"\n self._error(msg, t)\n\n @TOKEN(ordinal)\n def t_ORDINAL(self, t):\n return t\n\n @TOKEN(missing_ordinal_value)\n def t_BAD_ORDINAL(self, t):\n msg = \"Missing ordinal value\"\n self._error(msg, t)\n\n @TOKEN(identifier)\n def t_NAME(self, t):\n t.type = self.keyword_map.get(t.value, \"NAME\")\n return t\n\n # Ignore C and C++ style comments\n def t_COMMENT(self, t):\n r'(/\\*(.|\\n)*?\\*/)|(//.*(\\n[ \\t]*//.*)*)'\n t.lexer.lineno += t.value.count(\"\\n\")\n\n def t_error(self, t):\n msg = \"Illegal character %s\" % repr(t.value[0])\n self._error(msg, t)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475117,"cells":{"repo_name":{"kind":"string","value":"dlazz/ansible"},"path":{"kind":"string","value":"lib/ansible/modules/network/ios/ios_linkagg.py"},"copies":{"kind":"string","value":"57"},"size":{"kind":"string","value":"9433"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2017, Ansible by Red Hat, inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: 
ios_linkagg\nversion_added: \"2.5\"\nauthor: \"Trishna Guha (@trishnaguha)\"\nshort_description: Manage link aggregation groups on Cisco IOS network devices\ndescription:\n - This module provides declarative management of link aggregation groups\n on Cisco IOS network devices.\nnotes:\n - Tested against IOS 15.2\noptions:\n group:\n description:\n - Channel-group number for the port-channel\n Link aggregation group. Range 1-255.\n mode:\n description:\n - Mode of the link aggregation group.\n choices: ['active', 'on', 'passive', 'auto', 'desirable']\n members:\n description:\n - List of members of the link aggregation group.\n aggregate:\n description: List of link aggregation definitions.\n state:\n description:\n - State of the link aggregation group.\n default: present\n choices: ['present', 'absent']\n purge:\n description:\n - Purge links not defined in the I(aggregate) parameter.\n default: no\n type: bool\nextends_documentation_fragment: ios\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: create link aggregation group\n ios_linkagg:\n group: 10\n state: present\n\n- name: delete link aggregation group\n ios_linkagg:\n group: 10\n state: absent\n\n- name: set link aggregation group to members\n ios_linkagg:\n group: 200\n mode: active\n members:\n - GigabitEthernet0/0\n - GigabitEthernet0/1\n\n- name: remove link aggregation group from GigabitEthernet0/0\n ios_linkagg:\n group: 200\n mode: active\n members:\n - GigabitEthernet0/1\n\n- name: Create aggregate of linkagg definitions\n ios_linkagg:\n aggregate:\n - { group: 3, mode: on, members: [GigabitEthernet0/1] }\n - { group: 100, mode: passive, members: [GigabitEthernet0/2] }\n\"\"\"\n\nRETURN = \"\"\"\ncommands:\n description: The list of configuration mode commands to send to the device\n returned: always, except for the platforms that use Netconf transport to manage the device.\n type: list\n sample:\n - interface port-channel 30\n - interface GigabitEthernet0/3\n - channel-group 30 mode on\n - no interface 
port-channel 30\n\"\"\"\n\nimport re\nfrom copy import deepcopy\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.network.common.config import CustomNetworkConfig\nfrom ansible.module_utils.network.common.utils import remove_default_spec\nfrom ansible.module_utils.network.ios.ios import get_config, load_config\nfrom ansible.module_utils.network.ios.ios import ios_argument_spec\n\n\ndef search_obj_in_list(group, lst):\n for o in lst:\n if o['group'] == group:\n return o\n\n\ndef map_obj_to_commands(updates, module):\n commands = list()\n want, have = updates\n purge = module.params['purge']\n\n for w in want:\n group = w['group']\n mode = w['mode']\n members = w.get('members') or []\n state = w['state']\n del w['state']\n\n obj_in_have = search_obj_in_list(group, have)\n\n if state == 'absent':\n if obj_in_have:\n commands.append('no interface port-channel {0}'.format(group))\n\n elif state == 'present':\n cmd = ['interface port-channel {0}'.format(group),\n 'end']\n if not obj_in_have:\n if not group:\n module.fail_json(msg='group is a required option')\n commands.extend(cmd)\n\n if members:\n for m in members:\n commands.append('interface {0}'.format(m))\n commands.append('channel-group {0} mode {1}'.format(group, mode))\n\n else:\n if members:\n if 'members' not in obj_in_have.keys():\n for m in members:\n commands.extend(cmd)\n commands.append('interface {0}'.format(m))\n commands.append('channel-group {0} mode {1}'.format(group, mode))\n\n elif set(members) != set(obj_in_have['members']):\n missing_members = list(set(members) - set(obj_in_have['members']))\n for m in missing_members:\n commands.extend(cmd)\n commands.append('interface {0}'.format(m))\n commands.append('channel-group {0} mode {1}'.format(group, mode))\n\n superfluous_members = list(set(obj_in_have['members']) - set(members))\n for m in superfluous_members:\n commands.extend(cmd)\n commands.append('interface {0}'.format(m))\n commands.append('no channel-group {0} 
mode {1}'.format(group, mode))\n\n if purge:\n for h in have:\n obj_in_want = search_obj_in_list(h['group'], want)\n if not obj_in_want:\n commands.append('no interface port-channel {0}'.format(h['group']))\n\n return commands\n\n\ndef map_params_to_obj(module):\n obj = []\n\n aggregate = module.params.get('aggregate')\n if aggregate:\n for item in aggregate:\n for key in item:\n if item.get(key) is None:\n item[key] = module.params[key]\n\n d = item.copy()\n d['group'] = str(d['group'])\n\n obj.append(d)\n else:\n obj.append({\n 'group': str(module.params['group']),\n 'mode': module.params['mode'],\n 'members': module.params['members'],\n 'state': module.params['state']\n })\n\n return obj\n\n\ndef parse_mode(module, config, group, member):\n mode = None\n netcfg = CustomNetworkConfig(indent=1, contents=config)\n parents = ['interface {0}'.format(member)]\n body = netcfg.get_section(parents)\n\n match_int = re.findall(r'interface {0}\\n'.format(member), body, re.M)\n if match_int:\n match = re.search(r'channel-group {0} mode (\\S+)'.format(group), body, re.M)\n if match:\n mode = match.group(1)\n\n return mode\n\n\ndef parse_members(module, config, group):\n members = []\n\n for line in config.strip().split('!'):\n l = line.strip()\n if l.startswith('interface'):\n match_group = re.findall(r'channel-group {0} mode'.format(group), l, re.M)\n if match_group:\n match = re.search(r'interface (\\S+)', l, re.M)\n if match:\n members.append(match.group(1))\n\n return members\n\n\ndef get_channel(module, config, group):\n match = re.findall(r'^interface (\\S+)', config, re.M)\n\n if not match:\n return {}\n\n channel = {}\n for item in set(match):\n member = item\n channel['mode'] = parse_mode(module, config, group, member)\n channel['members'] = parse_members(module, config, group)\n\n return channel\n\n\ndef map_config_to_obj(module):\n objs = list()\n config = get_config(module)\n\n for line in config.split('\\n'):\n l = line.strip()\n match = re.search(r'interface 
Port-channel(\\S+)', l, re.M)\n if match:\n obj = {}\n group = match.group(1)\n obj['group'] = group\n obj.update(get_channel(module, config, group))\n objs.append(obj)\n\n return objs\n\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n element_spec = dict(\n group=dict(type='int'),\n mode=dict(choices=['active', 'on', 'passive', 'auto', 'desirable']),\n members=dict(type='list'),\n state=dict(default='present',\n choices=['present', 'absent'])\n )\n\n aggregate_spec = deepcopy(element_spec)\n aggregate_spec['group'] = dict(required=True)\n\n required_one_of = [['group', 'aggregate']]\n required_together = [['members', 'mode']]\n mutually_exclusive = [['group', 'aggregate']]\n\n # remove default in aggregate spec, to handle common arguments\n remove_default_spec(aggregate_spec)\n\n argument_spec = dict(\n aggregate=dict(type='list', elements='dict', options=aggregate_spec,\n required_together=required_together),\n purge=dict(default=False, type='bool')\n )\n\n argument_spec.update(element_spec)\n argument_spec.update(ios_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n required_one_of=required_one_of,\n required_together=required_together,\n mutually_exclusive=mutually_exclusive,\n supports_check_mode=True)\n\n warnings = list()\n result = {'changed': False}\n if warnings:\n result['warnings'] = warnings\n\n want = map_params_to_obj(module)\n have = map_config_to_obj(module)\n\n commands = map_obj_to_commands((want, have), module)\n result['commands'] = commands\n\n if commands:\n if not module.check_mode:\n load_config(module, commands)\n result['changed'] = True\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n 
main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475118,"cells":{"repo_name":{"kind":"string","value":"adamklawonn/CityCircles"},"path":{"kind":"string","value":"citycircles_iphone/build_back2/iphoneDistribution-iphoneos/CityCircles.app/pntsandrects.py"},"copies":{"kind":"string","value":"17"},"size":{"kind":"string","value":"6225"},"content":{"kind":"string","value":"\"\"\"Point and Rectangle classes.\n\nThis code is in the public domain.\n\nPoint -- point with (x,y) coordinates\nRect -- two points, forming a rectangle\n\"\"\"\n\nimport math\n\n\nclass Point:\n \n \"\"\"A point identified by (x,y) coordinates.\n \n supports: +, -, *, /, str, repr\n \n length -- calculate length of vector to point from origin\n distance_to -- calculate distance between two points\n as_tuple -- construct tuple (x,y)\n clone -- construct a duplicate\n integerize -- convert x & y to integers\n floatize -- convert x & y to floats\n move_to -- reset x & y\n slide -- move (in place) +dx, +dy, as spec'd by point\n slide_xy -- move (in place) +dx, +dy\n rotate -- rotate around the origin\n rotate_about -- rotate around another point\n \"\"\"\n \n def __init__(self, x=0.0, y=0.0):\n self.x = x\n self.y = y\n \n def __add__(self, p):\n \"\"\"Point(x1+x2, y1+y2)\"\"\"\n return Point(self.x+p.x, self.y+p.y)\n \n def __sub__(self, p):\n \"\"\"Point(x1-x2, y1-y2)\"\"\"\n return Point(self.x-p.x, self.y-p.y)\n \n def __mul__( self, scalar ):\n \"\"\"Point(x1*x2, y1*y2)\"\"\"\n return Point(self.x*scalar, self.y*scalar)\n \n def __div__(self, scalar):\n \"\"\"Point(x1/x2, y1/y2)\"\"\"\n return Point(self.x/scalar, self.y/scalar)\n \n def __str__(self):\n return \"(%s, %s)\" % (self.x, self.y)\n \n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.x, self.y)\n \n def length(self):\n return math.sqrt(self.x**2 + self.y**2)\n \n def distance_to(self, p):\n \"\"\"Calculate the distance between two points.\"\"\"\n return (self - p).length()\n \n def 
as_tuple(self):\n \"\"\"(x, y)\"\"\"\n return (self.x, self.y)\n \n def clone(self):\n \"\"\"Return a full copy of this point.\"\"\"\n return Point(self.x, self.y)\n \n def integerize(self):\n \"\"\"Convert co-ordinate values to integers.\"\"\"\n self.x = int(self.x)\n self.y = int(self.y)\n \n def floatize(self):\n \"\"\"Convert co-ordinate values to floats.\"\"\"\n self.x = float(self.x)\n self.y = float(self.y)\n \n def move_to(self, x, y):\n \"\"\"Reset x & y coordinates.\"\"\"\n self.x = x\n self.y = y\n \n def slide(self, p):\n '''Move to new (x+dx,y+dy).\n \n Can anyone think up a better name for this function?\n slide? shift? delta? move_by?\n '''\n self.x = self.x + p.x\n self.y = self.y + p.y\n \n def slide_xy(self, dx, dy):\n '''Move to new (x+dx,y+dy).\n \n Can anyone think up a better name for this function?\n slide? shift? delta? move_by?\n '''\n self.x = self.x + dx\n self.y = self.y + dy\n \n def rotate(self, rad):\n \"\"\"Rotate counter-clockwise by rad radians.\n \n Positive y goes *up,* as in traditional mathematics.\n \n Interestingly, you can use this in y-down computer graphics, if\n you just remember that it turns clockwise, rather than\n counter-clockwise.\n \n The new position is returned as a new Point.\n \"\"\"\n s, c = [f(rad) for f in (math.sin, math.cos)]\n x, y = (c*self.x - s*self.y, s*self.x + c*self.y)\n return Point(x,y)\n \n def rotate_about(self, p, theta):\n \"\"\"Rotate counter-clockwise around a point, by theta degrees.\n \n Positive y goes *up,* as in traditional mathematics.\n \n The new position is returned as a new Point.\n \"\"\"\n result = self.clone()\n result.slide(-p.x, -p.y)\n result.rotate(theta)\n result.slide(p.x, p.y)\n return result\n\n\nclass Rect:\n\n \"\"\"A rectangle identified by two points.\n\n The rectangle stores left, top, right, and bottom values.\n\n Coordinates are based on screen coordinates.\n\n origin top\n +-----> x increases |\n | left -+- right\n v |\n y increases bottom\n\n set_points -- 
reset rectangle coordinates\n contains -- is a point inside?\n overlaps -- does a rectangle overlap?\n top_left -- get top-left corner\n bottom_right -- get bottom-right corner\n expanded_by -- grow (or shrink)\n \"\"\"\n\n def __init__(self, pt1, pt2):\n \"\"\"Initialize a rectangle from two points.\"\"\"\n self.set_points(pt1, pt2)\n\n def set_points(self, pt1, pt2):\n \"\"\"Reset the rectangle coordinates.\"\"\"\n (x1, y1) = pt1.as_tuple()\n (x2, y2) = pt2.as_tuple()\n self.left = min(x1, x2)\n self.top = min(y1, y2)\n self.right = max(x1, x2)\n self.bottom = max(y1, y2)\n\n def contains(self, pt):\n \"\"\"Return true if a point is inside the rectangle.\"\"\"\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)\n\n def overlaps(self, other):\n \"\"\"Return true if a rectangle overlaps this rectangle.\"\"\"\n return (self.right > other.left and self.left < other.right and\n self.top < other.bottom and self.bottom > other.top)\n \n def top_left(self):\n \"\"\"Return the top-left corner as a Point.\"\"\"\n return Point(self.left, self.top)\n \n def bottom_right(self):\n \"\"\"Return the bottom-right corner as a Point.\"\"\"\n return Point(self.right, self.bottom)\n \n def expanded_by(self, n):\n \"\"\"Return a rectangle with extended borders.\n\n Create a new rectangle that is wider and taller than the\n immediate one. 
All sides are extended by \"n\" points.\n \"\"\"\n p1 = Point(self.left-n, self.top-n)\n p2 = Point(self.right+n, self.bottom+n)\n return Rect(p1, p2)\n \n def __str__( self ):\n return \"\" % (self.left,self.top,\n self.right,self.bottom)\n \n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__,\n Point(self.left, self.top),\n Point(self.right, self.bottom))"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475119,"cells":{"repo_name":{"kind":"string","value":"rosjat/python-scsi"},"path":{"kind":"string","value":"pyscsi/pyscsi/scsi_enum_command.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"23775"},"content":{"kind":"string","value":"# coding: utf-8\n\n# Copyright (C) 2014 by Ronnie Sahlberg\n# Copyright (C) 2015 by Markus Rosjat\n# SPDX-FileCopyrightText: 2014 The python-scsi Authors\n#\n# SPDX-License-Identifier: LGPL-2.1-or-later\n\nfrom pyscsi.pyscsi.scsi_opcode import OpCode\nfrom pyscsi.utils.enum import Enum\n\n# Dictionaries to define service actions and there values\n#\n# We use a helper to connect the service actions to the corresponding opcode.as\n# The OpCode object holds a Enum object with the service actions and has a value and\n# a name property to access the opcode name and value.\n\n\"\"\"\n------------------------------------------------------------------------------\nMaintenance in Service Actions\n------------------------------------------------------------------------------\n\"\"\"\nsa_maintenance_in = {'REPORT_ASSIGNED_UNASSIGNED_P_EXTENT': 0x00,\n 'REPORT_COMPONENT_DEVICE': 0x01,\n 'REPORT_COMPONENT_DEVICE_ATTACHMENTS': 0x02,\n 'REPORT_DEVICE_IDENTIFICATION': 0x07,\n 'REPORT_PERIPHERAL_DEVICE': 0x03,\n 'REPORT_PERIPHERAL_DEVICE_ASSOCIATIONS': 0x04,\n 'REPORT_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x05,\n 'REPORT_STATES': 0x06,\n 'REPORT_SUPPORTED_CONFIGURATION_METHOD': 0x09,\n 'REPORT_UNCONFIGURED_CAPACITY': 0x08, 
}\n\n\"\"\"\n------------------------------------------------------------------------------\nMaintenance out Service Actions Dictionaries\n------------------------------------------------------------------------------\n\"\"\"\n\nsa_maintenance_out = {'ADD_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x00,\n 'ATTACH_TO_COMPONENT_DEVICE': 0x01,\n 'BREAK_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x07,\n 'EXCHANGE_P_EXTENT': 0x02,\n 'EXCHANGE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x03,\n 'INSTRUCT_COMPONENT_DEVICE': 0x04,\n 'REMOVE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x05,\n 'SET_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x06, }\n\n\"\"\"\n------------------------------------------------------------------------------\nService Actions Dictionaries for the A3 opcode\n------------------------------------------------------------------------------\n\"\"\"\n\nservice_actions = {'REPORT_DEVICE_IDENTIFIER': 0x05,\n 'REPORT_ALIASES': 0x0b,\n 'REPORT_PRIORITY': 0x0e,\n 'REPORT_SUPPORTED_OPERATION_CODES': 0x0c,\n 'REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS': 0x0d,\n 'REPORT_TARGET_PORT_GROUPS': 0x0a,\n 'REPORT_TIMESTAMP': 0x0f,\n 'REPORT_IDENTIFYING_INFORMATION': 0x05,\n 'REQUEST_DATA_TRANSFER_ELEMENT_INQUIRY': 0x06,\n 'CHANGE_ALIASES': 0x0b,\n 'SET_DEVICE_IDENTIFIER': 0x06,\n 'SET_PRIORITY': 0x0e,\n 'SET_TARGET_PORT_GROUPS': 0x0a,\n 'SET_TIMESTAMP': 0x0f,\n 'SET_IDENTIFYING_INFORMATION': 0x06,\n 'ORWRITE_32': 0x000e,\n 'READ_32': 0x0009,\n 'VERIFY_32': 0x000a,\n 'WRITE_32': 0x000b,\n 'WRITE_AND_VERIFY_32': 0x000c,\n 'WRITE_SAME_32': 0x000d,\n 'XDREAD_32': 0x0003,\n 'XDWRITE_32': 0x0004,\n 'XDWRITEREAD_32': 0x0007,\n 'XPWRITE_32': 0x0006,\n 'GET_LBA_STATUS': 0x12,\n 'READ_CAPACITY_16': 0x10,\n 'REPORT_REFERRALS': 0x13,\n 'OPEN_IMPORTEXPORT_ELEMENT': 0x00,\n 'CLOSE_IMPORTEXPORT_ELEMENT': 0x01, }\n\n\"\"\"\n------------------------------------------------------------------------------\nopcode 
Dictionaries\n------------------------------------------------------------------------------\n\"\"\"\n\nspc_opcodes = {'SPC_OPCODE_A4': OpCode('SPC_OPCODE_A4', 0xa4, service_actions),\n 'SPC_OPCODE_A3': OpCode('SPC_OPCODE_A3', 0xa3, service_actions),\n 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),\n 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),\n 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),\n 'INQUIRY': OpCode('INQUIRY', 0x12, {}),\n 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),\n 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),\n 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),\n 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),\n 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),\n 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),\n 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),\n 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),\n 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),\n 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),\n 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),\n 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),\n 'READ_MEDIA_SERIAL_NUMBER': OpCode('READ_MEDIA_SERIAL_NUMBER', 0xab,\n {'READ_MEDIA_SERIAL_NUMBER': 0x01, }),\n 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}),\n 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),\n 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),\n 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),\n 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),\n 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),\n 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),\n 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }\n\nsbc_opcodes = {'SBC_OPCODE_7F': OpCode('SBC_OPCODE_7F', 0x7f, service_actions),\n 'SBC_OPCODE_A4': OpCode('SBC_OPCODE_A4', 0xa4, service_actions),\n 'SBC_OPCODE_A3': OpCode('SBC_OPCODE_A3', 0xa3, service_actions),\n 
'SBC_OPCODE_9E': OpCode('SBC_OPCODE_9E', 0x9e, service_actions),\n 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),\n 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),\n 'COMPARE_AND_WRITE': OpCode('COMPARE_AND_WRITE', 0x89, {}),\n 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),\n 'FORMAT_UNIT': OpCode('FORMAT_UNIT', 0x04, {}),\n 'INQUIRY': OpCode('INQUIRY', 0x12, {}),\n 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),\n 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),\n 'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in),\n 'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out),\n 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),\n 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),\n 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),\n 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),\n 'ORWRITE_16': OpCode('ORWRITE_16', 0x8b, {}),\n 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),\n 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),\n 'PRE_FETCH_10': OpCode('PRE_FETCH_10', 0x34, {}),\n 'PRE_FETCH_16': OpCode('PRE_FETCH_16', 0x90, {}),\n 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),\n 'READ_6': OpCode('READ_6', 0x08, {}),\n 'READ_10': OpCode('READ_10', 0x28, {}),\n 'READ_12': OpCode('READ_12', 0xa8, {}),\n 'READ_16': OpCode('READ_16', 0x88, {}),\n 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),\n 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),\n 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),\n 'READ_CAPACITY_10': OpCode('READ_CAPACITY_10', 0x25, {}),\n 'READ_DEFECT_DATA_10': OpCode('READ_DEFECT_DATA_10', 0x37, {}),\n 'READ_DEFECT_DATA_12': OpCode('READ_DEFECT_DATA_12', 0xb7, {}),\n 'READ_LONG_10': OpCode('READ_LONG_10', 0x3e, {}),\n 'READ_LONG_16': OpCode('READ_LONG_16', 0x9e, {'READ_LONG_16': 0x11, }),\n 'REASSIGN_BLOCKS': OpCode('REASSIGN_BLOCKS', 0x07, {}),\n 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 
0x84, {}),\n 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),\n 'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}),\n 'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OT', 0xbb, {}),\n 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),\n 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),\n 'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}),\n 'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}),\n 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),\n 'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}),\n 'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}),\n 'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}),\n 'SYNCHRONIZE_CACHE_10': OpCode('SYNCHRONIZE_CACHE_10', 0x35, {}),\n 'SYNCHRONIZE_CACHE_16': OpCode('SYNCHRONIZE_CACHE_16', 0x91, {}),\n 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),\n 'UNMAP': OpCode('UNMAP', 0x42, {}),\n 'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}),\n 'VERIFY_12': OpCode('VERIFY_12', 0xaf, {}),\n 'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}),\n 'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}),\n 'VOLUME_SET_OUT': OpCode('VOLUME_SET_IN', 0xbf, {}),\n 'WRITE_6': OpCode('WRITE_6', 0xa0, {}),\n 'WRITE_10': OpCode('WRITE_10', 0x2a, {}),\n 'WRITE_12': OpCode('WRITE_12', 0xaa, {}),\n 'WRITE_16': OpCode('WRITE_16', 0x8a, {}),\n 'WRITE_AND_VERIFY_10': OpCode('WRITE_AND_VERIFY_10', 0x2e, {}),\n 'WRITE_AND_VERIFY_12': OpCode('WRITE_AND_VERIFY_12', 0xae, {}),\n 'WRITE_AND_VERIFY_16': OpCode('WRITE_AND_VERIFY_16', 0x8e, {}),\n 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),\n 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}),\n 'WRITE_LONG_10': OpCode('WRITE_LONG_10', 0x3f, {}),\n 'WRITE_LONG_16': OpCode('WRITE_LONG_16', 0x9f, {'WRITE_LONG_16': 0x11, }),\n 'WRITE_SAME_10': OpCode('WRITE_SAME_10', 0x41, {}),\n 'WRITE_SAME_16': OpCode('WRITE_SAME_16', 0x93, {}),\n 'XDREAD_10': OpCode('XDREAD_10', 0x52, {}),\n 'XDWRITE_10': OpCode('XDWRITE_10', 0x50, {}),\n 'XDWRITEREAD_10': 
OpCode('XDWRITEREAD_10', 0x53, {}),\n 'XPWRITE_10': OpCode('XPWRITE_10', 0x51, {}), }\n\nssc_opcodes = {'SSC_OPCODE_A4': OpCode('SSC_OPCODE_A4', 0xa4, service_actions),\n 'SSC_OPCODE_A3': OpCode('SSC_OPCODE_A3', 0xa3, service_actions),\n 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),\n 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),\n 'ERASE_16': OpCode('ERASE_16', 0x93, {}),\n 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}),\n 'FORMAT_MEDIUM': OpCode('FORMAT_MEDIUM', 0x04, {}),\n 'INQUIRY': OpCode('INQUIRY', 0x12, {}),\n 'LOAD_UNLOAD': OpCode('LOAD_UNLOAD', 0x1b, {}),\n 'LOCATE_16': OpCode('LOCATE_16', 0x92, {}),\n 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),\n 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),\n 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),\n 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),\n 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),\n 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}),\n 'MOVE_MEDIUM_ATTACHED': OpCode('MOVE_MEDIUM_ATTACHED', 0xa7, {}),\n 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),\n 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),\n 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),\n 'READ_6': OpCode('READ_6', 0x08, {}),\n 'READ_16': OpCode('READ_16', 0x88, {}),\n 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),\n 'READ_BLOCK_LIMITS': OpCode('READ_BLOCK_LIMITS', 0x05, {}),\n 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),\n 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),\n 'READ_ELEMENT_STATUS_ATTACHED': OpCode('READ_ELEMENT_STATUS_ATTACHED', 0xb4, {}),\n 'READ_POSITION': OpCode('READ_POSITION', 0x34, {}),\n 'READ_REVERSE_6': OpCode('READ_REVERSE_6', 0x0f, {}),\n 'READ_REVERSE_16': OpCode('READ_REVERSE_16', 0x81, {}),\n 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}),\n 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),\n 'RECOVER_BUFFERED_DATA': 
OpCode('RECOVER_BUFFERED_DATA', 0x14, {}),\n 'REPORT_ALIAS': OpCode('REPORT_ALIAS', 0xa3, {'REPORT_ALIAS': 0x0b, }),\n 'REPORT_DENSITY_SUPPORT': OpCode('REPORT_DENSITY_SUPPORT', 0x44, {}),\n 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),\n 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),\n 'REWIND': OpCode('REWIND', 0x01, {}),\n 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),\n 'SET_CAPACITY': OpCode('SET_CAPACITY', 0x0b, {}),\n 'SPACE_6': OpCode('SPACE_6', 0x11, {}),\n 'SPACE_16': OpCode('SPACE_16', 0x91, {}),\n 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),\n 'VERIFY_6': OpCode('VERIFY_6', 0x13, {}),\n 'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}),\n 'WRITE_6': OpCode('WRITE_6', 0x0a, {}),\n 'WRITE_16': OpCode('WRITE_16', 0x8a, {}),\n 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),\n 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}),\n 'WRITE_FILEMARKS_6': OpCode('WRITE_FILEMARKS_6', 0x10, {}),\n 'WRITE_FILEMARKS_16': OpCode('WRITE_FILEMARKS_16', 0x80, {}), }\n\nsmc_opcodes = {'SMC_OPCODE_A4': OpCode('SMC_OPCODE_A4', 0xa4, service_actions),\n 'SMC_OPCODE_A3': OpCode('SMC_OPCODE_A3', 0xa3, service_actions),\n 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}),\n 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}),\n 'EXCHANGE_MEDIUM': OpCode('EXCHANGE_MEDIUM', 0xa6, {}),\n 'INITIALIZE_ELEMENT_STATUS': OpCode('INITIALIZE_ELEMENT_STATUS', 0x07, {}),\n 'INITIALIZE_ELEMENT_STATUS_WITH_RANGE': OpCode('INITIALIZE_ELEMENT_STATUS_WITH_RANGE', 0x37, {}),\n 'INQUIRY': OpCode('INQUIRY', 0x12, {}),\n 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}),\n 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}),\n 'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in),\n 'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out),\n 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}),\n 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),\n 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}),\n 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 
0x5a, {}),\n 'MOVE_MEDIUM': OpCode('MOVE_MEDIUM', 0xa5, {}),\n 'OPEN_CLOSE_IMPORT_EXPORT_ELEMENT': OpCode('SMC_OPCODE_1B', 0x1b, service_actions),\n 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}),\n 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}),\n 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),\n 'POSITION_TO_ELEMENT': OpCode('POSITION_TO_ELEMENT', 0x2b, {}),\n 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}),\n 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),\n 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),\n 'READ_ELEMENT_STATUS': OpCode('READ_ELEMENT_STATUS', 0xb8, {}),\n 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}),\n 'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}),\n 'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OUT', 0xbb, {}),\n 'RELEASE_6': OpCode('RELEASE_6', 0x17, {}),\n 'RELEASE_10': OpCode('RELEASE_10', 0x57, {}),\n 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),\n 'REPORT_VOLUME_TYPES_SUPPORTED': OpCode('REPORT_VOLUME_TYPES_SUPPORTED', 0x44, {}),\n 'REQUEST_VOLUME_ELEMENT_ADDRESS': OpCode('REQUEST_VOLUME_ELEMENT_ADDRESS', 0xb5, {}),\n 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),\n 'RESERVE_6': OpCode('RESERVE_6', 0x16, {}),\n 'RESERVE_10': OpCode('RESERVE_10', 0x56, {}),\n 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}),\n 'SEND_VOLUME_TAG': OpCode('SEND_VOLUME_TAG', 0xb6, {}),\n 'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}),\n 'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}),\n 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),\n 'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}),\n 'VOLUME_SET_OUT': OpCode('VOLUME_SET_OUT', 0xbf, {}),\n 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}),\n 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }\n\nmmc_opcodes = {'BLANK': OpCode('BLANK', 0xa1, {}),\n 'CLOSE_TRACK_SESSION': OpCode('CLOSE_TRACK_SESSION', 0x5b, {}),\n 'FORMAT_UNIT': OpCode('FORMAT_UNIT', 
0x04, {}),\n 'GET_CONFIGURATION': OpCode('GET_CONFIGURATION', 0x46, {}),\n 'GET_EVENT_STATUS_NOTIFICATION': OpCode('GET_EVENT_STATUS_NOTIFICATION', 0x4a, {}),\n 'GET_PERFORMANCE': OpCode('GET_PERFORMANCE', 0xac, {}),\n 'INQUIRY': OpCode('INQUIRY', 0x12, {}),\n 'LOAD_UNLOAD_MEDIUM': OpCode('LOAD_UNLOAD_MEDIUM', 0xa6, {}),\n 'MECHANISM_STATUS': OpCode('MECHANISM_STATUS', 0xbd, {}),\n 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}),\n 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0xa5, {}),\n 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}),\n 'READ_10': OpCode('READ_10', 0x28, {}),\n 'READ_12': OpCode('READ_12', 0xa8, {}),\n 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}),\n 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}),\n 'READ_BUFFER_CAPACITY': OpCode('READ_BUFFER_CAPACITY', 0x5c, {}),\n 'READ_CAPACITY': OpCode('READ_CAPACITY', 0x25, {}),\n 'READ_CD': OpCode('READ_CD', 0xbe, {}),\n 'READ_CD_MSF': OpCode('READ_CD_MSF', 0xb9, {}),\n 'READ_DISC_INFORMATION': OpCode('READ_DISC_INFORMATION', 0x51, {}),\n 'READ_DISC_STRUCTURE': OpCode('READ_DISC_STRUCTURE', 0xad, {}),\n 'READ_FORMAT_CAPACITIES': OpCode('READ_FORMAT_CAPACITIES', 0x23, {}),\n 'READ_TOC_PMA_ATIP': OpCode('READ_TOC_PMA_ATIP', 0x43, {}),\n 'READ_TRACK_INFORMATION': OpCode('READ_TRACK_INFORMATION', 0x52, {}),\n 'REPAIR_TRACK': OpCode('REPAIR_TRACK', 0x58, {}),\n 'REPORT_KEY': OpCode('REPORT_KEY', 0xa4, {}),\n 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}),\n 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}),\n 'RESERVE_TRACK': OpCode('RESERVE_TRACK', 0x53, {}),\n 'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}),\n 'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}),\n 'SEEK_10': OpCode('SEEK_10', 0x2b, {}),\n 'SEND_CUE_SHEET': OpCode('SEND_CUE_SHEET', 0x5d, {}),\n 'SEND_DISC_STRUCTURE': OpCode('SEND_DISC_STRUCTURE', 0xbf, {}),\n 'SEND_KEY': OpCode('SEND_KEY', 0xa3, {}),\n 'SEND_OPC_INFORMATION': OpCode('SEND_OPC_INFORMATION', 0x54, 
{}),\n 'SET_CD_SPEED': OpCode('SET_CD_SPEED', 0xbb, {}),\n 'SET_READ_AHEAD': OpCode('SET_READ_AHEAD', 0xa7, {}),\n 'SET_STREAMING': OpCode('SET_STREAMING', 0xb6, {}),\n 'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}),\n 'SYNCHRONIZE_CACHE': OpCode('SYNCHRONIZE_CACHE', 0x35, {}),\n 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}),\n 'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}),\n 'WRITE_10': OpCode('WRITE_10', 0x2a, {}),\n 'WRITE_12': OpCode('WRITE_12', 0xaa, {}),\n 'WRITE_AND_VERIFY_10': OpCode('WRITE_AND_VERIFY_10', 0x2e, {}),\n 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), }\n\n\"\"\"\n------------------------------------------------------------------------------\nscsi status Dictionaries\n------------------------------------------------------------------------------\n\"\"\"\n\nscsi_status = {'GOOD': 0x00,\n 'CHECK_CONDITION': 0x02,\n 'CONDITIONS_MET': 0x04,\n 'BUSY': 0x08,\n 'RESERVATION_CONFLICT': 0x18,\n 'TASK_SET_FULL': 0x28,\n 'ACA_ACTIVE': 0x30,\n 'TASK_ABORTED': 0x40,\n 'SGIO_ERROR': 0xff, }\n\n\"\"\"\n------------------------------------------------------------------------------\nopen/close\n------------------------------------------------------------------------------\n\"\"\"\n\naction_codes = {''}\n\n\"\"\"\n------------------------------------------------------------------------------\nInstantiate the Enum Objects\n------------------------------------------------------------------------------\n\"\"\"\n\nSCSI_STATUS = Enum(scsi_status)\n\nspc = Enum(spc_opcodes)\nsbc = Enum(sbc_opcodes)\nssc = Enum(ssc_opcodes)\nsmc = Enum(smc_opcodes)\nmmc = Enum(mmc_opcodes)\n\n\"\"\"\n------------------------------------------------------------------------------\nObsolete Dictionaries and Enums\n------------------------------------------------------------------------------\n\nNOTE: the dicts and Enums in this section and will be removed in a future release\n\n\"\"\"\n\nopcodes = {'INQUIRY': 0x12,\n 'MODE_SENSE_6': 0x1a,\n 'MOVE_MEDIUM': 0xa5,\n 
'READ_10': 0x28,\n 'READ_12': 0xa8,\n 'READ_16': 0x88,\n 'READ_CAPACITY_10': 0x25,\n 'READ_ELEMENT_STATUS': 0xb8,\n 'SERVICE_ACTION_IN': 0x9e,\n 'TEST_UNIT_READY': 0x00,\n 'WRITE_10': 0x2a,\n 'WRITE_12': 0xaa,\n 'WRITE_16': 0x8a,\n 'WRITE_SAME_10': 0x41,\n 'WRITE_SAME_16': 0x93,\n }\n\nOPCODE = Enum(opcodes)\n\nservice_action_ins = {'READ_CAPACITY_16': 0x10,\n 'GET_LBA_STATUS': 0x12, }\n\nSERVICE_ACTION_IN = Enum(service_action_ins)\n\n\"\"\"\n------------------------------------------------------------------------------\n\"\"\"\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":475120,"cells":{"repo_name":{"kind":"string","value":"forrestv/myhdl"},"path":{"kind":"string","value":"myhdl/test/conversion/toVerilog/test_inc.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"5163"},"content":{"kind":"string","value":"import os\npath = os.path\nimport unittest\nfrom unittest import TestCase\nimport random\nfrom random import randrange\nrandom.seed(2)\n\nfrom myhdl import *\n\nfrom util import setupCosimulation\n\nACTIVE_LOW, INACTIVE_HIGH = 0, 1\n\ndef incRef(count, enable, clock, reset, n):\n \"\"\" Incrementer with enable.\n \n count -- output\n enable -- control input, increment when 1\n clock -- clock input\n reset -- asynchronous reset input\n n -- counter max value\n \"\"\"\n @instance\n def logic():\n while 1:\n yield clock.posedge, reset.negedge\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n return logic\n \ndef inc(count, enable, clock, reset, n):\n \n \"\"\" Incrementer with enable.\n \n count -- output\n enable -- control input, increment when 1\n clock -- clock input\n reset -- asynchronous reset input\n n -- counter max value\n \n \"\"\"\n \n @always(clock.posedge, reset.negedge)\n def incProcess():\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n \n return incProcess\n\ndef inc2(count, enable, clock, reset, n):\n \n 
@always(clock.posedge, reset.negedge)\n def incProcess():\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n if count == n-1:\n count.next = 0\n else:\n count.next = count + 1\n return incProcess\n \n\ndef incTask(count, enable, clock, reset, n):\n \n def incTaskFunc(cnt, enable, reset, n):\n if enable:\n cnt[:] = (cnt + 1) % n\n\n @instance\n def incTaskGen():\n cnt = intbv(0)[8:]\n while 1:\n yield clock.posedge, reset.negedge\n if reset == ACTIVE_LOW:\n cnt[:] = 0\n count.next = 0\n else:\n # print count\n incTaskFunc(cnt, enable, reset, n)\n count.next = cnt\n\n return incTaskGen\n\n\ndef incTaskFreeVar(count, enable, clock, reset, n):\n \n def incTaskFunc():\n if enable:\n count.next = (count + 1) % n\n\n @always(clock.posedge, reset.negedge)\n def incTaskGen():\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n # print count\n incTaskFunc()\n\n return incTaskGen\n\n \ndef inc_v(name, count, enable, clock, reset):\n return setupCosimulation(**locals())\n\nclass TestInc(TestCase):\n\n def clockGen(self, clock):\n while 1:\n yield delay(10)\n clock.next = not clock\n \n def stimulus(self, enable, clock, reset):\n reset.next = INACTIVE_HIGH\n yield clock.negedge\n reset.next = ACTIVE_LOW\n yield clock.negedge\n reset.next = INACTIVE_HIGH\n for i in range(1000):\n enable.next = 1\n yield clock.negedge\n for i in range(1000):\n enable.next = min(1, randrange(5))\n yield clock.negedge\n raise StopSimulation\n\n def check(self, count, count_v, enable, clock, reset, n):\n expect = 0\n yield reset.posedge\n self.assertEqual(count, expect)\n self.assertEqual(count, count_v)\n while 1:\n yield clock.posedge\n if enable:\n expect = (expect + 1) % n\n yield delay(1)\n # print \"%d count %s expect %s count_v %s\" % (now(), count, expect, count_v)\n self.assertEqual(count, expect)\n self.assertEqual(count, count_v)\n \n def bench(self, inc):\n\n m = 8\n n = 2 ** m\n \n count = Signal(intbv(0)[m:])\n count_v = Signal(intbv(0)[m:])\n enable = Signal(bool(0))\n 
clock, reset = [Signal(bool()) for i in range(2)]\n\n inc_inst_ref = incRef(count, enable, clock, reset, n=n)\n inc_inst = toVerilog(inc, count, enable, clock, reset, n=n)\n # inc_inst = inc(count, enable, clock, reset, n=n)\n inc_inst_v = inc_v(inc.func_name, count_v, enable, clock, reset)\n clk_1 = self.clockGen(clock)\n st_1 = self.stimulus(enable, clock, reset)\n ch_1 = self.check(count, count_v, enable, clock, reset, n=n)\n\n sim = Simulation(inc_inst_ref, inc_inst_v, clk_1, st_1, ch_1)\n return sim\n\n def testIncRef(self):\n \"\"\" Check increment operation \"\"\"\n sim = self.bench(incRef)\n sim.run(quiet=1)\n \n def testInc(self):\n \"\"\" Check increment operation \"\"\"\n sim = self.bench(inc)\n sim.run(quiet=1)\n \n def testInc2(self):\n \"\"\" Check increment operation \"\"\"\n sim = self.bench(inc2)\n sim.run(quiet=1)\n \n def testIncTask(self):\n sim = self.bench(incTask)\n sim.run(quiet=1)\n \n def testIncTaskFreeVar(self):\n sim = self.bench(incTaskFreeVar)\n sim.run(quiet=1)\n\nif __name__ == '__main__':\n unittest.main()\n\n\n \n \n\n \n\n \n \n\n\n \n\n \n\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":475121,"cells":{"repo_name":{"kind":"string","value":"grap/OpenUpgrade"},"path":{"kind":"string","value":"setup/package.py"},"copies":{"kind":"string","value":"180"},"size":{"kind":"string","value":"22070"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-Today OpenERP SA ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the 
implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport optparse\nimport os\nimport pexpect\nimport shutil\nimport signal\nimport subprocess\nimport tempfile\nimport time\nimport xmlrpclib\nfrom contextlib import contextmanager\nfrom glob import glob\nfrom os.path import abspath, dirname, join\nfrom sys import stdout\nfrom tempfile import NamedTemporaryFile\n\n\n#----------------------------------------------------------\n# Utils\n#----------------------------------------------------------\nexecfile(join(dirname(__file__), '..', 'openerp', 'release.py'))\nversion = version.split('-')[0]\ntimestamp = time.strftime(\"%Y%m%d\", time.gmtime())\nGPGPASSPHRASE = os.getenv('GPGPASSPHRASE')\nGPGID = os.getenv('GPGID')\nPUBLISH_DIRS = {\n 'debian': 'deb',\n 'redhat': 'rpm',\n 'tarball': 'src',\n 'windows': 'exe',\n}\nADDONS_NOT_TO_PUBLISH = [\n 'web_analytics'\n]\n\ndef mkdir(d):\n if not os.path.isdir(d):\n os.makedirs(d)\n\ndef system(l, chdir=None):\n print l\n if chdir:\n cwd = os.getcwd()\n os.chdir(chdir)\n if isinstance(l, list):\n rc = os.spawnvp(os.P_WAIT, l[0], l)\n elif isinstance(l, str):\n tmp = ['sh', '-c', l]\n rc = os.spawnvp(os.P_WAIT, tmp[0], tmp)\n if chdir:\n os.chdir(cwd)\n return rc\n\ndef _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'):\n time.sleep(5)\n modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(\n dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')]\n )\n if modules and len(modules) > 1:\n time.sleep(1)\n toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(\n dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'to 
install')]\n )\n if toinstallmodules:\n print(\"Package test: FAILED. Not able to install dependencies of base.\")\n raise Exception(\"Installation of package failed\")\n else:\n print(\"Package test: successfuly installed %s modules\" % len(modules))\n else:\n print(\"Package test: FAILED. Not able to install base.\")\n raise Exception(\"Installation of package failed\")\n\ndef publish(o, type, extensions):\n def _publish(o, release):\n arch = ''\n filename = release.split(os.path.sep)[-1]\n\n release_dir = PUBLISH_DIRS[type]\n release_path = join(o.pub, release_dir, filename)\n\n system('mkdir -p %s' % join(o.pub, release_dir))\n shutil.move(join(o.build_dir, release), release_path)\n\n # Latest/symlink handler\n release_abspath = abspath(release_path)\n latest_abspath = release_abspath.replace(timestamp, 'latest')\n\n if os.path.islink(latest_abspath):\n os.unlink(latest_abspath)\n\n os.symlink(release_abspath, latest_abspath)\n\n return release_path\n\n published = []\n for extension in extensions:\n release = glob(\"%s/odoo_*.%s\" % (o.build_dir, extension))[0]\n published.append(_publish(o, release))\n return published\n\nclass OdooDocker(object):\n def __init__(self):\n self.log_file = NamedTemporaryFile(mode='w+b', prefix=\"bash\", suffix=\".txt\", delete=False)\n self.port = 8069 # TODO sle: reliable way to get a free port?\n self.prompt_re = '[root@nightly-tests] # '\n self.timeout = 600\n\n def system(self, command):\n self.docker.sendline(command)\n self.docker.expect_exact(self.prompt_re)\n\n def start(self, docker_image, build_dir, pub_dir):\n self.build_dir = build_dir\n self.pub_dir = pub_dir\n\n self.docker = pexpect.spawn(\n 'docker run -v %s:/opt/release -p 127.0.0.1:%s:8069'\n ' -t -i %s /bin/bash --noediting' % (self.build_dir, self.port, docker_image),\n timeout=self.timeout,\n searchwindowsize=len(self.prompt_re) + 1,\n )\n time.sleep(2) # let the bash start\n self.docker.logfile_read = self.log_file\n self.id = 
subprocess.check_output('docker ps -l -q', shell=True)\n\n def end(self):\n try:\n _rpc_count_modules(port=str(self.port))\n except Exception, e:\n print('Exception during docker execution: %s:' % str(e))\n print('Error during docker execution: printing the bash output:')\n with open(self.log_file.name) as f:\n print '\\n'.join(f.readlines())\n raise\n finally:\n self.docker.close()\n system('docker rm -f %s' % self.id)\n self.log_file.close()\n os.remove(self.log_file.name)\n\n@contextmanager\ndef docker(docker_image, build_dir, pub_dir):\n _docker = OdooDocker()\n try:\n _docker.start(docker_image, build_dir, pub_dir)\n try:\n yield _docker\n except Exception, e:\n raise\n finally:\n _docker.end()\n\nclass KVM(object):\n def __init__(self, o, image, ssh_key='', login='openerp'):\n self.o = o\n self.image = image\n self.ssh_key = ssh_key\n self.login = login\n\n def timeout(self,signum,frame):\n print \"vm timeout kill\",self.pid\n os.kill(self.pid,15)\n\n def start(self):\n l=\"kvm -net nic,model=rtl8139 -net user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432 -drive\".split(\" \")\n #l.append('file=%s,if=virtio,index=0,boot=on,snapshot=on'%self.image)\n l.append('file=%s,snapshot=on'%self.image)\n #l.extend(['-vnc','127.0.0.1:1'])\n l.append('-nographic')\n print \" \".join(l)\n self.pid=os.spawnvp(os.P_NOWAIT, l[0], l)\n time.sleep(10)\n signal.alarm(2400)\n signal.signal(signal.SIGALRM, self.timeout)\n try:\n self.run()\n finally:\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n os.kill(self.pid,15)\n time.sleep(10)\n\n def ssh(self,cmd):\n l=['ssh','-o','UserKnownHostsFile=/dev/null','-o','StrictHostKeyChecking=no','-p','10022','-i',self.ssh_key,'%s@127.0.0.1'%self.login,cmd]\n system(l)\n\n def rsync(self,args,options='--delete --exclude .bzrignore'):\n cmd ='rsync -rt -e \"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s\" %s %s' % (self.ssh_key, options, args)\n 
system(cmd)\n\n def run(self):\n pass\n\nclass KVMWinBuildExe(KVM):\n def run(self):\n with open(join(self.o.build_dir, 'setup/win32/Makefile.version'), 'w') as f:\n f.write(\"VERSION=%s\\n\" % self.o.version_full)\n with open(join(self.o.build_dir, 'setup/win32/Makefile.python'), 'w') as f:\n f.write(\"PYTHON_VERSION=%s\\n\" % self.o.vm_winxp_python_version.replace('.', ''))\n\n self.ssh(\"mkdir -p build\")\n self.rsync('%s/ %s@127.0.0.1:build/server/' % (self.o.build_dir, self.login))\n self.ssh(\"cd build/server/setup/win32;time make allinone;\")\n self.rsync('%s@127.0.0.1:build/server/setup/win32/release/ %s/' % (self.login, self.o.build_dir), '')\n print \"KVMWinBuildExe.run(): done\"\n\nclass KVMWinTestExe(KVM):\n def run(self):\n # Cannot use o.version_full when the version is not correctly parsed\n # (for instance, containing *rc* or *dev*)\n setuppath = glob(\"%s/openerp-server-setup-*.exe\" % self.o.build_dir)[0]\n setupfile = setuppath.split('/')[-1]\n setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0]\n\n self.rsync('\"%s\" %s@127.0.0.1:' % (setuppath, self.login))\n self.ssh(\"TEMP=/tmp ./%s /S\" % setupfile)\n self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/\"Program Files\"/\"Odoo %s\"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion)\n self.ssh('/cygdrive/c/\"Program Files\"/\"Odoo %s\"/server/openerp-server.exe -d mycompany -i base --stop-after-init' % setupversion)\n self.ssh('net start odoo-server-8.0')\n _rpc_count_modules(port=18069)\n\n#----------------------------------------------------------\n# Stage: building\n#----------------------------------------------------------\ndef _prepare_build_dir(o, win32=False):\n cmd = ['rsync', '-a', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo']\n if not win32:\n cmd += ['--exclude', 'setup/win32']\n system(cmd + ['%s/' % o.odoo_dir, o.build_dir])\n try:\n for addon_path in glob(join(o.build_dir, 'addons/*')):\n if addon_path.split(os.path.sep)[-1] 
not in ADDONS_NOT_TO_PUBLISH:\n shutil.move(addon_path, join(o.build_dir, 'openerp/addons'))\n except shutil.Error:\n # Thrown when the add-on is already in openerp/addons (if _prepare_build_dir\n # has already been called once)\n pass\n\ndef build_tgz(o):\n system(['python2', 'setup.py', 'sdist', '--quiet', '--formats=gztar,zip'], o.build_dir)\n system(['mv', glob('%s/dist/odoo-*.tar.gz' % o.build_dir)[0], '%s/odoo_%s.%s.tar.gz' % (o.build_dir, version, timestamp)])\n system(['mv', glob('%s/dist/odoo-*.zip' % o.build_dir)[0], '%s/odoo_%s.%s.zip' % (o.build_dir, version, timestamp)])\n\ndef build_deb(o):\n # Append timestamp to version for the .dsc to refer the right .tar.gz\n cmd=['sed', '-i', '1s/^.*$/odoo (%s.%s) stable; urgency=low/'%(version,timestamp), 'debian/changelog']\n subprocess.call(cmd, cwd=o.build_dir)\n deb = pexpect.spawn('dpkg-buildpackage -rfakeroot -k%s' % GPGID, cwd=o.build_dir)\n deb.logfile = stdout\n if GPGPASSPHRASE:\n deb.expect_exact('Enter passphrase: ', timeout=1200)\n deb.send(GPGPASSPHRASE + '\\r\\n')\n deb.expect_exact('Enter passphrase: ')\n deb.send(GPGPASSPHRASE + '\\r\\n')\n deb.expect(pexpect.EOF, timeout=1200)\n system(['mv', glob('%s/../odoo_*.deb' % o.build_dir)[0], '%s' % o.build_dir])\n system(['mv', glob('%s/../odoo_*.dsc' % o.build_dir)[0], '%s' % o.build_dir])\n system(['mv', glob('%s/../odoo_*_amd64.changes' % o.build_dir)[0], '%s' % o.build_dir])\n system(['mv', glob('%s/../odoo_*.tar.gz' % o.build_dir)[0], '%s' % o.build_dir])\n\ndef build_rpm(o):\n system(['python2', 'setup.py', '--quiet', 'bdist_rpm'], o.build_dir)\n system(['mv', glob('%s/dist/odoo-*.noarch.rpm' % o.build_dir)[0], '%s/odoo_%s.%s.noarch.rpm' % (o.build_dir, version, timestamp)])\n\ndef build_exe(o):\n KVMWinBuildExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()\n system(['cp', glob('%s/openerp*.exe' % o.build_dir)[0], '%s/odoo_%s.%s.exe' % (o.build_dir, version, 
timestamp)])\n\n#----------------------------------------------------------\n# Stage: testing\n#----------------------------------------------------------\ndef _prepare_testing(o):\n if not o.no_tarball:\n subprocess.call([\"mkdir\", \"docker_src\"], cwd=o.build_dir)\n subprocess.call([\"cp\", \"package.dfsrc\", os.path.join(o.build_dir, \"docker_src\", \"Dockerfile\")],\n cwd=os.path.join(o.odoo_dir, \"setup\"))\n # Use rsync to copy requirements.txt in order to keep original permissions\n subprocess.call([\"rsync\", \"-a\", \"requirements.txt\", os.path.join(o.build_dir, \"docker_src\")],\n cwd=os.path.join(o.odoo_dir))\n subprocess.call([\"docker\", \"build\", \"-t\", \"odoo-%s-src-nightly-tests\" % version, \".\"],\n cwd=os.path.join(o.build_dir, \"docker_src\"))\n if not o.no_debian:\n subprocess.call([\"mkdir\", \"docker_debian\"], cwd=o.build_dir)\n subprocess.call([\"cp\", \"package.dfdebian\", os.path.join(o.build_dir, \"docker_debian\", \"Dockerfile\")],\n cwd=os.path.join(o.odoo_dir, \"setup\"))\n # Use rsync to copy requirements.txt in order to keep original permissions\n subprocess.call([\"rsync\", \"-a\", \"requirements.txt\", os.path.join(o.build_dir, \"docker_debian\")],\n cwd=os.path.join(o.odoo_dir))\n subprocess.call([\"docker\", \"build\", \"-t\", \"odoo-%s-debian-nightly-tests\" % version, \".\"],\n cwd=os.path.join(o.build_dir, \"docker_debian\"))\n if not o.no_rpm:\n subprocess.call([\"mkdir\", \"docker_centos\"], cwd=o.build_dir)\n subprocess.call([\"cp\", \"package.dfcentos\", os.path.join(o.build_dir, \"docker_centos\", \"Dockerfile\")],\n cwd=os.path.join(o.odoo_dir, \"setup\"))\n subprocess.call([\"docker\", \"build\", \"-t\", \"odoo-%s-centos-nightly-tests\" % version, \".\"],\n cwd=os.path.join(o.build_dir, \"docker_centos\"))\n\ndef test_tgz(o):\n with docker('odoo-%s-src-nightly-tests' % version, o.build_dir, o.pub) as wheezy:\n wheezy.release = '*.tar.gz'\n wheezy.system(\"service postgresql start\")\n wheezy.system('pip install 
/opt/release/%s' % wheezy.release)\n wheezy.system(\"useradd --system --no-create-home odoo\")\n wheezy.system('su postgres -s /bin/bash -c \"createuser -s odoo\"')\n wheezy.system('su postgres -s /bin/bash -c \"createdb mycompany\"')\n wheezy.system('mkdir /var/lib/odoo')\n wheezy.system('chown odoo:odoo /var/lib/odoo')\n wheezy.system('su odoo -s /bin/bash -c \"odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany -i base --stop-after-init\"')\n wheezy.system('su odoo -s /bin/bash -c \"odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany &\"')\n\ndef test_deb(o):\n with docker('odoo-%s-debian-nightly-tests' % version, o.build_dir, o.pub) as wheezy:\n wheezy.release = '*.deb'\n wheezy.system(\"service postgresql start\")\n wheezy.system('su postgres -s /bin/bash -c \"createdb mycompany\"')\n wheezy.system('/usr/bin/dpkg -i /opt/release/%s' % wheezy.release)\n wheezy.system('/usr/bin/apt-get install -f -y')\n wheezy.system('su odoo -s /bin/bash -c \"odoo.py -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init\"')\n wheezy.system('su odoo -s /bin/bash -c \"odoo.py -c /etc/odoo/openerp-server.conf -d mycompany &\"')\n\ndef test_rpm(o):\n with docker('odoo-%s-centos-nightly-tests' % version, o.build_dir, o.pub) as centos7:\n centos7.release = '*.noarch.rpm'\n # Start postgresql\n centos7.system('su postgres -c \"/usr/bin/pg_ctl -D /var/lib/postgres/data start\"')\n centos7.system('sleep 5')\n centos7.system('su postgres -c \"createdb mycompany\"')\n # Odoo install\n centos7.system('yum install -d 0 -e 0 /opt/release/%s -y' % centos7.release)\n centos7.system('su odoo -s /bin/bash -c \"openerp-server -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init\"')\n centos7.system('su odoo -s /bin/bash -c \"openerp-server -c /etc/odoo/openerp-server.conf -d mycompany &\"')\n\ndef test_exe(o):\n KVMWinTestExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, 
o.vm_winxp_login).start()\n\n#---------------------------------------------------------\n# Generates Packages, Sources and Release files of debian package\n#---------------------------------------------------------\ndef gen_deb_package(o, published_files):\n # Executes command to produce file_name in path, and moves it to o.pub/deb\n def _gen_file(o, (command, file_name), path):\n cur_tmp_file_path = os.path.join(path, file_name)\n with open(cur_tmp_file_path, 'w') as out:\n subprocess.call(command, stdout=out, cwd=path)\n system(['cp', cur_tmp_file_path, os.path.join(o.pub, 'deb', file_name)])\n\n # Copy files to a temp directory (required because the working directory must contain only the\n # files of the last release)\n temp_path = tempfile.mkdtemp(suffix='debPackages')\n for pub_file_path in published_files:\n system(['cp', pub_file_path, temp_path])\n\n commands = [\n (['dpkg-scanpackages', '.'], \"Packages\"), # Generate Packages file\n (['dpkg-scansources', '.'], \"Sources\"), # Generate Sources file\n (['apt-ftparchive', 'release', '.'], \"Release\") # Generate Release file\n ]\n # Generate files\n for command in commands:\n _gen_file(o, command, temp_path)\n # Remove temp directory\n shutil.rmtree(temp_path)\n\n # Generate Release.gpg (= signed Release)\n # Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature)\n subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(o.pub, 'deb'))\n\n#---------------------------------------------------------\n# Generates an RPM repo\n#---------------------------------------------------------\ndef gen_rpm_repo(o, file_name):\n # Sign the RPM\n rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(o.pub, 'rpm'))\n rpmsign.expect_exact('Enter pass phrase: ')\n rpmsign.send(GPGPASSPHRASE + '\\r\\n')\n rpmsign.expect(pexpect.EOF)\n\n # 
Removes the old repodata\n subprocess.call(['rm', '-rf', os.path.join(o.pub, 'rpm', 'repodata')])\n\n # Copy files to a temp directory (required because the working directory must contain only the\n # files of the last release)\n temp_path = tempfile.mkdtemp(suffix='rpmPackages')\n subprocess.call(['cp', file_name, temp_path])\n\n subprocess.call(['createrepo', temp_path]) # creates a repodata folder in temp_path\n subprocess.call(['cp', '-r', os.path.join(temp_path, \"repodata\"), os.path.join(o.pub, 'rpm')])\n\n # Remove temp directory\n shutil.rmtree(temp_path)\n\n#----------------------------------------------------------\n# Options and Main\n#----------------------------------------------------------\ndef options():\n op = optparse.OptionParser()\n root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n build_dir = \"%s-%s\" % (root, timestamp)\n\n op.add_option(\"-b\", \"--build-dir\", default=build_dir, help=\"build directory (%default)\", metavar=\"DIR\")\n op.add_option(\"-p\", \"--pub\", default=None, help=\"pub directory (%default)\", metavar=\"DIR\")\n op.add_option(\"\", \"--no-testing\", action=\"store_true\", help=\"don't test the builded packages\")\n op.add_option(\"-v\", \"--version\", default='8.0', help=\"version (%default)\")\n\n op.add_option(\"\", \"--no-debian\", action=\"store_true\", help=\"don't build the debian package\")\n op.add_option(\"\", \"--no-rpm\", action=\"store_true\", help=\"don't build the rpm package\")\n op.add_option(\"\", \"--no-tarball\", action=\"store_true\", help=\"don't build the tarball\")\n op.add_option(\"\", \"--no-windows\", action=\"store_true\", help=\"don't build the windows package\")\n\n # Windows VM\n op.add_option(\"\", \"--vm-winxp-image\", default='/home/odoo/vm/winxp27/winxp27.vdi', help=\"%default\")\n op.add_option(\"\", \"--vm-winxp-ssh-key\", default='/home/odoo/vm/winxp27/id_rsa', help=\"%default\")\n op.add_option(\"\", \"--vm-winxp-login\", default='Naresh', help=\"Windows login 
(%default)\")\n op.add_option(\"\", \"--vm-winxp-python-version\", default='2.7', help=\"Windows Python version installed in the VM (default: %default)\")\n\n (o, args) = op.parse_args()\n # derive other options\n o.odoo_dir = root\n o.pkg = join(o.build_dir, 'pkg')\n o.version_full = '%s-%s' % (o.version, timestamp)\n o.work = join(o.build_dir, 'openerp-%s' % o.version_full)\n o.work_addons = join(o.work, 'openerp', 'addons')\n\n return o\n\ndef main():\n o = options()\n _prepare_build_dir(o)\n if not o.no_testing:\n _prepare_testing(o)\n try:\n if not o.no_tarball:\n build_tgz(o)\n try:\n if not o.no_testing:\n test_tgz(o)\n published_files = publish(o, 'tarball', ['tar.gz', 'zip'])\n except Exception, e:\n print(\"Won't publish the tgz release.\\n Exception: %s\" % str(e))\n if not o.no_debian:\n build_deb(o)\n try:\n if not o.no_testing:\n test_deb(o)\n published_files = publish(o, 'debian', ['deb', 'dsc', 'changes', 'tar.gz'])\n gen_deb_package(o, published_files)\n except Exception, e:\n print(\"Won't publish the deb release.\\n Exception: %s\" % str(e))\n if not o.no_rpm:\n build_rpm(o)\n try:\n if not o.no_testing:\n test_rpm(o)\n published_files = publish(o, 'redhat', ['noarch.rpm'])\n gen_rpm_repo(o, published_files[0])\n except Exception, e:\n print(\"Won't publish the rpm release.\\n Exception: %s\" % str(e))\n if not o.no_windows:\n _prepare_build_dir(o, win32=True)\n build_exe(o)\n try:\n if not o.no_testing:\n test_exe(o)\n published_files = publish(o, 'windows', ['exe'])\n except Exception, e:\n print(\"Won't publish the exe release.\\n Exception: %s\" % str(e))\n except:\n pass\n finally:\n shutil.rmtree(o.build_dir)\n print('Build dir %s removed' % o.build_dir)\n\n if not o.no_testing:\n system(\"docker rm -f `docker ps -a | awk '{print $1 }'` 2>>/dev/null\")\n print('Remaining dockers removed')\n\n\nif __name__ == '__main__':\n 
main()\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475122,"cells":{"repo_name":{"kind":"string","value":"miptliot/edx-platform"},"path":{"kind":"string","value":"openedx/core/djangoapps/ccxcon/tasks.py"},"copies":{"kind":"string","value":"19"},"size":{"kind":"string","value":"1672"},"content":{"kind":"string","value":"\"\"\"\nThis file contains celery tasks for ccxcon\n\"\"\"\n\nfrom celery.task import task # pylint: disable=no-name-in-module, import-error\nfrom celery.utils.log import get_task_logger # pylint: disable=no-name-in-module, import-error\nfrom opaque_keys.edx.keys import CourseKey\nfrom requests.exceptions import ConnectionError, HTTPError, RequestException, TooManyRedirects\n\nfrom openedx.core.djangoapps.ccxcon import api\n\nlog = get_task_logger(__name__)\n\n\n@task()\ndef update_ccxcon(course_id, cur_retry=0):\n \"\"\"\n Pass through function to update course information on CCXCon.\n Takes care of retries in case of some specific exceptions.\n\n Args:\n course_id (str): string representing a course key\n cur_retry (int): integer representing the current task retry\n \"\"\"\n course_key = CourseKey.from_string(course_id)\n try:\n api.course_info_to_ccxcon(course_key)\n log.info('Course update to CCXCon returned no errors. 
Course key: %s', course_id)\n except (ConnectionError, HTTPError, RequestException, TooManyRedirects, api.CCXConnServerError) as exp:\n log.error('Course update to CCXCon failed for course_id %s with error: %s', course_id, exp)\n # in case the maximum amount of retries has not been reached,\n # insert another task delayed exponentially up to 5 retries\n if cur_retry < 5:\n update_ccxcon.apply_async(\n kwargs={'course_id': course_id, 'cur_retry': cur_retry + 1},\n countdown=10 ** cur_retry # number of seconds the task should be delayed\n )\n log.info('Requeued celery task for course key %s ; retry # %s', course_id, cur_retry + 1)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475123,"cells":{"repo_name":{"kind":"string","value":"neharejanjeva/techstitution"},"path":{"kind":"string","value":"venv/lib/python2.7/site-packages/flask/testsuite/reqctx.py"},"copies":{"kind":"string","value":"557"},"size":{"kind":"string","value":"5960"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n flask.testsuite.reqctx\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Tests the request context.\n\n :copyright: (c) 2012 by Armin Ronacher.\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport flask\nimport unittest\ntry:\n from greenlet import greenlet\nexcept ImportError:\n greenlet = None\nfrom flask.testsuite import FlaskTestCase\n\n\nclass RequestContextTestCase(FlaskTestCase):\n\n def test_teardown_on_pop(self):\n buffer = []\n app = flask.Flask(__name__)\n @app.teardown_request\n def end_of_request(exception):\n buffer.append(exception)\n\n ctx = app.test_request_context()\n ctx.push()\n self.assert_equal(buffer, [])\n ctx.pop()\n self.assert_equal(buffer, [None])\n\n def test_proper_test_request_context(self):\n app = flask.Flask(__name__)\n app.config.update(\n SERVER_NAME='localhost.localdomain:5000'\n )\n\n @app.route('/')\n def index():\n return None\n\n @app.route('/', subdomain='foo')\n def sub():\n return None\n\n with 
app.test_request_context('/'):\n self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')\n\n with app.test_request_context('/'):\n self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')\n\n try:\n with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):\n pass\n except Exception as e:\n self.assert_true(isinstance(e, ValueError))\n self.assert_equal(str(e), \"the server name provided \" +\n \"('localhost.localdomain:5000') does not match the \" + \\\n \"server name from the WSGI environment ('localhost')\")\n\n try:\n app.config.update(SERVER_NAME='localhost')\n with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):\n pass\n except ValueError as e:\n raise ValueError(\n \"No ValueError exception should have been raised \\\"%s\\\"\" % e\n )\n\n try:\n app.config.update(SERVER_NAME='localhost:80')\n with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):\n pass\n except ValueError as e:\n raise ValueError(\n \"No ValueError exception should have been raised \\\"%s\\\"\" % e\n )\n\n def test_context_binding(self):\n app = flask.Flask(__name__)\n @app.route('/')\n def index():\n return 'Hello %s!' 
% flask.request.args['name']\n @app.route('/meh')\n def meh():\n return flask.request.url\n\n with app.test_request_context('/?name=World'):\n self.assert_equal(index(), 'Hello World!')\n with app.test_request_context('/meh'):\n self.assert_equal(meh(), 'http://localhost/meh')\n self.assert_true(flask._request_ctx_stack.top is None)\n\n def test_context_test(self):\n app = flask.Flask(__name__)\n self.assert_false(flask.request)\n self.assert_false(flask.has_request_context())\n ctx = app.test_request_context()\n ctx.push()\n try:\n self.assert_true(flask.request)\n self.assert_true(flask.has_request_context())\n finally:\n ctx.pop()\n\n def test_manual_context_binding(self):\n app = flask.Flask(__name__)\n @app.route('/')\n def index():\n return 'Hello %s!' % flask.request.args['name']\n\n ctx = app.test_request_context('/?name=World')\n ctx.push()\n self.assert_equal(index(), 'Hello World!')\n ctx.pop()\n try:\n index()\n except RuntimeError:\n pass\n else:\n self.assert_true(0, 'expected runtime error')\n\n def test_greenlet_context_copying(self):\n app = flask.Flask(__name__)\n greenlets = []\n\n @app.route('/')\n def index():\n reqctx = flask._request_ctx_stack.top.copy()\n def g():\n self.assert_false(flask.request)\n self.assert_false(flask.current_app)\n with reqctx:\n self.assert_true(flask.request)\n self.assert_equal(flask.current_app, app)\n self.assert_equal(flask.request.path, '/')\n self.assert_equal(flask.request.args['foo'], 'bar')\n self.assert_false(flask.request)\n return 42\n greenlets.append(greenlet(g))\n return 'Hello World!'\n\n rv = app.test_client().get('/?foo=bar')\n self.assert_equal(rv.data, b'Hello World!')\n\n result = greenlets[0].run()\n self.assert_equal(result, 42)\n\n def test_greenlet_context_copying_api(self):\n app = flask.Flask(__name__)\n greenlets = []\n\n @app.route('/')\n def index():\n reqctx = flask._request_ctx_stack.top.copy()\n @flask.copy_current_request_context\n def g():\n self.assert_true(flask.request)\n 
self.assert_equal(flask.current_app, app)\n self.assert_equal(flask.request.path, '/')\n self.assert_equal(flask.request.args['foo'], 'bar')\n return 42\n greenlets.append(greenlet(g))\n return 'Hello World!'\n\n rv = app.test_client().get('/?foo=bar')\n self.assert_equal(rv.data, b'Hello World!')\n\n result = greenlets[0].run()\n self.assert_equal(result, 42)\n\n # Disable test if we don't have greenlets available\n if greenlet is None:\n test_greenlet_context_copying = None\n test_greenlet_context_copying_api = None\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(RequestContextTestCase))\n return suite\n"},"license":{"kind":"string","value":"cc0-1.0"}}},{"rowIdx":475124,"cells":{"repo_name":{"kind":"string","value":"lichia/luigi"},"path":{"kind":"string","value":"luigi/contrib/hdfs/__init__.py"},"copies":{"kind":"string","value":"12"},"size":{"kind":"string","value":"3160"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nProvides access to HDFS using the :py:class:`HdfsTarget`, a subclass of :py:class:`~luigi.target.Target`.\nYou can configure what client by setting the \"client\" config under the \"hdfs\" section in the configuration, or using the ``--hdfs-client`` command line option.\n\"hadoopcli\" is the slowest, but should work out of the box. 
\"snakebite\" is the fastest, but requires Snakebite to be installed.\n\nCurrently (4th May) the :py:mod:`luigi.contrib.hdfs` module is under\nreorganization. We recommend importing the reexports from\n:py:mod:`luigi.contrib.hdfs` instead of the sub-modules, as we're not yet sure\nhow the final structure of the sub-modules will be. Eventually this module\nwill be empty and you'll have to import directly from the sub modules like\n:py:mod:`luigi.contrib.hdfs.config`.\n\"\"\"\n\n# config.py\nfrom luigi.contrib.hdfs import config as hdfs_config\nhdfs = hdfs_config.hdfs\nload_hadoop_cmd = hdfs_config.load_hadoop_cmd\nget_configured_hadoop_version = hdfs_config.get_configured_hadoop_version\nget_configured_hdfs_client = hdfs_config.get_configured_hdfs_client\ntmppath = hdfs_config.tmppath\n\n\n# clients\nfrom luigi.contrib.hdfs import clients as hdfs_clients\nfrom luigi.contrib.hdfs import error as hdfs_error\nfrom luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client\nfrom luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients\nHDFSCliError = hdfs_error.HDFSCliError\ncall_check = hdfs_hadoopcli_clients.HdfsClient.call_check\nlist_path = hdfs_snakebite_client.SnakebiteHdfsClient.list_path\nHdfsClient = hdfs_hadoopcli_clients.HdfsClient\nSnakebiteHdfsClient = hdfs_snakebite_client.SnakebiteHdfsClient\nHdfsClientCdh3 = hdfs_hadoopcli_clients.HdfsClientCdh3\nHdfsClientApache1 = hdfs_hadoopcli_clients.HdfsClientApache1\ncreate_hadoopcli_client = hdfs_hadoopcli_clients.create_hadoopcli_client\nget_autoconfig_client = hdfs_clients.get_autoconfig_client\nexists = hdfs_clients.exists\nrename = hdfs_clients.rename\nremove = hdfs_clients.remove\nmkdir = hdfs_clients.mkdir\nlistdir = hdfs_clients.listdir\n\n\n# format.py\nfrom luigi.contrib.hdfs import format as hdfs_format\n\nHdfsReadPipe = hdfs_format.HdfsReadPipe\nHdfsAtomicWritePipe = hdfs_format.HdfsAtomicWritePipe\nHdfsAtomicWriteDirPipe = hdfs_format.HdfsAtomicWriteDirPipe\nPlainFormat = 
hdfs_format.PlainFormat\nPlainDirFormat = hdfs_format.PlainDirFormat\nPlain = hdfs_format.Plain\nPlainDir = hdfs_format.PlainDir\nCompatibleHdfsFormat = hdfs_format.CompatibleHdfsFormat\n\n\n# target.py\nfrom luigi.contrib.hdfs import target as hdfs_target\nHdfsTarget = hdfs_target.HdfsTarget\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475125,"cells":{"repo_name":{"kind":"string","value":"ubiar/odoo"},"path":{"kind":"string","value":"addons/note/tests/__init__.py"},"copies":{"kind":"string","value":"260"},"size":{"kind":"string","value":"1076"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Business Applications\n# Copyright (c) 2013-TODAY OpenERP S.A. \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nfrom . 
import test_note\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475126,"cells":{"repo_name":{"kind":"string","value":"mathgl67/pymmr"},"path":{"kind":"string","value":"tests/file.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4005"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# vi:ai:et:ts=4 sw=4\n#\n# -*- coding: utf8 -*-\n#\n# PyMmr My Music Renamer\n# Copyright (C) 2007-2010 mathgl67@gmail.com\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\nimport unittest\nimport os\nimport mmr.file\nfrom mmr.file import BaseFile, AudioFile\n\nclass TestFileFactory(unittest.TestCase):\n def setUp(self):\n # create cross os compatible path\n self.file_name = \"name.ext\"\n self.file_path = os.path.join(\"tests\", \"data\", \"file\")\n self.file_fullpath = os.path.join(\n self.file_path,\n self.file_name\n ) \n # create a base file object with previous data\n # this will be used for all test in this class.\n self.file = mmr.file.factory(self.file_fullpath)\n\n def testName(self):\n self.assertEquals(\n self.file.name, self.file_name,\n \"Factory must set the name to '%s' and it was '%s' !\" % (\n self.file_name,\n self.file.name\n )\n )\n\n def testExtension(self):\n self.assertEquals(\n 
self.file.extension, \".ext\",\n \"Factory must set extension to '%s' and it was '%s' !\" % (\n \".ext\",\n self.file.extension\n )\n )\n\n def testPath(self):\n self.assertEquals(\n self.file.path, self.file_path,\n \"Factory must set path to '%s' and it was '%s' !\" % (\n self.file_path,\n self.file.path\n )\n )\n\n def testFullpath(self):\n self.assertEquals(\n self.file.get_fullpath(), self.file_fullpath,\n \"Factory must retrieve path to '%s' (!= '%s').\" % (\n self.file_fullpath,\n self.file.get_fullpath()\n )\n )\n\n\nclass TestFileUnknown(unittest.TestCase):\n def setUp(self):\n self.file = mmr.file.factory(\"tests/data/file/unknown\")\n\n def testObjectType(self):\n self.assertTrue(isinstance(self.file, BaseFile), \"file should be a BaseFile object\")\n\n def testExtention(self):\n self.assertEquals(self.file.extension, None, \"file extension on unknown file should be None != %s\" % self.file.extension)\n\n def testBaseFileRepr(self):\n self.assertEquals(repr(self.file), \"\")\n\nclass TestFileAudio(unittest.TestCase):\n def setUp(self):\n self.file = {\n \".mp3\": mmr.file.factory(\"tests/data/tags/silence.mp3\"),\n \".ogg\": mmr.file.factory(\"tests/data/tags/silence.ogg\"),\n \".flac\":mmr.file.factory(\"tests/data/tags/silence.flac\"),\n }\n\n def testMp3FileIsFileAudio(self):\n self.assertTrue(isinstance(self.file[\".mp3\"], AudioFile), \"File with '.mp3' extension should be 'AudioFile'\")\n\n def testOggFileIsFileAudio(self):\n self.assertTrue(isinstance(self.file[\".ogg\"], AudioFile), \"File with '.ogg' extension should be 'AudioFile'\")\n\n def testFlacFileIsFileAudio(self):\n self.assertTrue(isinstance(self.file[\".flac\"], AudioFile), \"File with '.flac' extension should be 'AudioFile'\")\n\n def testHaveTag(self):\n self.assertNotEquals(self.file[\".mp3\"].tags, None)\n self.assertNotEquals(self.file[\".ogg\"].tags, None)\n self.assertNotEquals(self.file[\".flac\"].tags, 
None)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475127,"cells":{"repo_name":{"kind":"string","value":"cloudfoundry/php-buildpack-legacy"},"path":{"kind":"string","value":"builds/runtimes/python-2.7.6/lib/python2.7/test/test_int_literal.py"},"copies":{"kind":"string","value":"138"},"size":{"kind":"string","value":"9128"},"content":{"kind":"string","value":"\"\"\"Test correct treatment of hex/oct constants.\n\nThis is complex because of changes due to PEP 237.\n\"\"\"\n\nimport unittest\nfrom test import test_support\n\n\nclass TestHexOctBin(unittest.TestCase):\n\n def test_hex_baseline(self):\n # A few upper/lowercase tests\n self.assertEqual(0x0, 0X0)\n self.assertEqual(0x1, 0X1)\n self.assertEqual(0x123456789abcdef, 0X123456789abcdef)\n # Baseline tests\n self.assertEqual(0x0, 0)\n self.assertEqual(0x10, 16)\n self.assertEqual(0x7fffffff, 2147483647)\n self.assertEqual(0x7fffffffffffffff, 9223372036854775807)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0x0), 0)\n self.assertEqual(-(0x10), -16)\n self.assertEqual(-(0x7fffffff), -2147483647)\n self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)\n # Ditto with a minus sign and NO parentheses\n self.assertEqual(-0x0, 0)\n self.assertEqual(-0x10, -16)\n self.assertEqual(-0x7fffffff, -2147483647)\n self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)\n\n def test_hex_unsigned(self):\n # Positive constants\n self.assertEqual(0x80000000, 2147483648L)\n self.assertEqual(0xffffffff, 4294967295L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0x80000000), -2147483648L)\n self.assertEqual(-(0xffffffff), -4294967295L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0x80000000, -2147483648L)\n self.assertEqual(-0xffffffff, -4294967295L)\n\n # Positive constants\n self.assertEqual(0x8000000000000000, 9223372036854775808L)\n self.assertEqual(0xffffffffffffffff, 
18446744073709551615L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0x8000000000000000), -9223372036854775808L)\n self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0x8000000000000000, -9223372036854775808L)\n self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)\n\n def test_oct_baseline(self):\n # Baseline tests\n self.assertEqual(00, 0)\n self.assertEqual(020, 16)\n self.assertEqual(017777777777, 2147483647)\n self.assertEqual(0777777777777777777777, 9223372036854775807)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(00), 0)\n self.assertEqual(-(020), -16)\n self.assertEqual(-(017777777777), -2147483647)\n self.assertEqual(-(0777777777777777777777), -9223372036854775807)\n # Ditto with a minus sign and NO parentheses\n self.assertEqual(-00, 0)\n self.assertEqual(-020, -16)\n self.assertEqual(-017777777777, -2147483647)\n self.assertEqual(-0777777777777777777777, -9223372036854775807)\n\n def test_oct_baseline_new(self):\n # A few upper/lowercase tests\n self.assertEqual(0o0, 0O0)\n self.assertEqual(0o1, 0O1)\n self.assertEqual(0o1234567, 0O1234567)\n # Baseline tests\n self.assertEqual(0o0, 0)\n self.assertEqual(0o20, 16)\n self.assertEqual(0o17777777777, 2147483647)\n self.assertEqual(0o777777777777777777777, 9223372036854775807)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0o0), 0)\n self.assertEqual(-(0o20), -16)\n self.assertEqual(-(0o17777777777), -2147483647)\n self.assertEqual(-(0o777777777777777777777), -9223372036854775807)\n # Ditto with a minus sign and NO parentheses\n self.assertEqual(-0o0, 0)\n self.assertEqual(-0o20, -16)\n self.assertEqual(-0o17777777777, -2147483647)\n self.assertEqual(-0o777777777777777777777, -9223372036854775807)\n\n def test_oct_unsigned(self):\n # Positive constants\n self.assertEqual(020000000000, 2147483648L)\n 
self.assertEqual(037777777777, 4294967295L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(020000000000), -2147483648L)\n self.assertEqual(-(037777777777), -4294967295L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-020000000000, -2147483648L)\n self.assertEqual(-037777777777, -4294967295L)\n\n # Positive constants\n self.assertEqual(01000000000000000000000, 9223372036854775808L)\n self.assertEqual(01777777777777777777777, 18446744073709551615L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(01000000000000000000000), -9223372036854775808L)\n self.assertEqual(-(01777777777777777777777), -18446744073709551615L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-01000000000000000000000, -9223372036854775808L)\n self.assertEqual(-01777777777777777777777, -18446744073709551615L)\n\n def test_oct_unsigned_new(self):\n # Positive constants\n self.assertEqual(0o20000000000, 2147483648L)\n self.assertEqual(0o37777777777, 4294967295L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0o20000000000), -2147483648L)\n self.assertEqual(-(0o37777777777), -4294967295L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0o20000000000, -2147483648L)\n self.assertEqual(-0o37777777777, -4294967295L)\n\n # Positive constants\n self.assertEqual(0o1000000000000000000000, 9223372036854775808L)\n self.assertEqual(0o1777777777777777777777, 18446744073709551615L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)\n self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)\n 
self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)\n\n def test_bin_baseline(self):\n # A few upper/lowercase tests\n self.assertEqual(0b0, 0B0)\n self.assertEqual(0b1, 0B1)\n self.assertEqual(0b10101010101, 0B10101010101)\n # Baseline tests\n self.assertEqual(0b0, 0)\n self.assertEqual(0b10000, 16)\n self.assertEqual(0b1111111111111111111111111111111, 2147483647)\n self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0b0), 0)\n self.assertEqual(-(0b10000), -16)\n self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)\n self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)\n # Ditto with a minus sign and NO parentheses\n self.assertEqual(-0b0, 0)\n self.assertEqual(-0b10000, -16)\n self.assertEqual(-0b1111111111111111111111111111111, -2147483647)\n self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)\n\n def test_bin_unsigned(self):\n # Positive constants\n self.assertEqual(0b10000000000000000000000000000000, 2147483648L)\n self.assertEqual(0b11111111111111111111111111111111, 4294967295L)\n # Ditto with a minus sign and parentheses\n self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)\n self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)\n self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)\n\n # Positive constants\n self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)\n self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)\n # Ditto with a minus sign and parentheses\n 
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)\n self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)\n # Ditto with a minus sign and NO parentheses\n # This failed in Python 2.2 through 2.2.2 and in 2.3a1\n self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)\n self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)\n\ndef test_main():\n test_support.run_unittest(TestHexOctBin)\n\nif __name__ == \"__main__\":\n test_main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475128,"cells":{"repo_name":{"kind":"string","value":"jkkummerfeld/1ec-graph-parser"},"path":{"kind":"string","value":"properties/count_unique_dev_spines.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2799"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport argparse\nimport string\nimport sys\n\ndef read(filename):\n sent = []\n spines = []\n for line in open(filename):\n line = line.strip()\n if line.startswith(\"# Sentence\"):\n spines.append([])\n sent = line.strip().split()[2:]\n elif len(line) > 0 and line[0] != '#':\n fields = line.split()\n num = int(fields[0])\n word = fields[1]\n pos = fields[2]\n spine = fields[3]\n spines[-1].append((word, pos, spine))\n return spines\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Calculate how many spines in the dev set are novel.')\n parser.add_argument('train', help='Training data in SHP format.')\n parser.add_argument('dev', help='Development data in SHP format.')\n args = parser.parse_args()\n\n train_spines = read(args.train)\n word_set = set()\n pos_set = set()\n spine_set = set()\n for spines in train_spines:\n for spine in spines:\n word_set.add(spine)\n pos_set.add((spine[1], 
spine[2]))\n spine_set.add(spine[2])\n\n results = {\n 'Dev sentences with all seen: (word, POS, spine)': 0,\n 'Dev sentences with all seen: (POS, spine)': 0,\n 'Dev sentences with all seen: spine': 0,\n 'Train spines (word, POS, spine)': len(word_set),\n 'Train spines (POS, spine)': len(pos_set),\n 'Train spines spine': len(spine_set),\n 'Dev spines new (word, POS, spine)': 0,\n 'Dev spines new (POS, spine)': 0,\n 'Dev spines new spine': 0,\n 'Dev spines': 0\n }\n sentences = 0\n for spines in read(args.dev):\n sentences += 1\n all_wpresent = True\n all_ppresent = True\n all_cpresent = True\n for spine in spines:\n results['Dev spines'] += 1\n if spine not in word_set:\n results['Dev spines new (word, POS, spine)'] += 1\n all_wpresent = False\n if (spine[1], spine[2]) not in pos_set:\n results['Dev spines new (POS, spine)'] += 1\n all_ppresent = False\n if spine[2] not in spine_set:\n results['Dev spines new spine'] += 1\n all_cpresent = False\n if all_wpresent:\n results['Dev sentences with all seen: (word, POS, spine)'] += 1\n if all_ppresent:\n results['Dev sentences with all seen: (POS, spine)'] += 1\n if all_cpresent:\n results['Dev sentences with all seen: spine'] += 1\n\n for key in results:\n if key.startswith(\"Dev sentences\"):\n print(\"{} {} {:.1f}%\".format(key, results[key], results[key] * 100 / sentences))\n else:\n print(key, results[key])\n"},"license":{"kind":"string","value":"isc"}}},{"rowIdx":475129,"cells":{"repo_name":{"kind":"string","value":"ABaldwinHunter/django-clone-classic"},"path":{"kind":"string","value":"tests/template_tests/test_logging.py"},"copies":{"kind":"string","value":"117"},"size":{"kind":"string","value":"4628"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nimport logging\n\nfrom django.template import Context, Engine, Variable, VariableDoesNotExist\nfrom django.test import SimpleTestCase\n\n\nclass TestHandler(logging.Handler):\n def __init__(self):\n super(TestHandler, self).__init__()\n 
self.log_record = None\n\n def emit(self, record):\n self.log_record = record\n\n\nclass BaseTemplateLoggingTestCase(SimpleTestCase):\n def setUp(self):\n self.test_handler = TestHandler()\n self.logger = logging.getLogger('django.template')\n self.original_level = self.logger.level\n self.logger.addHandler(self.test_handler)\n self.logger.setLevel(self.loglevel)\n\n def tearDown(self):\n self.logger.removeHandler(self.test_handler)\n self.logger.level = self.original_level\n\n\nclass VariableResolveLoggingTests(BaseTemplateLoggingTestCase):\n loglevel = logging.DEBUG\n\n def test_log_on_variable_does_not_exist_silent(self):\n class TestObject(object):\n class SilentDoesNotExist(Exception):\n silent_variable_failure = True\n\n @property\n def template_name(self):\n return \"template_name\"\n\n @property\n def template(self):\n return Engine().from_string('')\n\n @property\n def article(self):\n raise TestObject.SilentDoesNotExist(\"Attribute does not exist.\")\n\n def __iter__(self):\n return iter(attr for attr in dir(TestObject) if attr[:2] != \"__\")\n\n def __getitem__(self, item):\n return self.__dict__[item]\n\n Variable('article').resolve(TestObject())\n\n self.assertEqual(\n self.test_handler.log_record.getMessage(),\n \"Exception while resolving variable 'article' in template 'template_name'.\"\n )\n self.assertIsNotNone(self.test_handler.log_record.exc_info)\n raised_exception = self.test_handler.log_record.exc_info[1]\n self.assertEqual(str(raised_exception), 'Attribute does not exist.')\n\n def test_log_on_variable_does_not_exist_not_silent(self):\n with self.assertRaises(VariableDoesNotExist):\n Variable('article.author').resolve({'article': {'section': 'News'}})\n\n self.assertEqual(\n self.test_handler.log_record.getMessage(),\n \"Exception while resolving variable 'author' in template 'unknown'.\"\n )\n self.assertIsNotNone(self.test_handler.log_record.exc_info)\n raised_exception = self.test_handler.log_record.exc_info[1]\n self.assertEqual(\n 
str(raised_exception),\n 'Failed lookup for key [author] in %r' % (\"{%r: %r}\" % ('section', 'News'))\n )\n\n def test_no_log_when_variable_exists(self):\n Variable('article.section').resolve({'article': {'section': 'News'}})\n self.assertIsNone(self.test_handler.log_record)\n\n\nclass IncludeNodeLoggingTests(BaseTemplateLoggingTestCase):\n loglevel = logging.WARN\n\n @classmethod\n def setUpClass(cls):\n super(IncludeNodeLoggingTests, cls).setUpClass()\n cls.engine = Engine(loaders=[\n ('django.template.loaders.locmem.Loader', {\n 'child': '{{ raises_exception }}',\n }),\n ], debug=False)\n\n def error_method():\n raise IndexError(\"some generic exception\")\n\n cls.ctx = Context({'raises_exception': error_method})\n\n def test_logs_exceptions_during_rendering_with_debug_disabled(self):\n template = self.engine.from_string('{% include \"child\" %}')\n template.name = 'template_name'\n self.assertEqual(template.render(self.ctx), '')\n self.assertEqual(\n self.test_handler.log_record.getMessage(),\n \"Exception raised while rendering {% include %} for template \"\n \"'template_name'. Empty string rendered instead.\"\n )\n self.assertIsNotNone(self.test_handler.log_record.exc_info)\n self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)\n\n def test_logs_exceptions_during_rendering_with_no_template_name(self):\n template = self.engine.from_string('{% include \"child\" %}')\n self.assertEqual(template.render(self.ctx), '')\n self.assertEqual(\n self.test_handler.log_record.getMessage(),\n \"Exception raised while rendering {% include %} for template \"\n \"'unknown'. 
Empty string rendered instead.\"\n )\n self.assertIsNotNone(self.test_handler.log_record.exc_info)\n self.assertEqual(self.test_handler.log_record.levelno, logging.WARN)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475130,"cells":{"repo_name":{"kind":"string","value":"BeyondTheClouds/nova"},"path":{"kind":"string","value":"nova/api/openstack/compute/cells.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"12036"},"content":{"kind":"string","value":"# Copyright 2011-2012 OpenStack Foundation\n# All Rights Reserved.\n# Copyright 2013 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"The cells extension.\"\"\"\n\nimport oslo_messaging as messaging\nfrom oslo_utils import strutils\nimport six\nfrom webob import exc\n\nfrom nova.api.openstack import common\nfrom nova.api.openstack.compute.schemas import cells\nfrom nova.api.openstack import extensions\nfrom nova.api.openstack import wsgi\nfrom nova.api import validation\nfrom nova.cells import rpcapi as cells_rpcapi\nimport nova.conf\nfrom nova import exception\nfrom nova.i18n import _\nfrom nova import rpc\n\n\nCONF = nova.conf.CONF\n\nALIAS = \"os-cells\"\nauthorize = extensions.os_compute_authorizer(ALIAS)\n\n\ndef _filter_keys(item, keys):\n \"\"\"Filters all model attributes except for keys\n item is a dict\n \"\"\"\n return {k: v for k, v in six.iteritems(item) if k in keys}\n\n\ndef _fixup_cell_info(cell_info, keys):\n \"\"\"If the transport_url is present in the cell, derive username,\n rpc_host, and rpc_port from it.\n \"\"\"\n\n if 'transport_url' not in cell_info:\n return\n\n # Disassemble the transport URL\n transport_url = cell_info.pop('transport_url')\n try:\n transport_url = rpc.get_transport_url(transport_url)\n except messaging.InvalidTransportURL:\n # Just go with None's\n for key in keys:\n cell_info.setdefault(key, None)\n return\n\n if not transport_url.hosts:\n return\n\n transport_host = transport_url.hosts[0]\n\n transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}\n for key in keys:\n if key in cell_info:\n continue\n\n transport_field = transport_field_map.get(key, key)\n cell_info[key] = getattr(transport_host, transport_field)\n\n\ndef _scrub_cell(cell, detail=False):\n keys = ['name', 'username', 'rpc_host', 'rpc_port']\n if detail:\n keys.append('capabilities')\n\n cell_info = _filter_keys(cell, keys + ['transport_url'])\n _fixup_cell_info(cell_info, keys)\n cell_info['type'] = 'parent' if cell['is_parent'] else 'child'\n return 
cell_info\n\n\nclass CellsController(wsgi.Controller):\n \"\"\"Controller for Cell resources.\"\"\"\n\n def __init__(self):\n self.cells_rpcapi = cells_rpcapi.CellsAPI()\n\n def _get_cells(self, ctxt, req, detail=False):\n \"\"\"Return all cells.\"\"\"\n # Ask the CellsManager for the most recent data\n items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)\n items = common.limited(items, req)\n items = [_scrub_cell(item, detail=detail) for item in items]\n return dict(cells=items)\n\n @extensions.expected_errors(501)\n @common.check_cells_enabled\n def index(self, req):\n \"\"\"Return all cells in brief.\"\"\"\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n return self._get_cells(ctxt, req)\n\n @extensions.expected_errors(501)\n @common.check_cells_enabled\n def detail(self, req):\n \"\"\"Return all cells in detail.\"\"\"\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n return self._get_cells(ctxt, req, detail=True)\n\n @extensions.expected_errors(501)\n @common.check_cells_enabled\n def info(self, req):\n \"\"\"Return name and capabilities for this cell.\"\"\"\n context = req.environ['nova.context']\n authorize(context)\n cell_capabs = {}\n my_caps = CONF.cells.capabilities\n for cap in my_caps:\n key, value = cap.split('=')\n cell_capabs[key] = value\n cell = {'name': CONF.cells.name,\n 'type': 'self',\n 'rpc_host': None,\n 'rpc_port': 0,\n 'username': None,\n 'capabilities': cell_capabs}\n return dict(cell=cell)\n\n @extensions.expected_errors((404, 501))\n @common.check_cells_enabled\n def capacities(self, req, id=None):\n \"\"\"Return capacities for a given cell or all cells.\"\"\"\n # TODO(kaushikc): return capacities as a part of cell info and\n # cells detail calls in v2.1, along with capabilities\n context = req.environ['nova.context']\n authorize(context)\n try:\n capacities = self.cells_rpcapi.get_capacities(context,\n cell_name=id)\n except exception.CellNotFound as e:\n raise exc.HTTPNotFound(explanation=e.format_message())\n\n 
return dict(cell={\"capacities\": capacities})\n\n @extensions.expected_errors((404, 501))\n @common.check_cells_enabled\n def show(self, req, id):\n \"\"\"Return data about the given cell name. 'id' is a cell name.\"\"\"\n context = req.environ['nova.context']\n authorize(context)\n try:\n cell = self.cells_rpcapi.cell_get(context, id)\n except exception.CellNotFound as e:\n raise exc.HTTPNotFound(explanation=e.format_message())\n return dict(cell=_scrub_cell(cell))\n\n # NOTE(gmann): Returns 200 for backwards compatibility but should be 204\n # as this operation complete the deletion of aggregate resource and return\n # no response body.\n @extensions.expected_errors((403, 404, 501))\n @common.check_cells_enabled\n def delete(self, req, id):\n \"\"\"Delete a child or parent cell entry. 'id' is a cell name.\"\"\"\n context = req.environ['nova.context']\n\n authorize(context, action=\"delete\")\n\n try:\n num_deleted = self.cells_rpcapi.cell_delete(context, id)\n except exception.CellsUpdateUnsupported as e:\n raise exc.HTTPForbidden(explanation=e.format_message())\n if num_deleted == 0:\n raise exc.HTTPNotFound(\n explanation=_(\"Cell %s doesn't exist.\") % id)\n\n def _normalize_cell(self, cell, existing=None):\n \"\"\"Normalize input cell data. 
Normalizations include:\n\n * Converting cell['type'] to is_parent boolean.\n * Merging existing transport URL with transport information.\n \"\"\"\n\n if 'name' in cell:\n cell['name'] = common.normalize_name(cell['name'])\n\n # Start with the cell type conversion\n if 'type' in cell:\n cell['is_parent'] = cell['type'] == 'parent'\n del cell['type']\n # Avoid cell type being overwritten to 'child'\n elif existing:\n cell['is_parent'] = existing['is_parent']\n else:\n cell['is_parent'] = False\n\n # Now we disassemble the existing transport URL...\n transport_url = existing.get('transport_url') if existing else None\n transport_url = rpc.get_transport_url(transport_url)\n\n if 'rpc_virtual_host' in cell:\n transport_url.virtual_host = cell.pop('rpc_virtual_host')\n\n if not transport_url.hosts:\n transport_url.hosts.append(messaging.TransportHost())\n transport_host = transport_url.hosts[0]\n if 'rpc_port' in cell:\n cell['rpc_port'] = int(cell['rpc_port'])\n # Copy over the input fields\n transport_field_map = {\n 'username': 'username',\n 'password': 'password',\n 'hostname': 'rpc_host',\n 'port': 'rpc_port',\n }\n for key, input_field in transport_field_map.items():\n # Only override the value if we're given an override\n if input_field in cell:\n setattr(transport_host, key, cell.pop(input_field))\n\n # Now set the transport URL\n cell['transport_url'] = str(transport_url)\n\n # NOTE(gmann): Returns 200 for backwards compatibility but should be 201\n # as this operation complete the creation of aggregates resource when\n # returning a response.\n @extensions.expected_errors((400, 403, 501))\n @common.check_cells_enabled\n @validation.schema(cells.create_v20, '2.0', '2.0')\n @validation.schema(cells.create, '2.1')\n def create(self, req, body):\n \"\"\"Create a child cell entry.\"\"\"\n context = req.environ['nova.context']\n\n authorize(context, action=\"create\")\n\n cell = body['cell']\n self._normalize_cell(cell)\n try:\n cell = 
self.cells_rpcapi.cell_create(context, cell)\n except exception.CellsUpdateUnsupported as e:\n raise exc.HTTPForbidden(explanation=e.format_message())\n return dict(cell=_scrub_cell(cell))\n\n @extensions.expected_errors((400, 403, 404, 501))\n @common.check_cells_enabled\n @validation.schema(cells.update_v20, '2.0', '2.0')\n @validation.schema(cells.update, '2.1')\n def update(self, req, id, body):\n \"\"\"Update a child cell entry. 'id' is the cell name to update.\"\"\"\n context = req.environ['nova.context']\n\n authorize(context, action=\"update\")\n\n cell = body['cell']\n cell.pop('id', None)\n\n try:\n # NOTE(Vek): There is a race condition here if multiple\n # callers are trying to update the cell\n # information simultaneously. Since this\n # operation is administrative in nature, and\n # will be going away in the future, I don't see\n # it as much of a problem...\n existing = self.cells_rpcapi.cell_get(context, id)\n except exception.CellNotFound as e:\n raise exc.HTTPNotFound(explanation=e.format_message())\n self._normalize_cell(cell, existing)\n try:\n cell = self.cells_rpcapi.cell_update(context, id, cell)\n except exception.CellNotFound as e:\n raise exc.HTTPNotFound(explanation=e.format_message())\n except exception.CellsUpdateUnsupported as e:\n raise exc.HTTPForbidden(explanation=e.format_message())\n return dict(cell=_scrub_cell(cell))\n\n # NOTE(gmann): Returns 200 for backwards compatibility but should be 204\n # as this operation complete the sync instance info and return\n # no response body.\n @extensions.expected_errors((400, 501))\n @common.check_cells_enabled\n @validation.schema(cells.sync_instances)\n def sync_instances(self, req, body):\n \"\"\"Tell all cells to sync instance info.\"\"\"\n context = req.environ['nova.context']\n\n authorize(context, action=\"sync_instances\")\n\n project_id = body.pop('project_id', None)\n deleted = body.pop('deleted', False)\n updated_since = body.pop('updated_since', None)\n if isinstance(deleted, 
six.string_types):\n deleted = strutils.bool_from_string(deleted, strict=True)\n self.cells_rpcapi.sync_instances(context, project_id=project_id,\n updated_since=updated_since, deleted=deleted)\n\n\nclass Cells(extensions.V21APIExtensionBase):\n \"\"\"Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n \"\"\"\n\n name = \"Cells\"\n alias = ALIAS\n version = 1\n\n def get_resources(self):\n coll_actions = {\n 'detail': 'GET',\n 'info': 'GET',\n 'sync_instances': 'POST',\n 'capacities': 'GET',\n }\n memb_actions = {\n 'capacities': 'GET',\n }\n\n res = extensions.ResourceExtension(ALIAS, CellsController(),\n collection_actions=coll_actions,\n member_actions=memb_actions)\n return [res]\n\n def get_controller_extensions(self):\n return []\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475131,"cells":{"repo_name":{"kind":"string","value":"tkremenek/swift"},"path":{"kind":"string","value":"utils/build_swift/tests/build_swift/test_driver_arguments.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"24206"},"content":{"kind":"string","value":"# This source file is part of the Swift.org open source project\n#\n# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors\n# Licensed under Apache License v2.0 with Runtime Library Exception\n#\n# See https://swift.org/LICENSE.txt for license information\n# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors\n\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport platform\nimport sys\nimport unittest\n\nfrom build_swift import argparse\nfrom build_swift import constants\nfrom build_swift import driver_arguments\nfrom build_swift import migration\nfrom build_swift.presets import PresetParser\n\nimport six\n\nfrom .test_presets import PRESET_DEFAULTS\nfrom .. import expected_options as eo\nfrom .. 
import utils\n\n\nPRESETS_FILES = [\n os.path.join(constants.UTILS_PATH, 'build-presets.ini'),\n]\n\n\nclass ParserError(Exception):\n pass\n\n\ndef _load_all_presets(preset_files):\n parser = PresetParser()\n parser.read_files(preset_files)\n\n # Hack to filter out mixins which are not expected to be valid presets\n preset_names = [\n name for name in parser.preset_names\n if not name.startswith('mixin')\n ]\n\n presets = dict()\n for name in preset_names:\n preset = parser.get_preset(name, vars=PRESET_DEFAULTS)\n args = migration.migrate_swift_sdks(preset.args)\n\n presets[name] = args\n\n return presets\n\n\nclass TestDriverArgumentParserMeta(type):\n \"\"\"Metaclass used to dynamically generate test methods for each of the\n individual options accepted by the parser and methods to validate all of\n the presets.\n \"\"\"\n\n def __new__(cls, name, bases, attrs):\n # Generate tests for each default value\n for dest, value in eo.EXPECTED_DEFAULTS.items():\n test_name = 'test_default_value_{}'.format(dest)\n attrs[test_name] = cls.generate_default_value_test(dest, value)\n\n # Generate tests for each expected option\n for option in eo.EXPECTED_OPTIONS:\n test_name = 'test_option_{}'.format(option.sanitized_string())\n attrs[test_name] = cls.generate_option_test(option)\n\n # Generate tests for each preset\n presets = _load_all_presets(PRESETS_FILES)\n\n for name, args in presets.items():\n test_name = 'test_preset_{}'.format(name)\n attrs[test_name] = cls.generate_preset_test(name, args)\n\n if six.PY2:\n name = str(name)\n\n return super(TestDriverArgumentParserMeta, cls).__new__(\n cls, name, bases, attrs)\n\n @classmethod\n def generate_default_value_test(cls, dest, default_value):\n def test(self):\n parsed_values = self.parse_default_args([])\n\n parsed_value = getattr(parsed_values, dest)\n if default_value.__class__ in six.string_types:\n parsed_value = six.text_type(parsed_value)\n\n self.assertEqual(default_value, parsed_value,\n 'Invalid default value for 
\"{}\": {} != {}'\n .format(dest, default_value, parsed_value))\n\n return test\n\n @classmethod\n def _generate_help_option_test(cls, option):\n def test(self):\n with utils.redirect_stdout() as output:\n with self.assertRaises(ParserError):\n self.parse_args([option.option_string])\n self.assertNotEmpty(output)\n\n return test\n\n @classmethod\n def _generate_set_option_test(cls, option):\n def test(self):\n namespace = self.parse_args([option.option_string])\n self.assertEqual(getattr(namespace, option.dest), option.value)\n\n with self.assertRaises(ParserError):\n self.parse_args([option.option_string, 'foo'])\n\n return test\n\n @classmethod\n def _generate_set_true_option_test(cls, option):\n def test(self):\n # TODO: Move to unit-tests for the action class\n namespace = self.parse_args([])\n self.assertFalse(getattr(namespace, option.dest))\n\n namespace = self.parse_args([option.option_string])\n self.assertTrue(getattr(namespace, option.dest))\n\n return test\n\n @classmethod\n def _generate_set_false_option_test(cls, option):\n def test(self):\n # TODO: Move to unit-tests for the action class\n namespace = self.parse_args([])\n self.assertTrue(getattr(namespace, option.dest))\n\n namespace = self.parse_args([option.option_string])\n self.assertFalse(getattr(namespace, option.dest))\n\n return test\n\n @classmethod\n def _generate_enable_option_test(cls, option):\n def test(self):\n # TODO: Move to unit-tests for the action class\n # Test parsing True values\n self.parse_args([option.option_string, '1'])\n self.parse_args([option.option_string, 'true'])\n self.parse_args([option.option_string, 'True'])\n self.parse_args([option.option_string, 'TRUE'])\n\n # TODO: Move to unit-tests for the action class\n # Test parsing False values\n self.parse_args([option.option_string, '0'])\n self.parse_args([option.option_string, 'false'])\n self.parse_args([option.option_string, 'False'])\n self.parse_args([option.option_string, 'FALSE'])\n\n # TODO: Move to 
unit-tests for the action class\n # Test default value\n namespace = self.parse_args([option.option_string])\n self.assertTrue(getattr(namespace, option.dest))\n\n # Test setting value to True\n namespace = self.parse_args([option.option_string, 'True'])\n self.assertTrue(getattr(namespace, option.dest))\n\n # Test setting value to False\n namespace = self.parse_args([option.option_string, 'False'])\n self.assertFalse(getattr(namespace, option.dest))\n\n return test\n\n @classmethod\n def _generate_disable_option_test(cls, option):\n def test(self):\n # TODO: Move to unit-tests for the action class\n # Test parsing True values\n self.parse_args([option.option_string, '1'])\n self.parse_args([option.option_string, 'true'])\n self.parse_args([option.option_string, 'True'])\n self.parse_args([option.option_string, 'TRUE'])\n\n # TODO: Move to unit-tests for the action class\n # Test parsing False values\n self.parse_args([option.option_string, '0'])\n self.parse_args([option.option_string, 'false'])\n self.parse_args([option.option_string, 'False'])\n self.parse_args([option.option_string, 'FALSE'])\n\n # TODO: Move to unit-tests for the action class\n # Test default value\n namespace = self.parse_args([option.option_string])\n self.assertFalse(getattr(namespace, option.dest))\n\n # Test setting value to True resulting in False\n namespace = self.parse_args([option.option_string, 'True'])\n self.assertFalse(getattr(namespace, option.dest))\n\n # Test setting value to False resulting in True\n namespace = self.parse_args([option.option_string, 'False'])\n self.assertTrue(getattr(namespace, option.dest))\n\n return test\n\n @classmethod\n def _generate_choices_option_test(cls, option):\n def test(self):\n for choice in option.choices:\n namespace = self.parse_args(\n [option.option_string, six.text_type(choice)])\n self.assertEqual(getattr(namespace, option.dest), choice)\n\n with self.assertRaises(ParserError):\n self.parse_args([option.option_string, 'INVALID'])\n\n 
return test\n\n @classmethod\n def _generate_int_option_test(cls, option):\n def test(self):\n for i in [0, 1, 42]:\n namespace = self.parse_args(\n [option.option_string, six.text_type(i)])\n self.assertEqual(int(getattr(namespace, option.dest)), i)\n\n # FIXME: int-type options should not accept non-int strings\n # self.parse_args([option.option_string, six.text_type(0.0)])\n # self.parse_args([option.option_string, six.text_type(1.0)])\n # self.parse_args([option.option_string, six.text_type(3.14)])\n # self.parse_args([option.option_string, 'NaN'])\n\n return test\n\n @classmethod\n def _generate_str_option_test(cls, option):\n def test(self):\n self.parse_args([option.option_string, 'foo'])\n\n return test\n\n @classmethod\n def _generate_path_option_test(cls, option):\n def test(self):\n self.parse_args([option.option_string, sys.executable])\n\n # FIXME: path-type options should not accept non-path inputs\n # self.parse_args([option.option_string, 'foo'])\n\n return test\n\n @classmethod\n def _generate_append_option_test(cls, option):\n def test(self):\n # Range size is arbitrary, just needs to be more than once\n for i in range(1, 4):\n namespace = self.parse_args([option.option_string, 'ARG'] * i)\n self.assertEqual(getattr(namespace, option.dest), ['ARG'] * i)\n\n return test\n\n @classmethod\n def _generate_unsupported_option_test(cls, option):\n def test(self):\n with self.assertRaises(ParserError):\n self.parse_args([option.option_string])\n\n return test\n\n @classmethod\n def _generate_build_script_impl_option_test(cls, option):\n def test(self):\n namespace, unknown_args = self.parse_args_and_unknown_args([])\n self.assertFalse(hasattr(namespace, option.dest))\n self.assertEqual(unknown_args, [])\n\n namespace, unknown_args = self.parse_args_and_unknown_args(\n [option.option_string])\n # The argument should never show up in the namespace\n self.assertFalse(hasattr(namespace, option.dest))\n # It should instead be forwareded to unkown_args\n 
self.assertEqual(unknown_args, [option.option_string])\n\n return test\n\n @classmethod\n def generate_option_test(cls, option):\n generate_test_funcs = {\n eo.HelpOption: cls._generate_help_option_test,\n eo.SetOption: cls._generate_set_option_test,\n eo.SetTrueOption: cls._generate_set_true_option_test,\n eo.SetFalseOption: cls._generate_set_false_option_test,\n eo.EnableOption: cls._generate_enable_option_test,\n eo.DisableOption: cls._generate_disable_option_test,\n eo.ChoicesOption: cls._generate_choices_option_test,\n eo.IntOption: cls._generate_int_option_test,\n eo.StrOption: cls._generate_str_option_test,\n eo.PathOption: cls._generate_path_option_test,\n eo.AppendOption: cls._generate_append_option_test,\n eo.UnsupportedOption: cls._generate_unsupported_option_test,\n eo.BuildScriptImplOption:\n cls._generate_build_script_impl_option_test,\n\n # IgnoreOptions should be manually tested\n eo.IgnoreOption: lambda self: None,\n }\n\n test_func = generate_test_funcs.get(option.__class__, None)\n if test_func is not None:\n return test_func(option)\n\n # Catch-all meaningless test\n return lambda self: \\\n self.fail('unexpected option \"{}\"'.format(option.option_string))\n\n @classmethod\n def generate_preset_test(cls, preset_name, preset_args):\n def test(self):\n try:\n # Windows cannot run build-script-impl to check the impl args.\n is_windows = platform.system() == 'Windows'\n self.parse_default_args(preset_args,\n check_impl_args=not is_windows)\n except ParserError as e:\n self.fail('failed to parse preset \"{}\": {}'.format(\n preset_name, e))\n\n return test\n\n\n@six.add_metaclass(TestDriverArgumentParserMeta)\nclass TestDriverArgumentParser(unittest.TestCase):\n\n def _parse_args(self, args):\n try:\n return migration.parse_args(self.parser, args)\n except (SystemExit, ValueError) as e:\n raise ParserError('failed to parse arguments: {}'.format(\n six.text_type(args), e))\n\n def _check_impl_args(self, namespace):\n assert hasattr(namespace, 
'build_script_impl_args')\n\n try:\n migration.check_impl_args(\n constants.BUILD_SCRIPT_IMPL_PATH,\n namespace.build_script_impl_args)\n except (SystemExit, ValueError) as e:\n raise ParserError('failed to parse impl arguments: {}'.format(\n six.text_type(namespace.build_script_impl_args), e))\n\n def parse_args_and_unknown_args(self, args, namespace=None):\n if namespace is None:\n namespace = argparse.Namespace()\n\n with utils.quiet_output():\n try:\n namespace, unknown_args = (\n super(self.parser.__class__, self.parser).parse_known_args(\n args, namespace))\n namespace, unknown_args = (\n migration._process_disambiguation_arguments(\n namespace, unknown_args))\n except (SystemExit, argparse.ArgumentError) as e:\n raise ParserError('failed to parse arguments: {}'.format(\n six.text_type(args), e))\n\n return namespace, unknown_args\n\n def parse_args(self, args, namespace=None):\n namespace, unknown_args = self.parse_args_and_unknown_args(\n args, namespace)\n\n if unknown_args:\n raise ParserError('unknown arguments: {}'.format(\n six.text_type(unknown_args)))\n\n return namespace\n\n def parse_default_args(self, args, check_impl_args=False):\n with utils.quiet_output():\n namespace = self._parse_args(args)\n\n if check_impl_args:\n self._check_impl_args(namespace)\n\n return namespace\n\n def setUp(self):\n self.parser = driver_arguments.create_argument_parser()\n\n # -------------------------------------------------------------------------\n\n def test_expected_options_exhaustive(self):\n \"\"\"Test that we are exhaustively testing all options accepted by the\n parser. 
If this test if failing then the parser accepts more options\n than currently being tested, meaning the EXPECTED_OPTIONS list in\n build_swift/tests/expected_options.py should be updated to include\n the missing options.\n \"\"\"\n\n expected_options = {o.option_string for o in eo.EXPECTED_OPTIONS}\n\n # aggregate and flatten the options_strings accepted by the parser\n actual_options = [a.option_strings for a in self.parser._actions]\n actual_options = set(sum(actual_options, []))\n\n diff = actual_options - expected_options\n\n if len(diff) > 0:\n self.fail('non-exhaustive expected options, missing: {}'\n .format(diff))\n\n def test_expected_options_have_default_values(self):\n \"\"\"Test that all the options in EXPECTED_OPTIONS have an associated\n default value.\n \"\"\"\n\n skip_option_classes = [\n eo.HelpOption,\n eo.IgnoreOption,\n eo.UnsupportedOption,\n eo.BuildScriptImplOption,\n ]\n\n missing_defaults = set()\n for option in eo.EXPECTED_OPTIONS:\n if option.__class__ in skip_option_classes:\n continue\n\n if option.dest not in eo.EXPECTED_DEFAULTS:\n missing_defaults.add(option.dest)\n\n if len(missing_defaults) > 0:\n self.fail('non-exhaustive default values for options, missing: {}'\n .format(missing_defaults))\n\n # -------------------------------------------------------------------------\n # Manual option tests\n\n def test_option_clang_compiler_version(self):\n option_string = '--clang-compiler-version'\n\n self.parse_default_args([option_string, '5.0.0'])\n self.parse_default_args([option_string, '5.0.1'])\n self.parse_default_args([option_string, '5.0.0.1'])\n\n with self.assertRaises(ParserError):\n self.parse_default_args([option_string, '1'])\n self.parse_default_args([option_string, '1.2'])\n self.parse_default_args([option_string, '0.0.0.0.1'])\n\n def test_option_clang_user_visible_version(self):\n option_string = '--clang-user-visible-version'\n\n self.parse_default_args([option_string, '5.0.0'])\n self.parse_default_args([option_string, 
'5.0.1'])\n self.parse_default_args([option_string, '5.0.0.1'])\n\n with self.assertRaises(ParserError):\n self.parse_default_args([option_string, '1'])\n self.parse_default_args([option_string, '1.2'])\n self.parse_default_args([option_string, '0.0.0.0.1'])\n\n def test_option_swift_compiler_version(self):\n option_string = '--swift-compiler-version'\n\n self.parse_default_args([option_string, '4.1'])\n self.parse_default_args([option_string, '4.0.1'])\n self.parse_default_args([option_string, '200.99.1'])\n\n with self.assertRaises(ParserError):\n self.parse_default_args([option_string, '1'])\n self.parse_default_args([option_string, '0.0.0.1'])\n\n def test_option_swift_user_visible_version(self):\n option_string = '--swift-user-visible-version'\n\n self.parse_default_args([option_string, '4.1'])\n self.parse_default_args([option_string, '4.0.1'])\n self.parse_default_args([option_string, '200.99.1'])\n\n with self.assertRaises(ParserError):\n self.parse_default_args([option_string, '1'])\n self.parse_default_args([option_string, '0.0.0.1'])\n\n def test_option_I(self):\n with self.assertRaises(ValueError):\n self.parse_default_args(['-I'])\n\n def test_option_ios_all(self):\n with self.assertRaises(ValueError):\n self.parse_default_args(['--ios-all'])\n\n def test_option_tvos_all(self):\n with self.assertRaises(ValueError):\n self.parse_default_args(['--tvos-all'])\n\n def test_option_watchos_all(self):\n with self.assertRaises(ValueError):\n self.parse_default_args(['--watchos-all'])\n\n # -------------------------------------------------------------------------\n # Implied defaults tests\n\n def test_implied_defaults_assertions(self):\n namespace = self.parse_default_args(['--assertions'])\n\n self.assertTrue(namespace.cmark_assertions)\n self.assertTrue(namespace.llvm_assertions)\n self.assertTrue(namespace.swift_assertions)\n self.assertTrue(namespace.swift_stdlib_assertions)\n\n def test_implied_defaults_cmark_build_variant(self):\n namespace = 
self.parse_default_args(['--debug-cmark'])\n self.assertTrue(namespace.build_cmark)\n\n def test_implied_defaults_lldb_build_variant(self):\n namespace = self.parse_default_args(['--debug-lldb'])\n self.assertTrue(namespace.build_lldb)\n\n namespace = self.parse_default_args(['--lldb-assertions'])\n self.assertTrue(namespace.build_lldb)\n\n def test_implied_defaults_build_variant(self):\n namespace = self.parse_default_args(['--debug'])\n\n self.assertEqual(namespace.cmark_build_variant, 'Debug')\n self.assertEqual(namespace.foundation_build_variant, 'Debug')\n self.assertEqual(namespace.libdispatch_build_variant, 'Debug')\n self.assertEqual(namespace.libicu_build_variant, 'Debug')\n self.assertEqual(namespace.lldb_build_variant, 'Debug')\n self.assertEqual(namespace.llvm_build_variant, 'Debug')\n self.assertEqual(namespace.swift_build_variant, 'Debug')\n self.assertEqual(namespace.swift_stdlib_build_variant, 'Debug')\n\n def test_implied_defaults_skip_build_ios(self):\n namespace = self.parse_default_args(['--skip-build-ios'])\n self.assertFalse(namespace.build_ios_device)\n self.assertFalse(namespace.build_ios_simulator)\n\n # Also implies that the tests should be skipped\n self.assertFalse(namespace.test_ios_host)\n self.assertFalse(namespace.test_ios_simulator)\n\n def test_implied_defaults_skip_build_tvos(self):\n namespace = self.parse_default_args(['--skip-build-tvos'])\n self.assertFalse(namespace.build_tvos_device)\n self.assertFalse(namespace.build_tvos_simulator)\n\n # Also implies that the tests should be skipped\n self.assertFalse(namespace.test_tvos_host)\n self.assertFalse(namespace.test_tvos_simulator)\n\n def test_implied_defaults_skip_build_watchos(self):\n namespace = self.parse_default_args(['--skip-build-watchos'])\n self.assertFalse(namespace.build_watchos_device)\n self.assertFalse(namespace.build_watchos_simulator)\n\n # Also implies that the tests should be skipped\n self.assertFalse(namespace.test_watchos_host)\n 
self.assertFalse(namespace.test_watchos_simulator)\n\n def test_implied_defaults_validation_test(self):\n namespace = self.parse_default_args(['--validation-test'])\n self.assertTrue(namespace.test)\n\n def test_implied_defaults_test_optimized(self):\n namespace = self.parse_default_args(['--test-optimized'])\n self.assertTrue(namespace.test)\n\n def test_implied_defaults_test_optimize_for_size(self):\n namespace = self.parse_default_args(['--test-optimize-for-size'])\n self.assertTrue(namespace.test)\n\n def test_implied_defaults_test_optimize_none_with_implicit_dynamic(self):\n namespace = self.parse_default_args(\n ['--test-optimize-none-with-implicit-dynamic'])\n self.assertTrue(namespace.test)\n\n def test_implied_defaults_skip_all_tests(self):\n namespace = self.parse_default_args([\n '--test', '0',\n '--validation-test', '0',\n '--long-test', '0',\n '--stress-test', '0',\n ])\n\n self.assertFalse(namespace.test_linux)\n self.assertFalse(namespace.test_freebsd)\n self.assertFalse(namespace.test_cygwin)\n self.assertFalse(namespace.test_osx)\n self.assertFalse(namespace.test_ios)\n self.assertFalse(namespace.test_tvos)\n self.assertFalse(namespace.test_watchos)\n\n def test_implied_defaults_skip_test_ios(self):\n namespace = self.parse_default_args(['--skip-test-ios'])\n self.assertFalse(namespace.test_ios_host)\n self.assertFalse(namespace.test_ios_simulator)\n\n def test_implied_defaults_skip_test_tvos(self):\n namespace = self.parse_default_args(['--skip-test-tvos'])\n self.assertFalse(namespace.test_tvos_host)\n self.assertFalse(namespace.test_tvos_simulator)\n\n def test_implied_defaults_skip_test_watchos(self):\n namespace = self.parse_default_args(['--skip-test-watchos'])\n self.assertFalse(namespace.test_watchos_host)\n self.assertFalse(namespace.test_watchos_simulator)\n\n def test_implied_defaults_skip_build_android(self):\n namespace = self.parse_default_args(['--android', '0'])\n self.assertFalse(namespace.test_android_host)\n\n namespace = 
self.parse_default_args(['--skip-build-android'])\n self.assertFalse(namespace.test_android_host)\n\n def test_implied_defaults_host_test(self):\n namespace = self.parse_default_args(['--host-test', '0'])\n self.assertFalse(namespace.test_ios_host)\n self.assertFalse(namespace.test_tvos_host)\n self.assertFalse(namespace.test_watchos_host)\n self.assertFalse(namespace.test_android_host)\n self.assertFalse(namespace.build_libparser_only)\n\n def test_build_lib_swiftsyntaxparser_only(self):\n namespace = self.parse_default_args(['--build-libparser-only'])\n self.assertTrue(namespace.build_libparser_only)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475132,"cells":{"repo_name":{"kind":"string","value":"googlecodelabs/nest-tensorflow"},"path":{"kind":"string","value":"wwn/access_token.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1354"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport urllib\nimport urllib2\nimport json\nimport os\n\nnest_auth_url = 'https://home.nest.com/login/oauth2'\nnest_access_token_url = 'https://api.home.nest.com/oauth2/access_token'\n\n# Set your OAuth client ID and secret as environment variables. 
\n# See docker-compose.yml for an example of where they can be set \n# if not publishing that file.\nclient_id = os.environ.get(\"CLIENT_ID\", None)\nclient_secret = os.environ.get(\"CLIENT_SECRET\", None)\n\ndef get_access_token(authorization_code):\n \"\"\"Paste get_access_token(authorization_code) snippet below this line\"\"\"\n return\n\ndef authorization_url():\n query = urllib.urlencode({\n 'client_id': client_id,\n 'state': 'STATE'\n })\n return \"{0}?{1}\".format(nest_auth_url, query)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475133,"cells":{"repo_name":{"kind":"string","value":"RomanKharin/lrmq"},"path":{"kind":"string","value":"test/async_agent_socket.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3459"},"content":{"kind":"string","value":"# -*- coding: utf8 -*-\n\n# Low-resource message queue framework\n# Access hub with tcp socket\n# Copyright (c) 2016 Roman Kharin \n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport sys\nimport asyncio\nfrom asyncio.streams import StreamWriter, FlowControlMixin\n\n\nasync def run(port):\n loop = asyncio.get_event_loop()\n # out command queue\n ans_queue = asyncio.Queue()\n \n stdreader = None\n stdwriter = None\n \n # stdio initiation\n # NOTE: os.fdopen(0, \"wb\") will not works in pipe\n # os.fdopen(sys.stdout, \"wb\") may crash print()\n writer_transport, writer_protocol = await loop.connect_write_pipe(\n FlowControlMixin, os.fdopen(sys.stdout.fileno(), \"wb\"))\n stdwriter = StreamWriter(writer_transport, writer_protocol, \n None, loop)\n stdreader = asyncio.StreamReader()\n reader_protocol = asyncio.StreamReaderProtocol(stdreader) \n await loop.connect_read_pipe(lambda: reader_protocol, sys.stdin.buffer)\n\n server_coro = None \n\n async def onclient(reader, writer):\n # read from socket\n async def coro_reader():\n while True:\n data = await stdreader.readline()\n if not data: \n if server_coro:\n server_coro.cancel()\n break\n writer.write(data)\n await writer.drain()\n task = asyncio.ensure_future(coro_reader())\n while True:\n data = await reader.readline()\n if not data: \n break\n stdwriter.write(data)\n await stdwriter.drain()\n task.cancel()\n \n server_coro = asyncio.start_server(onclient, port = port, backlog = 1)\n sock_server = await server_coro\n await sock_server.wait_closed()\n\ndef main():\n port = 5550\n if len(sys.argv) > 1:\n if sys.argv[1] in (\"-h\", \"--help\"):\n print(\"Start with:\")\n print(\"\\tpython3 -m lrmq -a python3 async_agent_socket.py 5550\")\n print(\"Then connect with\")\n print(\"\\ttelnet 127.0.0.1 5550\")\n return\n port = int(sys.argv[1])\n if sys.platform == \"win32\":\n loop = asyncio.ProactorEventLoop()\n 
asyncio.set_event_loop(loop)\n else:\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(run(port))\n finally:\n loop.close() \n \nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475134,"cells":{"repo_name":{"kind":"string","value":"ChromiumWebApps/chromium"},"path":{"kind":"string","value":"base/android/jni_generator/jni_generator.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"48419"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Extracts native methods from a Java file and generates the JNI bindings.\nIf you change this, please run and update the tests.\"\"\"\n\nimport collections\nimport errno\nimport optparse\nimport os\nimport re\nimport string\nfrom string import Template\nimport subprocess\nimport sys\nimport textwrap\nimport zipfile\n\n\nclass ParseError(Exception):\n \"\"\"Exception thrown when we can't parse the input file.\"\"\"\n\n def __init__(self, description, *context_lines):\n Exception.__init__(self)\n self.description = description\n self.context_lines = context_lines\n\n def __str__(self):\n context = '\\n'.join(self.context_lines)\n return '***\\nERROR: %s\\n\\n%s\\n***' % (self.description, context)\n\n\nclass Param(object):\n \"\"\"Describes a param for a method, either java or native.\"\"\"\n\n def __init__(self, **kwargs):\n self.datatype = kwargs['datatype']\n self.name = kwargs['name']\n\n\nclass NativeMethod(object):\n \"\"\"Describes a C/C++ method that is called by Java code\"\"\"\n\n def __init__(self, **kwargs):\n self.static = kwargs['static']\n self.java_class_name = kwargs['java_class_name']\n self.return_type = kwargs['return_type']\n self.name = kwargs['name']\n self.params = kwargs['params']\n if self.params:\n assert type(self.params) is list\n 
assert type(self.params[0]) is Param\n if (self.params and\n self.params[0].datatype == kwargs.get('ptr_type', 'int') and\n self.params[0].name.startswith('native')):\n self.type = 'method'\n self.p0_type = self.params[0].name[len('native'):]\n if kwargs.get('native_class_name'):\n self.p0_type = kwargs['native_class_name']\n else:\n self.type = 'function'\n self.method_id_var_name = kwargs.get('method_id_var_name', None)\n\n\nclass CalledByNative(object):\n \"\"\"Describes a java method exported to c/c++\"\"\"\n\n def __init__(self, **kwargs):\n self.system_class = kwargs['system_class']\n self.unchecked = kwargs['unchecked']\n self.static = kwargs['static']\n self.java_class_name = kwargs['java_class_name']\n self.return_type = kwargs['return_type']\n self.name = kwargs['name']\n self.params = kwargs['params']\n self.method_id_var_name = kwargs.get('method_id_var_name', None)\n self.signature = kwargs.get('signature')\n self.is_constructor = kwargs.get('is_constructor', False)\n self.env_call = GetEnvCall(self.is_constructor, self.static,\n self.return_type)\n self.static_cast = GetStaticCastForReturnType(self.return_type)\n\n\ndef JavaDataTypeToC(java_type):\n \"\"\"Returns a C datatype for the given java type.\"\"\"\n java_pod_type_map = {\n 'int': 'jint',\n 'byte': 'jbyte',\n 'char': 'jchar',\n 'short': 'jshort',\n 'boolean': 'jboolean',\n 'long': 'jlong',\n 'double': 'jdouble',\n 'float': 'jfloat',\n }\n java_type_map = {\n 'void': 'void',\n 'String': 'jstring',\n 'java/lang/String': 'jstring',\n 'java/lang/Class': 'jclass',\n }\n\n if java_type in java_pod_type_map:\n return java_pod_type_map[java_type]\n elif java_type in java_type_map:\n return java_type_map[java_type]\n elif java_type.endswith('[]'):\n if java_type[:-2] in java_pod_type_map:\n return java_pod_type_map[java_type[:-2]] + 'Array'\n return 'jobjectArray'\n elif java_type.startswith('Class'):\n # Checking just the start of the name, rather than a direct comparison,\n # in order to handle 
generics.\n return 'jclass'\n else:\n return 'jobject'\n\n\ndef JavaReturnValueToC(java_type):\n \"\"\"Returns a valid C return value for the given java type.\"\"\"\n java_pod_type_map = {\n 'int': '0',\n 'byte': '0',\n 'char': '0',\n 'short': '0',\n 'boolean': 'false',\n 'long': '0',\n 'double': '0',\n 'float': '0',\n 'void': ''\n }\n return java_pod_type_map.get(java_type, 'NULL')\n\n\nclass JniParams(object):\n _imports = []\n _fully_qualified_class = ''\n _package = ''\n _inner_classes = []\n _remappings = []\n\n @staticmethod\n def SetFullyQualifiedClass(fully_qualified_class):\n JniParams._fully_qualified_class = 'L' + fully_qualified_class\n JniParams._package = '/'.join(fully_qualified_class.split('/')[:-1])\n\n @staticmethod\n def ExtractImportsAndInnerClasses(contents):\n contents = contents.replace('\\n', '')\n re_import = re.compile(r'import.*?(?P\\S*?);')\n for match in re.finditer(re_import, contents):\n JniParams._imports += ['L' + match.group('class').replace('.', '/')]\n\n re_inner = re.compile(r'(class|interface)\\s+?(?P\\w+?)\\W')\n for match in re.finditer(re_inner, contents):\n inner = match.group('name')\n if not JniParams._fully_qualified_class.endswith(inner):\n JniParams._inner_classes += [JniParams._fully_qualified_class + '$' +\n inner]\n\n @staticmethod\n def ParseJavaPSignature(signature_line):\n prefix = 'Signature: '\n return '\"%s\"' % signature_line[signature_line.index(prefix) + len(prefix):]\n\n @staticmethod\n def JavaToJni(param):\n \"\"\"Converts a java param into a JNI signature type.\"\"\"\n pod_param_map = {\n 'int': 'I',\n 'boolean': 'Z',\n 'char': 'C',\n 'short': 'S',\n 'long': 'J',\n 'double': 'D',\n 'float': 'F',\n 'byte': 'B',\n 'void': 'V',\n }\n object_param_list = [\n 'Ljava/lang/Boolean',\n 'Ljava/lang/Integer',\n 'Ljava/lang/Long',\n 'Ljava/lang/Object',\n 'Ljava/lang/String',\n 'Ljava/lang/Class',\n ]\n prefix = ''\n # Array?\n while param[-2:] == '[]':\n prefix += '['\n param = param[:-2]\n # Generic?\n if '<' in 
param:\n param = param[:param.index('<')]\n if param in pod_param_map:\n return prefix + pod_param_map[param]\n if '/' in param:\n # Coming from javap, use the fully qualified param directly.\n return prefix + 'L' + JniParams.RemapClassName(param) + ';'\n for qualified_name in (object_param_list +\n [JniParams._fully_qualified_class] +\n JniParams._inner_classes):\n if (qualified_name.endswith('/' + param) or\n qualified_name.endswith('$' + param.replace('.', '$')) or\n qualified_name == 'L' + param):\n return prefix + JniParams.RemapClassName(qualified_name) + ';'\n\n # Is it from an import? (e.g. referecing Class from import pkg.Class;\n # note that referencing an inner class Inner from import pkg.Class.Inner\n # is not supported).\n for qualified_name in JniParams._imports:\n if qualified_name.endswith('/' + param):\n # Ensure it's not an inner class.\n components = qualified_name.split('/')\n if len(components) > 2 and components[-2][0].isupper():\n raise SyntaxError('Inner class (%s) can not be imported '\n 'and used by JNI (%s). Please import the outer '\n 'class and use Outer.Inner instead.' %\n (qualified_name, param))\n return prefix + JniParams.RemapClassName(qualified_name) + ';'\n\n # Is it an inner class from an outer class import? (e.g. referencing\n # Class.Inner from import pkg.Class).\n if '.' 
in param:\n components = param.split('.')\n outer = '/'.join(components[:-1])\n inner = components[-1]\n for qualified_name in JniParams._imports:\n if qualified_name.endswith('/' + outer):\n return (prefix + JniParams.RemapClassName(qualified_name) +\n '$' + inner + ';')\n\n # Type not found, falling back to same package as this class.\n return (prefix + 'L' +\n JniParams.RemapClassName(JniParams._package + '/' + param) + ';')\n\n @staticmethod\n def Signature(params, returns, wrap):\n \"\"\"Returns the JNI signature for the given datatypes.\"\"\"\n items = ['(']\n items += [JniParams.JavaToJni(param.datatype) for param in params]\n items += [')']\n items += [JniParams.JavaToJni(returns)]\n if wrap:\n return '\\n' + '\\n'.join(['\"' + item + '\"' for item in items])\n else:\n return '\"' + ''.join(items) + '\"'\n\n @staticmethod\n def Parse(params):\n \"\"\"Parses the params into a list of Param objects.\"\"\"\n if not params:\n return []\n ret = []\n for p in [p.strip() for p in params.split(',')]:\n items = p.split(' ')\n if 'final' in items:\n items.remove('final')\n param = Param(\n datatype=items[0],\n name=(items[1] if len(items) > 1 else 'p%s' % len(ret)),\n )\n ret += [param]\n return ret\n\n @staticmethod\n def RemapClassName(class_name):\n \"\"\"Remaps class names using the jarjar mapping table.\"\"\"\n for old, new in JniParams._remappings:\n if old in class_name:\n return class_name.replace(old, new, 1)\n return class_name\n\n @staticmethod\n def SetJarJarMappings(mappings):\n \"\"\"Parse jarjar mappings from a string.\"\"\"\n JniParams._remappings = []\n for line in mappings.splitlines():\n keyword, src, dest = line.split()\n if keyword != 'rule':\n continue\n assert src.endswith('.**')\n src = src[:-2].replace('.', '/')\n dest = dest.replace('.', '/')\n if dest.endswith('@0'):\n JniParams._remappings.append((src, dest[:-2] + src))\n else:\n assert dest.endswith('@1')\n JniParams._remappings.append((src, dest[:-2]))\n\n\ndef 
ExtractJNINamespace(contents):\n re_jni_namespace = re.compile('.*?@JNINamespace\\(\"(.*?)\"\\)')\n m = re.findall(re_jni_namespace, contents)\n if not m:\n return ''\n return m[0]\n\n\ndef ExtractFullyQualifiedJavaClassName(java_file_name, contents):\n re_package = re.compile('.*?package (.*?);')\n matches = re.findall(re_package, contents)\n if not matches:\n raise SyntaxError('Unable to find \"package\" line in %s' % java_file_name)\n return (matches[0].replace('.', '/') + '/' +\n os.path.splitext(os.path.basename(java_file_name))[0])\n\n\ndef ExtractNatives(contents, ptr_type):\n \"\"\"Returns a list of dict containing information about a native method.\"\"\"\n contents = contents.replace('\\n', '')\n natives = []\n re_native = re.compile(r'(@NativeClassQualifiedName'\n '\\(\\\"(?P.*?)\\\"\\))?\\s*'\n '(@NativeCall(\\(\\\"(?P.*?)\\\"\\)))?\\s*'\n '(?P\\w+\\s\\w+|\\w+|\\s+)\\s*?native '\n '(?P\\S*?) '\n '(?Pnative\\w+?)\\((?P.*?)\\);')\n for match in re.finditer(re_native, contents):\n native = NativeMethod(\n static='static' in match.group('qualifiers'),\n java_class_name=match.group('java_class_name'),\n native_class_name=match.group('native_class_name'),\n return_type=match.group('return_type'),\n name=match.group('name').replace('native', ''),\n params=JniParams.Parse(match.group('params')),\n ptr_type=ptr_type)\n natives += [native]\n return natives\n\n\ndef GetStaticCastForReturnType(return_type):\n type_map = { 'String' : 'jstring',\n 'java/lang/String' : 'jstring',\n 'boolean[]': 'jbooleanArray',\n 'byte[]': 'jbyteArray',\n 'char[]': 'jcharArray',\n 'short[]': 'jshortArray',\n 'int[]': 'jintArray',\n 'long[]': 'jlongArray',\n 'double[]': 'jdoubleArray' }\n ret = type_map.get(return_type, None)\n if ret:\n return ret\n if return_type.endswith('[]'):\n return 'jobjectArray'\n return None\n\n\ndef GetEnvCall(is_constructor, is_static, return_type):\n \"\"\"Maps the types availabe via env->Call__Method.\"\"\"\n if is_constructor:\n return 'NewObject'\n 
env_call_map = {'boolean': 'Boolean',\n 'byte': 'Byte',\n 'char': 'Char',\n 'short': 'Short',\n 'int': 'Int',\n 'long': 'Long',\n 'float': 'Float',\n 'void': 'Void',\n 'double': 'Double',\n 'Object': 'Object',\n }\n call = env_call_map.get(return_type, 'Object')\n if is_static:\n call = 'Static' + call\n return 'Call' + call + 'Method'\n\n\ndef GetMangledParam(datatype):\n \"\"\"Returns a mangled identifier for the datatype.\"\"\"\n if len(datatype) <= 2:\n return datatype.replace('[', 'A')\n ret = ''\n for i in range(1, len(datatype)):\n c = datatype[i]\n if c == '[':\n ret += 'A'\n elif c.isupper() or datatype[i - 1] in ['/', 'L']:\n ret += c.upper()\n return ret\n\n\ndef GetMangledMethodName(name, params, return_type):\n \"\"\"Returns a mangled method name for the given signature.\n\n The returned name can be used as a C identifier and will be unique for all\n valid overloads of the same method.\n\n Args:\n name: string.\n params: list of Param.\n return_type: string.\n\n Returns:\n A mangled name.\n \"\"\"\n mangled_items = []\n for datatype in [return_type] + [x.datatype for x in params]:\n mangled_items += [GetMangledParam(JniParams.JavaToJni(datatype))]\n mangled_name = name + '_'.join(mangled_items)\n assert re.match(r'[0-9a-zA-Z_]+', mangled_name)\n return mangled_name\n\n\ndef MangleCalledByNatives(called_by_natives):\n \"\"\"Mangles all the overloads from the call_by_natives list.\"\"\"\n method_counts = collections.defaultdict(\n lambda: collections.defaultdict(lambda: 0))\n for called_by_native in called_by_natives:\n java_class_name = called_by_native.java_class_name\n name = called_by_native.name\n method_counts[java_class_name][name] += 1\n for called_by_native in called_by_natives:\n java_class_name = called_by_native.java_class_name\n method_name = called_by_native.name\n method_id_var_name = method_name\n if method_counts[java_class_name][method_name] > 1:\n method_id_var_name = GetMangledMethodName(method_name,\n called_by_native.params,\n 
called_by_native.return_type)\n called_by_native.method_id_var_name = method_id_var_name\n return called_by_natives\n\n\n# Regex to match the JNI return types that should be included in a\n# ScopedJavaLocalRef.\nRE_SCOPED_JNI_RETURN_TYPES = re.compile('jobject|jclass|jstring|.*Array')\n\n# Regex to match a string like \"@CalledByNative public void foo(int bar)\".\nRE_CALLED_BY_NATIVE = re.compile(\n '@CalledByNative(?P(Unchecked)*?)(?:\\(\"(?P.*)\"\\))?'\n '\\s+(?P[\\w ]*?)'\n '\\s*(?P\\S+?)'\n '\\s+(?P\\w+)'\n '\\s*\\((?P[^\\)]*)\\)')\n\n\ndef ExtractCalledByNatives(contents):\n \"\"\"Parses all methods annotated with @CalledByNative.\n\n Args:\n contents: the contents of the java file.\n\n Returns:\n A list of dict with information about the annotated methods.\n TODO(bulach): return a CalledByNative object.\n\n Raises:\n ParseError: if unable to parse.\n \"\"\"\n called_by_natives = []\n for match in re.finditer(RE_CALLED_BY_NATIVE, contents):\n called_by_natives += [CalledByNative(\n system_class=False,\n unchecked='Unchecked' in match.group('Unchecked'),\n static='static' in match.group('prefix'),\n java_class_name=match.group('annotation') or '',\n return_type=match.group('return_type'),\n name=match.group('name'),\n params=JniParams.Parse(match.group('params')))]\n # Check for any @CalledByNative occurrences that weren't matched.\n unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\\n')\n for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]):\n if '@CalledByNative' in line1:\n raise ParseError('could not parse @CalledByNative method signature',\n line1, line2)\n return MangleCalledByNatives(called_by_natives)\n\n\nclass JNIFromJavaP(object):\n \"\"\"Uses 'javap' to parse a .class file and generate the JNI header file.\"\"\"\n\n def __init__(self, contents, options):\n self.contents = contents\n self.namespace = options.namespace\n self.fully_qualified_class = re.match(\n '.*?(class|interface) (?P.*?)( |{)',\n 
contents[1]).group('class_name')\n self.fully_qualified_class = self.fully_qualified_class.replace('.', '/')\n # Java 7's javap includes type parameters in output, like HashSet. Strip\n # away the <...> and use the raw class name that Java 6 would've given us.\n self.fully_qualified_class = self.fully_qualified_class.split('<', 1)[0]\n JniParams.SetFullyQualifiedClass(self.fully_qualified_class)\n self.java_class_name = self.fully_qualified_class.split('/')[-1]\n if not self.namespace:\n self.namespace = 'JNI_' + self.java_class_name\n re_method = re.compile('(?P.*?)(?P\\S+?) (?P\\w+?)'\n '\\((?P.*?)\\)')\n self.called_by_natives = []\n for lineno, content in enumerate(contents[2:], 2):\n match = re.match(re_method, content)\n if not match:\n continue\n self.called_by_natives += [CalledByNative(\n system_class=True,\n unchecked=False,\n static='static' in match.group('prefix'),\n java_class_name='',\n return_type=match.group('return_type').replace('.', '/'),\n name=match.group('name'),\n params=JniParams.Parse(match.group('params').replace('.', '/')),\n signature=JniParams.ParseJavaPSignature(contents[lineno + 1]))]\n re_constructor = re.compile('(.*?)public ' +\n self.fully_qualified_class.replace('/', '.') +\n '\\((?P.*?)\\)')\n for lineno, content in enumerate(contents[2:], 2):\n match = re.match(re_constructor, content)\n if not match:\n continue\n self.called_by_natives += [CalledByNative(\n system_class=True,\n unchecked=False,\n static=False,\n java_class_name='',\n return_type=self.fully_qualified_class,\n name='Constructor',\n params=JniParams.Parse(match.group('params').replace('.', '/')),\n signature=JniParams.ParseJavaPSignature(contents[lineno + 1]),\n is_constructor=True)]\n self.called_by_natives = MangleCalledByNatives(self.called_by_natives)\n self.inl_header_file_generator = InlHeaderFileGenerator(\n self.namespace, self.fully_qualified_class, [],\n self.called_by_natives, options)\n\n def GetContent(self):\n return 
self.inl_header_file_generator.GetContent()\n\n @staticmethod\n def CreateFromClass(class_file, options):\n class_name = os.path.splitext(os.path.basename(class_file))[0]\n p = subprocess.Popen(args=[options.javap, '-s', class_name],\n cwd=os.path.dirname(class_file),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, _ = p.communicate()\n jni_from_javap = JNIFromJavaP(stdout.split('\\n'), options)\n return jni_from_javap\n\n\nclass JNIFromJavaSource(object):\n \"\"\"Uses the given java source file to generate the JNI header file.\"\"\"\n\n def __init__(self, contents, fully_qualified_class, options):\n contents = self._RemoveComments(contents, options)\n JniParams.SetFullyQualifiedClass(fully_qualified_class)\n JniParams.ExtractImportsAndInnerClasses(contents)\n jni_namespace = ExtractJNINamespace(contents) or options.namespace\n natives = ExtractNatives(contents, options.ptr_type)\n called_by_natives = ExtractCalledByNatives(contents)\n if len(natives) == 0 and len(called_by_natives) == 0:\n raise SyntaxError('Unable to find any JNI methods for %s.' %\n fully_qualified_class)\n inl_header_file_generator = InlHeaderFileGenerator(\n jni_namespace, fully_qualified_class, natives, called_by_natives,\n options)\n self.content = inl_header_file_generator.GetContent()\n\n def _RemoveComments(self, contents, options):\n # We need to support both inline and block comments, and we need to handle\n # strings that contain '//' or '/*'. Rather than trying to do all that with\n # regexps, we just pipe the contents through the C preprocessor. We tell cpp\n # the file has already been preprocessed, so it just removes comments and\n # doesn't try to parse #include, #pragma etc.\n #\n # TODO(husky): This is a bit hacky. It would be cleaner to use a real Java\n # parser. 
Maybe we could ditch JNIFromJavaSource and just always use\n # JNIFromJavaP; or maybe we could rewrite this script in Java and use APT.\n # http://code.google.com/p/chromium/issues/detail?id=138941\n p = subprocess.Popen(args=[options.cpp, '-fpreprocessed'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, _ = p.communicate(contents)\n return stdout\n\n def GetContent(self):\n return self.content\n\n @staticmethod\n def CreateFromFile(java_file_name, options):\n contents = file(java_file_name).read()\n fully_qualified_class = ExtractFullyQualifiedJavaClassName(java_file_name,\n contents)\n return JNIFromJavaSource(contents, fully_qualified_class, options)\n\n\nclass InlHeaderFileGenerator(object):\n \"\"\"Generates an inline header file for JNI integration.\"\"\"\n\n def __init__(self, namespace, fully_qualified_class, natives,\n called_by_natives, options):\n self.namespace = namespace\n self.fully_qualified_class = fully_qualified_class\n self.class_name = self.fully_qualified_class.split('/')[-1]\n self.natives = natives\n self.called_by_natives = called_by_natives\n self.header_guard = fully_qualified_class.replace('/', '_') + '_JNI'\n self.options = options\n self.init_native = self.ExtractInitNative(options)\n\n def ExtractInitNative(self, options):\n for native in self.natives:\n if options.jni_init_native_name == 'native' + native.name:\n self.natives.remove(native)\n return native\n return None\n\n def GetContent(self):\n \"\"\"Returns the content of the JNI binding file.\"\"\"\n template = Template(\"\"\"\\\n// Copyright (c) 2012 The Chromium Authors. 
All rights reserved.\n// Use of this source code is governed by a BSD-style license that can be\n// found in the LICENSE file.\n\n\n// This file is autogenerated by\n// ${SCRIPT_NAME}\n// For\n// ${FULLY_QUALIFIED_CLASS}\n\n#ifndef ${HEADER_GUARD}\n#define ${HEADER_GUARD}\n\n#include \n\n${INCLUDES}\n\n// Step 1: forward declarations.\nnamespace {\n$CLASS_PATH_DEFINITIONS\n$METHOD_ID_DEFINITIONS\n} // namespace\n\n$OPEN_NAMESPACE\n$FORWARD_DECLARATIONS\n\n// Step 2: method stubs.\n$METHOD_STUBS\n\n// Step 3: RegisterNatives.\n$JNI_NATIVE_METHODS\n$REGISTER_NATIVES\n$CLOSE_NAMESPACE\n$JNI_REGISTER_NATIVES\n#endif // ${HEADER_GUARD}\n\"\"\")\n values = {\n 'SCRIPT_NAME': self.options.script_name,\n 'FULLY_QUALIFIED_CLASS': self.fully_qualified_class,\n 'CLASS_PATH_DEFINITIONS': self.GetClassPathDefinitionsString(),\n 'METHOD_ID_DEFINITIONS': self.GetMethodIDDefinitionsString(),\n 'FORWARD_DECLARATIONS': self.GetForwardDeclarationsString(),\n 'METHOD_STUBS': self.GetMethodStubsString(),\n 'OPEN_NAMESPACE': self.GetOpenNamespaceString(),\n 'JNI_NATIVE_METHODS': self.GetJNINativeMethodsString(),\n 'REGISTER_NATIVES': self.GetRegisterNativesString(),\n 'CLOSE_NAMESPACE': self.GetCloseNamespaceString(),\n 'HEADER_GUARD': self.header_guard,\n 'INCLUDES': self.GetIncludesString(),\n 'JNI_REGISTER_NATIVES': self.GetJNIRegisterNativesString()\n }\n return WrapOutput(template.substitute(values))\n\n def GetClassPathDefinitionsString(self):\n ret = []\n ret += [self.GetClassPathDefinitions()]\n return '\\n'.join(ret)\n\n def GetMethodIDDefinitionsString(self):\n \"\"\"Returns the definition of method ids for the called by native methods.\"\"\"\n if not self.options.eager_called_by_natives:\n return ''\n template = Template(\"\"\"\\\njmethodID g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = NULL;\"\"\")\n ret = []\n for called_by_native in self.called_by_natives:\n values = {\n 'JAVA_CLASS': called_by_native.java_class_name or self.class_name,\n 'METHOD_ID_VAR_NAME': 
called_by_native.method_id_var_name,\n }\n ret += [template.substitute(values)]\n return '\\n'.join(ret)\n\n def GetForwardDeclarationsString(self):\n ret = []\n for native in self.natives:\n if native.type != 'method':\n ret += [self.GetForwardDeclaration(native)]\n return '\\n'.join(ret)\n\n def GetMethodStubsString(self):\n \"\"\"Returns the code corresponding to method stubs.\"\"\"\n ret = []\n for native in self.natives:\n if native.type == 'method':\n ret += [self.GetNativeMethodStubString(native)]\n if self.options.eager_called_by_natives:\n ret += self.GetEagerCalledByNativeMethodStubs()\n else:\n ret += self.GetLazyCalledByNativeMethodStubs()\n return '\\n'.join(ret)\n\n def GetLazyCalledByNativeMethodStubs(self):\n return [self.GetLazyCalledByNativeMethodStub(called_by_native)\n for called_by_native in self.called_by_natives]\n\n def GetEagerCalledByNativeMethodStubs(self):\n ret = []\n if self.called_by_natives:\n ret += ['namespace {']\n for called_by_native in self.called_by_natives:\n ret += [self.GetEagerCalledByNativeMethodStub(called_by_native)]\n ret += ['} // namespace']\n return ret\n\n def GetIncludesString(self):\n if not self.options.includes:\n return ''\n includes = self.options.includes.split(',')\n return '\\n'.join('#include \"%s\"' % x for x in includes)\n\n def GetKMethodsString(self, clazz):\n ret = []\n for native in self.natives:\n if (native.java_class_name == clazz or\n (not native.java_class_name and clazz == self.class_name)):\n ret += [self.GetKMethodArrayEntry(native)]\n return '\\n'.join(ret)\n\n def SubstituteNativeMethods(self, template):\n \"\"\"Substitutes JAVA_CLASS and KMETHODS in the provided template.\"\"\"\n ret = []\n all_classes = self.GetUniqueClasses(self.natives)\n all_classes[self.class_name] = self.fully_qualified_class\n for clazz in all_classes:\n kmethods = self.GetKMethodsString(clazz)\n if kmethods:\n values = {'JAVA_CLASS': clazz,\n 'KMETHODS': kmethods}\n ret += [template.substitute(values)]\n if not 
ret: return ''\n return '\\n' + '\\n'.join(ret)\n\n def GetJNINativeMethodsString(self):\n \"\"\"Returns the implementation of the array of native methods.\"\"\"\n template = Template(\"\"\"\\\nstatic const JNINativeMethod kMethods${JAVA_CLASS}[] = {\n${KMETHODS}\n};\n\"\"\")\n return self.SubstituteNativeMethods(template)\n\n def GetRegisterCalledByNativesImplString(self):\n \"\"\"Returns the code for registering the called by native methods.\"\"\"\n if not self.options.eager_called_by_natives:\n return ''\n template = Template(\"\"\"\\\n g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = ${GET_METHOD_ID_IMPL}\n if (g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} == NULL) {\n return false;\n }\n \"\"\")\n ret = []\n for called_by_native in self.called_by_natives:\n values = {\n 'JAVA_CLASS': called_by_native.java_class_name or self.class_name,\n 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,\n 'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native),\n }\n ret += [template.substitute(values)]\n return '\\n'.join(ret)\n\n def GetRegisterNativesString(self):\n \"\"\"Returns the code for RegisterNatives.\"\"\"\n template = Template(\"\"\"\\\n${REGISTER_NATIVES_SIGNATURE} {\n${CLASSES}\n${NATIVES}\n${CALLED_BY_NATIVES}\n return true;\n}\n\"\"\")\n signature = 'static bool RegisterNativesImpl(JNIEnv* env'\n if self.init_native:\n signature += ', jclass clazz)'\n else:\n signature += ')'\n\n natives = self.GetRegisterNativesImplString()\n called_by_natives = self.GetRegisterCalledByNativesImplString()\n values = {'REGISTER_NATIVES_SIGNATURE': signature,\n 'CLASSES': self.GetFindClasses(),\n 'NATIVES': natives,\n 'CALLED_BY_NATIVES': called_by_natives,\n }\n return template.substitute(values)\n\n def GetRegisterNativesImplString(self):\n \"\"\"Returns the shared implementation for RegisterNatives.\"\"\"\n template = Template(\"\"\"\\\n const int kMethods${JAVA_CLASS}Size = arraysize(kMethods${JAVA_CLASS});\n\n if (env->RegisterNatives(g_${JAVA_CLASS}_clazz,\n 
kMethods${JAVA_CLASS},\n kMethods${JAVA_CLASS}Size) < 0) {\n jni_generator::HandleRegistrationError(\n env, g_${JAVA_CLASS}_clazz, __FILE__);\n return false;\n }\n\"\"\")\n return self.SubstituteNativeMethods(template)\n\n def GetJNIRegisterNativesString(self):\n \"\"\"Returns the implementation for the JNI registration of native methods.\"\"\"\n if not self.init_native:\n return ''\n\n template = Template(\"\"\"\\\nextern \"C\" JNIEXPORT bool JNICALL\nJava_${FULLY_QUALIFIED_CLASS}_${INIT_NATIVE_NAME}(JNIEnv* env, jclass clazz) {\n return ${NAMESPACE}RegisterNativesImpl(env, clazz);\n}\n\"\"\")\n fully_qualified_class = self.fully_qualified_class.replace('/', '_')\n namespace = ''\n if self.namespace:\n namespace = self.namespace + '::'\n values = {'FULLY_QUALIFIED_CLASS': fully_qualified_class,\n 'INIT_NATIVE_NAME': 'native' + self.init_native.name,\n 'NAMESPACE': namespace,\n 'REGISTER_NATIVES_IMPL': self.GetRegisterNativesImplString()\n }\n return template.substitute(values)\n\n def GetOpenNamespaceString(self):\n if self.namespace:\n all_namespaces = ['namespace %s {' % ns\n for ns in self.namespace.split('::')]\n return '\\n'.join(all_namespaces)\n return ''\n\n def GetCloseNamespaceString(self):\n if self.namespace:\n all_namespaces = ['} // namespace %s' % ns\n for ns in self.namespace.split('::')]\n all_namespaces.reverse()\n return '\\n'.join(all_namespaces) + '\\n'\n return ''\n\n def GetJNIFirstParam(self, native):\n ret = []\n if native.type == 'method':\n ret = ['jobject jcaller']\n elif native.type == 'function':\n if native.static:\n ret = ['jclass jcaller']\n else:\n ret = ['jobject jcaller']\n return ret\n\n def GetParamsInDeclaration(self, native):\n \"\"\"Returns the params for the stub declaration.\n\n Args:\n native: the native dictionary describing the method.\n\n Returns:\n A string containing the params.\n \"\"\"\n return ',\\n '.join(self.GetJNIFirstParam(native) +\n [JavaDataTypeToC(param.datatype) + ' ' +\n param.name\n for param in 
native.params])\n\n def GetCalledByNativeParamsInDeclaration(self, called_by_native):\n return ',\\n '.join([JavaDataTypeToC(param.datatype) + ' ' +\n param.name\n for param in called_by_native.params])\n\n def GetForwardDeclaration(self, native):\n template = Template(\"\"\"\nstatic ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS});\n\"\"\")\n values = {'RETURN': JavaDataTypeToC(native.return_type),\n 'NAME': native.name,\n 'PARAMS': self.GetParamsInDeclaration(native)}\n return template.substitute(values)\n\n def GetNativeMethodStubString(self, native):\n \"\"\"Returns stubs for native methods.\"\"\"\n template = Template(\"\"\"\\\nstatic ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS_IN_DECLARATION}) {\n ${P0_TYPE}* native = reinterpret_cast<${P0_TYPE}*>(${PARAM0_NAME});\n CHECK_NATIVE_PTR(env, jcaller, native, \"${NAME}\"${OPTIONAL_ERROR_RETURN});\n return native->${NAME}(${PARAMS_IN_CALL})${POST_CALL};\n}\n\"\"\")\n params = []\n if not self.options.pure_native_methods:\n params = ['env', 'jcaller']\n params_in_call = ', '.join(params + [p.name for p in native.params[1:]])\n\n return_type = JavaDataTypeToC(native.return_type)\n optional_error_return = JavaReturnValueToC(native.return_type)\n if optional_error_return:\n optional_error_return = ', ' + optional_error_return\n post_call = ''\n if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):\n post_call = '.Release()'\n values = {\n 'RETURN': return_type,\n 'OPTIONAL_ERROR_RETURN': optional_error_return,\n 'NAME': native.name,\n 'PARAMS_IN_DECLARATION': self.GetParamsInDeclaration(native),\n 'PARAM0_NAME': native.params[0].name,\n 'P0_TYPE': native.p0_type,\n 'PARAMS_IN_CALL': params_in_call,\n 'POST_CALL': post_call\n }\n return template.substitute(values)\n\n def GetCalledByNativeValues(self, called_by_native):\n \"\"\"Fills in necessary values for the CalledByNative methods.\"\"\"\n if called_by_native.static or called_by_native.is_constructor:\n first_param_in_declaration = ''\n first_param_in_call = ('g_%s_clazz' %\n 
(called_by_native.java_class_name or\n self.class_name))\n else:\n first_param_in_declaration = ', jobject obj'\n first_param_in_call = 'obj'\n params_in_declaration = self.GetCalledByNativeParamsInDeclaration(\n called_by_native)\n if params_in_declaration:\n params_in_declaration = ', ' + params_in_declaration\n params_in_call = ', '.join(param.name for param in called_by_native.params)\n if params_in_call:\n params_in_call = ', ' + params_in_call\n pre_call = ''\n post_call = ''\n if called_by_native.static_cast:\n pre_call = 'static_cast<%s>(' % called_by_native.static_cast\n post_call = ')'\n check_exception = ''\n if not called_by_native.unchecked:\n check_exception = 'jni_generator::CheckException(env);'\n return_type = JavaDataTypeToC(called_by_native.return_type)\n optional_error_return = JavaReturnValueToC(called_by_native.return_type)\n if optional_error_return:\n optional_error_return = ', ' + optional_error_return\n return_declaration = ''\n return_clause = ''\n if return_type != 'void':\n pre_call = ' ' + pre_call\n return_declaration = return_type + ' ret ='\n if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):\n return_type = 'base::android::ScopedJavaLocalRef<' + return_type + '>'\n return_clause = 'return ' + return_type + '(env, ret);'\n else:\n return_clause = 'return ret;'\n return {\n 'JAVA_CLASS': called_by_native.java_class_name or self.class_name,\n 'RETURN_TYPE': return_type,\n 'OPTIONAL_ERROR_RETURN': optional_error_return,\n 'RETURN_DECLARATION': return_declaration,\n 'RETURN_CLAUSE': return_clause,\n 'FIRST_PARAM_IN_DECLARATION': first_param_in_declaration,\n 'PARAMS_IN_DECLARATION': params_in_declaration,\n 'PRE_CALL': pre_call,\n 'POST_CALL': post_call,\n 'ENV_CALL': called_by_native.env_call,\n 'FIRST_PARAM_IN_CALL': first_param_in_call,\n 'PARAMS_IN_CALL': params_in_call,\n 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,\n 'CHECK_EXCEPTION': check_exception,\n 'GET_METHOD_ID_IMPL': 
self.GetMethodIDImpl(called_by_native)\n }\n\n def GetEagerCalledByNativeMethodStub(self, called_by_native):\n \"\"\"Returns the implementation of the called by native method.\"\"\"\n template = Template(\"\"\"\nstatic ${RETURN_TYPE} ${METHOD_ID_VAR_NAME}(\\\nJNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION}) {\n ${RETURN_DECLARATION}${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL},\n g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}${PARAMS_IN_CALL})${POST_CALL};\n ${RETURN_CLAUSE}\n}\"\"\")\n values = self.GetCalledByNativeValues(called_by_native)\n return template.substitute(values)\n\n def GetLazyCalledByNativeMethodStub(self, called_by_native):\n \"\"\"Returns a string.\"\"\"\n function_signature_template = Template(\"\"\"\\\nstatic ${RETURN_TYPE} Java_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}(\\\nJNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION})\"\"\")\n function_header_template = Template(\"\"\"\\\n${FUNCTION_SIGNATURE} {\"\"\")\n function_header_with_unused_template = Template(\"\"\"\\\n${FUNCTION_SIGNATURE} __attribute__ ((unused));\n${FUNCTION_SIGNATURE} {\"\"\")\n template = Template(\"\"\"\nstatic base::subtle::AtomicWord g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = 0;\n${FUNCTION_HEADER}\n /* Must call RegisterNativesImpl() */\n CHECK_CLAZZ(env, ${FIRST_PARAM_IN_CALL},\n g_${JAVA_CLASS}_clazz${OPTIONAL_ERROR_RETURN});\n jmethodID method_id =\n ${GET_METHOD_ID_IMPL}\n ${RETURN_DECLARATION}\n ${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL},\n method_id${PARAMS_IN_CALL})${POST_CALL};\n ${CHECK_EXCEPTION}\n ${RETURN_CLAUSE}\n}\"\"\")\n values = self.GetCalledByNativeValues(called_by_native)\n values['FUNCTION_SIGNATURE'] = (\n function_signature_template.substitute(values))\n if called_by_native.system_class:\n values['FUNCTION_HEADER'] = (\n function_header_with_unused_template.substitute(values))\n else:\n values['FUNCTION_HEADER'] = function_header_template.substitute(values)\n return template.substitute(values)\n\n def 
GetKMethodArrayEntry(self, native):\n template = Template(\"\"\"\\\n { \"native${NAME}\", ${JNI_SIGNATURE}, reinterpret_cast(${NAME}) },\"\"\")\n values = {'NAME': native.name,\n 'JNI_SIGNATURE': JniParams.Signature(native.params,\n native.return_type,\n True)}\n return template.substitute(values)\n\n def GetUniqueClasses(self, origin):\n ret = {self.class_name: self.fully_qualified_class}\n for entry in origin:\n class_name = self.class_name\n jni_class_path = self.fully_qualified_class\n if entry.java_class_name:\n class_name = entry.java_class_name\n jni_class_path = self.fully_qualified_class + '$' + class_name\n ret[class_name] = jni_class_path\n return ret\n\n def GetClassPathDefinitions(self):\n \"\"\"Returns the ClassPath constants.\"\"\"\n ret = []\n template = Template(\"\"\"\\\nconst char k${JAVA_CLASS}ClassPath[] = \"${JNI_CLASS_PATH}\";\"\"\")\n native_classes = self.GetUniqueClasses(self.natives)\n called_by_native_classes = self.GetUniqueClasses(self.called_by_natives)\n all_classes = native_classes\n all_classes.update(called_by_native_classes)\n for clazz in all_classes:\n values = {\n 'JAVA_CLASS': clazz,\n 'JNI_CLASS_PATH': JniParams.RemapClassName(all_classes[clazz]),\n }\n ret += [template.substitute(values)]\n ret += ''\n for clazz in called_by_native_classes:\n template = Template(\"\"\"\\\n// Leaking this jclass as we cannot use LazyInstance from some threads.\njclass g_${JAVA_CLASS}_clazz = NULL;\"\"\")\n values = {\n 'JAVA_CLASS': clazz,\n }\n ret += [template.substitute(values)]\n return '\\n'.join(ret)\n\n def GetFindClasses(self):\n \"\"\"Returns the imlementation of FindClass for all known classes.\"\"\"\n if self.init_native:\n template = Template(\"\"\"\\\n g_${JAVA_CLASS}_clazz = static_cast(env->NewWeakGlobalRef(clazz));\"\"\")\n else:\n template = Template(\"\"\"\\\n g_${JAVA_CLASS}_clazz = reinterpret_cast(env->NewGlobalRef(\n base::android::GetClass(env, k${JAVA_CLASS}ClassPath).obj()));\"\"\")\n ret = []\n for clazz in 
self.GetUniqueClasses(self.called_by_natives):\n values = {'JAVA_CLASS': clazz}\n ret += [template.substitute(values)]\n return '\\n'.join(ret)\n\n def GetMethodIDImpl(self, called_by_native):\n \"\"\"Returns the implementation of GetMethodID.\"\"\"\n if self.options.eager_called_by_natives:\n template = Template(\"\"\"\\\nenv->Get${STATIC_METHOD_PART}MethodID(\n g_${JAVA_CLASS}_clazz,\n \"${JNI_NAME}\", ${JNI_SIGNATURE});\"\"\")\n else:\n template = Template(\"\"\"\\\n base::android::MethodID::LazyGet<\n base::android::MethodID::TYPE_${STATIC}>(\n env, g_${JAVA_CLASS}_clazz,\n \"${JNI_NAME}\",\n ${JNI_SIGNATURE},\n &g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME});\n\"\"\")\n jni_name = called_by_native.name\n jni_return_type = called_by_native.return_type\n if called_by_native.is_constructor:\n jni_name = ''\n jni_return_type = 'void'\n if called_by_native.signature:\n signature = called_by_native.signature\n else:\n signature = JniParams.Signature(called_by_native.params,\n jni_return_type,\n True)\n values = {\n 'JAVA_CLASS': called_by_native.java_class_name or self.class_name,\n 'JNI_NAME': jni_name,\n 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,\n 'STATIC': 'STATIC' if called_by_native.static else 'INSTANCE',\n 'STATIC_METHOD_PART': 'Static' if called_by_native.static else '',\n 'JNI_SIGNATURE': signature,\n }\n return template.substitute(values)\n\n\ndef WrapOutput(output):\n ret = []\n for line in output.splitlines():\n # Do not wrap lines under 80 characters or preprocessor directives.\n if len(line) < 80 or line.lstrip()[:1] == '#':\n stripped = line.rstrip()\n if len(ret) == 0 or len(ret[-1]) or len(stripped):\n ret.append(stripped)\n else:\n first_line_indent = ' ' * (len(line) - len(line.lstrip()))\n subsequent_indent = first_line_indent + ' ' * 4\n if line.startswith('//'):\n subsequent_indent = '//' + subsequent_indent\n wrapper = textwrap.TextWrapper(width=80,\n subsequent_indent=subsequent_indent,\n break_long_words=False)\n ret += 
[wrapped.rstrip() for wrapped in wrapper.wrap(line)]\n ret += ['']\n return '\\n'.join(ret)\n\n\ndef ExtractJarInputFile(jar_file, input_file, out_dir):\n \"\"\"Extracts input file from jar and returns the filename.\n\n The input file is extracted to the same directory that the generated jni\n headers will be placed in. This is passed as an argument to script.\n\n Args:\n jar_file: the jar file containing the input files to extract.\n input_files: the list of files to extract from the jar file.\n out_dir: the name of the directories to extract to.\n\n Returns:\n the name of extracted input file.\n \"\"\"\n jar_file = zipfile.ZipFile(jar_file)\n\n out_dir = os.path.join(out_dir, os.path.dirname(input_file))\n try:\n os.makedirs(out_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n extracted_file_name = os.path.join(out_dir, os.path.basename(input_file))\n with open(extracted_file_name, 'w') as outfile:\n outfile.write(jar_file.read(input_file))\n\n return extracted_file_name\n\n\ndef GenerateJNIHeader(input_file, output_file, options):\n try:\n if os.path.splitext(input_file)[1] == '.class':\n jni_from_javap = JNIFromJavaP.CreateFromClass(input_file, options)\n content = jni_from_javap.GetContent()\n else:\n jni_from_java_source = JNIFromJavaSource.CreateFromFile(\n input_file, options)\n content = jni_from_java_source.GetContent()\n except ParseError, e:\n print e\n sys.exit(1)\n if output_file:\n if not os.path.exists(os.path.dirname(os.path.abspath(output_file))):\n os.makedirs(os.path.dirname(os.path.abspath(output_file)))\n if options.optimize_generation and os.path.exists(output_file):\n with file(output_file, 'r') as f:\n existing_content = f.read()\n if existing_content == content:\n return\n with file(output_file, 'w') as f:\n f.write(content)\n else:\n print output\n\n\ndef GetScriptName():\n script_components = os.path.abspath(sys.argv[0]).split(os.path.sep)\n base_index = 0\n for idx, value in enumerate(script_components):\n if value == 
'base' or value == 'third_party':\n base_index = idx\n break\n return os.sep.join(script_components[base_index:])\n\n\ndef main(argv):\n usage = \"\"\"usage: %prog [OPTIONS]\nThis script will parse the given java source code extracting the native\ndeclarations and print the header file to stdout (or a file).\nSee SampleForTests.java for more details.\n \"\"\"\n option_parser = optparse.OptionParser(usage=usage)\n option_parser.add_option('-j', dest='jar_file',\n help='Extract the list of input files from'\n ' a specified jar file.'\n ' Uses javap to extract the methods from a'\n ' pre-compiled class. --input should point'\n ' to pre-compiled Java .class files.')\n option_parser.add_option('-n', dest='namespace',\n help='Uses as a namespace in the generated header '\n 'instead of the javap class name, or when there is '\n 'no JNINamespace annotation in the java source.')\n option_parser.add_option('--input_file',\n help='Single input file name. The output file name '\n 'will be derived from it. Must be used with '\n '--output_dir.')\n option_parser.add_option('--output_dir',\n help='The output directory. 
Must be used with '\n '--input')\n option_parser.add_option('--optimize_generation', type=\"int\",\n default=0, help='Whether we should optimize JNI '\n 'generation by not regenerating files if they have '\n 'not changed.')\n option_parser.add_option('--jarjar',\n help='Path to optional jarjar rules file.')\n option_parser.add_option('--script_name', default=GetScriptName(),\n help='The name of this script in the generated '\n 'header.')\n option_parser.add_option('--includes',\n help='The comma-separated list of header files to '\n 'include in the generated header.')\n option_parser.add_option('--pure_native_methods',\n action='store_true', dest='pure_native_methods',\n help='When true, the native methods will be called '\n 'without any JNI-specific arguments.')\n option_parser.add_option('--ptr_type', default='int',\n type='choice', choices=['int', 'long'],\n help='The type used to represent native pointers in '\n 'Java code. For 32-bit, use int; '\n 'for 64-bit, use long.')\n option_parser.add_option('--jni_init_native_name', default='',\n help='The name of the JNI registration method that '\n 'is used to initialize all native methods. If a '\n 'method with this name is not present in the Java '\n 'source file, setting this option is a no-op. 
When '\n 'a method with this name is found however, the '\n 'naming convention Java__ '\n 'will limit the initialization to only the '\n 'top-level class.')\n option_parser.add_option('--eager_called_by_natives',\n action='store_true', dest='eager_called_by_natives',\n help='When true, the called-by-native methods will '\n 'be initialized in a non-atomic way.')\n option_parser.add_option('--cpp', default='cpp',\n help='The path to cpp command.')\n option_parser.add_option('--javap', default='javap',\n help='The path to javap command.')\n options, args = option_parser.parse_args(argv)\n if options.jar_file:\n input_file = ExtractJarInputFile(options.jar_file, options.input_file,\n options.output_dir)\n elif options.input_file:\n input_file = options.input_file\n else:\n option_parser.print_help()\n print '\\nError: Must specify --jar_file or --input_file.'\n return 1\n output_file = None\n if options.output_dir:\n root_name = os.path.splitext(os.path.basename(input_file))[0]\n output_file = os.path.join(options.output_dir, root_name) + '_jni.h'\n if options.jarjar:\n with open(options.jarjar) as f:\n JniParams.SetJarJarMappings(f.read())\n GenerateJNIHeader(input_file, output_file, options)\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475135,"cells":{"repo_name":{"kind":"string","value":"yvaucher/purchase-workflow"},"path":{"kind":"string","value":"__unported__/purchase_group_orders/purchase_group_orders.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"9678"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Alexandre Fayolle\n# Copyright 2012 Camptocamp SA\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 
of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nimport logging\n\nfrom openerp.osv.orm import Model, browse_record, browse_null\nfrom openerp.osv import fields\nfrom openerp import netsvc\n\nclass procurement_order(Model):\n _inherit = 'procurement.order'\n\n _columns = {'sale_id': fields.many2one('sale.order', 'Sale Order',\n help='the sale order which generated the procurement'),\n 'origin': fields.char('Source Document', size=512,\n help=\"Reference of the document that created this Procurement.\\n\"\n \"This is automatically completed by OpenERP.\"),\n }\n def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):\n \"\"\"Create the purchase order from the procurement, using\n the provided field values, after adding the given purchase\n order line in the purchase order.\n\n :params procurement: the procurement object generating the purchase order\n :params dict po_vals: field values for the new purchase order (the\n ``order_line`` field will be overwritten with one\n single line, as passed in ``line_vals``).\n :params dict line_vals: field values of the single purchase order line that\n the purchase order will contain.\n :return: id of the newly created purchase order\n :rtype: int\n \"\"\"\n po_vals.update({'order_line': [(0,0,line_vals)]})\n if procurement.sale_id:\n sale = procurement.sale_id\n update = {'shop_id': sale.shop_id.id,\n 'carrier_id': sale.carrier_id.id}\n po_vals.update(update)\n return self.pool.get('purchase.order').create(cr, uid, po_vals, 
context=context)\n\nclass sale_order(Model):\n _inherit = 'sale.order'\n\n def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None):\n proc_data = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line,\n move_id, date_planned,\n context)\n proc_data['sale_id'] = order.id\n return proc_data\n\nclass purchase_order(Model):\n _inherit = 'purchase.order'\n\n _columns = {\n 'shop_id': fields.many2one('sale.shop', 'Shop',\n help='the shop which generated the sale which triggered the PO'),\n 'carrier_id': fields.many2one('delivery.carrier', 'Carrier',\n help='the carrier in charge for delivering the related sale order'),\n 'carrier_partner_id': fields.related('carrier_id', 'partner_id',\n type='many2one',\n relation='res.partner',\n string='Carrier Name',\n readonly=True,\n help=\"Name of the carrier partner in charge of delivering the related sale order\"),\n 'origin': fields.char('Source Document', size=512,\n help=\"Reference of the document that generated this purchase order request.\"),\n\n }\n def do_merge(self, cr, uid, ids, context=None):\n \"\"\"\n To merge similar type of purchase orders.\n Orders will only be merged if:\n * Purchase Orders are in draft\n * Purchase Orders belong to the same partner\n * Purchase Orders have same stock location, same pricelist\n * Purchase Orders have the same shop and the same carrier (NEW in this module)\n Lines will only be merged if:\n * Order lines are exactly the same except for the quantity and unit\n \"\"\"\n #TOFIX: merged order line should be unlink\n wf_service = netsvc.LocalService(\"workflow\")\n def make_key(br, fields):\n list_key = []\n for field in fields:\n field_val = getattr(br, field)\n if field in ('product_id', 'move_dest_id', 'account_analytic_id'):\n if not field_val:\n field_val = False\n if isinstance(field_val, browse_record):\n field_val = field_val.id\n elif isinstance(field_val, browse_null):\n field_val = False\n elif 
isinstance(field_val, list):\n field_val = ((6, 0, tuple([v.id for v in field_val])),)\n list_key.append((field, field_val))\n list_key.sort()\n return tuple(list_key)\n\n # compute what the new orders should contain\n new_orders = {}\n for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:\n order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id',\n 'shop_id', 'carrier_id')) # added line\n new_order = new_orders.setdefault(order_key, ({}, []))\n new_order[1].append(porder.id)\n order_infos = new_order[0]\n if not order_infos:\n order_infos.update({\n 'origin': porder.origin,\n 'date_order': porder.date_order,\n 'partner_id': porder.partner_id.id,\n 'partner_address_id': porder.partner_address_id.id,\n 'dest_address_id': porder.dest_address_id.id,\n 'warehouse_id': porder.warehouse_id.id,\n 'location_id': porder.location_id.id,\n 'pricelist_id': porder.pricelist_id.id,\n 'state': 'draft',\n 'order_line': {},\n 'notes': '%s' % (porder.notes or '',),\n 'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,\n 'shop_id': porder.shop_id and porder.shop_id.id, # added line\n 'carrier_id': porder.carrier_id and porder.carrier_id.id, # added line\n })\n else:\n if porder.date_order < order_infos['date_order']:\n order_infos['date_order'] = porder.date_order\n if porder.notes:\n order_infos['notes'] = (order_infos['notes'] or '') + ('\\n%s' % (porder.notes,))\n if porder.origin:\n order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin\n\n for order_line in porder.order_line:\n line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'notes', 'product_id', 'move_dest_id', 'account_analytic_id'))\n o_line = order_infos['order_line'].setdefault(line_key, {})\n if o_line:\n # merge the line with an existing line\n o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']\n else:\n # append a 
new \"standalone\" line\n for field in ('product_qty', 'product_uom'):\n field_val = getattr(order_line, field)\n if isinstance(field_val, browse_record):\n field_val = field_val.id\n o_line[field] = field_val\n o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0\n\n\n allorders = []\n orders_info = {}\n for order_key, (order_data, old_ids) in new_orders.iteritems():\n # skip merges with only one order\n if len(old_ids) < 2:\n allorders += (old_ids or [])\n continue\n\n # cleanup order line data\n for key, value in order_data['order_line'].iteritems():\n del value['uom_factor']\n value.update(dict(key))\n order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]\n\n # create the new order\n neworder_id = self.create(cr, uid, order_data)\n orders_info.update({neworder_id: old_ids})\n allorders.append(neworder_id)\n\n # make triggers pointing to the old orders point to the new order\n for old_id in old_ids:\n wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)\n wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)\n return orders_info\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475136,"cells":{"repo_name":{"kind":"string","value":"glwu/python-for-android"},"path":{"kind":"string","value":"python3-alpha/python3-src/Lib/distutils/tests/test_install_data.py"},"copies":{"kind":"string","value":"147"},"size":{"kind":"string","value":"2603"},"content":{"kind":"string","value":"\"\"\"Tests for distutils.command.install_data.\"\"\"\nimport sys\nimport os\nimport unittest\nimport getpass\n\nfrom distutils.command.install_data import install_data\nfrom distutils.tests import support\nfrom test.support import run_unittest\n\nclass InstallDataTestCase(support.TempdirManager,\n support.LoggingSilencer,\n support.EnvironGuard,\n unittest.TestCase):\n\n def test_simple_run(self):\n pkg_dir, dist = self.create_dist()\n cmd = install_data(dist)\n 
cmd.install_dir = inst = os.path.join(pkg_dir, 'inst')\n\n # data_files can contain\n # - simple files\n # - a tuple with a path, and a list of file\n one = os.path.join(pkg_dir, 'one')\n self.write_file(one, 'xxx')\n inst2 = os.path.join(pkg_dir, 'inst2')\n two = os.path.join(pkg_dir, 'two')\n self.write_file(two, 'xxx')\n\n cmd.data_files = [one, (inst2, [two])]\n self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])])\n\n # let's run the command\n cmd.ensure_finalized()\n cmd.run()\n\n # let's check the result\n self.assertEqual(len(cmd.get_outputs()), 2)\n rtwo = os.path.split(two)[-1]\n self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))\n rone = os.path.split(one)[-1]\n self.assertTrue(os.path.exists(os.path.join(inst, rone)))\n cmd.outfiles = []\n\n # let's try with warn_dir one\n cmd.warn_dir = 1\n cmd.ensure_finalized()\n cmd.run()\n\n # let's check the result\n self.assertEqual(len(cmd.get_outputs()), 2)\n self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))\n self.assertTrue(os.path.exists(os.path.join(inst, rone)))\n cmd.outfiles = []\n\n # now using root and empty dir\n cmd.root = os.path.join(pkg_dir, 'root')\n inst3 = os.path.join(cmd.install_dir, 'inst3')\n inst4 = os.path.join(pkg_dir, 'inst4')\n three = os.path.join(cmd.install_dir, 'three')\n self.write_file(three, 'xx')\n cmd.data_files = [one, (inst2, [two]),\n ('inst3', [three]),\n (inst4, [])]\n cmd.ensure_finalized()\n cmd.run()\n\n # let's check the result\n self.assertEqual(len(cmd.get_outputs()), 4)\n self.assertTrue(os.path.exists(os.path.join(inst2, rtwo)))\n self.assertTrue(os.path.exists(os.path.join(inst, rone)))\n\ndef test_suite():\n return unittest.makeSuite(InstallDataTestCase)\n\nif __name__ == \"__main__\":\n 
run_unittest(test_suite())\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475137,"cells":{"repo_name":{"kind":"string","value":"yugangw-msft/azure-cli"},"path":{"kind":"string","value":"src/azure-cli-core/azure/cli/core/extension/tests/latest/test_extension_commands.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"24452"},"content":{"kind":"string","value":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport os\nimport tempfile\nimport unittest\nimport shutil\nimport hashlib\nimport mock\nimport sys\n\nfrom azure.cli.core.util import CLIError\nfrom azure.cli.core.extension import get_extension, build_extension_path\nfrom azure.cli.core.extension.operations import (add_extension_to_path, list_extensions, add_extension,\n show_extension, remove_extension, update_extension,\n list_available_extensions, OUT_KEY_NAME, OUT_KEY_VERSION,\n OUT_KEY_METADATA, OUT_KEY_PATH)\nfrom azure.cli.core.extension._resolve import NoExtensionCandidatesError\nfrom azure.cli.core.mock import DummyCli\n\nfrom . 
import IndexPatch, mock_ext\n\n\ndef _get_test_data_file(filename):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', filename)\n\n\ndef _compute_file_hash(filename):\n sha256 = hashlib.sha256()\n with open(filename, 'rb') as f:\n sha256.update(f.read())\n return sha256.hexdigest()\n\n\nMY_EXT_NAME = 'myfirstcliextension'\nMY_EXT_SOURCE = _get_test_data_file('myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl')\nMY_BAD_EXT_SOURCE = _get_test_data_file('notanextension.txt')\nMY_SECOND_EXT_NAME_DASHES = 'my-second-cli-extension'\nMY_SECOND_EXT_SOURCE_DASHES = _get_test_data_file('my_second_cli_extension-0.0.1+dev-py2.py3-none-any.whl')\n\n\nclass TestExtensionCommands(unittest.TestCase):\n\n def setUp(self):\n self.ext_dir = tempfile.mkdtemp()\n self.ext_sys_dir = tempfile.mkdtemp()\n self.patchers = [mock.patch('azure.cli.core.extension.EXTENSIONS_DIR', self.ext_dir),\n mock.patch('azure.cli.core.extension.EXTENSIONS_SYS_DIR', self.ext_sys_dir)]\n for patcher in self.patchers:\n patcher.start()\n self.cmd = self._setup_cmd()\n\n def tearDown(self):\n for patcher in self.patchers:\n patcher.stop()\n shutil.rmtree(self.ext_dir, ignore_errors=True)\n shutil.rmtree(self.ext_sys_dir, ignore_errors=True)\n\n def test_no_extensions_dir(self):\n shutil.rmtree(self.ext_dir)\n actual = list_extensions()\n self.assertEqual(len(actual), 0)\n\n def test_no_extensions_in_dir(self):\n actual = list_extensions()\n self.assertEqual(len(actual), 0)\n\n def test_add_list_show_remove_extension(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n actual = list_extensions()\n self.assertEqual(len(actual), 1)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n remove_extension(MY_EXT_NAME)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 0)\n\n def test_add_list_show_remove_system_extension(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True)\n actual = list_extensions()\n 
self.assertEqual(len(actual), 1)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n remove_extension(MY_EXT_NAME)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 0)\n\n def test_add_list_show_remove_user_system_extensions(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES, system=True)\n actual = list_extensions()\n self.assertEqual(len(actual), 2)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_PATH], build_extension_path(MY_EXT_NAME))\n second_ext = show_extension(MY_SECOND_EXT_NAME_DASHES)\n self.assertEqual(second_ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)\n self.assertEqual(second_ext[OUT_KEY_PATH], build_extension_path(MY_SECOND_EXT_NAME_DASHES, system=True))\n remove_extension(MY_EXT_NAME)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 1)\n remove_extension(MY_SECOND_EXT_NAME_DASHES)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 0)\n\n def test_add_list_show_remove_extension_with_dashes(self):\n add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES)\n actual = list_extensions()\n self.assertEqual(len(actual), 1)\n ext = show_extension(MY_SECOND_EXT_NAME_DASHES)\n self.assertEqual(ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)\n self.assertIn(OUT_KEY_NAME, ext[OUT_KEY_METADATA], \"Unable to get full metadata\")\n self.assertEqual(ext[OUT_KEY_METADATA][OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES)\n remove_extension(MY_SECOND_EXT_NAME_DASHES)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 0)\n\n def test_add_extension_twice(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 1)\n with self.assertRaises(CLIError):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n\n def test_add_same_extension_user_system(self):\n 
add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 1)\n with self.assertRaises(CLIError):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True)\n\n def test_add_extension_invalid(self):\n with self.assertRaises(ValueError):\n add_extension(cmd=self.cmd, source=MY_BAD_EXT_SOURCE)\n actual = list_extensions()\n self.assertEqual(len(actual), 0)\n\n def test_add_extension_invalid_whl_name(self):\n with self.assertRaises(CLIError):\n add_extension(cmd=self.cmd, source=os.path.join('invalid', 'ext', 'path', 'file.whl'))\n actual = list_extensions()\n self.assertEqual(len(actual), 0)\n\n def test_add_extension_valid_whl_name_filenotfound(self):\n with self.assertRaises(CLIError):\n add_extension(cmd=self.cmd, source=_get_test_data_file('mywheel-0.0.3+dev-py2.py3-none-any.whl'))\n actual = list_extensions()\n self.assertEqual(len(actual), 0)\n\n def test_add_extension_with_pip_proxy(self):\n extension_name = MY_EXT_NAME\n proxy_param = '--proxy'\n proxy_endpoint = \"https://user:pass@proxy.microsoft.com\"\n computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \\\n mock.patch('azure.cli.core.extension.operations.shutil'), \\\n mock.patch('azure.cli.core.extension.operations.check_output') as check_output:\n add_extension(cmd=self.cmd, extension_name=extension_name, pip_proxy=proxy_endpoint)\n args = check_output.call_args\n pip_cmd = args[0][0]\n proxy_index = pip_cmd.index(proxy_param)\n assert pip_cmd[proxy_index + 1] == proxy_endpoint\n\n def test_add_extension_verify_no_pip_proxy(self):\n extension_name = MY_EXT_NAME\n computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \\\n 
mock.patch('azure.cli.core.extension.operations.shutil'), \\\n mock.patch('azure.cli.core.extension.operations.check_output') as check_output:\n add_extension(cmd=self.cmd, extension_name=extension_name)\n args = check_output.call_args\n pip_cmd = args[0][0]\n if '--proxy' in pip_cmd:\n raise AssertionError(\"proxy parameter in check_output args although no proxy specified\")\n\n def test_add_extension_with_specific_version(self):\n extension_name = MY_EXT_NAME\n extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl'\n extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl'\n\n mocked_index_data = {\n extension_name: [\n mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)),\n mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2))\n ]\n }\n\n with IndexPatch(mocked_index_data):\n add_extension(self.cmd, extension_name=extension_name, version='0.0.3+dev')\n ext = show_extension(extension_name)\n self.assertEqual(ext['name'], extension_name)\n self.assertEqual(ext['version'], '0.0.3+dev')\n\n def test_add_extension_with_non_existing_version(self):\n extension_name = MY_EXT_NAME\n extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl'\n extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl'\n\n mocked_index_data = {\n extension_name: [\n mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)),\n mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2))\n ]\n }\n\n non_existing_version = '0.0.5'\n with IndexPatch(mocked_index_data):\n with self.assertRaisesRegex(CLIError, non_existing_version):\n add_extension(self.cmd, extension_name=extension_name, version=non_existing_version)\n\n def test_add_extension_with_name_valid_checksum(self):\n extension_name = MY_EXT_NAME\n computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)\n with 
mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)):\n add_extension(cmd=self.cmd, extension_name=extension_name)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n\n def test_add_extension_with_name_invalid_checksum(self):\n extension_name = MY_EXT_NAME\n bad_sha256 = 'thishashisclearlywrong'\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, bad_sha256)):\n with self.assertRaises(CLIError) as err:\n add_extension(cmd=self.cmd, extension_name=extension_name)\n self.assertTrue('The checksum of the extension does not match the expected value.' in str(err.exception))\n\n def test_add_extension_with_name_source_not_whl(self):\n extension_name = 'myextension'\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=('{}.notwhl'.format(extension_name), None)):\n with self.assertRaises(ValueError) as err:\n add_extension(cmd=self.cmd, extension_name=extension_name)\n self.assertTrue('Unknown extension type. Only Python wheels are supported.' 
in str(err.exception))\n\n def test_add_extension_with_name_but_it_already_exists(self):\n # Add extension without name first\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n # Now add using name\n computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE)\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)):\n with mock.patch('azure.cli.core.extension.operations.logger') as mock_logger:\n add_extension(cmd=self.cmd, extension_name=MY_EXT_NAME)\n call_args = mock_logger.warning.call_args\n self.assertEqual(\"Extension '%s' is already installed.\", call_args[0][0])\n self.assertEqual(MY_EXT_NAME, call_args[0][1])\n self.assertEqual(mock_logger.warning.call_count, 1)\n\n def test_update_extension(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')\n computed_extension_sha256 = _compute_file_hash(newer_extension)\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, computed_extension_sha256)):\n update_extension(self.cmd, MY_EXT_NAME)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev')\n\n def test_update_extension_with_pip_proxy(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')\n computed_extension_sha256 = _compute_file_hash(newer_extension)\n\n proxy_param = '--proxy'\n proxy_endpoint = \"https://user:pass@proxy.microsoft.com\"\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', 
return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \\\n mock.patch('azure.cli.core.extension.operations.shutil'), \\\n mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \\\n mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \\\n mock.patch('azure.cli.core.extension.operations.check_output') as check_output:\n\n update_extension(self.cmd, MY_EXT_NAME, pip_proxy=proxy_endpoint)\n args = check_output.call_args\n pip_cmd = args[0][0]\n proxy_index = pip_cmd.index(proxy_param)\n assert pip_cmd[proxy_index + 1] == proxy_endpoint\n\n def test_update_extension_verify_no_pip_proxy(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')\n computed_extension_sha256 = _compute_file_hash(newer_extension)\n\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \\\n mock.patch('azure.cli.core.extension.operations.shutil'), \\\n mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \\\n mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \\\n mock.patch('azure.cli.core.extension.operations.check_output') as check_output:\n\n update_extension(self.cmd, MY_EXT_NAME)\n args = check_output.call_args\n pip_cmd = args[0][0]\n if '--proxy' in pip_cmd:\n raise AssertionError(\"proxy parameter in check_output args although no proxy specified\")\n\n def test_update_extension_not_found(self):\n with self.assertRaises(CLIError) as err:\n update_extension(self.cmd, MY_EXT_NAME)\n self.assertEqual(str(err.exception), 'The extension {} is not installed.'.format(MY_EXT_NAME))\n\n def test_update_extension_no_updates(self):\n 
logger_msgs = []\n\n def mock_log_warning(_, msg):\n logger_msgs.append(msg)\n\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', side_effect=NoExtensionCandidatesError()), \\\n mock.patch('logging.Logger.warning', mock_log_warning):\n update_extension(self.cmd, MY_EXT_NAME)\n self.assertTrue(\"No updates available for '{}'.\".format(MY_EXT_NAME) in logger_msgs[0])\n\n def test_update_extension_exception_in_update_and_rolled_back(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')\n bad_sha256 = 'thishashisclearlywrong'\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, bad_sha256)):\n with self.assertRaises(CLIError) as err:\n update_extension(self.cmd, MY_EXT_NAME)\n self.assertTrue('Failed to update. 
Rolled {} back to {}.'.format(ext['name'], ext[OUT_KEY_VERSION]) in str(err.exception))\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n\n def test_list_available_extensions_default(self):\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:\n list_available_extensions(cli_ctx=self.cmd.cli_ctx)\n c.assert_called_once_with(None, self.cmd.cli_ctx)\n\n def test_list_available_extensions_operations_index_url(self):\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:\n index_url = 'http://contoso.com'\n list_available_extensions(index_url=index_url, cli_ctx=self.cmd.cli_ctx)\n c.assert_called_once_with(index_url, self.cmd.cli_ctx)\n\n def test_list_available_extensions_show_details(self):\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c:\n list_available_extensions(show_details=True, cli_ctx=self.cmd.cli_ctx)\n c.assert_called_once_with(None, self.cmd.cli_ctx)\n\n def test_list_available_extensions_no_show_details(self):\n sample_index_extensions = {\n 'test_sample_extension1': [{\n 'metadata': {\n 'name': 'test_sample_extension1',\n 'summary': 'my summary',\n 'version': '0.1.0'\n }}],\n 'test_sample_extension2': [{\n 'metadata': {\n 'name': 'test_sample_extension2',\n 'summary': 'my summary',\n 'version': '0.1.0',\n 'azext.isPreview': True,\n 'azext.isExperimental': True\n }}]\n }\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):\n res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)\n self.assertIsInstance(res, list)\n self.assertEqual(len(res), len(sample_index_extensions))\n self.assertEqual(res[0]['name'], 'test_sample_extension1')\n self.assertEqual(res[0]['summary'], 'my summary')\n self.assertEqual(res[0]['version'], '0.1.0')\n self.assertEqual(res[0]['preview'], False)\n 
self.assertEqual(res[0]['experimental'], False)\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):\n res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)\n self.assertIsInstance(res, list)\n self.assertEqual(len(res), len(sample_index_extensions))\n self.assertEqual(res[1]['name'], 'test_sample_extension2')\n self.assertEqual(res[1]['summary'], 'my summary')\n self.assertEqual(res[1]['version'], '0.1.0')\n self.assertEqual(res[1]['preview'], True)\n self.assertEqual(res[1]['experimental'], True)\n\n def test_list_available_extensions_incompatible_cli_version(self):\n sample_index_extensions = {\n 'test_sample_extension1': [{\n 'metadata': {\n \"azext.maxCliCoreVersion\": \"0.0.0\",\n 'name': 'test_sample_extension1',\n 'summary': 'my summary',\n 'version': '0.1.0'\n }}]\n }\n with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions):\n res = list_available_extensions(cli_ctx=self.cmd.cli_ctx)\n self.assertIsInstance(res, list)\n self.assertEqual(len(res), 0)\n\n def test_add_list_show_remove_extension_extra_index_url(self):\n \"\"\"\n Tests extension addition while specifying --extra-index-url parameter.\n :return:\n \"\"\"\n extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple']\n\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls)\n actual = list_extensions()\n self.assertEqual(len(actual), 1)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME)\n remove_extension(MY_EXT_NAME)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 0)\n\n def test_update_extension_extra_index_url(self):\n \"\"\"\n Tests extension update while specifying --extra-index-url parameter.\n :return:\n \"\"\"\n extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple']\n\n add_extension(cmd=self.cmd, 
source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev')\n newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl')\n computed_extension_sha256 = _compute_file_hash(newer_extension)\n with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, computed_extension_sha256)):\n update_extension(self.cmd, MY_EXT_NAME, pip_extra_index_urls=extra_index_urls)\n ext = show_extension(MY_EXT_NAME)\n self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev')\n\n def test_add_extension_to_path(self):\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n num_exts = len(list_extensions())\n self.assertEqual(num_exts, 1)\n ext = get_extension('myfirstcliextension')\n old_path = sys.path[:]\n try:\n add_extension_to_path(ext.name)\n self.assertSequenceEqual(old_path, sys.path[:-1])\n self.assertEqual(ext.path, sys.path[-1])\n finally:\n sys.path[:] = old_path\n\n def test_add_extension_azure_to_path(self):\n import azure\n import azure.mgmt\n old_path_0 = list(sys.path)\n old_path_1 = list(azure.__path__)\n old_path_2 = list(azure.mgmt.__path__)\n\n add_extension(cmd=self.cmd, source=MY_EXT_SOURCE)\n ext = get_extension('myfirstcliextension')\n azure_dir = os.path.join(ext.path, \"azure\")\n azure_mgmt_dir = os.path.join(azure_dir, \"mgmt\")\n os.mkdir(azure_dir)\n os.mkdir(azure_mgmt_dir)\n\n try:\n add_extension_to_path(ext.name)\n new_path_1 = list(azure.__path__)\n new_path_2 = list(azure.mgmt.__path__)\n finally:\n sys.path.remove(ext.path)\n remove_extension(ext.name)\n if isinstance(azure.__path__, list):\n azure.__path__[:] = old_path_1\n else:\n list(azure.__path__)\n if isinstance(azure.mgmt.__path__, list):\n azure.mgmt.__path__[:] = old_path_2\n else:\n list(azure.mgmt.__path__)\n self.assertSequenceEqual(old_path_1, new_path_1[:-1])\n self.assertSequenceEqual(old_path_2, new_path_2[:-1])\n 
self.assertEqual(azure_dir, new_path_1[-1])\n self.assertEqual(azure_mgmt_dir, new_path_2[-1])\n self.assertSequenceEqual(old_path_0, list(sys.path))\n self.assertSequenceEqual(old_path_1, list(azure.__path__))\n self.assertSequenceEqual(old_path_2, list(azure.mgmt.__path__))\n\n def _setup_cmd(self):\n cmd = mock.MagicMock()\n cmd.cli_ctx = DummyCli()\n return cmd\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475138,"cells":{"repo_name":{"kind":"string","value":"openstack/taskflow"},"path":{"kind":"string","value":"taskflow/examples/resume_many_flows/run_flow.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"1433"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport os\nimport sys\n\nlogging.basicConfig(level=logging.ERROR)\n\nself_dir = os.path.abspath(os.path.dirname(__file__))\ntop_dir = os.path.abspath(\n os.path.join(self_dir, os.pardir, os.pardir, os.pardir))\nexample_dir = os.path.abspath(os.path.join(self_dir, os.pardir))\n\nsys.path.insert(0, top_dir)\nsys.path.insert(0, self_dir)\nsys.path.insert(0, example_dir)\n\nimport taskflow.engines\n\nimport example_utils # noqa\nimport my_flows # noqa\n\n\nwith example_utils.get_backend() as backend:\n engine = taskflow.engines.load_from_factory(my_flows.flow_factory,\n backend=backend)\n print('Running flow %s %s' % (engine.storage.flow_name,\n engine.storage.flow_uuid))\n engine.run()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475139,"cells":{"repo_name":{"kind":"string","value":"jehine-MSFT/azure-storage-python"},"path":{"kind":"string","value":"tests/blob_performance.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"5539"},"content":{"kind":"string","value":"#-------------------------------------------------------------------------\n# Copyright (c) Microsoft. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#--------------------------------------------------------------------------\nimport os\nimport datetime\nimport sys\n\nfrom azure.storage.blob import (\n BlockBlobService,\n PageBlobService,\n AppendBlobService,\n)\nimport tests.settings_real as settings\n\n# Warning:\n# This script will take a while to run with everything enabled.\n# Edit the lists below to enable only the blob sizes and connection\n# counts that you are interested in.\n\n# NAME, SIZE (MB), +ADD SIZE (B)\nLOCAL_BLOCK_BLOB_FILES = [\n ('BLOC-0080M+000B', 80, 0),\n ('BLOC-0080M+013B', 80, 13),\n ('BLOC-0500M+000B', 500, 0),\n ('BLOC-2500M+000B', 2500, 0),\n]\nLOCAL_PAGE_BLOB_FILES = [\n ('PAGE-0072M+000B', 72, 0),\n ('PAGE-0072M+512B', 72, 512),\n ('PAGE-0500M+000B', 500, 0),\n ('PAGE-2500M+000B', 2500, 0),\n]\n\nLOCAL_APPEND_BLOB_FILES = [\n ('APPD-0072M+000B', 80, 0),\n ('APPD-0072M+512B', 80, 13),\n ('APPD-0500M+000B', 500, 0),\n ('APPD-2500M+000B', 2500, 0),\n]\n\nCONNECTION_COUNTS = [1, 2, 5, 10, 50]\n\nCONTAINER_NAME = 'performance'\n\n\ndef input_file(name):\n return 'input-' + name\n\n\ndef output_file(name):\n return 'output-' + name\n\ndef create_random_content_file(name, size_in_megs, additional_byte_count=0):\n file_name = input_file(name)\n if not os.path.exists(file_name):\n print('generating {0}'.format(name))\n with open(file_name, 'wb') as stream:\n for i in range(size_in_megs):\n stream.write(os.urandom(1048576))\n 
if additional_byte_count > 0:\n stream.write(os.urandom(additional_byte_count))\n\ndef upload_blob(service, name, connections):\n blob_name = name\n file_name = input_file(name)\n sys.stdout.write('\\tUp:')\n start_time = datetime.datetime.now()\n if isinstance(service, BlockBlobService):\n service.create_blob_from_path(\n CONTAINER_NAME, blob_name, file_name, max_connections=connections)\n elif isinstance(service, PageBlobService):\n service.create_blob_from_path(\n CONTAINER_NAME, blob_name, file_name, max_connections=connections)\n elif isinstance(service, AppendBlobService):\n service.append_blob_from_path(\n CONTAINER_NAME, blob_name, file_name, max_connections=connections)\n else:\n service.create_blob_from_path(\n CONTAINER_NAME, blob_name, file_name, max_connections=connections)\n elapsed_time = datetime.datetime.now() - start_time\n sys.stdout.write('{0}s'.format(elapsed_time.total_seconds()))\n\ndef download_blob(service, name, connections):\n blob_name = name\n target_file_name = output_file(name)\n if os.path.exists(target_file_name):\n os.remove(target_file_name)\n sys.stdout.write('\\tDn:')\n start_time = datetime.datetime.now()\n service.get_blob_to_path(\n CONTAINER_NAME, blob_name, target_file_name, max_connections=connections)\n elapsed_time = datetime.datetime.now() - start_time\n sys.stdout.write('{0}s'.format(elapsed_time.total_seconds()))\n\ndef file_contents_equal(first_file_path, second_file_path):\n first_size = os.path.getsize(first_file_path);\n second_size = os.path.getsize(second_file_path)\n if first_size != second_size:\n return False\n with open(first_file_path, 'rb') as first_stream:\n with open(second_file_path, 'rb') as second_stream:\n while True:\n first_data = first_stream.read(1048576)\n second_data = second_stream.read(1048576)\n if first_data != second_data:\n return False\n if not first_data:\n return True\n\ndef compare_files(name):\n first_file_path = input_file(name)\n second_file_path = output_file(name)\n 
sys.stdout.write('\\tCmp:')\n if file_contents_equal(first_file_path, second_file_path):\n sys.stdout.write('ok')\n else:\n sys.stdout.write('ERR!')\n\ndef process(service, blobs, counts):\n for name, size_in_megs, additional in blobs:\n create_random_content_file(name, size_in_megs, additional)\n\n for name, _, _ in blobs:\n for max_conn in counts:\n sys.stdout.write('{0}\\tParallel:{1}'.format(name, max_conn))\n upload_blob(service, name, max_conn)\n download_blob(service, name, max_conn)\n compare_files(name)\n print('')\n print('')\n\ndef main():\n bbs = BlockBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)\n pbs = PageBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)\n abs = AppendBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY)\n service.create_container(CONTAINER_NAME)\n\n process(bbs, LOCAL_BLOCK_BLOB_FILES, CONNECTION_COUNTS)\n process(pbs, LOCAL_PAGE_BLOB_FILES, CONNECTION_COUNTS)\n process(abs, LOCAL_APPEND_BLOB_FILES, CONNECTION_COUNTS)\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475140,"cells":{"repo_name":{"kind":"string","value":"MeirKriheli/Open-Knesset"},"path":{"kind":"string","value":"mks/urls.py"},"copies":{"kind":"string","value":"14"},"size":{"kind":"string","value":"2520"},"content":{"kind":"string","value":"from django.conf import settings\nfrom django.conf.urls import url, patterns\nfrom . 
import views as mkv\nfrom feeds import MemberActivityFeed\n\nmksurlpatterns = patterns('mks.views',\n url(r'^parties-members/$', mkv.PartiesMembersRedirctView.as_view(), name='parties-members-index'),\n url(r'^parties-members/(?P\\d+)/$', mkv.PartiesMembersView.as_view(), name='parties-members-list'),\n url(r'^member/$', mkv.MemberRedirectView.as_view(), name='member-list'),\n url(r'^member/csv$', mkv.MemberCsvView.as_view()),\n url(r'^party/csv$', mkv.PartyCsvView.as_view()),\n url(r'^member/(?P\\d+)/$', 'mk_detail', name='member-detail'),\n url(r'^member/(?P\\d+)/embed/$', mkv.MemberEmbedView.as_view(), name='member-embed'),\n\n # \"more\" actions\n url(r'^member/(?P\\d+)/more_actions/$', mkv.MemeberMoreActionsView.as_view(), name='member-more-actions'),\n url(r'^member/(?P\\d+)/more_legislation/$', mkv.MemeberMoreLegislationView.as_view(), name='member-more-legislation'),\n url(r'^member/(?P\\d+)/more_committee/$', mkv.MemeberMoreCommitteeView.as_view(), name='member-more-committees'),\n url(r'^member/(?P\\d+)/more_plenum/$', mkv.MemeberMorePlenumView.as_view(), name='member-more-plenums'),\n url(r'^member/(?P\\d+)/more_mmm/$', mkv.MemeberMoreMMMView.as_view(), name='member-more-mmm'),\n\n url(r'^member/(?P\\d+)/rss/$', MemberActivityFeed(), name='member-activity-feed'),\n url(r'^member/(?P\\d+)/(?P[\\w\\-\\\"]+)/$', 'mk_detail', name='member-detail-with-slug'),\n # TODO:the next url is hardcoded in a js file\n url(r'^member/auto_complete/$', mkv.member_auto_complete, name='member-auto-complete'),\n url(r'^member/search/?$', mkv.member_by_name, name='member-by-name'),\n url(r'^member/by/(?P' + '|'.join(x[0] for x in mkv.MemberListView.pages) + ')/$', mkv.MemberListView.as_view(), name='member-stats'),\n # a JS view for adding mks tooltips on a page\n url(r'^member/tooltip.js', mkv.members_tooltips, name='member-tooltip'),\n\n url(r'^party/$', mkv.PartyRedirectView.as_view(), name='party-list'),\n url(r'^party/(?P\\d+)/$', mkv.PartyDetailView.as_view(), 
name='party-detail'),\n url(r'^party/(?P\\d+)/(?P[\\w\\-\\\"]+)/$', mkv.PartyDetailView.as_view(), name='party-detail-with-slug'),\n url(r'^party/by/(?P' + '|'.join(x[0] for x in mkv.PartyListView.pages) + ')/$', mkv.PartyListView.as_view(), name='party-stats'),\n url(r'^party/search/?$', mkv.party_by_name, name='party-by-name'),\n)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475141,"cells":{"repo_name":{"kind":"string","value":"Froggiewalker/geonode"},"path":{"kind":"string","value":"geonode/base/enumerations.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"13719"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#########################################################################\n#\n# Copyright (C) 2012 OpenPlans\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n#########################################################################\n\nfrom django.utils.translation import ugettext_lazy as _\n\nLINK_TYPES = ['original', 'data', 'image', 'metadata', 'html',\n 'OGC:WMS', 'OGC:WFS', 'OGC:WCS']\n\nHIERARCHY_LEVELS = (\n ('series', _('series')),\n ('software', _('computer program or routine')),\n ('featureType', _('feature type')),\n ('model', _('copy or imitation of an existing or hypothetical object')),\n ('collectionHardware', _('collection hardware')),\n ('collectionSession', _('collection session')),\n ('nonGeographicDataset', _('non-geographic data')),\n ('propertyType', _('property type')),\n ('fieldSession', _('field session')),\n ('dataset', _('dataset')),\n ('service', _('service interfaces')),\n ('attribute', _('attribute class')),\n ('attributeType', _('characteristic of a feature')),\n ('tile', _('tile or spatial subset of geographic data')),\n ('feature', _('feature')),\n ('dimensionGroup', _('dimension group')),\n)\n\nUPDATE_FREQUENCIES = (\n ('unknown', _('frequency of maintenance for the data is not known')),\n ('continual', _('data is repeatedly and frequently updated')),\n ('notPlanned', _('there are no plans to update the data')),\n ('daily', _('data is updated each day')),\n ('annually', _('data is updated every year')),\n ('asNeeded', _('data is updated as deemed necessary')),\n ('monthly', _('data is updated each month')),\n ('fortnightly', _('data is updated every two weeks')),\n ('irregular',\n _('data is updated in intervals that are uneven in duration')),\n ('weekly', _('data is updated on a weekly basis')),\n ('biannually', _('data is updated twice each year')),\n ('quarterly', _('data is updated every three months')),\n)\n\nCONTACT_FIELDS = [\n 'name',\n 'organization',\n 'position',\n 'voice',\n 'facsimile',\n 'delivery_point',\n 'city',\n 'administrative_area',\n 'postal_code',\n 'country',\n 'email',\n 'role'\n]\n\nDEFAULT_SUPPLEMENTAL_INFORMATION = _(\n _('No information 
provided')\n)\n\nCOUNTRIES = (\n ('AFG', 'Afghanistan'),\n ('ALA', 'Aland Islands'),\n ('ALB', 'Albania'),\n ('DZA', 'Algeria'),\n ('ASM', 'American Samoa'),\n ('AND', 'Andorra'),\n ('AGO', 'Angola'),\n ('AIA', 'Anguilla'),\n ('ATG', 'Antigua and Barbuda'),\n ('ARG', 'Argentina'),\n ('ARM', 'Armenia'),\n ('ABW', 'Aruba'),\n ('AUS', 'Australia'),\n ('AUT', 'Austria'),\n ('AZE', 'Azerbaijan'),\n ('BHS', 'Bahamas'),\n ('BHR', 'Bahrain'),\n ('BGD', 'Bangladesh'),\n ('BRB', 'Barbados'),\n ('BLR', 'Belarus'),\n ('BEL', 'Belgium'),\n ('BLZ', 'Belize'),\n ('BEN', 'Benin'),\n ('BMU', 'Bermuda'),\n ('BTN', 'Bhutan'),\n ('BOL', 'Bolivia'),\n ('BIH', 'Bosnia and Herzegovina'),\n ('BWA', 'Botswana'),\n ('BRA', 'Brazil'),\n ('VGB', 'British Virgin Islands'),\n ('BRN', 'Brunei Darussalam'),\n ('BGR', 'Bulgaria'),\n ('BFA', 'Burkina Faso'),\n ('BDI', 'Burundi'),\n ('KHM', 'Cambodia'),\n ('CMR', 'Cameroon'),\n ('CAN', 'Canada'),\n ('CPV', 'Cape Verde'),\n ('CYM', 'Cayman Islands'),\n ('CAF', 'Central African Republic'),\n ('TCD', 'Chad'),\n ('CIL', 'Channel Islands'),\n ('CHL', 'Chile'),\n ('CHN', 'China'),\n ('HKG', 'China - Hong Kong'),\n ('MAC', 'China - Macao'),\n ('COL', 'Colombia'),\n ('COM', 'Comoros'),\n ('COG', 'Congo'),\n ('COK', 'Cook Islands'),\n ('CRI', 'Costa Rica'),\n ('CIV', 'Cote d\\'Ivoire'),\n ('HRV', 'Croatia'),\n ('CUB', 'Cuba'),\n ('CYP', 'Cyprus'),\n ('CZE', 'Czech Republic'),\n ('PRK', 'Democratic People\\'s Republic of Korea'),\n ('COD', 'Democratic Republic of the Congo'),\n ('DNK', 'Denmark'),\n ('DJI', 'Djibouti'),\n ('DMA', 'Dominica'),\n ('DOM', 'Dominican Republic'),\n ('ECU', 'Ecuador'),\n ('EGY', 'Egypt'),\n ('SLV', 'El Salvador'),\n ('GNQ', 'Equatorial Guinea'),\n ('ERI', 'Eritrea'),\n ('EST', 'Estonia'),\n ('ETH', 'Ethiopia'),\n ('FRO', 'Faeroe Islands'),\n ('FLK', 'Falkland Islands (Malvinas)'),\n ('FJI', 'Fiji'),\n ('FIN', 'Finland'),\n ('FRA', 'France'),\n ('GUF', 'French Guiana'),\n ('PYF', 'French Polynesia'),\n ('GAB', 'Gabon'),\n ('GMB', 
'Gambia'),\n ('GEO', 'Georgia'),\n ('DEU', 'Germany'),\n ('GHA', 'Ghana'),\n ('GIB', 'Gibraltar'),\n ('GRC', 'Greece'),\n ('GRL', 'Greenland'),\n ('GRD', 'Grenada'),\n ('GLP', 'Guadeloupe'),\n ('GUM', 'Guam'),\n ('GTM', 'Guatemala'),\n ('GGY', 'Guernsey'),\n ('GIN', 'Guinea'),\n ('GNB', 'Guinea-Bissau'),\n ('GUY', 'Guyana'),\n ('HTI', 'Haiti'),\n ('VAT', 'Holy See (Vatican City)'),\n ('HND', 'Honduras'),\n ('HUN', 'Hungary'),\n ('ISL', 'Iceland'),\n ('IND', 'India'),\n ('IDN', 'Indonesia'),\n ('IRN', 'Iran'),\n ('IRQ', 'Iraq'),\n ('IRL', 'Ireland'),\n ('IMN', 'Isle of Man'),\n ('ISR', 'Israel'),\n ('ITA', 'Italy'),\n ('JAM', 'Jamaica'),\n ('JPN', 'Japan'),\n ('JEY', 'Jersey'),\n ('JOR', 'Jordan'),\n ('KAZ', 'Kazakhstan'),\n ('KEN', 'Kenya'),\n ('KIR', 'Kiribati'),\n ('KWT', 'Kuwait'),\n ('KGZ', 'Kyrgyzstan'),\n ('LAO', 'Lao People\\'s Democratic Republic'),\n ('LVA', 'Latvia'),\n ('LBN', 'Lebanon'),\n ('LSO', 'Lesotho'),\n ('LBR', 'Liberia'),\n ('LBY', 'Libyan Arab Jamahiriya'),\n ('LIE', 'Liechtenstein'),\n ('LTU', 'Lithuania'),\n ('LUX', 'Luxembourg'),\n ('MKD', 'Macedonia'),\n ('MDG', 'Madagascar'),\n ('MWI', 'Malawi'),\n ('MYS', 'Malaysia'),\n ('MDV', 'Maldives'),\n ('MLI', 'Mali'),\n ('MLT', 'Malta'),\n ('MHL', 'Marshall Islands'),\n ('MTQ', 'Martinique'),\n ('MRT', 'Mauritania'),\n ('MUS', 'Mauritius'),\n ('MYT', 'Mayotte'),\n ('MEX', 'Mexico'),\n ('FSM', 'Micronesia, Federated States of'),\n ('MCO', 'Monaco'),\n ('MNG', 'Mongolia'),\n ('MNE', 'Montenegro'),\n ('MSR', 'Montserrat'),\n ('MAR', 'Morocco'),\n ('MOZ', 'Mozambique'),\n ('MMR', 'Myanmar'),\n ('NAM', 'Namibia'),\n ('NRU', 'Nauru'),\n ('NPL', 'Nepal'),\n ('NLD', 'Netherlands'),\n ('ANT', 'Netherlands Antilles'),\n ('NCL', 'New Caledonia'),\n ('NZL', 'New Zealand'),\n ('NIC', 'Nicaragua'),\n ('NER', 'Niger'),\n ('NGA', 'Nigeria'),\n ('NIU', 'Niue'),\n ('NFK', 'Norfolk Island'),\n ('MNP', 'Northern Mariana Islands'),\n ('NOR', 'Norway'),\n ('PSE', 'Occupied Palestinian Territory'),\n ('OMN', 'Oman'),\n 
('PAK', 'Pakistan'),\n ('PLW', 'Palau'),\n ('PAN', 'Panama'),\n ('PNG', 'Papua New Guinea'),\n ('PRY', 'Paraguay'),\n ('PER', 'Peru'),\n ('PHL', 'Philippines'),\n ('PCN', 'Pitcairn'),\n ('POL', 'Poland'),\n ('PRT', 'Portugal'),\n ('PRI', 'Puerto Rico'),\n ('QAT', 'Qatar'),\n ('KOR', 'Republic of Korea'),\n ('MDA', 'Republic of Moldova'),\n ('REU', 'Reunion'),\n ('ROU', 'Romania'),\n ('RUS', 'Russian Federation'),\n ('RWA', 'Rwanda'),\n ('BLM', 'Saint-Barthelemy'),\n ('SHN', 'Saint Helena'),\n ('KNA', 'Saint Kitts and Nevis'),\n ('LCA', 'Saint Lucia'),\n ('MAF', 'Saint-Martin (French part)'),\n ('SPM', 'Saint Pierre and Miquelon'),\n ('VCT', 'Saint Vincent and the Grenadines'),\n ('WSM', 'Samoa'),\n ('SMR', 'San Marino'),\n ('STP', 'Sao Tome and Principe'),\n ('SAU', 'Saudi Arabia'),\n ('SEN', 'Senegal'),\n ('SRB', 'Serbia'),\n ('SYC', 'Seychelles'),\n ('SLE', 'Sierra Leone'),\n ('SGP', 'Singapore'),\n ('SVK', 'Slovakia'),\n ('SVN', 'Slovenia'),\n ('SLB', 'Solomon Islands'),\n ('SOM', 'Somalia'),\n ('ZAF', 'South Africa'),\n ('SSD', 'South Sudan'),\n ('ESP', 'Spain'),\n ('LKA', 'Sri Lanka'),\n ('SDN', 'Sudan'),\n ('SUR', 'Suriname'),\n ('SJM', 'Svalbard and Jan Mayen Islands'),\n ('SWZ', 'Swaziland'),\n ('SWE', 'Sweden'),\n ('CHE', 'Switzerland'),\n ('SYR', 'Syrian Arab Republic'),\n ('TJK', 'Tajikistan'),\n ('THA', 'Thailand'),\n ('TLS', 'Timor-Leste'),\n ('TGO', 'Togo'),\n ('TKL', 'Tokelau'),\n ('TON', 'Tonga'),\n ('TTO', 'Trinidad and Tobago'),\n ('TUN', 'Tunisia'),\n ('TUR', 'Turkey'),\n ('TKM', 'Turkmenistan'),\n ('TCA', 'Turks and Caicos Islands'),\n ('TUV', 'Tuvalu'),\n ('UGA', 'Uganda'),\n ('UKR', 'Ukraine'),\n ('ARE', 'United Arab Emirates'),\n ('GBR', 'United Kingdom'),\n ('TZA', 'United Republic of Tanzania'),\n ('USA', 'United States of America'),\n ('VIR', 'United States Virgin Islands'),\n ('URY', 'Uruguay'),\n ('UZB', 'Uzbekistan'),\n ('VUT', 'Vanuatu'),\n ('VEN', 'Venezuela (Bolivarian Republic of)'),\n ('VNM', 'Viet Nam'),\n ('WLF', 'Wallis and 
Futuna Islands'),\n ('ESH', 'Western Sahara'),\n ('YEM', 'Yemen'),\n ('ZMB', 'Zambia'),\n ('ZWE', 'Zimbabwe'),\n)\n\n# Taken from http://www.w3.org/WAI/ER/IG/ert/iso639.htm\nALL_LANGUAGES = (\n ('abk', 'Abkhazian'),\n ('aar', 'Afar'),\n ('afr', 'Afrikaans'),\n ('amh', 'Amharic'),\n ('ara', 'Arabic'),\n ('asm', 'Assamese'),\n ('aym', 'Aymara'),\n ('aze', 'Azerbaijani'),\n ('bak', 'Bashkir'),\n ('ben', 'Bengali'),\n ('bih', 'Bihari'),\n ('bis', 'Bislama'),\n ('bre', 'Breton'),\n ('bul', 'Bulgarian'),\n ('bel', 'Byelorussian'),\n ('cat', 'Catalan'),\n ('cos', 'Corsican'),\n ('dan', 'Danish'),\n ('dzo', 'Dzongkha'),\n ('eng', 'English'),\n ('fra', 'French'),\n ('epo', 'Esperanto'),\n ('est', 'Estonian'),\n ('fao', 'Faroese'),\n ('fij', 'Fijian'),\n ('fin', 'Finnish'),\n ('fry', 'Frisian'),\n ('glg', 'Gallegan'),\n ('kal', 'Greenlandic'),\n ('grn', 'Guarani'),\n ('guj', 'Gujarati'),\n ('hau', 'Hausa'),\n ('heb', 'Hebrew'),\n ('hin', 'Hindi'),\n ('hun', 'Hungarian'),\n ('ind', 'Indonesian'),\n ('ina', 'Interlingua (International Auxiliary language Association)'),\n ('iku', 'Inuktitut'),\n ('ipk', 'Inupiak'),\n ('ita', 'Italian'),\n ('jpn', 'Japanese'),\n ('kan', 'Kannada'),\n ('kas', 'Kashmiri'),\n ('kaz', 'Kazakh'),\n ('khm', 'Khmer'),\n ('kin', 'Kinyarwanda'),\n ('kir', 'Kirghiz'),\n ('kor', 'Korean'),\n ('kur', 'Kurdish'),\n ('oci', 'Langue d \\'Oc (post 1500)'),\n ('lao', 'Lao'),\n ('lat', 'Latin'),\n ('lav', 'Latvian'),\n ('lin', 'Lingala'),\n ('lit', 'Lithuanian'),\n ('mlg', 'Malagasy'),\n ('mlt', 'Maltese'),\n ('mar', 'Marathi'),\n ('mol', 'Moldavian'),\n ('mon', 'Mongolian'),\n ('nau', 'Nauru'),\n ('nep', 'Nepali'),\n ('nor', 'Norwegian'),\n ('ori', 'Oriya'),\n ('orm', 'Oromo'),\n ('pan', 'Panjabi'),\n ('pol', 'Polish'),\n ('por', 'Portuguese'),\n ('pus', 'Pushto'),\n ('que', 'Quechua'),\n ('roh', 'Rhaeto-Romance'),\n ('run', 'Rundi'),\n ('rus', 'Russian'),\n ('smo', 'Samoan'),\n ('sag', 'Sango'),\n ('san', 'Sanskrit'),\n ('scr', 'Serbo-Croatian'),\n ('sna', 
'Shona'),\n ('snd', 'Sindhi'),\n ('sin', 'Singhalese'),\n ('ssw', 'Siswant'),\n ('slv', 'Slovenian'),\n ('som', 'Somali'),\n ('sot', 'Sotho'),\n ('spa', 'Spanish'),\n ('sun', 'Sudanese'),\n ('swa', 'Swahili'),\n ('tgl', 'Tagalog'),\n ('tgk', 'Tajik'),\n ('tam', 'Tamil'),\n ('tat', 'Tatar'),\n ('tel', 'Telugu'),\n ('tha', 'Thai'),\n ('tir', 'Tigrinya'),\n ('tog', 'Tonga (Nyasa)'),\n ('tso', 'Tsonga'),\n ('tsn', 'Tswana'),\n ('tur', 'Turkish'),\n ('tuk', 'Turkmen'),\n ('twi', 'Twi'),\n ('uig', 'Uighur'),\n ('ukr', 'Ukrainian'),\n ('urd', 'Urdu'),\n ('uzb', 'Uzbek'),\n ('vie', 'Vietnamese'),\n ('vol', 'Volapük'),\n ('wol', 'Wolof'),\n ('xho', 'Xhosa'),\n ('yid', 'Yiddish'),\n ('yor', 'Yoruba'),\n ('zha', 'Zhuang'),\n ('zul', 'Zulu'),\n)\n\nCHARSETS = (('', 'None/Unknown'),\n ('UTF-8', 'UTF-8/Unicode'),\n ('ISO-8859-1', 'Latin1/ISO-8859-1'),\n ('ISO-8859-2', 'Latin2/ISO-8859-2'),\n ('ISO-8859-3', 'Latin3/ISO-8859-3'),\n ('ISO-8859-4', 'Latin4/ISO-8859-4'),\n ('ISO-8859-5', 'Latin5/ISO-8859-5'),\n ('ISO-8859-6', 'Latin6/ISO-8859-6'),\n ('ISO-8859-7', 'Latin7/ISO-8859-7'),\n ('ISO-8859-8', 'Latin8/ISO-8859-8'),\n ('ISO-8859-9', 'Latin9/ISO-8859-9'),\n ('ISO-8859-10', 'Latin10/ISO-8859-10'),\n ('ISO-8859-13', 'Latin13/ISO-8859-13'),\n ('ISO-8859-14', 'Latin14/ISO-8859-14'),\n ('ISO8859-15', 'Latin15/ISO-8859-15'),\n ('Big5', 'BIG5'),\n ('EUC-JP', 'EUC-JP'),\n ('EUC-KR', 'EUC-KR'),\n ('GBK', 'GBK'),\n ('GB18030', 'GB18030'),\n ('Shift_JIS', 'Shift_JIS'),\n ('KOI8-R', 'KOI8-R'),\n ('KOI8-U', 'KOI8-U'),\n ('windows-874', 'Windows CP874'),\n ('windows-1250', 'Windows CP1250'),\n ('windows-1251', 'Windows CP1251'),\n ('windows-1252', 'Windows CP1252'),\n ('windows-1253', 'Windows CP1253'),\n ('windows-1254', 'Windows CP1254'),\n ('windows-1255', 'Windows CP1255'),\n ('windows-1256', 'Windows CP1256'),\n ('windows-1257', 'Windows CP1257'),\n ('windows-1258', 'Windows 
CP1258'))\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475142,"cells":{"repo_name":{"kind":"string","value":"moio/spacewalk"},"path":{"kind":"string","value":"client/solaris/smartpm/smart/transaction.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"61738"},"content":{"kind":"string","value":"#\n# Copyright (c) 2004 Conectiva, Inc.\n#\n# Written by Gustavo Niemeyer \n#\n# This file is part of Smart Package Manager.\n#\n# Smart Package Manager is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published\n# by the Free Software Foundation; either version 2 of the License, or (at\n# your option) any later version.\n#\n# Smart Package Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Smart Package Manager; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\nfrom smart.const import INSTALL, REMOVE, UPGRADE, FIX, REINSTALL, KEEP\nfrom smart.cache import PreRequires, Package\nfrom smart import *\n\nclass ChangeSet(dict):\n\n def __init__(self, cache, state=None):\n self._cache = cache\n if state:\n self.update(state)\n\n def getCache(self):\n return self._cache\n\n def getState(self):\n return self.copy()\n\n def setState(self, state):\n if state is not self:\n self.clear()\n self.update(state)\n\n def getPersistentState(self):\n state = {}\n for pkg in self:\n state[(pkg.__class__, pkg.name, pkg.version)] = self[pkg]\n return state\n\n def setPersistentState(self, state):\n self.clear()\n for pkg in self._cache.getPackages():\n op = state.get((pkg.__class__, pkg.name, pkg.version))\n if op is not None:\n self[pkg] = op\n\n def 
copy(self):\n return ChangeSet(self._cache, self)\n\n def set(self, pkg, op, force=False):\n if self.get(pkg) is op:\n return\n if op is INSTALL:\n if force or not pkg.installed:\n self[pkg] = INSTALL\n else:\n if pkg in self:\n del self[pkg]\n else:\n if force or pkg.installed:\n self[pkg] = REMOVE\n else:\n if pkg in self:\n del self[pkg]\n\n def installed(self, pkg):\n op = self.get(pkg)\n return op is INSTALL or pkg.installed and not op is REMOVE\n\n def difference(self, other):\n diff = ChangeSet(self._cache)\n for pkg in self:\n sop = self[pkg]\n if sop is not other.get(pkg):\n diff[pkg] = sop\n return diff\n\n def intersect(self, other):\n isct = ChangeSet(self._cache)\n for pkg in self:\n sop = self[pkg]\n if sop is other.get(pkg):\n isct[pkg] = sop\n return isct\n\n def __str__(self):\n l = []\n for pkg in self:\n l.append(\"%s %s\\n\" % (self[pkg] is INSTALL and \"I\" or \"R\", pkg))\n return \"\".join(l)\n\nclass Policy(object):\n\n def __init__(self, trans):\n self._trans = trans\n self._locked = {}\n self._sysconflocked = []\n self._priorities = {}\n\n def runStarting(self):\n self._priorities.clear()\n cache = self._trans.getCache()\n for pkg in pkgconf.filterByFlag(\"lock\", cache.getPackages()):\n if pkg not in self._locked:\n self._sysconflocked.append(pkg)\n self._locked[pkg] = True\n\n def runFinished(self):\n self._priorities.clear()\n for pkg in self._sysconflocked:\n del self._locked[pkg]\n del self._sysconflocked[:]\n\n def getLocked(self, pkg):\n return pkg in self._locked\n\n def setLocked(self, pkg, flag):\n if flag:\n self._locked[pkg] = True\n else:\n if pkg in self._locked:\n del self._locked[pkg]\n\n def getLockedSet(self):\n return self._locked\n\n def getWeight(self, changeset):\n return 0\n\n def getPriority(self, pkg):\n priority = self._priorities.get(pkg)\n if priority is None:\n self._priorities[pkg] = priority = pkg.getPriority()\n return priority\n\n def getPriorityWeights(self, targetPkg, pkgs):\n set = {}\n lower = None\n 
for pkg in pkgs:\n priority = self.getPriority(pkg)\n if lower is None or priority < lower:\n lower = priority\n set[pkg] = priority\n for pkg in set:\n set[pkg] = -(set[pkg] - lower)*10\n return set\n\nclass PolicyInstall(Policy):\n \"\"\"Give precedence for keeping functionality in the system.\"\"\"\n\n def runStarting(self):\n Policy.runStarting(self)\n self._upgrading = upgrading = {}\n self._upgraded = upgraded = {}\n self._downgraded = downgraded = {}\n for pkg in self._trans.getCache().getPackages():\n # Precompute upgrade relations.\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if prvpkg.installed:\n if (self.getPriority(pkg) >=\n self.getPriority(prvpkg)):\n upgrading[pkg] = True\n if prvpkg in upgraded:\n upgraded[prvpkg].append(pkg)\n else:\n upgraded[prvpkg] = [pkg]\n else:\n if prvpkg in downgraded:\n downgraded[prvpkg].append(pkg)\n else:\n downgraded[prvpkg] = [pkg]\n # Downgrades are upgrades if they have a higher priority.\n for prv in pkg.provides:\n for upg in prv.upgradedby:\n for upgpkg in upg.packages:\n if upgpkg.installed:\n if (self.getPriority(pkg) >\n self.getPriority(upgpkg)):\n upgrading[pkg] = True\n if upgpkg in upgraded:\n upgraded[upgpkg].append(pkg)\n else:\n upgraded[upgpkg] = [pkg]\n else:\n if upgpkg in downgraded:\n downgraded[upgpkg].append(pkg)\n else:\n downgraded[upgpkg] = [pkg]\n\n def runFinished(self):\n Policy.runFinished(self)\n del self._upgrading\n del self._upgraded\n del self._downgraded\n\n def getWeight(self, changeset):\n weight = 0\n upgrading = self._upgrading\n upgraded = self._upgraded\n downgraded = self._downgraded\n for pkg in changeset:\n if changeset[pkg] is REMOVE:\n # Upgrading a package that will be removed\n # is better than upgrading a package that will\n # stay in the system.\n for upgpkg in upgraded.get(pkg, ()):\n if changeset.get(upgpkg) is INSTALL:\n weight -= 1\n break\n else:\n for dwnpkg in downgraded.get(pkg, ()):\n if changeset.get(dwnpkg) is 
INSTALL:\n weight += 15\n break\n else:\n weight += 20\n else:\n if pkg in upgrading:\n weight += 2\n else:\n weight += 3\n return weight\n\nclass PolicyRemove(Policy):\n \"\"\"Give precedence to the choice with less changes.\"\"\"\n\n def getWeight(self, changeset):\n weight = 0\n for pkg in changeset:\n if changeset[pkg] is REMOVE:\n weight += 1\n else:\n weight += 5\n return weight\n\nclass PolicyUpgrade(Policy):\n \"\"\"Give precedence to the choice with more upgrades and smaller impact.\"\"\"\n\n def runStarting(self):\n Policy.runStarting(self)\n self._upgrading = upgrading = {}\n self._upgraded = upgraded = {}\n self._sortbonus = sortbonus = {}\n self._requiredbonus = requiredbonus = {}\n queue = self._trans.getQueue()\n for pkg in self._trans.getCache().getPackages():\n # Precompute upgrade relations.\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if (prvpkg.installed and\n self.getPriority(pkg) >= self.getPriority(prvpkg)):\n dct = upgrading.get(pkg)\n if dct:\n dct[prvpkg] = True\n else:\n upgrading[pkg] = {prvpkg: True}\n lst = upgraded.get(prvpkg)\n if lst:\n lst.append(pkg)\n else:\n upgraded[prvpkg] = [pkg]\n # Downgrades are upgrades if they have a higher priority.\n for prv in pkg.provides:\n for upg in prv.upgradedby:\n for upgpkg in upg.packages:\n if (upgpkg.installed and\n self.getPriority(pkg) > self.getPriority(upgpkg)):\n dct = upgrading.get(pkg)\n if dct:\n dct[upgpkg] = True\n else:\n upgrading[pkg] = {upgpkg: True}\n lst = upgraded.get(upgpkg)\n if lst:\n lst.append(pkg)\n else:\n upgraded[upgpkg] = [pkg]\n\n pkgs = self._trans._queue.keys()\n sortUpgrades(pkgs, self)\n for i, pkg in enumerate(pkgs):\n self._sortbonus[pkg] = -1./(i+100)\n\n def runFinished(self):\n Policy.runFinished(self)\n del self._upgrading\n del self._upgraded\n\n def getWeight(self, changeset):\n weight = 0\n upgrading = self._upgrading\n upgraded = self._upgraded\n sortbonus = self._sortbonus\n requiredbonus = 
self._requiredbonus\n\n installedcount = 0\n upgradedmap = {}\n for pkg in changeset:\n if changeset[pkg] is REMOVE:\n # Upgrading a package that will be removed\n # is better than upgrading a package that will\n # stay in the system.\n lst = upgraded.get(pkg, ())\n for lstpkg in lst:\n if changeset.get(lstpkg) is INSTALL:\n weight -= 1\n break\n else:\n weight += 3\n else:\n installedcount += 1\n upgpkgs = upgrading.get(pkg)\n if upgpkgs:\n weight += sortbonus.get(pkg, 0)\n upgradedmap.update(upgpkgs)\n upgradedcount = len(upgradedmap)\n weight += -30*upgradedcount+(installedcount-upgradedcount)\n return weight\n\nclass Failed(Error): pass\n\nPENDING_REMOVE = 1\nPENDING_INSTALL = 2\nPENDING_UPDOWN = 3\n\nclass Transaction(object):\n def __init__(self, cache, policy=None, changeset=None, queue=None):\n self._cache = cache\n self._policy = policy and policy(self) or Policy(self)\n self._changeset = changeset or ChangeSet(cache)\n self._queue = queue or {}\n\n def clear(self):\n self._changeset.clear()\n self._queue.clear()\n\n def getCache(self):\n return self._cache\n\n def getQueue(self):\n return self._queue\n\n def getPolicy(self):\n return self._policy\n\n def setPolicy(self, policy):\n self._policy = policy(self)\n\n def getWeight(self):\n return self._policy.getWeight(self._changeset)\n\n def getChangeSet(self):\n return self._changeset\n\n def setChangeSet(self, changeset):\n self._changeset = changeset\n\n def getState(self):\n return self._changeset.getState()\n\n def setState(self, state):\n self._changeset.setState(state)\n\n def __nonzero__(self):\n return bool(self._changeset)\n\n def __str__(self):\n return str(self._changeset)\n\n def _install(self, pkg, changeset, locked, pending, depth=0):\n #print \"[%03d] _install(%s)\" % (depth, pkg)\n #depth += 1\n\n locked[pkg] = True\n changeset.set(pkg, INSTALL)\n isinst = changeset.installed\n\n # Remove packages conflicted by this one.\n for cnf in pkg.conflicts:\n for prv in cnf.providedby:\n for prvpkg 
in prv.packages:\n if prvpkg is pkg:\n continue\n if not isinst(prvpkg):\n locked[prvpkg] = True\n continue\n if prvpkg in locked:\n raise Failed, _(\"Can't install %s: conflicted package \"\n \"%s is locked\") % (pkg, prvpkg)\n self._remove(prvpkg, changeset, locked, pending, depth)\n pending.append((PENDING_UPDOWN, prvpkg))\n\n # Remove packages conflicting with this one.\n for prv in pkg.provides:\n for cnf in prv.conflictedby:\n for cnfpkg in cnf.packages:\n if cnfpkg is pkg:\n continue\n if not isinst(cnfpkg):\n locked[cnfpkg] = True\n continue\n if cnfpkg in locked:\n raise Failed, _(\"Can't install %s: it's conflicted by \"\n \"the locked package %s\") \\\n % (pkg, cnfpkg)\n self._remove(cnfpkg, changeset, locked, pending, depth)\n pending.append((PENDING_UPDOWN, cnfpkg))\n\n # Remove packages with the same name that can't\n # coexist with this one.\n namepkgs = self._cache.getPackages(pkg.name)\n for namepkg in namepkgs:\n if namepkg is not pkg and not pkg.coexists(namepkg):\n if not isinst(namepkg):\n locked[namepkg] = True\n continue\n if namepkg in locked:\n raise Failed, _(\"Can't install %s: it can't coexist \"\n \"with %s\") % (pkg, namepkg)\n self._remove(namepkg, changeset, locked, pending, depth)\n\n # Install packages required by this one.\n for req in pkg.requires:\n\n # Check if someone is already providing it.\n prvpkgs = {}\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if isinst(prvpkg):\n found = True\n break\n if prvpkg not in locked:\n prvpkgs[prvpkg] = True\n else:\n continue\n break\n if found:\n # Someone is already providing it. Good.\n continue\n\n # No one is currently providing it. Do something.\n\n if not prvpkgs:\n # No packages provide it at all. Give up.\n raise Failed, _(\"Can't install %s: no package provides %s\") % \\\n (pkg, req)\n\n if len(prvpkgs) == 1:\n # Don't check locked here. 
prvpkgs was\n # already filtered above.\n self._install(prvpkgs.popitem()[0], changeset, locked,\n pending, depth)\n else:\n # More than one package provide it. This package\n # must be post-processed.\n pending.append((PENDING_INSTALL, pkg, req, prvpkgs.keys()))\n\n def _remove(self, pkg, changeset, locked, pending, depth=0):\n #print \"[%03d] _remove(%s)\" % (depth, pkg)\n #depth += 1\n\n if pkg.essential:\n raise Failed, _(\"Can't remove %s: it's an essential package\")\n\n locked[pkg] = True\n changeset.set(pkg, REMOVE)\n isinst = changeset.installed\n\n # Check packages requiring this one.\n for prv in pkg.provides:\n for req in prv.requiredby:\n # Check if someone installed is requiring it.\n for reqpkg in req.packages:\n if isinst(reqpkg):\n break\n else:\n # No one requires it, so it doesn't matter.\n continue\n\n # Check if someone installed is still providing it.\n prvpkgs = {}\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if prvpkg is pkg:\n continue\n if isinst(prvpkg):\n found = True\n break\n if prvpkg not in locked:\n prvpkgs[prvpkg] = True\n else:\n continue\n break\n if found:\n # Someone is still providing it. Good.\n continue\n\n # No one is providing it anymore. 
We'll have to do\n # something about it.\n\n if prvpkgs:\n # There are other options, besides removing.\n pending.append((PENDING_REMOVE, pkg, prv, req.packages,\n prvpkgs.keys()))\n else:\n # Remove every requiring package, or\n # upgrade/downgrade them to something which\n # does not require this dependency.\n for reqpkg in req.packages:\n if not isinst(reqpkg):\n continue\n if reqpkg in locked:\n raise Failed, _(\"Can't remove %s: %s is locked\") \\\n % (pkg, reqpkg)\n self._remove(reqpkg, changeset, locked, pending, depth)\n pending.append((PENDING_UPDOWN, reqpkg))\n\n def _updown(self, pkg, changeset, locked, depth=0):\n #print \"[%03d] _updown(%s)\" % (depth, pkg)\n #depth += 1\n\n isinst = changeset.installed\n getpriority = self._policy.getPriority\n\n pkgpriority = getpriority(pkg)\n\n # Check if any upgrading version of this package is installed.\n # If so, we won't try to install any other version.\n upgpkgs = {}\n for prv in pkg.provides:\n for upg in prv.upgradedby:\n for upgpkg in upg.packages:\n if isinst(upgpkg):\n return\n if getpriority(upgpkg) < pkgpriority:\n continue\n if upgpkg not in locked and upgpkg not in upgpkgs:\n upgpkgs[upgpkg] = True\n # Also check if any downgrading version with a higher\n # priority is installed.\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if getpriority(prvpkg) <= pkgpriority:\n continue\n if isinst(prvpkg):\n return\n if prvpkg not in locked and prvpkg not in upgpkgs:\n upgpkgs[prvpkg] = True\n\n # No, let's try to upgrade it.\n getweight = self._policy.getWeight\n alternatives = [(getweight(changeset), changeset)]\n\n # Check if upgrading is possible.\n for upgpkg in upgpkgs:\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n self._install(upgpkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed:\n pass\n else:\n alternatives.append((getweight(cs), cs))\n\n # Is any downgrading version of this package 
installed?\n try:\n dwnpkgs = {}\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if getpriority(prvpkg) > pkgpriority:\n continue\n if isinst(prvpkg):\n raise StopIteration\n if prvpkg not in locked:\n dwnpkgs[prvpkg] = True\n # Also check if any upgrading version with a lower\n # priority is installed.\n for prv in pkg.provides:\n for upg in prv.upgradedby:\n for upgpkg in upg.packages:\n if getpriority(upgpkg) >= pkgpriority:\n continue\n if isinst(upgpkg):\n raise StopIteration\n if upgpkg not in locked:\n dwnpkgs[upgpkg] = True\n except StopIteration:\n pass\n else:\n # Check if downgrading is possible.\n for dwnpkg in dwnpkgs:\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n self._install(dwnpkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed:\n pass\n else:\n alternatives.append((getweight(cs), cs))\n\n # If there's only one alternative, it's the one currenlty in use.\n if len(alternatives) > 1:\n alternatives.sort()\n changeset.setState(alternatives[0][1])\n\n def _pending(self, changeset, locked, pending, depth=0):\n #print \"[%03d] _pending()\" % depth\n #depth += 1\n\n isinst = changeset.installed\n getweight = self._policy.getWeight\n\n updown = []\n while pending:\n item = pending.pop(0)\n kind = item[0]\n if kind == PENDING_UPDOWN:\n updown.append(item[1])\n elif kind == PENDING_INSTALL:\n kind, pkg, req, prvpkgs = item\n\n # Check if any prvpkg was already selected for installation\n # due to some other change.\n found = False\n for i in range(len(prvpkgs)-1,-1,-1):\n prvpkg = prvpkgs[i]\n if isinst(prvpkg):\n found = True\n break\n if prvpkg in locked:\n del prvpkgs[i]\n if found:\n continue\n\n if not prvpkgs:\n # No packages provide it at all. Give up.\n raise Failed, _(\"Can't install %s: no package \"\n \"provides %s\") % (pkg, req)\n\n if len(prvpkgs) > 1:\n # More than one package provide it. 
We use _pending here,\n # since any option must consider the whole change for\n # weighting.\n alternatives = []\n failures = []\n sortUpgrades(prvpkgs)\n keeporder = 0.000001\n pw = self._policy.getPriorityWeights(pkg, prvpkgs)\n for prvpkg in prvpkgs:\n try:\n _pending = []\n cs = changeset.copy()\n lk = locked.copy()\n self._install(prvpkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n failures.append(unicode(e))\n else:\n alternatives.append((getweight(cs)+pw[prvpkg]+\n keeporder, cs, lk))\n keeporder += 0.000001\n if not alternatives:\n raise Failed, _(\"Can't install %s: all packages \"\n \"providing %s failed to install:\\n%s\")\\\n % (pkg, req, \"\\n\".join(failures))\n alternatives.sort()\n changeset.setState(alternatives[0][1])\n if len(alternatives) == 1:\n locked.update(alternatives[0][2])\n else:\n # This turned out to be the only way.\n self._install(prvpkgs[0], changeset, locked,\n pending, depth)\n\n elif kind == PENDING_REMOVE:\n kind, pkg, prv, reqpkgs, prvpkgs = item\n\n # Check if someone installed is still requiring it.\n reqpkgs = [x for x in reqpkgs if isinst(x)]\n if not reqpkgs:\n continue\n\n # Check if someone installed is providing it.\n found = False\n for prvpkg in prvpkgs:\n if isinst(prvpkg):\n found = True\n break\n if found:\n # Someone is still providing it. Good.\n continue\n\n prvpkgs = [x for x in prvpkgs if x not in locked]\n\n # No one is providing it anymore. 
We'll have to do\n # something about it.\n\n # Try to install other providing packages.\n if prvpkgs:\n\n alternatives = []\n failures = []\n\n pw = self._policy.getPriorityWeights(pkg, prvpkgs)\n for prvpkg in prvpkgs:\n try:\n _pending = []\n cs = changeset.copy()\n lk = locked.copy()\n self._install(prvpkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n failures.append(unicode(e))\n else:\n alternatives.append((getweight(cs)+pw[prvpkg],\n cs, lk))\n\n if not prvpkgs or not alternatives:\n\n # There's no alternatives. We must remove\n # every requiring package.\n\n for reqpkg in reqpkgs:\n if reqpkg in locked and isinst(reqpkg):\n raise Failed, _(\"Can't remove %s: requiring \"\n \"package %s is locked\") % \\\n (pkg, reqpkg)\n for reqpkg in reqpkgs:\n # We check again, since other actions may have\n # changed their state.\n if not isinst(reqpkg):\n continue\n if reqpkg in locked:\n raise Failed, _(\"Can't remove %s: requiring \"\n \"package %s is locked\") % \\\n (pkg, reqpkg)\n self._remove(reqpkg, changeset, locked,\n pending, depth)\n continue\n\n # Then, remove every requiring package, or\n # upgrade/downgrade them to something which\n # does not require this dependency.\n cs = changeset.copy()\n lk = locked.copy()\n try:\n for reqpkg in reqpkgs:\n if reqpkg in locked and isinst(reqpkg):\n raise Failed, _(\"%s is locked\") % reqpkg\n for reqpkg in reqpkgs:\n if not cs.installed(reqpkg):\n continue\n if reqpkg in lk:\n raise Failed, _(\"%s is locked\") % reqpkg\n _pending = []\n self._remove(reqpkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n failures.append(unicode(e))\n else:\n alternatives.append((getweight(cs), cs, lk))\n\n if not alternatives:\n raise Failed, _(\"Can't install %s: all packages providing \"\n \"%s failed to install:\\n%s\") \\\n % (pkg, prv, \"\\n\".join(failures))\n\n alternatives.sort()\n 
changeset.setState(alternatives[0][1])\n if len(alternatives) == 1:\n locked.update(alternatives[0][2])\n\n for pkg in updown:\n self._updown(pkg, changeset, locked, depth)\n\n del pending[:]\n\n def _upgrade(self, pkgs, changeset, locked, pending, depth=0):\n #print \"[%03d] _upgrade()\" % depth\n #depth += 1\n\n isinst = changeset.installed\n getweight = self._policy.getWeight\n\n sortUpgrades(pkgs, self._policy)\n pkgs.reverse()\n\n lockedstate = {}\n\n origchangeset = changeset.copy()\n\n weight = getweight(changeset)\n for pkg in pkgs:\n if pkg in locked and not isinst(pkg):\n continue\n\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n self._install(pkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n pass\n else:\n lockedstate[pkg] = lk\n csweight = getweight(cs)\n if csweight < weight:\n weight = csweight\n changeset.setState(cs)\n\n lockedstates = {}\n for pkg in pkgs:\n if changeset.get(pkg) is INSTALL:\n state = lockedstate.get(pkg)\n if state:\n lockedstates.update(state)\n\n for pkg in changeset.keys():\n\n op = changeset.get(pkg)\n if (op and op != origchangeset.get(pkg) and\n pkg not in locked and pkg not in lockedstates):\n\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n if op is REMOVE:\n self._install(pkg, cs, lk, _pending, depth)\n elif op is INSTALL:\n self._remove(pkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n pass\n else:\n csweight = getweight(cs)\n if csweight < weight:\n weight = csweight\n changeset.setState(cs)\n\n def _fix(self, pkgs, changeset, locked, pending, depth=0):\n #print \"[%03d] _fix()\" % depth\n #depth += 1\n\n getweight = self._policy.getWeight\n isinst = changeset.installed\n\n for pkg in pkgs:\n\n if not isinst(pkg):\n continue\n\n # Is it broken at all?\n try:\n for req in pkg.requires:\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if 
isinst(prvpkg):\n break\n else:\n continue\n break\n else:\n iface.debug(_(\"Unsatisfied dependency: \"\n \"%s requires %s\") % (pkg, req))\n raise StopIteration\n for cnf in pkg.conflicts:\n for prv in cnf.providedby:\n for prvpkg in prv.packages:\n if prvpkg is pkg:\n continue\n if isinst(prvpkg):\n iface.debug(_(\"Unsatisfied dependency: \"\n \"%s conflicts with %s\")\n % (pkg, prvpkg))\n raise StopIteration\n for prv in pkg.provides:\n for cnf in prv.conflictedby:\n for cnfpkg in cnf.packages:\n if cnfpkg is pkg:\n continue\n if isinst(cnfpkg):\n iface.debug(_(\"Unsatisfied dependency: \"\n \"%s conflicts with %s\")\n % (cnfpkg, pkg))\n raise StopIteration\n # Check packages with the same name that can't\n # coexist with this one.\n namepkgs = self._cache.getPackages(pkg.name)\n for namepkg in namepkgs:\n if (isinst(namepkg) and namepkg is not pkg\n and not pkg.coexists(namepkg)):\n iface.debug(_(\"Package %s can't coexist with %s\") %\n (namepkg, pkg))\n raise StopIteration\n except StopIteration:\n pass\n else:\n continue\n\n # We have a broken package. 
Fix it.\n\n alternatives = []\n failures = []\n\n # Try to fix by installing it.\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n self._install(pkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n except Failed, e:\n failures.append(unicode(e))\n else:\n # If they weight the same, it's better to keep the package.\n alternatives.append((getweight(cs)-0.000001, cs))\n\n # Try to fix by removing it.\n try:\n cs = changeset.copy()\n lk = locked.copy()\n _pending = []\n self._remove(pkg, cs, lk, _pending, depth)\n if _pending:\n self._pending(cs, lk, _pending, depth)\n self._updown(pkg, cs, lk, depth)\n except Failed, e:\n failures.append(unicode(e))\n else:\n alternatives.append((getweight(cs), cs))\n\n if not alternatives:\n raise Failed, _(\"Can't fix %s:\\n%s\") % \\\n (pkg, \"\\n\".join(failures))\n\n alternatives.sort()\n changeset.setState(alternatives[0][1])\n\n def enqueue(self, pkg, op):\n if op is UPGRADE:\n isinst = self._changeset.installed\n _upgpkgs = {}\n try:\n pkgpriority = pkg.getPriority()\n for prv in pkg.provides:\n for upg in prv.upgradedby:\n for upgpkg in upg.packages:\n if upgpkg.getPriority() < pkgpriority:\n continue\n if isinst(upgpkg):\n raise StopIteration\n _upgpkgs[upgpkg] = True\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if prvpkg.getPriority() <= pkgpriority:\n continue\n if isinst(prvpkg):\n raise StopIteration\n _upgpkgs[prvpkg] = True\n except StopIteration:\n pass\n else:\n for upgpkg in _upgpkgs:\n self._queue[upgpkg] = op\n else:\n self._queue[pkg] = op\n\n def run(self):\n\n self._policy.runStarting()\n\n try:\n changeset = self._changeset.copy()\n isinst = changeset.installed\n locked = self._policy.getLockedSet().copy()\n pending = []\n\n for pkg in self._queue:\n op = self._queue[pkg]\n if op is KEEP:\n if pkg in changeset:\n del changeset[pkg]\n elif op is INSTALL:\n if not isinst(pkg) and pkg in locked:\n raise Failed, 
_(\"Can't install %s: it's locked\") % pkg\n changeset.set(pkg, INSTALL)\n elif op is REMOVE:\n if isinst(pkg) and pkg in locked:\n raise Failed, _(\"Can't remove %s: it's locked\") % pkg\n changeset.set(pkg, REMOVE)\n elif op is REINSTALL:\n if pkg in locked:\n raise Failed, _(\"Can't reinstall %s: it's locked\")%pkg\n changeset.set(pkg, INSTALL, force=True)\n\n upgpkgs = []\n fixpkgs = []\n for pkg in self._queue:\n op = self._queue[pkg]\n if op is KEEP:\n if pkg.installed:\n op = INSTALL\n else:\n op = REMOVE\n if op is INSTALL or op is REINSTALL:\n self._install(pkg, changeset, locked, pending)\n elif op is REMOVE:\n self._remove(pkg, changeset, locked, pending)\n elif op is UPGRADE:\n upgpkgs.append(pkg)\n elif op is FIX:\n fixpkgs.append(pkg)\n\n if pending:\n self._pending(changeset, locked, pending)\n\n if upgpkgs:\n self._upgrade(upgpkgs, changeset, locked, pending)\n\n if fixpkgs:\n self._fix(fixpkgs, changeset, locked, pending)\n\n self._changeset.setState(changeset)\n\n finally:\n self._queue.clear()\n self._policy.runFinished()\n\n\nclass ChangeSetSplitter(object):\n # This class operates on *sane* changesets.\n\n DEBUG = 0\n\n def __init__(self, changeset, forcerequires=True):\n self._changeset = changeset\n self._forcerequires = forcerequires\n self._locked = {}\n\n def getForceRequires(self):\n return self._userequires\n\n def setForceRequires(self, flag):\n self._forcerequires = flag\n\n def getLocked(self, pkg):\n return pkg in self._locked\n\n def setLocked(self, pkg, flag):\n if flag:\n self._locked[pkg] = True\n else:\n if pkg in self._locked:\n del self._locked[pkg]\n\n def setLockedSet(self, set):\n self._locked.clear()\n self._locked.update(set)\n\n def resetLocked(self):\n self._locked.clear()\n\n def _remove(self, subset, pkg, locked):\n set = self._changeset\n\n # Include requiring packages being removed, or exclude\n # requiring packages being installed.\n for prv in pkg.provides:\n for req in prv.requiredby:\n\n reqpkgs = [reqpkg for 
reqpkg in req.packages if\n subset.get(reqpkg) is INSTALL or\n subset.get(reqpkg) is not REMOVE and\n reqpkg.installed]\n\n if not reqpkgs:\n continue\n\n # Check if some package that will stay\n # in the system or some package already\n # selected for installation provide the\n # needed dependency.\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if (subset.get(prvpkg) is INSTALL or\n (prvpkg.installed and not\n subset.get(prvpkg) is REMOVE)):\n found = True\n break\n else:\n continue\n break\n if found:\n continue\n\n # Try to include some providing package\n # that is selected for installation.\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if (set.get(prvpkg) is INSTALL and\n prvpkg not in locked):\n try:\n self.include(subset, prvpkg, locked)\n except Error:\n pass\n else:\n found = True\n break\n else:\n continue\n break\n if found:\n continue\n\n # Now, try to keep in the system some\n # providing package which is already installed.\n found = False\n wasbroken = True\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if set.get(prvpkg) is not REMOVE:\n continue\n wasbroken = False\n # Package is necessarily in subset\n # otherwise we wouldn't get here.\n if prvpkg not in locked:\n try:\n self.exclude(subset, prvpkg, locked)\n except Error:\n pass\n else:\n found = True\n break\n else:\n continue\n break\n if found:\n continue\n\n needed = (not wasbroken and\n (self._forcerequires or\n isinstance(req, PreRequires)))\n\n for reqpkg in reqpkgs:\n\n # Finally, try to exclude the requiring\n # package if it is being installed, or\n # include it if it's being removed.\n reqpkgop = set.get(reqpkg)\n if reqpkgop and reqpkg not in locked:\n try:\n if reqpkgop is INSTALL:\n self.exclude(subset, reqpkg, locked)\n else:\n self.include(subset, reqpkg, locked)\n except Error:\n if needed: raise\n else:\n continue\n\n # Should we care about this?\n if needed:\n raise Error, _(\"No providers for '%s', \"\n 
\"required by '%s'\") % (req, reqpkg)\n\n # Check upgrading/downgrading packages.\n relpkgs = [upgpkg for prv in pkg.provides\n for upg in prv.upgradedby\n for upgpkg in upg.packages]\n relpkgs.extend([prvpkg for upg in pkg.upgrades\n for prv in upg.providedby\n for prvpkg in prv.packages])\n if set[pkg] is INSTALL:\n # Package is being installed, but excluded from the\n # subset. Exclude every related package which is\n # being removed.\n for relpkg in relpkgs:\n if subset.get(relpkg) is REMOVE:\n if relpkg in locked:\n raise Error, _(\"Package '%s' is locked\") % relpkg\n self.exclude(subset, relpkg, locked)\n else:\n # Package is being removed, and included in the\n # subset. Include every related package which is\n # being installed.\n for relpkg in relpkgs:\n if set.get(relpkg) is INSTALL and relpkg not in subset:\n if relpkg in locked:\n raise Error, _(\"Package '%s' is locked\") % relpkg\n self.include(subset, relpkg, locked)\n\n def _install(self, subset, pkg, locked):\n set = self._changeset\n\n # Check all dependencies needed by this package.\n for req in pkg.requires:\n\n # Check if any already installed or to be installed\n # package will solve the problem.\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if (subset.get(prvpkg) is INSTALL or\n (prvpkg.installed and\n subset.get(prvpkg) is not REMOVE)):\n found = True\n break\n else:\n continue\n break\n if found:\n continue\n\n # Check if any package that could be installed\n # may solve the problem.\n found = False\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if (set.get(prvpkg) is INSTALL\n and prvpkg not in locked):\n try:\n self.include(subset, prvpkg, locked)\n except Error:\n pass\n else:\n found = True\n break\n else:\n continue\n break\n if found:\n continue\n\n # Nope. 
Let's try to keep in the system some\n # package providing the dependency.\n found = False\n wasbroken = True\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if set.get(prvpkg) is not REMOVE:\n continue\n wasbroken = False\n # Package is necessarily in subset\n # otherwise we wouldn't get here.\n if prvpkg not in locked:\n try:\n self.exclude(subset, prvpkg, locked)\n except Error:\n pass\n else:\n found = True\n break\n else:\n continue\n break\n if found or wasbroken:\n continue\n\n # There are no solutions for the problem.\n # Should we really care about it?\n if (self._forcerequires or\n isinstance(req, PreRequires)):\n raise Error, _(\"No providers for '%s', \"\n \"required by '%s'\") % (req, pkg)\n\n cnfpkgs = [prvpkg for cnf in pkg.conflicts\n for prv in cnf.providedby\n for prvpkg in prv.packages\n if prvpkg is not pkg]\n cnfpkgs.extend([cnfpkg for prv in pkg.provides\n for cnf in prv.conflictedby\n for cnfpkg in cnf.packages\n if cnfpkg is not pkg])\n\n for cnfpkg in cnfpkgs:\n if (subset.get(cnfpkg) is INSTALL or\n cnfpkg.installed and subset.get(cnfpkg) is not REMOVE):\n if cnfpkg not in set:\n raise Error, _(\"Can't remove %s, which conflicts with %s\")\\\n % (cnfpkg, pkg)\n if set[cnfpkg] is INSTALL:\n self.exclude(subset, cnfpkg, locked)\n else:\n self.include(subset, cnfpkg, locked)\n\n # Check upgrading/downgrading packages.\n relpkgs = [upgpkg for prv in pkg.provides\n for upg in prv.upgradedby\n for upgpkg in upg.packages]\n relpkgs.extend([prvpkg for upg in pkg.upgrades\n for prv in upg.providedby\n for prvpkg in prv.packages])\n if set[pkg] is INSTALL:\n # Package is being installed, and included in the\n # subset. 
Include every related package which is\n # being removed.\n for relpkg in relpkgs:\n if set.get(relpkg) is REMOVE and relpkg not in subset:\n if relpkg in locked:\n raise Error, _(\"Package '%s' is locked\") % relpkg\n self.include(subset, relpkg, locked)\n else:\n # Package is being removed, but excluded from the\n # subset. Exclude every related package which is\n # being installed.\n for relpkg in relpkgs:\n if subset.get(relpkg) is INSTALL:\n if relpkg in locked:\n raise Error, _(\"Package '%s' is locked\") % relpkg\n self.exclude(subset, relpkg, locked)\n\n def include(self, subset, pkg, locked=None):\n set = self._changeset\n\n if locked is None:\n locked = self._locked\n if self.DEBUG: print \"-\"*79\n else:\n locked = locked.copy()\n if self.DEBUG:\n strop = set.get(pkg) is INSTALL and \"INSTALL\" or \"REMOVE\"\n print \"Including %s of %s\" % (strop, pkg)\n\n if pkg not in set:\n raise Error, _(\"Package '%s' is not in changeset\") % pkg\n if pkg in locked:\n raise Error, _(\"Package '%s' is locked\") % pkg\n\n locked[pkg] = True\n\n op = subset[pkg] = set[pkg]\n try:\n if op is INSTALL:\n self._install(subset, pkg, locked)\n else:\n self._remove(subset, pkg, locked)\n except Error, e:\n if self.DEBUG:\n print \"FAILED: Including %s of %s: %s\" % (strop, pkg, e)\n del subset[pkg]\n raise\n\n def exclude(self, subset, pkg, locked=None):\n set = self._changeset\n\n if locked is None:\n locked = self._locked\n if self.DEBUG: print \"-\"*79\n else:\n locked = locked.copy()\n if self.DEBUG:\n strop = set.get(pkg) is INSTALL and \"INSTALL\" or \"REMOVE\"\n print \"Excluding %s of %s\" % (strop, pkg)\n\n if pkg not in set:\n raise Error, _(\"Package '%s' is not in changeset\") % pkg\n if pkg in locked:\n raise Error, _(\"Package '%s' is locked\") % pkg\n\n locked[pkg] = True\n\n if pkg in subset:\n del subset[pkg]\n\n op = set[pkg]\n try:\n if op is INSTALL:\n self._remove(subset, pkg, locked)\n elif op is REMOVE:\n self._install(subset, pkg, locked)\n except 
Error, e:\n if self.DEBUG:\n print \"FAILED: Excluding %s of %s: %s\" % (strop, pkg, e)\n subset[pkg] = op\n raise\n\n def includeAll(self, subset):\n # Include everything that doesn't change locked packages\n set = self._changeset.get()\n for pkg in set.keys():\n try:\n self.include(subset, pkg)\n except Error:\n pass\n\n def excludeAll(self, subset):\n # Exclude everything that doesn't change locked packages\n set = self._changeset.get()\n for pkg in set.keys():\n try:\n self.exclude(subset, pkg)\n except Error:\n pass\n\ndef sortUpgrades(pkgs, policy=None):\n upgpkgs = {}\n for pkg in pkgs:\n dct = {}\n rupg = recursiveUpgrades(pkg, dct)\n del dct[pkg]\n upgpkgs[pkg] = dct\n pkgs.sort()\n pkgs.reverse()\n newpkgs = []\n priority = {}\n if policy:\n for pkg in pkgs:\n priority[pkg] = policy.getPriority(pkg)\n else:\n for pkg in pkgs:\n priority[pkg] = pkg.getPriority()\n for pkg in pkgs:\n pkgupgs = upgpkgs[pkg]\n for i in range(len(newpkgs)):\n newpkg = newpkgs[i]\n if newpkg in pkgupgs or priority[pkg] > priority[newpkg]:\n newpkgs.insert(i, pkg)\n break\n else:\n newpkgs.append(pkg)\n pkgs[:] = newpkgs\n\ndef recursiveUpgrades(pkg, set):\n set[pkg] = True\n for upg in pkg.upgrades:\n for prv in upg.providedby:\n for prvpkg in prv.packages:\n if prvpkg not in set:\n recursiveUpgrades(prvpkg, set)\n\ndef sortInternalRequires(pkgs):\n rellst = []\n numrel = {}\n pkgmap = dict.fromkeys(pkgs, True)\n for pkg in pkgs:\n rellst.append((recursiveInternalRequires(pkgmap, pkg, numrel), pkg))\n rellst.sort()\n rellst.reverse()\n pkgs[:] = [x[1] for x in rellst]\n\ndef recursiveInternalRequires(pkgmap, pkg, numrel, done=None):\n if done is None:\n done = {}\n done[pkg] = True\n if pkg in numrel:\n return numrel[pkg]\n n = 0\n for prv in pkg.provides:\n for req in prv.requiredby:\n for relpkg in req.packages:\n if relpkg in pkgmap and relpkg not in done:\n n += 1\n if relpkg in numrel:\n n += numrel[relpkg]\n else:\n n += recursiveInternalRequires(pkgmap, relpkg,\n numrel, 
done)\n numrel[pkg] = n\n return n\n\ndef forwardRequires(pkg, map):\n for req in pkg.requires:\n if req not in map:\n map[req] = True\n for prv in req.providedby:\n if prv not in map:\n map[prv] = True\n for prvpkg in prv.packages:\n if prvpkg not in map:\n map[prvpkg] = True\n forwardRequires(prvpkg, map)\n\ndef backwardRequires(pkg, map):\n for prv in pkg.provides:\n if prv not in map:\n map[prv] = True\n for req in prv.requiredby:\n if req not in map:\n map[req] = True\n for reqpkg in req.packages:\n if reqpkg not in map:\n map[reqpkg] = True\n backwardRequires(reqpkg, map)\n\ndef forwardPkgRequires(pkg, map=None):\n if map is None:\n map = {}\n forwardRequires(pkg, map)\n for item in map.keys():\n if not isinstance(item, Package):\n del map[item]\n return map\n\ndef backwardPkgRequires(pkg, map=None):\n if map is None:\n map = {}\n backwardRequires(pkg, map)\n for item in map.keys():\n if not isinstance(item, Package):\n del map[item]\n return map\n\ndef getAlternates(pkg, cache):\n \"\"\"\n For a given package, return every package that *might* get\n removed if the given package was installed. 
The alternate\n packages are every package that conflicts with any of the\n required packages, or require any package conflicting with\n any of the required packages.\n \"\"\"\n conflicts = {}\n\n # Direct conflicts.\n for namepkg in cache.getPackages(pkg.name):\n if namepkg is not pkg and not pkg.coexists(namepkg):\n conflicts[(pkg, namepkg)] = True\n for cnf in pkg.conflicts:\n for prv in cnf.providedby:\n for prvpkg in prv.packages:\n if prvpkg is not pkg:\n conflicts[(pkg, prvpkg)] = True\n for prv in pkg.provides:\n for cnf in prv.conflictedby:\n for cnfpkg in cnf.packages:\n if cnfpkg is not pkg:\n conflicts[(pkg, cnfpkg)] = True\n\n # Conflicts of requires.\n queue = [pkg]\n done = {}\n while queue:\n qpkg = queue.pop()\n done[qpkg] = True\n for req in qpkg.requires:\n prvpkgs = {}\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if prvpkg is qpkg or prvpkg is pkg:\n break\n prvpkgs[prvpkg] = True\n else:\n continue\n break\n else:\n for prvpkg in prvpkgs:\n if prvpkg in done:\n continue\n done[prvpkg] = True\n queue.append(prvpkg)\n for namepkg in cache.getPackages(prvpkg.name):\n if (namepkg not in prvpkgs and\n namepkg is not pkg and\n not prvpkg.coexists(namepkg)):\n conflicts[(prvpkg, namepkg)] = True\n for cnf in prvpkg.conflicts:\n for prv in cnf.providedby:\n for _prvpkg in prv.packages:\n if (_prvpkg is not pkg and\n _prvpkg not in prvpkgs):\n conflicts[(prvpkg, _prvpkg)] = True\n for prv in prvpkg.provides:\n for cnf in prv.conflictedby:\n for cnfpkg in cnf.packages:\n if (cnfpkg is not pkg and\n cnfpkg not in prvpkgs):\n conflicts[(prvpkg, cnfpkg)] = True\n\n alternates = {}\n for reqpkg, cnfpkg in conflicts:\n print reqpkg, cnfpkg\n alternates[cnfpkg] = True\n for prv in cnfpkg.provides:\n for req in prv.requiredby:\n # Do not ascend if reqpkg also provides\n # what cnfpkg is offering.\n for _prv in req.providedby:\n if reqpkg in _prv.packages:\n break\n else:\n for _reqpkg in req.packages:\n alternates[_reqpkg] = True\n 
alternates.update(backwardPkgRequires(_reqpkg))\n\n return alternates\n\ndef checkPackages(cache, pkgs, report=False, all=False, uninstalled=False):\n pkgs.sort()\n\n problems = False\n coexistchecked = {}\n for pkg in pkgs:\n\n if not all:\n if uninstalled:\n for loader in pkg.loaders:\n if not loader.getInstalled():\n break\n else:\n continue\n elif not pkg.installed:\n continue\n\n for req in pkg.requires:\n for prv in req.providedby:\n for prvpkg in prv.packages:\n if all:\n break\n elif uninstalled:\n for loader in prvpkg.loaders:\n if not loader.getInstalled():\n break\n else:\n continue\n break\n elif prvpkg.installed:\n break\n else:\n continue\n break\n else:\n if report:\n iface.info(_(\"Unsatisfied dependency: %s requires %s\") %\n (pkg, req))\n problems = True\n\n if not pkg.installed:\n continue\n\n for cnf in pkg.conflicts:\n for prv in cnf.providedby:\n for prvpkg in prv.packages:\n if prvpkg is pkg:\n continue\n if prvpkg.installed:\n if report:\n iface.info(_(\"Unsatisfied dependency: \"\n \"%s conflicts with %s\") %\n (pkg, prvpkg))\n problems = True\n\n namepkgs = cache.getPackages(pkg.name)\n for namepkg in namepkgs:\n if (namepkg, pkg) in coexistchecked:\n continue\n coexistchecked[(pkg, namepkg)] = True\n if (namepkg.installed and namepkg is not pkg and\n not pkg.coexists(namepkg)):\n if report:\n iface.info(_(\"Package %s can't coexist with %s\") %\n (namepkg, pkg))\n problems = True\n\n return not problems\n\n# vim:ts=4:sw=4:et\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475143,"cells":{"repo_name":{"kind":"string","value":"yewang15215/django"},"path":{"kind":"string","value":"tests/m2m_through_regress/models.py"},"copies":{"kind":"string","value":"273"},"size":{"kind":"string","value":"2771"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n# Forward 
declared intermediate model\n@python_2_unicode_compatible\nclass Membership(models.Model):\n person = models.ForeignKey('Person', models.CASCADE)\n group = models.ForeignKey('Group', models.CASCADE)\n price = models.IntegerField(default=100)\n\n def __str__(self):\n return \"%s is a member of %s\" % (self.person.name, self.group.name)\n\n\n# using custom id column to test ticket #11107\n@python_2_unicode_compatible\nclass UserMembership(models.Model):\n id = models.AutoField(db_column='usermembership_id', primary_key=True)\n user = models.ForeignKey(User, models.CASCADE)\n group = models.ForeignKey('Group', models.CASCADE)\n price = models.IntegerField(default=100)\n\n def __str__(self):\n return \"%s is a user and member of %s\" % (self.user.username, self.group.name)\n\n\n@python_2_unicode_compatible\nclass Person(models.Model):\n name = models.CharField(max_length=128)\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Group(models.Model):\n name = models.CharField(max_length=128)\n # Membership object defined as a class\n members = models.ManyToManyField(Person, through=Membership)\n user_members = models.ManyToManyField(User, through='UserMembership')\n\n def __str__(self):\n return self.name\n\n\n# A set of models that use an non-abstract inherited model as the 'through' model.\nclass A(models.Model):\n a_text = models.CharField(max_length=20)\n\n\nclass ThroughBase(models.Model):\n a = models.ForeignKey(A, models.CASCADE)\n b = models.ForeignKey('B', models.CASCADE)\n\n\nclass Through(ThroughBase):\n extra = models.CharField(max_length=20)\n\n\nclass B(models.Model):\n b_text = models.CharField(max_length=20)\n a_list = models.ManyToManyField(A, through=Through)\n\n\n# Using to_field on the through model\n@python_2_unicode_compatible\nclass Car(models.Model):\n make = models.CharField(max_length=20, unique=True, null=True)\n drivers = models.ManyToManyField('Driver', through='CarDriver')\n\n def __str__(self):\n return \"%s\" 
% self.make\n\n\n@python_2_unicode_compatible\nclass Driver(models.Model):\n name = models.CharField(max_length=20, unique=True, null=True)\n\n def __str__(self):\n return \"%s\" % self.name\n\n class Meta:\n ordering = ('name',)\n\n\n@python_2_unicode_compatible\nclass CarDriver(models.Model):\n car = models.ForeignKey('Car', models.CASCADE, to_field='make')\n driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')\n\n def __str__(self):\n return \"pk=%s car=%s driver=%s\" % (str(self.pk), self.car, self.driver)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475144,"cells":{"repo_name":{"kind":"string","value":"danstoner/python_experiments"},"path":{"kind":"string","value":"pgu/pgu/gui/menus.py"},"copies":{"kind":"string","value":"13"},"size":{"kind":"string","value":"3333"},"content":{"kind":"string","value":"\"\"\"\n\"\"\"\nfrom .const import *\nfrom . import table\nfrom . import basic, button\n\nclass _Menu_Options(table.Table):\n def __init__(self,menu,**params):\n table.Table.__init__(self,**params)\n \n self.menu = menu\n \n def event(self,e):\n handled = False\n arect = self.get_abs_rect()\n \n if e.type == MOUSEMOTION:\n abspos = e.pos[0]+arect.x,e.pos[1]+arect.y\n for w in self.menu.container.widgets:\n if not w is self.menu:\n mrect = w.get_abs_rect()\n if mrect.collidepoint(abspos):\n self.menu._close(None)\n w._open(None)\n handled = True\n \n if not handled: table.Table.event(self,e)\n\nclass _Menu(button.Button):\n def __init__(self,parent,widget=None,**params): #TODO widget= could conflict with module widget\n params.setdefault('cls','menu')\n button.Button.__init__(self,widget,**params)\n \n self.parent = parent\n \n self._cls = self.cls\n self.options = _Menu_Options(self, cls=self.cls+\".options\")\n \n self.connect(CLICK,self._open,None)\n \n self.pos = 0\n \n def _open(self,value):\n self.parent.value = self\n self.pcls = 'down'\n \n self.repaint()\n 
self.container.open(self.options,self.rect.x,self.rect.bottom)\n self.options.connect(BLUR,self._close,None)\n self.options.focus()\n self.repaint()\n \n def _pass(self,value):\n pass\n \n def _close(self,value):\n self.pcls = ''\n self.parent.value = None\n self.repaint()\n self.options.close()\n \n def _valuefunc(self,value):\n self._close(None)\n if value['fnc'] != None:\n value['fnc'](value['value'])\n \n def event(self,e):\n button.Button.event(self,e)\n \n if self.parent.value == self:\n self.pcls = 'down'\n \n def add(self,w,fnc=None,value=None):\n w.style.align = -1\n b = button.Button(w,cls=self.cls+\".option\")\n b.connect(CLICK,self._valuefunc,{'fnc':fnc,'value':value})\n \n self.options.tr()\n self.options.add(b)\n \n return b\n\nclass Menus(table.Table):\n \"\"\"A drop down menu bar.\n\n Example:\n data = [\n ('File/Save', fnc_save, None),\n ('File/New', fnc_new, None),\n ('Edit/Copy', fnc_copy, None),\n ('Edit/Cut', fnc_cut, None),\n ('Help/About', fnc_help, help_about_content),\n ('Help/Reference', fnc_help, help_reference_content),\n ]\n w = Menus(data)\n\n \"\"\"\n \n def __init__(self,data,menu_cls='menu',**params):\n params.setdefault('cls','menus')\n table.Table.__init__(self,**params)\n \n self.value = None\n \n n,m,mt = 0,None,None\n for path,cmd,value in data:\n parts = path.split(\"/\")\n if parts[0] != mt:\n mt = parts[0]\n m = _Menu(self,basic.Label(mt,cls=menu_cls+\".label\"),cls=menu_cls)\n self.add(m,n,0)\n n += 1\n #print (\"add\", parts[1], cmd, value)\n m.add(basic.Label(parts[1],cls=m.cls+\".option.label\"),cmd,value)\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475145,"cells":{"repo_name":{"kind":"string","value":"jeffery9/mixprint_addons"},"path":{"kind":"string","value":"account_check_writing/__openerp__.py"},"copies":{"kind":"string","value":"58"},"size":{"kind":"string","value":"1721"},"content":{"kind":"string","value":"# -*- coding: utf-8 
-*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n{\n 'name': 'Check Writing',\n 'version': '1.1',\n 'author': 'OpenERP SA, NovaPoint Group',\n 'category': 'Generic Modules/Accounting',\n 'description': \"\"\"\nModule for the Check Writing and Check Printing.\n================================================\n \"\"\",\n 'website': 'http://www.openerp.com',\n 'depends' : ['account_voucher'],\n 'data': [\n 'wizard/account_check_batch_printing_view.xml',\n 'account_check_writing_report.xml',\n 'account_view.xml',\n 'account_voucher_view.xml',\n 'account_check_writing_data.xml',\n ],\n 'demo': ['account_demo.xml'],\n 'test': [],\n 'installable': True,\n 'active': False,\n}\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475146,"cells":{"repo_name":{"kind":"string","value":"jolyonb/edx-platform"},"path":{"kind":"string","value":"openedx/core/djangoapps/credentials/signals.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6466"},"content":{"kind":"string","value":"\"\"\"\nThis file contains signal handlers for credentials-related 
functionality.\n\"\"\"\nfrom logging import getLogger\n\nfrom course_modes.models import CourseMode\nfrom django.contrib.sites.models import Site\nfrom lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate\nfrom lms.djangoapps.grades.api import CourseGradeFactory\nfrom openedx.core.djangoapps.catalog.utils import get_programs\nfrom openedx.core.djangoapps.credentials.models import CredentialsApiConfig\nfrom openedx.core.djangoapps.site_configuration import helpers\n\nfrom .tasks.v1.tasks import send_grade_to_credentials\n\nlog = getLogger(__name__)\n\n\n# \"interesting\" here means \"credentials will want to know about it\"\nINTERESTING_MODES = CourseMode.CERTIFICATE_RELEVANT_MODES\nINTERESTING_STATUSES = [\n CertificateStatuses.notpassing,\n CertificateStatuses.downloadable,\n]\n\n\n# These handlers have Credentials business logic that has bled into the LMS. But we want to filter here in order to\n# not flood our task queue with a bunch of signals. So we put up with it.\n\ndef is_course_run_in_a_program(course_run_key):\n \"\"\" Returns true if the given course key is in any program at all. \"\"\"\n\n # We don't have an easy way to go from course_run_key to a specific site that owns it. So just search each site.\n sites = Site.objects.all()\n str_key = str(course_run_key)\n for site in sites:\n for program in get_programs(site):\n for course in program['courses']:\n for course_run in course['course_runs']:\n if str_key == course_run['key']:\n return True\n return False\n\n\ndef send_grade_if_interesting(user, course_run_key, mode, status, letter_grade, percent_grade, verbose=False):\n \"\"\" Checks if grade is interesting to Credentials and schedules a Celery task if so. 
\"\"\"\n\n if verbose:\n msg = u\"Starting send_grade_if_interesting with params: \"\\\n u\"user [{username}], \"\\\n u\"course_run_key [{key}], \"\\\n u\"mode [{mode}], \"\\\n u\"status [{status}], \"\\\n u\"letter_grade [{letter_grade}], \"\\\n u\"percent_grade [{percent_grade}], \"\\\n u\"verbose [{verbose}]\"\\\n .format(\n username=getattr(user, 'username', None),\n key=str(course_run_key),\n mode=mode,\n status=status,\n letter_grade=letter_grade,\n percent_grade=percent_grade,\n verbose=verbose\n )\n log.info(msg)\n # Avoid scheduling new tasks if certification is disabled. (Grades are a part of the records/cert story)\n if not CredentialsApiConfig.current().is_learner_issuance_enabled:\n if verbose:\n log.info(\"Skipping send grade: is_learner_issuance_enabled False\")\n return\n\n # Avoid scheduling new tasks if learner records are disabled for this site.\n if not helpers.get_value_for_org(course_run_key.org, 'ENABLE_LEARNER_RECORDS', True):\n if verbose:\n log.info(\n u\"Skipping send grade: ENABLE_LEARNER_RECORDS False for org [{org}]\".format(\n org=course_run_key.org\n )\n )\n return\n\n # Grab mode/status if we don't have them in hand\n if mode is None or status is None:\n try:\n cert = GeneratedCertificate.objects.get(user=user, course_id=course_run_key) # pylint: disable=no-member\n mode = cert.mode\n status = cert.status\n except GeneratedCertificate.DoesNotExist:\n # We only care about grades for which there is a certificate.\n if verbose:\n log.info(\n u\"Skipping send grade: no cert for user [{username}] & course_id [{course_id}]\".format(\n username=getattr(user, 'username', None),\n course_id=str(course_run_key)\n )\n )\n return\n\n # Don't worry about whether it's available as well as awarded. Just awarded is good enough to record a verified\n # attempt at a course. 
We want even the grades that didn't pass the class because Credentials wants to know about\n # those too.\n if mode not in INTERESTING_MODES or status not in INTERESTING_STATUSES:\n if verbose:\n log.info(\n u\"Skipping send grade: mode/status uninteresting for mode [{mode}] & status [{status}]\".format(\n mode=mode,\n status=status\n )\n )\n return\n\n # If the course isn't in any program, don't bother telling Credentials about it. When Credentials grows support\n # for course records as well as program records, we'll need to open this up.\n if not is_course_run_in_a_program(course_run_key):\n if verbose:\n log.info(\n u\"Skipping send grade: course run not in a program. [{course_id}]\".format(course_id=str(course_run_key))\n )\n return\n\n # Grab grades if we don't have them in hand\n if letter_grade is None or percent_grade is None:\n grade = CourseGradeFactory().read(user, course_key=course_run_key, create_if_needed=False)\n if grade is None:\n if verbose:\n log.info(\n u\"Skipping send grade: No grade found for user [{username}] & course_id [{course_id}]\".format(\n username=getattr(user, 'username', None),\n course_id=str(course_run_key)\n )\n )\n return\n letter_grade = grade.letter_grade\n percent_grade = grade.percent\n\n send_grade_to_credentials.delay(user.username, str(course_run_key), True, letter_grade, percent_grade)\n\n\ndef handle_grade_change(user, course_grade, course_key, **kwargs):\n \"\"\"\n Notifies the Credentials IDA about certain grades it needs for its records, when a grade changes.\n \"\"\"\n send_grade_if_interesting(\n user,\n course_key,\n None,\n None,\n course_grade.letter_grade,\n course_grade.percent,\n verbose=kwargs.get('verbose', False)\n )\n\n\ndef handle_cert_change(user, course_key, mode, status, **kwargs):\n \"\"\"\n Notifies the Credentials IDA about certain grades it needs for its records, when a cert changes.\n \"\"\"\n send_grade_if_interesting(user, course_key, mode, status, None, None, verbose=kwargs.get('verbose', 
False))\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475147,"cells":{"repo_name":{"kind":"string","value":"mcalhoun/ansible-modules-core"},"path":{"kind":"string","value":"cloud/openstack/quantum_router_interface.py"},"copies":{"kind":"string","value":"99"},"size":{"kind":"string","value":"8558"},"content":{"kind":"string","value":"#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013, Benno Joy \n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see .\n\ntry:\n try:\n from neutronclient.neutron import client\n except ImportError:\n from quantumclient.quantum import client\n from keystoneclient.v2_0 import client as ksclient\n HAVE_DEPS = True\nexcept ImportError:\n HAVE_DEPS = False\n\nDOCUMENTATION = '''\n---\nmodule: quantum_router_interface\nversion_added: \"1.2\"\nauthor: \"Benno Joy (@bennojoy)\"\nshort_description: Attach/Dettach a subnet's interface to a router\ndescription:\n - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.\noptions:\n login_username:\n description:\n - login username to authenticate to keystone\n required: true\n default: admin\n login_password:\n description:\n - Password of login user\n required: true\n default: 'yes'\n login_tenant_name:\n description:\n - The tenant name of the login user\n required: true\n default: 'yes'\n auth_url:\n description:\n - The keystone URL for authentication\n required: false\n default: 'http://127.0.0.1:35357/v2.0/'\n region_name:\n description:\n - Name of the region\n required: false\n default: None\n state:\n description:\n - Indicate desired state of the resource\n choices: ['present', 'absent']\n default: present\n router_name:\n description:\n - Name of the router to which the subnet's interface should be attached.\n required: true\n default: None\n subnet_name:\n description:\n - Name of the subnet to whose interface should be attached to the router.\n required: true\n default: None\n tenant_name:\n description:\n - Name of the tenant whose subnet has to be attached.\n required: false\n default: None\nrequirements:\n - \"python >= 2.6\"\n - \"python-neutronclient or python-quantumclient\"\n - \"python-keystoneclient\"\n'''\n\nEXAMPLES = '''\n# Attach tenant1's subnet to the external router\n- quantum_router_interface: state=present login_username=admin\n login_password=admin\n login_tenant_name=admin\n tenant_name=tenant1\n router_name=external_route\n subnet_name=t1subnet\n'''\n\n\n_os_keystone 
= None\n_os_tenant_id = None\n\ndef _get_ksclient(module, kwargs):\n try:\n kclient = ksclient.Client(username=kwargs.get('login_username'),\n password=kwargs.get('login_password'),\n tenant_name=kwargs.get('login_tenant_name'),\n auth_url=kwargs.get('auth_url'))\n except Exception, e:\n module.fail_json(msg = \"Error authenticating to the keystone: %s \" % e.message)\n global _os_keystone\n _os_keystone = kclient\n return kclient\n\n\ndef _get_endpoint(module, ksclient):\n try:\n endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')\n except Exception, e:\n module.fail_json(msg = \"Error getting network endpoint: %s\" % e.message)\n return endpoint\n\ndef _get_neutron_client(module, kwargs):\n _ksclient = _get_ksclient(module, kwargs)\n token = _ksclient.auth_token\n endpoint = _get_endpoint(module, _ksclient)\n kwargs = {\n 'token': token,\n 'endpoint_url': endpoint\n }\n try:\n neutron = client.Client('2.0', **kwargs)\n except Exception, e:\n module.fail_json(msg = \"Error in connecting to neutron: %s \" % e.message)\n return neutron\n\ndef _set_tenant_id(module):\n global _os_tenant_id\n if not module.params['tenant_name']:\n login_tenant_name = module.params['login_tenant_name']\n else:\n login_tenant_name = module.params['tenant_name']\n\n for tenant in _os_keystone.tenants.list():\n if tenant.name == login_tenant_name:\n _os_tenant_id = tenant.id\n break\n if not _os_tenant_id:\n module.fail_json(msg = \"The tenant id cannot be found, please check the parameters\")\n\n\ndef _get_router_id(module, neutron):\n kwargs = {\n 'name': module.params['router_name'],\n }\n try:\n routers = neutron.list_routers(**kwargs)\n except Exception, e:\n module.fail_json(msg = \"Error in getting the router list: %s \" % e.message)\n if not routers['routers']:\n return None\n return routers['routers'][0]['id']\n\n\ndef _get_subnet_id(module, neutron):\n subnet_id = None\n kwargs = {\n 'tenant_id': _os_tenant_id,\n 'name': 
module.params['subnet_name'],\n }\n try:\n subnets = neutron.list_subnets(**kwargs)\n except Exception, e:\n module.fail_json( msg = \" Error in getting the subnet list:%s \" % e.message)\n if not subnets['subnets']:\n return None\n return subnets['subnets'][0]['id']\n\ndef _get_port_id(neutron, module, router_id, subnet_id):\n kwargs = {\n 'tenant_id': _os_tenant_id,\n 'device_id': router_id,\n }\n try:\n ports = neutron.list_ports(**kwargs)\n except Exception, e:\n module.fail_json( msg = \"Error in listing ports: %s\" % e.message)\n if not ports['ports']:\n return None\n for port in ports['ports']:\n for subnet in port['fixed_ips']:\n if subnet['subnet_id'] == subnet_id:\n return port['id']\n return None\n\ndef _add_interface_router(neutron, module, router_id, subnet_id):\n kwargs = {\n 'subnet_id': subnet_id\n }\n try:\n neutron.add_interface_router(router_id, kwargs)\n except Exception, e:\n module.fail_json(msg = \"Error in adding interface to router: %s\" % e.message)\n return True\n\ndef _remove_interface_router(neutron, module, router_id, subnet_id):\n kwargs = {\n 'subnet_id': subnet_id\n }\n try:\n neutron.remove_interface_router(router_id, kwargs)\n except Exception, e:\n module.fail_json(msg=\"Error in removing interface from router: %s\" % e.message)\n return True\n\ndef main():\n argument_spec = openstack_argument_spec()\n argument_spec.update(dict(\n router_name = dict(required=True),\n subnet_name = dict(required=True),\n tenant_name = dict(default=None),\n state = dict(default='present', choices=['absent', 'present']),\n ))\n module = AnsibleModule(argument_spec=argument_spec)\n if not HAVE_DEPS:\n module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')\n\n neutron = _get_neutron_client(module, module.params)\n _set_tenant_id(module)\n\n router_id = _get_router_id(module, neutron)\n if not router_id:\n module.fail_json(msg=\"failed to get the router id, please check the router name\")\n\n 
subnet_id = _get_subnet_id(module, neutron)\n if not subnet_id:\n module.fail_json(msg=\"failed to get the subnet id, please check the subnet name\")\n\n if module.params['state'] == 'present':\n port_id = _get_port_id(neutron, module, router_id, subnet_id)\n if not port_id:\n _add_interface_router(neutron, module, router_id, subnet_id)\n module.exit_json(changed=True, result=\"created\", id=port_id)\n module.exit_json(changed=False, result=\"success\", id=port_id)\n\n if module.params['state'] == 'absent':\n port_id = _get_port_id(neutron, module, router_id, subnet_id)\n if not port_id:\n module.exit_json(changed = False, result = \"Success\")\n _remove_interface_router(neutron, module, router_id, subnet_id)\n module.exit_json(changed=True, result=\"Deleted\")\n\n# this is magic, see lib/ansible/module.params['common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475148,"cells":{"repo_name":{"kind":"string","value":"kangkot/arangodb"},"path":{"kind":"string","value":"3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/editor/ModuleBrowser.py"},"copies":{"kind":"string","value":"17"},"size":{"kind":"string","value":"7082"},"content":{"kind":"string","value":"# ModuleBrowser.py - A view that provides a module browser for an editor document.\nimport pywin.mfc.docview\nimport win32ui\nimport win32con\nimport commctrl\nimport win32api\nfrom pywin.tools import hierlist, browser\nimport pywin.framework.scriptutils\nimport afxres\n\nimport pyclbr\n\nclass HierListCLBRModule(hierlist.HierListItem):\n def __init__(self, modName, clbrdata):\n self.modName = modName\n self.clbrdata = clbrdata\n def GetText(self):\n return self.modName\n def GetSubList(self):\n ret = []\n for item in self.clbrdata.values():\n if item.__class__ != pyclbr.Class: # ie, it is a pyclbr Function instance (only introduced 
post 1.5.2)\n ret.append(HierListCLBRFunction( item ) )\n else:\n ret.append(HierListCLBRClass( item) )\n ret.sort()\n return ret\n def IsExpandable(self):\n return 1\n\nclass HierListCLBRItem(hierlist.HierListItem):\n def __init__(self, name, file, lineno, suffix = \"\"):\n self.name = str(name)\n self.file = file\n self.lineno = lineno\n self.suffix = suffix\n def __cmp__(self, other):\n return cmp(self.name, other.name)\n def GetText(self):\n return self.name + self.suffix\n def TakeDefaultAction(self):\n if self.file:\n pywin.framework.scriptutils.JumpToDocument(self.file, self.lineno, bScrollToTop = 1)\n else:\n win32ui.SetStatusText(\"Can not locate the source code for this object.\")\n def PerformItemSelected(self):\n if self.file is None:\n msg = \"%s - source can not be located.\" % (self.name, )\n else:\n msg = \"%s defined at line %d of %s\" % (self.name, self.lineno, self.file)\n win32ui.SetStatusText(msg)\n\nclass HierListCLBRClass(HierListCLBRItem):\n def __init__(self, clbrclass, suffix = \"\"):\n try:\n name = clbrclass.name\n file = clbrclass.file\n lineno = clbrclass.lineno\n self.super = clbrclass.super\n self.methods = clbrclass.methods\n except AttributeError:\n name = clbrclass\n file = lineno = None\n self.super = []; self.methods = {}\n HierListCLBRItem.__init__(self, name, file, lineno, suffix)\n def __cmp__(self,other):\n ret = cmp(self.name,other.name)\n if ret==0 and (self is not other) and self.file==other.file:\n self.methods = other.methods\n self.super = other.super\n self.lineno = other.lineno\n return ret\n def GetSubList(self):\n r1 = []\n for c in self.super:\n r1.append(HierListCLBRClass(c, \" (Parent class)\"))\n r1.sort()\n r2=[]\n for meth, lineno in self.methods.items():\n r2.append(HierListCLBRMethod(meth, self.file, lineno))\n r2.sort()\n return r1+r2\n def IsExpandable(self):\n return len(self.methods) + len(self.super)\n def GetBitmapColumn(self):\n return 21\n\nclass HierListCLBRFunction(HierListCLBRItem):\n def 
__init__(self, clbrfunc, suffix = \"\"):\n name = clbrfunc.name\n file = clbrfunc.file\n lineno = clbrfunc.lineno\n HierListCLBRItem.__init__(self, name, file, lineno, suffix)\n def GetBitmapColumn(self):\n return 22\n\nclass HierListCLBRMethod(HierListCLBRItem):\n def GetBitmapColumn(self):\n return 22\n\nclass HierListCLBRErrorItem(hierlist.HierListItem):\n def __init__(self, text):\n self.text = text\n def GetText(self):\n return self.text\n def GetSubList(self):\n return [HierListCLBRErrorItem(self.text)]\n def IsExpandable(self):\n return 0\n\nclass HierListCLBRErrorRoot(HierListCLBRErrorItem):\n def IsExpandable(self):\n return 1\n\nclass BrowserView(pywin.mfc.docview.TreeView):\n def OnInitialUpdate(self):\n self.list = None\n rc = self._obj_.OnInitialUpdate()\n self.HookMessage(self.OnSize, win32con.WM_SIZE)\n self.bDirty = 0\n self.destroying = 0\n return rc\n\n def DestroyBrowser(self):\n self.DestroyList()\n\n def OnActivateView(self, activate, av, dv):\n# print \"AV\", self.bDirty, activate\n if activate:\n self.CheckRefreshList()\n return self._obj_.OnActivateView(activate, av, dv)\n\n def _MakeRoot(self):\n path = self.GetDocument().GetPathName()\n if not path:\n return HierListCLBRErrorRoot(\"Error: Can not browse a file until it is saved\")\n else:\n mod, path = pywin.framework.scriptutils.GetPackageModuleName(path)\n if self.bDirty:\n what = \"Refreshing\"\n # Hack for pyclbr being too smart\n try:\n del pyclbr._modules[mod]\n except (KeyError, AttributeError):\n pass\n else:\n what = \"Building\"\n win32ui.SetStatusText(\"%s class list - please wait...\" % (what,), 1)\n win32ui.DoWaitCursor(1)\n try:\n reader = pyclbr.readmodule_ex # new version post 1.5.2\n except AttributeError:\n reader = pyclbr.readmodule\n try:\n data = reader(mod, [path])\n if data:\n return HierListCLBRModule(mod, data)\n else:\n return HierListCLBRErrorRoot(\"No Python classes in module.\")\n\n finally:\n win32ui.DoWaitCursor(0)\n 
win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE))\n\n def DestroyList(self):\n self.destroying = 1\n list = getattr(self, \"list\", None) # If the document was not successfully opened, we may not have a list.\n self.list = None\n if list is not None:\n list.HierTerm()\n self.destroying = 0\n\n def CheckMadeList(self):\n if self.list is not None or self.destroying: return\n self.rootitem = root = self._MakeRoot()\n self.list = list = hierlist.HierListWithItems( root, win32ui.IDB_BROWSER_HIER)\n list.HierInit(self.GetParentFrame(), self)\n list.SetStyle(commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS)\n\n def CheckRefreshList(self):\n if self.bDirty:\n if self.list is None:\n self.CheckMadeList()\n else:\n new_root = self._MakeRoot()\n if self.rootitem.__class__==new_root.__class__==HierListCLBRModule:\n self.rootitem.modName = new_root.modName\n self.rootitem.clbrdata = new_root.clbrdata\n self.list.Refresh()\n else:\n self.list.AcceptRoot(self._MakeRoot())\n self.bDirty = 0\n\n def OnSize(self, params):\n lparam = params[3]\n w = win32api.LOWORD(lparam)\n h = win32api.HIWORD(lparam)\n if w != 0:\n self.CheckMadeList()\n elif w == 0:\n self.DestroyList()\n return 1\n\n def _UpdateUIForState(self):\n self.bDirty = 1\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475149,"cells":{"repo_name":{"kind":"string","value":"utamaro/youtube-dl"},"path":{"kind":"string","value":"youtube_dl/extractor/tv2.py"},"copies":{"kind":"string","value":"113"},"size":{"kind":"string","value":"4640"},"content":{"kind":"string","value":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n int_or_none,\n float_or_none,\n parse_iso8601,\n remove_end,\n)\n\n\nclass TV2IE(InfoExtractor):\n _VALID_URL = 'http://(?:www\\.)?tv2\\.no/v/(?P\\d+)'\n _TEST = {\n 'url': 'http://www.tv2.no/v/916509/',\n 'md5': 
'9cb9e3410b18b515d71892f27856e9b1',\n 'info_dict': {\n 'id': '916509',\n 'ext': 'flv',\n 'title': 'Se Gryttens hyllest av Steven Gerrard',\n 'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.',\n 'timestamp': 1431715610,\n 'upload_date': '20150515',\n 'duration': 156.967,\n 'view_count': int,\n 'categories': list,\n }\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n formats = []\n format_urls = []\n for protocol in ('HDS', 'HLS'):\n data = self._download_json(\n 'http://sumo.tv2.no/api/web/asset/%s/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % (video_id, protocol),\n video_id, 'Downloading play JSON')['playback']\n for item in data['items']['item']:\n video_url = item.get('url')\n if not video_url or video_url in format_urls:\n continue\n format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat'))\n if not self._is_valid_url(video_url, video_id, format_id):\n continue\n format_urls.append(video_url)\n ext = determine_ext(video_url)\n if ext == 'f4m':\n formats.extend(self._extract_f4m_formats(\n video_url, video_id, f4m_id=format_id))\n elif ext == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n video_url, video_id, 'mp4', m3u8_id=format_id))\n elif ext == 'ism' or video_url.endswith('.ism/Manifest'):\n pass\n else:\n formats.append({\n 'url': video_url,\n 'format_id': format_id,\n 'tbr': int_or_none(item.get('bitrate')),\n 'filesize': int_or_none(item.get('fileSize')),\n })\n self._sort_formats(formats)\n\n asset = self._download_json(\n 'http://sumo.tv2.no/api/web/asset/%s.json' % video_id,\n video_id, 'Downloading metadata JSON')['asset']\n\n title = asset['title']\n description = asset.get('description')\n timestamp = parse_iso8601(asset.get('createTime'))\n duration = float_or_none(asset.get('accurateDuration') or asset.get('duration'))\n view_count = int_or_none(asset.get('views'))\n categories = asset.get('keywords', '').split(',')\n\n thumbnails = [{\n 'id': 
thumbnail.get('@type'),\n 'url': thumbnail.get('url'),\n } for _, thumbnail in asset.get('imageVersions', {}).items()]\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'title': title,\n 'description': description,\n 'thumbnails': thumbnails,\n 'timestamp': timestamp,\n 'duration': duration,\n 'view_count': view_count,\n 'categories': categories,\n 'formats': formats,\n }\n\n\nclass TV2ArticleIE(InfoExtractor):\n _VALID_URL = 'http://(?:www\\.)?tv2\\.no/(?:a|\\d{4}/\\d{2}/\\d{2}(/[^/]+)+)/(?P\\d+)'\n _TESTS = [{\n 'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542',\n 'info_dict': {\n 'id': '6930542',\n 'title': 'Russen hetses etter pingvintyveri – innrømmer å ha åpnet luken på buret',\n 'description': 'md5:339573779d3eea3542ffe12006190954',\n },\n 'playlist_count': 2,\n }, {\n 'url': 'http://www.tv2.no/a/6930542',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n playlist_id = self._match_id(url)\n\n webpage = self._download_webpage(url, playlist_id)\n\n entries = [\n self.url_result('http://www.tv2.no/v/%s' % video_id, 'TV2')\n for video_id in re.findall(r'data-assetid=\"(\\d+)\"', webpage)]\n\n title = remove_end(self._og_search_title(webpage), ' - TV2.no')\n description = remove_end(self._og_search_description(webpage), ' - TV2.no')\n\n return self.playlist_result(entries, playlist_id, title, description)\n"},"license":{"kind":"string","value":"unlicense"}}},{"rowIdx":475150,"cells":{"repo_name":{"kind":"string","value":"simsong/grr-insider"},"path":{"kind":"string","value":"lib/objectfilter.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"26902"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright 2012 Google Inc. All Rights Reserved.\n\n\"\"\"Classes to perform filtering of objects based on their data members.\n\nGiven a list of objects and a textual filter expression, these classes allow\nyou to determine which objects match the filter. 
The system has two main\npieces: A parser for the supported grammar and a filter implementation.\n\nGiven any complying user-supplied grammar, it is parsed with a custom lexer\nbased on GRR's lexer and then compiled into an actual implementation by using\nthe filter implementation. A filter implementation simply provides actual\nimplementations for the primitives required to perform filtering. The compiled\nresult is always a class supporting the Filter interface.\n\nIf we define a class called Car such as:\n\nclass Car(object):\n def __init__(self, code, color=\"white\", doors=3):\n self.code = code\n self.color = color\n self.doors = 3\n\nAnd we have two instances:\n\n ford_ka = Car(\"FORDKA1\", color=\"grey\")\n toyota_corolla = Car(\"COROLLA1\", color=\"white\", doors=5)\n fleet = [ford_ka, toyota_corolla]\n\nWe want to find cars that are grey and have 3 or more doors. We could filter\nour fleet like this:\n\n criteria = \"(color is grey) and (doors >= 3)\"\n parser = ContextFilterParser(criteria).Parse()\n compiled_filter = parser.Compile(LowercaseAttributeFilterImp)\n\n for car in fleet:\n if compiled_filter.Matches(car):\n print \"Car %s matches the supplied filter.\" % car.code\n\nThe filter expression contains two subexpressions joined by an AND operator:\n \"color is grey\" and \"doors >= 3\"\nThis means we want to search for objects matching these two subexpressions.\nLet's analyze the first one in depth \"color is grey\":\n\n \"color\": the left operand specifies a search path to look for the data. This\n tells our filtering system to look for the color property on passed objects.\n \"is\": the operator. Values retrieved for the \"color\" property will be checked\n against the right operand to see if they are equal.\n \"grey\": the right operand. 
It specifies an explicit value to check for.\n\nSo each time an object is passed through the filter, it will expand the value\nof the color data member, and compare its value against \"grey\".\n\nBecause data members of objects are often not simple datatypes but other\nobjects, the system allows you to reference data members within other data\nmembers by separating each by a dot. Let's see an example:\n\nLet's add a more complex Car class with default tyre data:\n\nclass CarWithTyres(Car):\n def __init__(self, code, tyres=None, color=\"white\", doors=3):\n super(self, CarWithTyres).__init__(code, color, doors)\n tyres = tyres or Tyre(\"Pirelli\", \"PZERO\")\n\nclass Tyre(object):\n def __init__(self, brand, code):\n self.brand = brand\n self.code = code\n\nAnd two new instances:\n ford_ka = CarWithTyres(\"FORDKA\", color=\"grey\", tyres=Tyre(\"AVON\", \"ZT5\"))\n toyota_corolla = Car(\"COROLLA1\", color=\"white\", doors=5)\n fleet = [ford_ka, toyota_corolla]\n\nTo filter a car based on the tyre brand, we would use a search path of\n\"tyres.brand\".\n\nBecause the filter implementation provides the actual classes that perform\nhandling of the search paths, operators, etc. customizing the behaviour of the\nfilter is easy. Three basic filter implementations are given:\n BaseFilterImplementation: search path expansion is done on attribute names\n as provided (case-sensitive).\n LowercaseAttributeFilterImp: search path expansion is done on the lowercased\n attribute name, so that it only accesses attributes, not methods.\n DictFilterImplementation: search path expansion is done on dictionary access\n to the given object. 
So \"a.b\" expands the object obj to obj[\"a\"][\"b\"]\n\"\"\"\n\n\n\n\nimport abc\nimport binascii\nimport logging\nimport re\n\nfrom grr.lib import lexer\nfrom grr.lib import utils\n\n\nclass Error(Exception):\n \"\"\"Base module exception.\"\"\"\n\n\nclass MalformedQueryError(Error):\n \"\"\"The provided filter query is malformed.\"\"\"\n\n\nclass ParseError(Error, lexer.ParseError):\n \"\"\"The parser for textual queries returned invalid results.\"\"\"\n\n\nclass InvalidNumberOfOperands(Error):\n \"\"\"The number of operands provided to this operator is wrong.\"\"\"\n\n\nclass Filter(object):\n \"\"\"Base class for every filter.\"\"\"\n\n def __init__(self, arguments=None, value_expander=None):\n \"\"\"Constructor.\n\n Args:\n arguments: Arguments to the filter.\n value_expander: A callable that will be used to expand values for the\n objects passed to this filter. Implementations expanders are provided by\n subclassing ValueExpander.\n\n Raises:\n Error: If the given value_expander is not a subclass of ValueExpander\n \"\"\"\n self.value_expander = None\n self.value_expander_cls = value_expander\n if self.value_expander_cls:\n if not issubclass(self.value_expander_cls, ValueExpander):\n raise Error(\"%s is not a valid value expander\" % (\n self.value_expander_cls))\n self.value_expander = self.value_expander_cls()\n self.args = arguments or []\n logging.debug(\"Adding %s\", arguments)\n\n @abc.abstractmethod\n def Matches(self, obj):\n \"\"\"Whether object obj matches this filter.\"\"\"\n\n def Filter(self, objects):\n \"\"\"Returns a list of objects that pass the filter.\"\"\"\n return filter(self.Matches, objects)\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__,\n \", \".join([str(arg) for arg in self.args]))\n\n\nclass AndFilter(Filter):\n \"\"\"Performs a boolean AND of the given Filter instances as arguments.\n\n Note that if no conditions are passed, all objects will pass.\n \"\"\"\n\n def Matches(self, obj):\n for child_filter in 
self.args:\n if not child_filter.Matches(obj):\n return False\n return True\n\n\nclass OrFilter(Filter):\n \"\"\"Performs a boolean OR of the given Filter instances as arguments.\n\n Note that if no conditions are passed, all objects will pass.\n \"\"\"\n\n def Matches(self, obj):\n if not self.args: return True\n for child_filter in self.args:\n if child_filter.Matches(obj):\n return True\n return False\n\n\nclass Operator(Filter):\n \"\"\"Base class for all operators.\"\"\"\n\n\nclass IdentityFilter(Operator):\n def Matches(self, _):\n return True\n\n\nclass UnaryOperator(Operator):\n \"\"\"Base class for unary operators.\"\"\"\n\n def __init__(self, operand, **kwargs):\n \"\"\"Constructor.\"\"\"\n\n super(UnaryOperator, self).__init__(arguments=[operand], **kwargs)\n if len(self.args) != 1:\n raise InvalidNumberOfOperands(\"Only one operand is accepted by %s. \"\n \"Received %d.\" % (self.__class__.__name__,\n len(self.args)))\n\n\nclass BinaryOperator(Operator):\n \"\"\"Base class for binary operators.\n\n The left operand is always a path into the object which will be expanded for\n values. The right operand is a value defined at initialization and is stored\n at self.right_operand.\n \"\"\"\n\n def __init__(self, arguments=None, **kwargs):\n super(BinaryOperator, self).__init__(arguments=arguments, **kwargs)\n if len(self.args) != 2:\n raise InvalidNumberOfOperands(\"Only two operands are accepted by %s. 
\"\n \"Received %d.\" % (self.__class__.__name__,\n len(self.args)))\n self.left_operand = self.args[0]\n self.right_operand = self.args[1]\n\n\nclass GenericBinaryOperator(BinaryOperator):\n \"\"\"Allows easy implementations of operators.\"\"\"\n\n def Operation(self, x, y):\n \"\"\"Performs the operation between two values.\"\"\"\n\n def Operate(self, values):\n \"\"\"Takes a list of values and if at least one matches, returns True.\"\"\"\n for val in values:\n try:\n logging.debug(\"Operating %s with x=%s and y=%s\",\n self.__class__.__name__, val, self.right_operand)\n if self.Operation(val, self.right_operand):\n return True\n else:\n continue\n except (ValueError, TypeError):\n continue\n return False\n\n def Matches(self, obj):\n key = self.left_operand\n values = self.value_expander.Expand(obj, key)\n if values and self.Operate(values):\n return True\n return False\n\n\nclass Equals(GenericBinaryOperator):\n \"\"\"Matches objects when the right operand equals the expanded value.\"\"\"\n\n def Operation(self, x, y):\n return x == y\n\n\nclass NotEquals(GenericBinaryOperator):\n \"\"\"Matches when the right operand isn't equal to the expanded value.\"\"\"\n\n def Operate(self, values):\n return not Equals(arguments=self.args,\n value_expander=self.value_expander_cls).Operate(values)\n\n\nclass Less(GenericBinaryOperator):\n \"\"\"Whether the expanded value >= right_operand.\"\"\"\n\n def Operation(self, x, y):\n return x < y\n\n\nclass LessEqual(GenericBinaryOperator):\n \"\"\"Whether the expanded value <= right_operand.\"\"\"\n\n def Operation(self, x, y):\n return x <= y\n\n\nclass Greater(GenericBinaryOperator):\n \"\"\"Whether the expanded value > right_operand.\"\"\"\n\n def Operation(self, x, y):\n return x > y\n\n\nclass GreaterEqual(GenericBinaryOperator):\n \"\"\"Whether the expanded value >= right_operand.\"\"\"\n\n def Operation(self, x, y):\n return x >= y\n\n\nclass Contains(GenericBinaryOperator):\n \"\"\"Whether the right operand is contained 
in the value.\"\"\"\n\n def Operation(self, x, y):\n return y in x\n\n\nclass NotContains(GenericBinaryOperator):\n \"\"\"Whether the right operand is not contained in the values.\"\"\"\n\n def Operate(self, values):\n return not Contains(arguments=self.args,\n value_expander=self.value_expander_cls).Operate(values)\n\n\n# TODO(user): Change to an N-ary Operator?\nclass InSet(GenericBinaryOperator):\n \"\"\"Whether all values are contained within the right operand.\"\"\"\n\n def Operation(self, x, y):\n \"\"\"Whether x is fully contained in y.\"\"\"\n if x in y:\n return True\n\n # x might be an iterable\n # first we need to skip strings or we'll do silly things\n if (isinstance(x, basestring)\n or isinstance(x, bytes)):\n return False\n\n try:\n for value in x:\n if value not in y:\n return False\n return True\n except TypeError:\n return False\n\n\nclass NotInSet(GenericBinaryOperator):\n \"\"\"Whether at least a value is not present in the right operand.\"\"\"\n\n def Operate(self, values):\n return not InSet(arguments=self.args,\n value_expander=self.value_expander_cls).Operate(values)\n\n\nclass Regexp(GenericBinaryOperator):\n \"\"\"Whether the value matches the regexp in the right operand.\"\"\"\n\n def __init__(self, *children, **kwargs):\n super(Regexp, self).__init__(*children, **kwargs)\n logging.debug(\"Compiled: %s\", self.right_operand)\n try:\n self.compiled_re = re.compile(utils.SmartUnicode(self.right_operand))\n except re.error:\n raise ValueError(\"Regular expression \\\"%s\\\" is malformed.\" %\n self.right_operand)\n\n def Operation(self, x, y):\n try:\n if self.compiled_re.search(utils.SmartUnicode(x)):\n return True\n except TypeError:\n return False\n\n\nclass Context(Operator):\n \"\"\"Restricts the child operators to a specific context within the object.\n\n Solves the context problem. The context problem is the following:\n Suppose you store a list of loaded DLLs within a process. 
Suppose that for\n each of these DLLs you store the number of imported functions and each of the\n imported functions name.\n\n Imagine that a malicious DLL is injected into processes and its indicators are\n that it only imports one function and that it is RegQueryValueEx. You'd write\n your indicator like this:\n\n\n AndOperator(\n Equal(\"ImportedDLLs.ImpFunctions.Name\", \"RegQueryValueEx\"),\n Equal(\"ImportedDLLs.NumImpFunctions\", \"1\")\n )\n\n Now imagine you have these two processes on a given system.\n\n Process1\n +[0]__ImportedDlls\n +[0]__Name: \"notevil.dll\"\n |[0]__ImpFunctions\n | +[1]__Name: \"CreateFileA\"\n |[0]__NumImpFunctions: 1\n |\n +[1]__Name: \"alsonotevil.dll\"\n |[1]__ImpFunctions\n | +[0]__Name: \"RegQueryValueEx\"\n | +[1]__Name: \"CreateFileA\"\n |[1]__NumImpFunctions: 2\n\n Process2\n +[0]__ImportedDlls\n +[0]__Name: \"evil.dll\"\n |[0]__ImpFunctions\n | +[0]__Name: \"RegQueryValueEx\"\n |[0]__NumImpFunctions: 1\n\n Both Process1 and Process2 match your query, as each of the indicators are\n evaluated separatedly. While you wanted to express \"find me processes that\n have a DLL that has both one imported function and ReqQueryValueEx is in the\n list of imported functions\", your indicator actually means \"find processes\n that have at least a DLL with 1 imported functions and at least one DLL that\n imports the ReqQueryValueEx function\".\n\n To write such an indicator you need to specify a context of ImportedDLLs for\n these two clauses. 
Such that you convert your indicator to:\n\n Context(\"ImportedDLLs\",\n AndOperator(\n Equal(\"ImpFunctions.Name\", \"RegQueryValueEx\"),\n Equal(\"NumImpFunctions\", \"1\")\n ))\n\n Context will execute the filter specified as the second parameter for each of\n the objects under \"ImportedDLLs\", thus applying the condition per DLL, not per\n object and returning the right result.\n \"\"\"\n\n def __init__(self, arguments=None, **kwargs):\n if len(arguments) != 2:\n raise InvalidNumberOfOperands(\"Context accepts only 2 operands.\")\n super(Context, self).__init__(arguments=arguments, **kwargs)\n self.context, self.condition = self.args\n\n def Matches(self, obj):\n for object_list in self.value_expander.Expand(obj, self.context):\n for sub_object in object_list:\n if self.condition.Matches(sub_object):\n return True\n return False\n\n\nOP2FN = {\"equals\": Equals,\n \"is\": Equals,\n \"==\": Equals,\n \"notequals\": NotEquals,\n \"isnot\": NotEquals,\n \"!=\": NotEquals,\n \"contains\": Contains,\n \"notcontains\": NotContains,\n \">\": Greater,\n \">=\": GreaterEqual,\n \"<\": Less,\n \"<=\": LessEqual,\n \"inset\": InSet,\n \"notinset\": NotInSet,\n \"regexp\": Regexp,\n }\n\n\nclass ValueExpander(object):\n \"\"\"Encapsulates the logic to expand values available in an object.\n\n Once instantiated and called, this class returns all the values that follow a\n given field path.\n \"\"\"\n\n FIELD_SEPARATOR = \".\"\n\n def _GetAttributeName(self, path):\n \"\"\"Returns the attribute name to fetch given a path.\"\"\"\n return path[0]\n\n def _GetValue(self, obj, attr_name):\n \"\"\"Returns the value of tha attribute attr_name.\"\"\"\n raise NotImplementedError()\n\n def _AtLeaf(self, attr_value):\n \"\"\"Called when at a leaf value. Should yield a value.\"\"\"\n yield attr_value\n\n def _AtNonLeaf(self, attr_value, path):\n \"\"\"Called when at a non-leaf value. 
Should recurse and yield values.\"\"\"\n try:\n # Check first for iterables\n # If it's a dictionary, we yield it\n if isinstance(attr_value, dict):\n yield attr_value\n else:\n # If it's an iterable, we recurse on each value.\n for sub_obj in attr_value:\n for value in self.Expand(sub_obj, path[1:]):\n yield value\n except TypeError: # This is then not iterable, we recurse with the value\n for value in self.Expand(attr_value, path[1:]):\n yield value\n\n def Expand(self, obj, path):\n \"\"\"Returns a list of all the values for the given path in the object obj.\n\n Given a path such as [\"sub1\", \"sub2\"] it returns all the values available\n in obj.sub1.sub2 as a list. sub1 and sub2 must be data attributes or\n properties.\n\n If sub1 returns a list of objects, or a generator, Expand aggregates the\n values for the remaining path for each of the objects, thus returning a\n list of all the values under the given path for the input object.\n\n Args:\n obj: An object that will be traversed for the given path\n path: A list of strings\n\n Yields:\n The values once the object is traversed.\n \"\"\"\n if isinstance(path, basestring):\n path = path.split(self.FIELD_SEPARATOR)\n\n attr_name = self._GetAttributeName(path)\n attr_value = self._GetValue(obj, attr_name)\n if attr_value is None:\n return\n\n if len(path) == 1:\n for value in self._AtLeaf(attr_value):\n yield value\n else:\n for value in self._AtNonLeaf(attr_value, path):\n yield value\n\n\nclass AttributeValueExpander(ValueExpander):\n \"\"\"An expander that gives values based on object attribute names.\"\"\"\n\n def _GetValue(self, obj, attr_name):\n return getattr(obj, attr_name, None)\n\n\nclass LowercaseAttributeValueExpander(AttributeValueExpander):\n \"\"\"An expander that lowercases all attribute names before access.\"\"\"\n\n def _GetAttributeName(self, path):\n return path[0].lower()\n\n\nclass DictValueExpander(ValueExpander):\n \"\"\"An expander that gets values from dictionary access to the 
object.\"\"\"\n\n def _GetValue(self, obj, attr_name):\n return obj.get(attr_name, None)\n\n\n### PARSER DEFINITION\nclass BasicExpression(lexer.Expression):\n def Compile(self, filter_implementation):\n arguments = [self.attribute]\n op_str = self.operator.lower()\n operator = filter_implementation.OPS.get(op_str, None)\n if not operator:\n raise ParseError(\"Unknown operator %s provided.\" % self.operator)\n arguments.extend(self.args)\n expander = filter_implementation.FILTERS[\"ValueExpander\"]\n return operator(arguments=arguments, value_expander=expander)\n\n\nclass ContextExpression(lexer.Expression):\n \"\"\"Represents the context operator.\"\"\"\n\n def __init__(self, attribute=\"\", part=None):\n self.attribute = attribute\n self.args = []\n if part: self.args.append(part)\n super(ContextExpression, self).__init__()\n\n def __str__(self):\n return \"Context(%s %s)\" % (\n self.attribute, [str(x) for x in self.args])\n\n def SetExpression(self, expression):\n if isinstance(expression, lexer.Expression):\n self.args = [expression]\n else:\n raise ParseError(\"Expected expression, got %s\" % expression)\n\n def Compile(self, filter_implementation):\n arguments = [self.attribute]\n for arg in self.args:\n arguments.append(arg.Compile(filter_implementation))\n expander = filter_implementation.FILTERS[\"ValueExpander\"]\n context_cls = filter_implementation.FILTERS[\"Context\"]\n return context_cls(arguments=arguments,\n value_expander=expander)\n\n\nclass BinaryExpression(lexer.BinaryExpression):\n def Compile(self, filter_implemention):\n \"\"\"Compile the binary expression into a filter object.\"\"\"\n operator = self.operator.lower()\n if operator == \"and\" or operator == \"&&\":\n method = \"AndFilter\"\n elif operator == \"or\" or operator == \"||\":\n method = \"OrFilter\"\n else:\n raise ParseError(\"Invalid binary operator %s\" % operator)\n\n args = [x.Compile(filter_implemention) for x in self.args]\n return 
filter_implemention.FILTERS[method](arguments=args)\n\n\nclass IdentityExpression(lexer.Expression):\n def Compile(self, filter_implementation):\n return filter_implementation.FILTERS[\"IdentityFilter\"]()\n\n\nclass Parser(lexer.SearchParser):\n \"\"\"Parses and generates an AST for a query written in the described language.\n\n Examples of valid syntax:\n size is 40\n (name contains \"Program Files\" AND hash.md5 is \"123abc\")\n @imported_modules (num_symbols = 14 AND symbol.name is \"FindWindow\")\n \"\"\"\n expression_cls = BasicExpression\n binary_expression_cls = BinaryExpression\n context_cls = ContextExpression\n identity_expression_cls = IdentityExpression\n\n tokens = [\n # Operators and related tokens\n lexer.Token(\"INITIAL\", r\"\\@[\\w._0-9]+\",\n \"ContextOperator,PushState\", \"CONTEXTOPEN\"),\n lexer.Token(\"INITIAL\", r\"[^\\s\\(\\)]\", \"PushState,PushBack\", \"ATTRIBUTE\"),\n lexer.Token(\"INITIAL\", r\"\\(\", \"PushState,BracketOpen\", None),\n lexer.Token(\"INITIAL\", r\"\\)\", \"BracketClose\", \"BINARY\"),\n\n # Context\n lexer.Token(\"CONTEXTOPEN\", r\"\\(\", \"BracketOpen\", \"INITIAL\"),\n\n # Double quoted string\n lexer.Token(\"STRING\", \"\\\"\", \"PopState,StringFinish\", None),\n lexer.Token(\"STRING\", r\"\\\\x(..)\", \"HexEscape\", None),\n lexer.Token(\"STRING\", r\"\\\\(.)\", \"StringEscape\", None),\n lexer.Token(\"STRING\", r\"[^\\\\\\\"]+\", \"StringInsert\", None),\n\n # Single quoted string\n lexer.Token(\"SQ_STRING\", \"'\", \"PopState,StringFinish\", None),\n lexer.Token(\"SQ_STRING\", r\"\\\\x(..)\", \"HexEscape\", None),\n lexer.Token(\"SQ_STRING\", r\"\\\\(.)\", \"StringEscape\", None),\n lexer.Token(\"SQ_STRING\", r\"[^\\\\']+\", \"StringInsert\", None),\n\n # Basic expression\n lexer.Token(\"ATTRIBUTE\", r\"[\\w._0-9]+\", \"StoreAttribute\", \"OPERATOR\"),\n lexer.Token(\"OPERATOR\", r\"(\\w+|[<>!=]=?)\", \"StoreOperator\", \"ARG\"),\n lexer.Token(\"ARG\", r\"(\\d+\\.\\d+)\", \"InsertFloatArg\", \"ARG\"),\n 
lexer.Token(\"ARG\", r\"(0x\\d+)\", \"InsertInt16Arg\", \"ARG\"),\n lexer.Token(\"ARG\", r\"(\\d+)\", \"InsertIntArg\", \"ARG\"),\n lexer.Token(\"ARG\", \"\\\"\", \"PushState,StringStart\", \"STRING\"),\n lexer.Token(\"ARG\", \"'\", \"PushState,StringStart\", \"SQ_STRING\"),\n # When the last parameter from arg_list has been pushed\n\n # State where binary operators are supported (AND, OR)\n lexer.Token(\"BINARY\", r\"(?i)(and|or|\\&\\&|\\|\\|)\",\n \"BinaryOperator\", \"INITIAL\"),\n # - We can also skip spaces\n lexer.Token(\"BINARY\", r\"\\s+\", None, None),\n # - But if it's not \"and\" or just spaces we have to go back\n lexer.Token(\"BINARY\", \".\", \"PushBack,PopState\", None),\n\n # Skip whitespace.\n lexer.Token(\".\", r\"\\s+\", None, None),\n ]\n\n def InsertArg(self, string=\"\", **_):\n \"\"\"Insert an arg to the current expression.\"\"\"\n logging.debug(\"Storing Argument %s\", string)\n\n # This expression is complete\n if self.current_expression.AddArg(string):\n self.stack.append(self.current_expression)\n self.current_expression = self.expression_cls()\n # We go to the BINARY state, to find if there's an AND or OR operator\n return \"BINARY\"\n\n def InsertFloatArg(self, string=\"\", **_):\n \"\"\"Inserts a Float argument.\"\"\"\n try:\n float_value = float(string)\n return self.InsertArg(float_value)\n except (TypeError, ValueError):\n raise ParseError(\"%s is not a valid float.\" % string)\n\n def InsertIntArg(self, string=\"\", **_):\n \"\"\"Inserts an Integer argument.\"\"\"\n try:\n int_value = int(string)\n return self.InsertArg(int_value)\n except (TypeError, ValueError):\n raise ParseError(\"%s is not a valid integer.\" % string)\n\n def InsertInt16Arg(self, string=\"\", **_):\n \"\"\"Inserts an Integer in base16 argument.\"\"\"\n try:\n int_value = int(string, 16)\n return self.InsertArg(int_value)\n except (TypeError, ValueError):\n raise ParseError(\"%s is not a valid base16 integer.\" % string)\n\n def StringFinish(self, **_):\n if 
self.state == \"ATTRIBUTE\":\n return self.StoreAttribute(string=self.string)\n\n elif self.state == \"ARG\":\n return self.InsertArg(string=self.string)\n\n def StringEscape(self, string, match, **_):\n \"\"\"Escape backslashes found inside a string quote.\n\n Backslashes followed by anything other than [\\'\"rnbt] will raise an Error.\n\n Args:\n string: The string that matched.\n match: The match object (m.group(1) is the escaped code)\n\n Raises:\n ParseError: When the escaped string is not one of [\\'\"rnbt]\n \"\"\"\n if match.group(1) in \"\\\\'\\\"rnbt\":\n self.string += string.decode(\"string_escape\")\n else:\n raise ParseError(\"Invalid escape character %s.\" % string)\n\n def HexEscape(self, string, match, **_):\n \"\"\"Converts a hex escaped string.\"\"\"\n logging.debug(\"HexEscape matched %s\", string)\n hex_string = match.group(1)\n try:\n self.string += binascii.unhexlify(hex_string)\n except TypeError:\n raise ParseError(\"Invalid hex escape %s\" % string)\n\n def ContextOperator(self, string=\"\", **_):\n self.stack.append(self.context_cls(string[1:]))\n\n def Reduce(self):\n \"\"\"Reduce the token stack into an AST.\"\"\"\n # Check for sanity\n if self.state != \"INITIAL\" and self.state != \"BINARY\":\n self.Error(\"Premature end of expression\")\n\n length = len(self.stack)\n while length > 1:\n # Precendence order\n self._CombineParenthesis()\n self._CombineBinaryExpressions(\"and\")\n self._CombineBinaryExpressions(\"or\")\n self._CombineContext()\n\n # No change\n if len(self.stack) == length: break\n length = len(self.stack)\n\n if length != 1:\n self.Error(\"Illegal query expression\")\n\n return self.stack[0]\n\n def Error(self, message=None, _=None):\n raise ParseError(\"%s in position %s: %s <----> %s )\" % (\n message, len(self.processed_buffer), self.processed_buffer,\n self.buffer))\n\n def _CombineBinaryExpressions(self, operator):\n for i in range(1, len(self.stack)-1):\n item = self.stack[i]\n if (isinstance(item, 
lexer.BinaryExpression) and\n item.operator.lower() == operator.lower() and\n isinstance(self.stack[i-1], lexer.Expression) and\n isinstance(self.stack[i+1], lexer.Expression)):\n lhs = self.stack[i-1]\n rhs = self.stack[i+1]\n\n self.stack[i].AddOperands(lhs, rhs)\n self.stack[i-1] = None\n self.stack[i+1] = None\n\n self.stack = filter(None, self.stack)\n\n def _CombineContext(self):\n # Context can merge from item 0\n for i in range(len(self.stack)-1, 0, -1):\n item = self.stack[i-1]\n if (isinstance(item, ContextExpression) and\n isinstance(self.stack[i], lexer.Expression)):\n expression = self.stack[i]\n self.stack[i-1].SetExpression(expression)\n self.stack[i] = None\n\n self.stack = filter(None, self.stack)\n\n\n### FILTER IMPLEMENTATIONS\n\n\nclass BaseFilterImplementation(object):\n \"\"\"Defines the base implementation of an object filter by its attributes.\n\n Inherit from this class, switch any of the needed operators and pass it to\n the Compile method of a parsed string to obtain an executable filter.\n \"\"\"\n\n OPS = OP2FN\n FILTERS = {\"ValueExpander\": AttributeValueExpander,\n \"AndFilter\": AndFilter,\n \"OrFilter\": OrFilter,\n \"IdentityFilter\": IdentityFilter,\n \"Context\": Context}\n\n\nclass LowercaseAttributeFilterImplementation(BaseFilterImplementation):\n \"\"\"Does field name access on the lowercase version of names.\n\n Useful to only access attributes and properties with Google's python naming\n style.\n \"\"\"\n\n FILTERS = {}\n FILTERS.update(BaseFilterImplementation.FILTERS)\n FILTERS.update({\"ValueExpander\": LowercaseAttributeValueExpander})\n\n\nclass DictFilterImplementation(BaseFilterImplementation):\n \"\"\"Does value fetching by dictionary access on the object.\"\"\"\n\n FILTERS = {}\n FILTERS.update(BaseFilterImplementation.FILTERS)\n FILTERS.update({\"ValueExpander\": 
DictValueExpander})\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475151,"cells":{"repo_name":{"kind":"string","value":"synologix/enigma2"},"path":{"kind":"string","value":"lib/python/Tools/Notifications.py"},"copies":{"kind":"string","value":"66"},"size":{"kind":"string","value":"1963"},"content":{"kind":"string","value":"notifications = [ ]\n\nnotificationAdded = [ ]\n\n# notifications which are currently on screen (and might be closed by similiar notifications)\ncurrent_notifications = [ ]\n\ndef __AddNotification(fnc, screen, id, *args, **kwargs):\n\tif \".MessageBox'>\" in `screen`:\n\t\tkwargs[\"simple\"] = True\n\tnotifications.append((fnc, screen, args, kwargs, id))\n\tfor x in notificationAdded:\n\t\tx()\n\ndef AddNotification(screen, *args, **kwargs):\n\tAddNotificationWithCallback(None, screen, *args, **kwargs)\n\ndef AddNotificationWithCallback(fnc, screen, *args, **kwargs):\n\t__AddNotification(fnc, screen, None, *args, **kwargs)\n\ndef AddNotificationParentalControl(fnc, screen, *args, **kwargs):\n\tRemovePopup(\"Parental control\")\n\t__AddNotification(fnc, screen, \"Parental control\", *args, **kwargs)\n\ndef AddNotificationWithID(id, screen, *args, **kwargs):\n\t__AddNotification(None, screen, id, *args, **kwargs)\n\ndef AddNotificationWithIDCallback(fnc, id, screen, *args, **kwargs):\n\t__AddNotification(fnc, screen, id, *args, **kwargs)\n\n# we don't support notifications with callback and ID as this\n# would require manually calling the callback on cancelled popups.\n\ndef RemovePopup(id):\n\t# remove similiar notifications\n\tprint \"RemovePopup, id =\", id\n\tfor x in notifications:\n\t\tif x[4] and x[4] == id:\n\t\t\tprint \"(found in notifications)\"\n\t\t\tnotifications.remove(x)\n\n\tfor x in current_notifications:\n\t\tif x[0] == id:\n\t\t\tprint \"(found in current notifications)\"\n\t\t\tx[1].close()\n\nfrom Screens.MessageBox import MessageBox\n\ndef AddPopup(text, type, timeout, id = None):\n\tif id is not 
None:\n\t\tRemovePopup(id)\n\tprint \"AddPopup, id =\", id\n\tAddNotificationWithID(id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = True)\n\ndef AddPopupWithCallback(fnc, text, type, timeout, id = None):\n\tif id is not None:\n\t\tRemovePopup(id)\n\tprint \"AddPopup, id =\", id\n\tAddNotificationWithIDCallback(fnc, id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = False)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475152,"cells":{"repo_name":{"kind":"string","value":"BMan-L/shadowsocks"},"path":{"kind":"string","value":"tests/nose_plugin.py"},"copies":{"kind":"string","value":"1072"},"size":{"kind":"string","value":"1164"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2015 clowwindy\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport nose\nfrom nose.plugins.base import Plugin\n\n\nclass ExtensionPlugin(Plugin):\n\n name = \"ExtensionPlugin\"\n\n def options(self, parser, env):\n Plugin.options(self, parser, env)\n\n def configure(self, options, config):\n Plugin.configure(self, options, config)\n self.enabled = True\n\n def wantFile(self, file):\n return file.endswith('.py')\n\n def wantDirectory(self, directory):\n return True\n\n def wantModule(self, file):\n return True\n\n\nif __name__ == '__main__':\n nose.main(addplugins=[ExtensionPlugin()])\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475153,"cells":{"repo_name":{"kind":"string","value":"slint/zenodo"},"path":{"kind":"string","value":"zenodo/modules/exporter/__init__.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1189"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# This file is part of Zenodo.\n# Copyright (C) 2018 CERN.\n#\n# Zenodo is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Zenodo is distributed in the hope that it will be\n# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Zenodo; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307, USA.\n#\n# In applying this license, CERN does not\n# waive the privileges and immunities granted to it by virtue of its status\n# as an Intergovernmental Organization or submit itself to any jurisdiction.\n\n\"\"\"Exporter programmatic API.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom .api import Exporter\nfrom .streams import BZip2ResultStream, ResultStream\nfrom .writers import BucketWriter, filename_factory\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475154,"cells":{"repo_name":{"kind":"string","value":"flos-club/eekk"},"path":{"kind":"string","value":"libmat2/abstract.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1460"},"content":{"kind":"string","value":"import abc\nimport os\nimport re\nfrom typing import Set, Dict, Union\n\nassert Set # make pyflakes happy\n\n\nclass AbstractParser(abc.ABC):\n \"\"\" This is the base class of every parser.\n It might yield `ValueError` on instantiation on invalid files,\n and `RuntimeError` when something went wrong in `remove_all`.\n \"\"\"\n meta_list = set() # type: Set[str]\n mimetypes = set() # type: Set[str]\n\n def __init__(self, filename: str) -> None:\n \"\"\"\n :raises ValueError: Raised upon an invalid file\n \"\"\"\n if re.search('^[a-z0-9./]', filename) is None:\n # Some parsers are calling external binaries,\n # this prevents shell command injections\n filename = os.path.join('.', filename)\n\n self.filename = filename\n fname, extension = os.path.splitext(filename)\n\n # Special case for tar.gz, tar.bz2, … files\n if fname.endswith('.tar') and len(fname) > 4:\n fname, extension = fname[:-4], '.tar' + extension\n\n self.output_filename = fname + '.cleaned' + extension\n 
self.lightweight_cleaning = False\n\n @abc.abstractmethod\n def get_meta(self) -> Dict[str, Union[str, dict]]:\n \"\"\"Return all the metadata of the current file\"\"\"\n\n @abc.abstractmethod\n def remove_all(self) -> bool:\n \"\"\"\n Remove all the metadata of the current file\n\n :raises RuntimeError: Raised if the cleaning process went wrong.\n \"\"\"\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475155,"cells":{"repo_name":{"kind":"string","value":"aragos/tichu-tournament"},"path":{"kind":"string","value":"api/src/welcome_handler.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3909"},"content":{"kind":"string","value":"import webapp2\nimport json\n\nfrom generic_handler import GenericHandler\nfrom google.appengine.api import mail\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.api.app_identity import get_application_id\nfrom handler_utils import CheckUserOwnsTournamentAndMaybeReturnStatus\nfrom handler_utils import GetTourneyWithIdAndMaybeReturnStatus\nfrom handler_utils import SetErrorStatus\nfrom models import Tournament\nfrom models import PlayerPair\n\nclass WelcomeHandler(GenericHandler):\n ''' Handles reuqests to /api/tournament/:id/welcome. Responsible for emailing\n players with their player codes.\n '''\n\n @ndb.toplevel\n def post(self, id):\n ''' Sends an email for all email addresses in the request.\n Checks that emails belong to players in the tournament and sends the email\n only to valid addresses.\n\n Args: \n id: tournament ID to look up. 
Tournament must already have been\n created.\n '''\n user = users.get_current_user()\n tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)\n if not tourney:\n return\n if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response, user,\n tourney):\n return\n\n request_dict = self._ParseRequestAndMaybeSetStatus()\n if not request_dict:\n return\n self._SendEmails(request_dict, user, tourney)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.set_status(201)\n\n def _SendEmails(self, request_dict, user, tourney):\n '''Sends a welcome email for all email addresses in the request_dict.\n\n Args: \n request_dict: Parsed JSON dict.\n user: The ndb.User owning this tournament.\n tourney: The tournament model object. \n ''' \n player_pairs = PlayerPair._query(ancestor=tourney.key).fetch()\n requested_emails = request_dict[\"emails\"]\n for player_pair in player_pairs:\n for player in player_pair.player_list():\n if player.get(\"email\") not in requested_emails:\n continue\n player_name = player.get(\"name\")\n player_greeting = \"Dear {},\".format(player_name) if player_name else \"Greetings!\"\n email_text = \"\"\"{} \n\\nWelcome to Tichu tournament \\\"{}\\\". Your pair's ID is {}.\nYou can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}. \n\\nGood Luck!\nYour friendly neighborhood tournament director\"\"\".format(\n player_greeting, tourney.name, player_pair.id, player_pair.id)\n email_html = \"\"\"{}\n
\n
Welcome to Tichu tournament \\\"{}\\\". Your pair's ID is {}.\nYou can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}. \n
\n
Good Luck!\n
Your friendly neighborhood tournament director\n\"\"\".format(player_greeting, tourney.name, player_pair.id, player_pair.id)\n mail.send_mail(\n sender=\"{} \".format(tourney.name, get_application_id()),\n to=player[\"email\"],\n subject=\"Your Tichu Tournament Pair Code\",\n body=email_text,\n html=email_html,\n reply_to=user.email())\n\n\n def _ParseRequestAndMaybeSetStatus(self): \n ''' Parses the client request for email sents an error status if the\n request is unreadable or the email list is empty. \n\n Returns: dict corresponding to the parsed request.s\n ''' \n try:\n request_dict = json.loads(self.request.body)\n except ValueError:\n SetErrorStatus(self.response, 500, \"Invalid Input\",\n \"Unable to parse request body as JSON object\")\n return None\n request_dict[\"emails\"] = [e for e in request_dict[\"emails\"] if e and e != \"\"]\n if len(request_dict[\"emails\"]) == 0:\n SetErrorStatus(self.response, 400, \"Invalid Input\",\n \"No emails specified.\")\n return None\n return request_dict"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475156,"cells":{"repo_name":{"kind":"string","value":"PHOTOX/fuase"},"path":{"kind":"string","value":"ase/ase/tasks/io.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"1499"},"content":{"kind":"string","value":"import numpy as np\n\nfrom ase.parallel import world\n\ntry:\n import json\nexcept ImportError:\n json = None\n\n\nif json is None:\n def dumps(obj):\n if isinstance(obj, str):\n return '\"' + obj + '\"'\n if isinstance(obj, (int, float)):\n return repr(obj)\n if isinstance(obj, dict):\n return '{' + ', '.join(dumps(key) + ': ' + dumps(value)\n for key, value in obj.items()) + '}'\n return '[' + ','.join(dumps(value) for value in obj) + ']'\n\n loads = eval\nelse:\n class NDArrayEncoder(json.JSONEncoder):\n def __init__(self):\n json.JSONEncoder.__init__(self, sort_keys=True, indent=4)\n\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return 
json.JSONEncoder.default(self, obj)\n \n\n dumps = NDArrayEncoder().encode\n loads = json.loads\n\n\ndef numpyfy(obj):\n if isinstance(obj, dict):\n return dict((key, numpyfy(value)) for key, value in obj.items())\n if isinstance(obj, list):\n try:\n obj = np.array(obj)\n except ValueError:\n obj = [numpyfy(value) for value in obj]\n return obj\n\n\ndef write_json(name, results):\n if world.rank == 0:\n fd = open(name, 'w')\n fd.write(dumps(results))\n fd.close()\n\n\ndef read_json(name):\n fd = open(name, 'r')\n results = loads(fd.read())\n fd.close()\n world.barrier()\n return numpyfy(results)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475157,"cells":{"repo_name":{"kind":"string","value":"luser/socorro"},"path":{"kind":"string","value":"socorro/unittest/external/fs/test_fslegacydatedradixtreestorage.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"17117"},"content":{"kind":"string","value":"import os\nimport shutil\nfrom mock import Mock\nfrom configman import ConfigurationManager\nfrom nose.tools import eq_, ok_, assert_raises\n\nfrom socorro.external.fs.crashstorage import (\n FSLegacyDatedRadixTreeStorage,\n FSTemporaryStorage\n)\nfrom socorro.external.crashstorage_base import (\n CrashIDNotFound,\n MemoryDumpsMapping,\n)\nfrom socorro.unittest.testbase import TestCase\n\n\nclass TestFSLegacyDatedRadixTreeStorage(TestCase):\n CRASH_ID_1 = \"0bba929f-8721-460c-dead-a43c20071025\"\n CRASH_ID_2 = \"0bba929f-8721-460c-dead-a43c20071026\"\n CRASH_ID_3 = \"0bba929f-8721-460c-dddd-a43c20071025\"\n\n def setUp(self):\n with self._common_config_setup().context() as config:\n self.fsrts = FSLegacyDatedRadixTreeStorage(config)\n\n def tearDown(self):\n shutil.rmtree(self.fsrts.config.fs_root)\n\n def _common_config_setup(self):\n mock_logging = Mock()\n required_config = FSLegacyDatedRadixTreeStorage.get_required_config()\n required_config.add_option('logger', default=mock_logging)\n config_manager = ConfigurationManager(\n 
[required_config],\n app_name='testapp',\n app_version='1.0',\n app_description='app description',\n values_source_list=[{\n 'logger': mock_logging,\n 'minute_slice_interval': 1\n }],\n argv_source=[]\n )\n return config_manager\n\n def _make_test_crash(self):\n self.fsrts.save_raw_crash({\n \"test\": \"TEST\"\n }, MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }), self.CRASH_ID_1)\n\n def _make_test_crash_3(self):\n self.fsrts.save_raw_crash({\n \"test\": \"TEST\"\n }, MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }), self.CRASH_ID_3)\n\n\n def test_save_raw_crash(self):\n self._make_test_crash()\n ok_(os.path.islink(\n os.path.join(\n self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1))))\n ok_(os.path.exists(\n os.path.join(\n self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1),\n self.CRASH_ID_1)))\n\n def test_get_raw_crash(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'],\n \"TEST\")\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash,\n self.CRASH_ID_2)\n\n def test_get_raw_dump(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'),\n \"bar\")\n eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1,\n self.fsrts.config.dump_field),\n \"baz\")\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump,\n self.CRASH_ID_2, \"foo\")\n assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1,\n \"foor\")\n\n def test_get_raw_dumps(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }))\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps,\n self.CRASH_ID_2)\n\n def test_remove(self):\n self._make_test_crash()\n self.fsrts.remove(self.CRASH_ID_1)\n\n parent = os.path.realpath(\n os.path.join(\n 
self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1)))\n\n p = os.path.join(parent, self.CRASH_ID_1)\n ok_(not os.path.exists(p))\n\n assert_raises(CrashIDNotFound, self.fsrts.remove,\n self.CRASH_ID_2)\n\n def test_new_crashes(self):\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n self.fsrts._current_slot = lambda: ['00', '00_01']\n eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1])\n eq_(list(self.fsrts.new_crashes()), [])\n self.fsrts.remove(self.CRASH_ID_1)\n del self.fsrts._current_slot\n\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n\n date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,\n ['00', '00_00'])\n\n new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,\n ['00', '00_01'])\n\n webhead_path = os.sep.join([new_date_path, 'webhead_0'])\n\n os.mkdir(new_date_path)\n os.rename(date_path, webhead_path)\n\n os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1]))\n os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix(\n self.CRASH_ID_1)),\n os.sep.join([webhead_path, self.CRASH_ID_1]))\n\n self.fsrts._current_slot = lambda: ['00', '00_02']\n eq_(list(self.fsrts.new_crashes()),\n [self.CRASH_ID_1])\n\n def test_orphaned_symlink_clean_up(self):\n # Bug 971496 identified a problem where a second crash coming in with\n # the same crash id would derail saving the second crash and leave\n # an extra undeleted symbolic link in the file system. 
This link\n # would be sited as undeleted on every run of 'new_crashes'.\n # this test shows that we can clean these extra symlinks if we\n # encounter them.\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n self.fsrts._current_slot = lambda: ['00', '00_01']\n # make sure we can't create the duplicate in a different slot\n assert_raises(OSError, self._make_test_crash)\n # make sure the second slot exists so we can make the bogus symlink\n self._make_test_crash_3()\n # create bogus orphan link\n self.fsrts._create_name_to_date_symlink(\n self.CRASH_ID_1,\n self.fsrts._current_slot()\n )\n ok_(os.path.islink(\n './crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-'\n 'a43c20071025'\n ))\n # run through the new_crashes iterator which will yield each of the\n # crashes that has been submitted since the last run of new_crashes.\n # this should cause all the symlinks to be removed.\n # we don't bother saving the crashes, as we don't need them.\n for x in self.fsrts.new_crashes():\n pass\n ok_(not os.path.exists(\n './crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-'\n 'a43c20071025'\n ))\n\nclass MyFSTemporaryStorage(FSTemporaryStorage):\n def _get_current_date(self):\n return \"25\"\n\nclass TestFSTemporaryStorage(TestCase):\n CRASH_ID_1 = \"0bba929f-8721-460c-dead-a43c20071025\"\n CRASH_ID_2 = \"0bba929f-8721-460c-dead-a43c20071026\"\n CRASH_ID_3 = \"0bba929f-8721-460c-dddd-a43c20071025\"\n CRASH_ID_4 = \"0bba929f-8721-460c-dddd-a43c20071125\"\n\n def setUp(self):\n with self._common_config_setup().context() as config:\n self.fsrts = MyFSTemporaryStorage(config)\n\n def tearDown(self):\n shutil.rmtree(self.fsrts.config.fs_root)\n\n def _common_config_setup(self):\n mock_logging = Mock()\n required_config = MyFSTemporaryStorage.get_required_config()\n required_config.add_option('logger', default=mock_logging)\n config_manager = ConfigurationManager(\n [required_config],\n app_name='testapp',\n app_version='1.0',\n 
app_description='app description',\n values_source_list=[{\n 'logger': mock_logging,\n 'minute_slice_interval': 1\n }],\n argv_source=[]\n )\n return config_manager\n\n def _make_test_crash(self):\n self.fsrts.save_raw_crash(\n {\"test\": \"TEST\"},\n MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }),\n self.CRASH_ID_1\n )\n\n def _make_test_crash_3(self):\n self.fsrts.save_raw_crash(\n {\"test\": \"TEST\"},\n MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }),\n self.CRASH_ID_3\n )\n\n def _make_test_crash_4(self):\n self.fsrts.save_raw_crash(\n {\"test\": \"TEST\"},\n MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }),\n self.CRASH_ID_4\n )\n\n def test_save_raw_crash(self):\n self._make_test_crash()\n ok_(os.path.islink(\n os.path.join(\n self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1))))\n ok_(os.path.exists(\n os.path.join(\n self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1),\n self.CRASH_ID_1)))\n\n def test_get_raw_crash(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'],\n \"TEST\")\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash,\n self.CRASH_ID_2)\n\n def test_get_raw_dump(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'),\n \"bar\")\n eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1,\n self.fsrts.config.dump_field),\n \"baz\")\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump,\n self.CRASH_ID_2, \"foo\")\n assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1,\n \"foor\")\n\n def test_get_raw_dumps(self):\n self._make_test_crash()\n eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }))\n assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps,\n self.CRASH_ID_2)\n\n def 
test_remove(self):\n self._make_test_crash()\n self.fsrts.remove(self.CRASH_ID_1)\n\n parent = os.path.realpath(\n os.path.join(\n self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1),\n self.fsrts._get_date_root_name(self.CRASH_ID_1)))\n\n p = os.path.join(parent, self.CRASH_ID_1)\n ok_(not os.path.exists(p))\n\n assert_raises(CrashIDNotFound, self.fsrts.remove,\n self.CRASH_ID_2)\n\n def test_new_crashes(self):\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n self.fsrts._current_slot = lambda: ['00', '00_01']\n eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1])\n eq_(list(self.fsrts.new_crashes()), [])\n self.fsrts.remove(self.CRASH_ID_1)\n del self.fsrts._current_slot\n\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n\n date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,\n ['00', '00_00'])\n\n new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1,\n ['00', '00_01'])\n\n webhead_path = os.sep.join([new_date_path, 'webhead_0'])\n\n os.mkdir(new_date_path)\n os.rename(date_path, webhead_path)\n\n os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1]))\n os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix(\n self.CRASH_ID_1)),\n os.sep.join([webhead_path, self.CRASH_ID_1]))\n\n self.fsrts._current_slot = lambda: ['00', '00_02']\n eq_(list(self.fsrts.new_crashes()),\n [self.CRASH_ID_1])\n\n def test_orphaned_symlink_clean_up(self):\n # Bug 971496 identified a problem where a second crash coming in with\n # the same crash id would derail saving the second crash and leave\n # an extra undeleted symbolic link in the file system. 
This link\n # would be sited as undeleted on every run of 'new_crashes'.\n # this test shows that we can clean these extra symlinks if we\n # encounter them.\n self.fsrts._current_slot = lambda: ['00', '00_00']\n self._make_test_crash()\n self.fsrts._current_slot = lambda: ['00', '00_01']\n # make sure we can't create the duplicate in a different slot\n assert_raises(OSError, self._make_test_crash)\n # make sure the second slot exists so we can make the bogus symlink\n self._make_test_crash_3()\n # create bogus orphan link\n self.fsrts._create_name_to_date_symlink(\n self.CRASH_ID_1,\n self.fsrts._current_slot()\n )\n ok_(os.path.islink(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-'\n 'a43c20071025'\n ))\n ok_(os.path.islink(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-'\n 'a43c20071025'\n ))\n # make sure all slots in use are traversable\n self.fsrts._current_slot = lambda: ['00', '00_02']\n # run through the new_crashes iterator which will yield each of the\n # crashes that has been submitted since the last run of new_crashes.\n # this should cause all the symlinks to be removed.\n # we don't bother saving the crashes, as we don't need them.\n for x in self.fsrts.new_crashes():\n pass\n ok_(not os.path.exists(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025'\n ))\n\n def test_make_sure_days_recycle(self):\n self.fsrts._current_slot = lambda: ['00', '00_01']\n self._make_test_crash()\n self._make_test_crash_3()\n self._make_test_crash_4()\n ok_(os.path.exists(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025'\n ))\n ok_(os.path.exists(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071025'\n ))\n ok_(os.path.exists(\n './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071125'\n ))\n for x in self.fsrts.new_crashes():\n pass\n\n def _secondary_config_setup(self):\n mock_logging = Mock()\n required_config = FSLegacyDatedRadixTreeStorage.get_required_config()\n 
required_config.add_option('logger', default=mock_logging)\n config_manager = ConfigurationManager(\n [required_config],\n app_name='testapp',\n app_version='1.0',\n app_description='app description',\n values_source_list=[{\n 'logger': mock_logging,\n 'minute_slice_interval': 1\n }],\n argv_source=[]\n )\n return config_manager\n\n def test_make_sure_old_style_date_directories_are_traversed(self):\n with self._secondary_config_setup().context() as config:\n self.fsrts_old = FSLegacyDatedRadixTreeStorage(config)\n self.fsrts_old._current_slot = lambda: ['00', '00_00']\n # save crash 1 in old system\n self.fsrts_old.save_raw_crash({\n \"test\": \"TEST\"\n }, MemoryDumpsMapping({\n 'foo': 'bar',\n self.fsrts.config.dump_field: 'baz'\n }), self.CRASH_ID_1)\n ok_(os.path.exists(\n './crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-'\n 'a43c20071025'\n ))\n\n self.fsrts._current_slot = lambda: ['00', '00_00']\n #save crash 3 in new system\n self._make_test_crash_3()\n\n ok_(os.path.exists(\n './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'\n ))\n\n # consume crashes\n for x in self.fsrts.new_crashes():\n pass\n\n # should be consumed because it isn't in our working tree or slot\n ok_(not os.path.exists(\n './crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-'\n 'a43c20071025'\n ))\n\n # should not be consumed, while in working tree, it is in active slot\n ok_(os.path.exists(\n './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'\n ))\n\n # switch to next active slot\n self.fsrts._current_slot = lambda: ['00', '00_01']\n\n # consume crashes\n for x in self.fsrts.new_crashes():\n pass\n\n # should be consumed because it is in working tree and inactive slot\n ok_( not os.path.exists(\n './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025'\n 
))\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":475158,"cells":{"repo_name":{"kind":"string","value":"ianan/demreg"},"path":{"kind":"string","value":"python/dem_reg_map.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1067"},"content":{"kind":"string","value":"import numpy as np\n\ndef dem_reg_map(sigmaa,sigmab,U,W,data,err,reg_tweak,nmu=500):\n \"\"\"\n dem_reg_map\n computes the regularisation parameter\n \n Inputs\n\n sigmaa: \n gsv vector\n sigmab: \n gsv vector\n U: \n gsvd matrix\n V: \n gsvd matrix\n data: \n dn data\n err: \n dn error\n reg_tweak: \n how much to adjust the chisq each iteration\n\n Outputs\n\n opt:\n regularization paramater\n\n \"\"\"\n \n\n nf=data.shape[0]\n nreg=sigmaa.shape[0]\n\n arg=np.zeros([nreg,nmu])\n discr=np.zeros([nmu])\n\n sigs=sigmaa[:nf]/sigmab[:nf]\n maxx=max(sigs)\n minx=min(sigs)**2.0*1E-2\n\n step=(np.log(maxx)-np.log(minx))/(nmu-1.)\n mu=np.exp(np.arange(nmu)*step)*minx\n for kk in np.arange(nf):\n coef=data@U[kk,:]-sigmaa[kk]\n for ii in np.arange(nmu):\n arg[kk,ii]=(mu[ii]*sigmab[kk]**2*coef/(sigmaa[kk]**2+mu[ii]*sigmab[kk]**2))**2\n \n discr=np.sum(arg,axis=0)-np.sum(err**2)*reg_tweak\n \n opt=mu[np.argmin(np.abs(discr))]\n\n return opt"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475159,"cells":{"repo_name":{"kind":"string","value":"guorendong/iridium-browser-ubuntu"},"path":{"kind":"string","value":"third_party/webpagereplay/third_party/dns/resolver.py"},"copies":{"kind":"string","value":"215"},"size":{"kind":"string","value":"28920"},"content":{"kind":"string","value":"# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.\n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose with or without fee is hereby granted,\n# provided that the above copyright notice and this permission notice\n# appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO 
THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\n# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n\"\"\"DNS stub resolver.\n\n@var default_resolver: The default resolver object\n@type default_resolver: dns.resolver.Resolver object\"\"\"\n\nimport socket\nimport sys\nimport time\n\nimport dns.exception\nimport dns.message\nimport dns.name\nimport dns.query\nimport dns.rcode\nimport dns.rdataclass\nimport dns.rdatatype\n\nif sys.platform == 'win32':\n import _winreg\n\nclass NXDOMAIN(dns.exception.DNSException):\n \"\"\"The query name does not exist.\"\"\"\n pass\n\n# The definition of the Timeout exception has moved from here to the\n# dns.exception module. We keep dns.resolver.Timeout defined for\n# backwards compatibility.\n\nTimeout = dns.exception.Timeout\n\nclass NoAnswer(dns.exception.DNSException):\n \"\"\"The response did not contain an answer to the question.\"\"\"\n pass\n\nclass NoNameservers(dns.exception.DNSException):\n \"\"\"No non-broken nameservers are available to answer the query.\"\"\"\n pass\n\nclass NotAbsolute(dns.exception.DNSException):\n \"\"\"Raised if an absolute domain name is required but a relative name\n was provided.\"\"\"\n pass\n\nclass NoRootSOA(dns.exception.DNSException):\n \"\"\"Raised if for some reason there is no SOA at the root name.\n This should never happen!\"\"\"\n pass\n\n\nclass Answer(object):\n \"\"\"DNS stub resolver answer\n\n Instances of this class bundle up the result of a successful DNS\n resolution.\n\n For convenience, the answer object implements much of the sequence\n protocol, forwarding to its rrset. E.g. 
\"for a in answer\" is\n equivalent to \"for a in answer.rrset\", \"answer[i]\" is equivalent\n to \"answer.rrset[i]\", and \"answer[i:j]\" is equivalent to\n \"answer.rrset[i:j]\".\n\n Note that CNAMEs or DNAMEs in the response may mean that answer\n node's name might not be the query name.\n\n @ivar qname: The query name\n @type qname: dns.name.Name object\n @ivar rdtype: The query type\n @type rdtype: int\n @ivar rdclass: The query class\n @type rdclass: int\n @ivar response: The response message\n @type response: dns.message.Message object\n @ivar rrset: The answer\n @type rrset: dns.rrset.RRset object\n @ivar expiration: The time when the answer expires\n @type expiration: float (seconds since the epoch)\n \"\"\"\n def __init__(self, qname, rdtype, rdclass, response):\n self.qname = qname\n self.rdtype = rdtype\n self.rdclass = rdclass\n self.response = response\n min_ttl = -1\n rrset = None\n for count in xrange(0, 15):\n try:\n rrset = response.find_rrset(response.answer, qname,\n rdclass, rdtype)\n if min_ttl == -1 or rrset.ttl < min_ttl:\n min_ttl = rrset.ttl\n break\n except KeyError:\n if rdtype != dns.rdatatype.CNAME:\n try:\n crrset = response.find_rrset(response.answer,\n qname,\n rdclass,\n dns.rdatatype.CNAME)\n if min_ttl == -1 or crrset.ttl < min_ttl:\n min_ttl = crrset.ttl\n for rd in crrset:\n qname = rd.target\n break\n continue\n except KeyError:\n raise NoAnswer\n raise NoAnswer\n if rrset is None:\n raise NoAnswer\n self.rrset = rrset\n self.expiration = time.time() + min_ttl\n\n def __getattr__(self, attr):\n if attr == 'name':\n return self.rrset.name\n elif attr == 'ttl':\n return self.rrset.ttl\n elif attr == 'covers':\n return self.rrset.covers\n elif attr == 'rdclass':\n return self.rrset.rdclass\n elif attr == 'rdtype':\n return self.rrset.rdtype\n else:\n raise AttributeError(attr)\n\n def __len__(self):\n return len(self.rrset)\n\n def __iter__(self):\n return iter(self.rrset)\n\n def __getitem__(self, i):\n return self.rrset[i]\n\n 
def __delitem__(self, i):\n del self.rrset[i]\n\n def __getslice__(self, i, j):\n return self.rrset[i:j]\n\n def __delslice__(self, i, j):\n del self.rrset[i:j]\n\nclass Cache(object):\n \"\"\"Simple DNS answer cache.\n\n @ivar data: A dictionary of cached data\n @type data: dict\n @ivar cleaning_interval: The number of seconds between cleanings. The\n default is 300 (5 minutes).\n @type cleaning_interval: float\n @ivar next_cleaning: The time the cache should next be cleaned (in seconds\n since the epoch.)\n @type next_cleaning: float\n \"\"\"\n\n def __init__(self, cleaning_interval=300.0):\n \"\"\"Initialize a DNS cache.\n\n @param cleaning_interval: the number of seconds between periodic\n cleanings. The default is 300.0\n @type cleaning_interval: float.\n \"\"\"\n\n self.data = {}\n self.cleaning_interval = cleaning_interval\n self.next_cleaning = time.time() + self.cleaning_interval\n\n def maybe_clean(self):\n \"\"\"Clean the cache if it's time to do so.\"\"\"\n\n now = time.time()\n if self.next_cleaning <= now:\n keys_to_delete = []\n for (k, v) in self.data.iteritems():\n if v.expiration <= now:\n keys_to_delete.append(k)\n for k in keys_to_delete:\n del self.data[k]\n now = time.time()\n self.next_cleaning = now + self.cleaning_interval\n\n def get(self, key):\n \"\"\"Get the answer associated with I{key}. 
Returns None if\n no answer is cached for the key.\n @param key: the key\n @type key: (dns.name.Name, int, int) tuple whose values are the\n query name, rdtype, and rdclass.\n @rtype: dns.resolver.Answer object or None\n \"\"\"\n\n self.maybe_clean()\n v = self.data.get(key)\n if v is None or v.expiration <= time.time():\n return None\n return v\n\n def put(self, key, value):\n \"\"\"Associate key and value in the cache.\n @param key: the key\n @type key: (dns.name.Name, int, int) tuple whose values are the\n query name, rdtype, and rdclass.\n @param value: The answer being cached\n @type value: dns.resolver.Answer object\n \"\"\"\n\n self.maybe_clean()\n self.data[key] = value\n\n def flush(self, key=None):\n \"\"\"Flush the cache.\n\n If I{key} is specified, only that item is flushed. Otherwise\n the entire cache is flushed.\n\n @param key: the key to flush\n @type key: (dns.name.Name, int, int) tuple or None\n \"\"\"\n\n if not key is None:\n if self.data.has_key(key):\n del self.data[key]\n else:\n self.data = {}\n self.next_cleaning = time.time() + self.cleaning_interval\n\nclass Resolver(object):\n \"\"\"DNS stub resolver\n\n @ivar domain: The domain of this host\n @type domain: dns.name.Name object\n @ivar nameservers: A list of nameservers to query. Each nameserver is\n a string which contains the IP address of a nameserver.\n @type nameservers: list of strings\n @ivar search: The search list. If the query name is a relative name,\n the resolver will construct an absolute query name by appending the search\n names one by one to the query name.\n @type search: list of dns.name.Name objects\n @ivar port: The port to which to send queries. The default is 53.\n @type port: int\n @ivar timeout: The number of seconds to wait for a response from a\n server, before timing out.\n @type timeout: float\n @ivar lifetime: The total number of seconds to spend trying to get an\n answer to the question. 
If the lifetime expires, a Timeout exception\n will occur.\n @type lifetime: float\n @ivar keyring: The TSIG keyring to use. The default is None.\n @type keyring: dict\n @ivar keyname: The TSIG keyname to use. The default is None.\n @type keyname: dns.name.Name object\n @ivar keyalgorithm: The TSIG key algorithm to use. The default is\n dns.tsig.default_algorithm.\n @type keyalgorithm: string\n @ivar edns: The EDNS level to use. The default is -1, no Edns.\n @type edns: int\n @ivar ednsflags: The EDNS flags\n @type ednsflags: int\n @ivar payload: The EDNS payload size. The default is 0.\n @type payload: int\n @ivar cache: The cache to use. The default is None.\n @type cache: dns.resolver.Cache object\n \"\"\"\n def __init__(self, filename='/etc/resolv.conf', configure=True):\n \"\"\"Initialize a resolver instance.\n\n @param filename: The filename of a configuration file in\n standard /etc/resolv.conf format. This parameter is meaningful\n only when I{configure} is true and the platform is POSIX.\n @type filename: string or file object\n @param configure: If True (the default), the resolver instance\n is configured in the normal fashion for the operating system\n the resolver is running on. (I.e. 
a /etc/resolv.conf file on\n POSIX systems and from the registry on Windows systems.)\n @type configure: bool\"\"\"\n\n self.reset()\n if configure:\n if sys.platform == 'win32':\n self.read_registry()\n elif filename:\n self.read_resolv_conf(filename)\n\n def reset(self):\n \"\"\"Reset all resolver configuration to the defaults.\"\"\"\n self.domain = \\\n dns.name.Name(dns.name.from_text(socket.gethostname())[1:])\n if len(self.domain) == 0:\n self.domain = dns.name.root\n self.nameservers = []\n self.search = []\n self.port = 53\n self.timeout = 2.0\n self.lifetime = 30.0\n self.keyring = None\n self.keyname = None\n self.keyalgorithm = dns.tsig.default_algorithm\n self.edns = -1\n self.ednsflags = 0\n self.payload = 0\n self.cache = None\n\n def read_resolv_conf(self, f):\n \"\"\"Process f as a file in the /etc/resolv.conf format. If f is\n a string, it is used as the name of the file to open; otherwise it\n is treated as the file itself.\"\"\"\n if isinstance(f, str) or isinstance(f, unicode):\n try:\n f = open(f, 'r')\n except IOError:\n # /etc/resolv.conf doesn't exist, can't be read, etc.\n # We'll just use the default resolver configuration.\n self.nameservers = ['127.0.0.1']\n return\n want_close = True\n else:\n want_close = False\n try:\n for l in f:\n if len(l) == 0 or l[0] == '#' or l[0] == ';':\n continue\n tokens = l.split()\n if len(tokens) == 0:\n continue\n if tokens[0] == 'nameserver':\n self.nameservers.append(tokens[1])\n elif tokens[0] == 'domain':\n self.domain = dns.name.from_text(tokens[1])\n elif tokens[0] == 'search':\n for suffix in tokens[1:]:\n self.search.append(dns.name.from_text(suffix))\n finally:\n if want_close:\n f.close()\n if len(self.nameservers) == 0:\n self.nameservers.append('127.0.0.1')\n\n def _determine_split_char(self, entry):\n #\n # The windows registry irritatingly changes the list element\n # delimiter in between ' ' and ',' (and vice-versa) in various\n # versions of windows.\n #\n if entry.find(' ') >= 0:\n 
split_char = ' '\n elif entry.find(',') >= 0:\n split_char = ','\n else:\n # probably a singleton; treat as a space-separated list.\n split_char = ' '\n return split_char\n\n def _config_win32_nameservers(self, nameservers):\n \"\"\"Configure a NameServer registry entry.\"\"\"\n # we call str() on nameservers to convert it from unicode to ascii\n nameservers = str(nameservers)\n split_char = self._determine_split_char(nameservers)\n ns_list = nameservers.split(split_char)\n for ns in ns_list:\n if not ns in self.nameservers:\n self.nameservers.append(ns)\n\n def _config_win32_domain(self, domain):\n \"\"\"Configure a Domain registry entry.\"\"\"\n # we call str() on domain to convert it from unicode to ascii\n self.domain = dns.name.from_text(str(domain))\n\n def _config_win32_search(self, search):\n \"\"\"Configure a Search registry entry.\"\"\"\n # we call str() on search to convert it from unicode to ascii\n search = str(search)\n split_char = self._determine_split_char(search)\n search_list = search.split(split_char)\n for s in search_list:\n if not s in self.search:\n self.search.append(dns.name.from_text(s))\n\n def _config_win32_fromkey(self, key):\n \"\"\"Extract DNS info from a registry key.\"\"\"\n try:\n servers, rtype = _winreg.QueryValueEx(key, 'NameServer')\n except WindowsError:\n servers = None\n if servers:\n self._config_win32_nameservers(servers)\n try:\n dom, rtype = _winreg.QueryValueEx(key, 'Domain')\n if dom:\n self._config_win32_domain(dom)\n except WindowsError:\n pass\n else:\n try:\n servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')\n except WindowsError:\n servers = None\n if servers:\n self._config_win32_nameservers(servers)\n try:\n dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')\n if dom:\n self._config_win32_domain(dom)\n except WindowsError:\n pass\n try:\n search, rtype = _winreg.QueryValueEx(key, 'SearchList')\n except WindowsError:\n search = None\n if search:\n self._config_win32_search(search)\n\n def 
read_registry(self):\n \"\"\"Extract resolver configuration from the Windows registry.\"\"\"\n lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)\n want_scan = False\n try:\n try:\n # XP, 2000\n tcp_params = _winreg.OpenKey(lm,\n r'SYSTEM\\CurrentControlSet'\n r'\\Services\\Tcpip\\Parameters')\n want_scan = True\n except EnvironmentError:\n # ME\n tcp_params = _winreg.OpenKey(lm,\n r'SYSTEM\\CurrentControlSet'\n r'\\Services\\VxD\\MSTCP')\n try:\n self._config_win32_fromkey(tcp_params)\n finally:\n tcp_params.Close()\n if want_scan:\n interfaces = _winreg.OpenKey(lm,\n r'SYSTEM\\CurrentControlSet'\n r'\\Services\\Tcpip\\Parameters'\n r'\\Interfaces')\n try:\n i = 0\n while True:\n try:\n guid = _winreg.EnumKey(interfaces, i)\n i += 1\n key = _winreg.OpenKey(interfaces, guid)\n if not self._win32_is_nic_enabled(lm, guid, key):\n continue\n try:\n self._config_win32_fromkey(key)\n finally:\n key.Close()\n except EnvironmentError:\n break\n finally:\n interfaces.Close()\n finally:\n lm.Close()\n\n def _win32_is_nic_enabled(self, lm, guid, interface_key):\n # Look in the Windows Registry to determine whether the network\n # interface corresponding to the given guid is enabled.\n #\n # (Code contributed by Paul Marks, thanks!)\n #\n try:\n # This hard-coded location seems to be consistent, at least\n # from Windows 2000 through Vista.\n connection_key = _winreg.OpenKey(\n lm,\n r'SYSTEM\\CurrentControlSet\\Control\\Network'\n r'\\{4D36E972-E325-11CE-BFC1-08002BE10318}'\n r'\\%s\\Connection' % guid)\n\n try:\n # The PnpInstanceID points to a key inside Enum\n (pnp_id, ttype) = _winreg.QueryValueEx(\n connection_key, 'PnpInstanceID')\n\n if ttype != _winreg.REG_SZ:\n raise ValueError\n\n device_key = _winreg.OpenKey(\n lm, r'SYSTEM\\CurrentControlSet\\Enum\\%s' % pnp_id)\n\n try:\n # Get ConfigFlags for this device\n (flags, ttype) = _winreg.QueryValueEx(\n device_key, 'ConfigFlags')\n\n if ttype != _winreg.REG_DWORD:\n raise ValueError\n\n # Based on 
experimentation, bit 0x1 indicates that the\n # device is disabled.\n return not (flags & 0x1)\n\n finally:\n device_key.Close()\n finally:\n connection_key.Close()\n except (EnvironmentError, ValueError):\n # Pre-vista, enabled interfaces seem to have a non-empty\n # NTEContextList; this was how dnspython detected enabled\n # nics before the code above was contributed. We've retained\n # the old method since we don't know if the code above works\n # on Windows 95/98/ME.\n try:\n (nte, ttype) = _winreg.QueryValueEx(interface_key,\n 'NTEContextList')\n return nte is not None\n except WindowsError:\n return False\n\n def _compute_timeout(self, start):\n now = time.time()\n if now < start:\n if start - now > 1:\n # Time going backwards is bad. Just give up.\n raise Timeout\n else:\n # Time went backwards, but only a little. This can\n # happen, e.g. under vmware with older linux kernels.\n # Pretend it didn't happen.\n now = start\n duration = now - start\n if duration >= self.lifetime:\n raise Timeout\n return min(self.lifetime - duration, self.timeout)\n\n def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,\n tcp=False, source=None):\n \"\"\"Query nameservers to find the answer to the question.\n\n The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects\n of the appropriate type, or strings that can be converted into objects\n of the appropriate type. E.g. 
For I{rdtype} the integer 2 and the\n the string 'NS' both mean to query for records with DNS rdata type NS.\n\n @param qname: the query name\n @type qname: dns.name.Name object or string\n @param rdtype: the query type\n @type rdtype: int or string\n @param rdclass: the query class\n @type rdclass: int or string\n @param tcp: use TCP to make the query (default is False).\n @type tcp: bool\n @param source: bind to this IP address (defaults to machine default IP).\n @type source: IP address in dotted quad notation\n @rtype: dns.resolver.Answer instance\n @raises Timeout: no answers could be found in the specified lifetime\n @raises NXDOMAIN: the query name does not exist\n @raises NoAnswer: the response did not contain an answer\n @raises NoNameservers: no non-broken nameservers are available to\n answer the question.\"\"\"\n\n if isinstance(qname, (str, unicode)):\n qname = dns.name.from_text(qname, None)\n if isinstance(rdtype, str):\n rdtype = dns.rdatatype.from_text(rdtype)\n if isinstance(rdclass, str):\n rdclass = dns.rdataclass.from_text(rdclass)\n qnames_to_try = []\n if qname.is_absolute():\n qnames_to_try.append(qname)\n else:\n if len(qname) > 1:\n qnames_to_try.append(qname.concatenate(dns.name.root))\n if self.search:\n for suffix in self.search:\n qnames_to_try.append(qname.concatenate(suffix))\n else:\n qnames_to_try.append(qname.concatenate(self.domain))\n all_nxdomain = True\n start = time.time()\n for qname in qnames_to_try:\n if self.cache:\n answer = self.cache.get((qname, rdtype, rdclass))\n if answer:\n return answer\n request = dns.message.make_query(qname, rdtype, rdclass)\n if not self.keyname is None:\n request.use_tsig(self.keyring, self.keyname, self.keyalgorithm)\n request.use_edns(self.edns, self.ednsflags, self.payload)\n response = None\n #\n # make a copy of the servers list so we can alter it later.\n #\n nameservers = self.nameservers[:]\n backoff = 0.10\n while response is None:\n if len(nameservers) == 0:\n raise NoNameservers\n 
for nameserver in nameservers[:]:\n timeout = self._compute_timeout(start)\n try:\n if tcp:\n response = dns.query.tcp(request, nameserver,\n timeout, self.port,\n source=source)\n else:\n response = dns.query.udp(request, nameserver,\n timeout, self.port,\n source=source)\n except (socket.error, dns.exception.Timeout):\n #\n # Communication failure or timeout. Go to the\n # next server\n #\n response = None\n continue\n except dns.query.UnexpectedSource:\n #\n # Who knows? Keep going.\n #\n response = None\n continue\n except dns.exception.FormError:\n #\n # We don't understand what this server is\n # saying. Take it out of the mix and\n # continue.\n #\n nameservers.remove(nameserver)\n response = None\n continue\n rcode = response.rcode()\n if rcode == dns.rcode.NOERROR or \\\n rcode == dns.rcode.NXDOMAIN:\n break\n #\n # We got a response, but we're not happy with the\n # rcode in it. Remove the server from the mix if\n # the rcode isn't SERVFAIL.\n #\n if rcode != dns.rcode.SERVFAIL:\n nameservers.remove(nameserver)\n response = None\n if not response is None:\n break\n #\n # All nameservers failed!\n #\n if len(nameservers) > 0:\n #\n # But we still have servers to try. Sleep a bit\n # so we don't pound them!\n #\n timeout = self._compute_timeout(start)\n sleep_time = min(timeout, backoff)\n backoff *= 2\n time.sleep(sleep_time)\n if response.rcode() == dns.rcode.NXDOMAIN:\n continue\n all_nxdomain = False\n break\n if all_nxdomain:\n raise NXDOMAIN\n answer = Answer(qname, rdtype, rdclass, response)\n if self.cache:\n self.cache.put((qname, rdtype, rdclass), answer)\n return answer\n\n def use_tsig(self, keyring, keyname=None,\n algorithm=dns.tsig.default_algorithm):\n \"\"\"Add a TSIG signature to the query.\n\n @param keyring: The TSIG keyring to use; defaults to None.\n @type keyring: dict\n @param keyname: The name of the TSIG key to use; defaults to None.\n The key must be defined in the keyring. 
If a keyring is specified\n but a keyname is not, then the key used will be the first key in the\n keyring. Note that the order of keys in a dictionary is not defined,\n so applications should supply a keyname when a keyring is used, unless\n they know the keyring contains only one key.\n @param algorithm: The TSIG key algorithm to use. The default\n is dns.tsig.default_algorithm.\n @type algorithm: string\"\"\"\n self.keyring = keyring\n if keyname is None:\n self.keyname = self.keyring.keys()[0]\n else:\n self.keyname = keyname\n self.keyalgorithm = algorithm\n\n def use_edns(self, edns, ednsflags, payload):\n \"\"\"Configure Edns.\n\n @param edns: The EDNS level to use. The default is -1, no Edns.\n @type edns: int\n @param ednsflags: The EDNS flags\n @type ednsflags: int\n @param payload: The EDNS payload size. The default is 0.\n @type payload: int\"\"\"\n\n if edns is None:\n edns = -1\n self.edns = edns\n self.ednsflags = ednsflags\n self.payload = payload\n\ndefault_resolver = None\n\ndef get_default_resolver():\n \"\"\"Get the default resolver, initializing it if necessary.\"\"\"\n global default_resolver\n if default_resolver is None:\n default_resolver = Resolver()\n return default_resolver\n\ndef query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,\n tcp=False, source=None):\n \"\"\"Query nameservers to find the answer to the question.\n\n This is a convenience function that uses the default resolver\n object to make the query.\n @see: L{dns.resolver.Resolver.query} for more information on the\n parameters.\"\"\"\n return get_default_resolver().query(qname, rdtype, rdclass, tcp, source)\n\ndef zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):\n \"\"\"Find the name of the zone which contains the specified name.\n\n @param name: the query name\n @type name: absolute dns.name.Name object or string\n @param rdclass: The query class\n @type rdclass: int\n @param tcp: use TCP to make the query (default is False).\n @type 
tcp: bool\n @param resolver: the resolver to use\n @type resolver: dns.resolver.Resolver object or None\n @rtype: dns.name.Name\"\"\"\n\n if isinstance(name, (str, unicode)):\n name = dns.name.from_text(name, dns.name.root)\n if resolver is None:\n resolver = get_default_resolver()\n if not name.is_absolute():\n raise NotAbsolute(name)\n while 1:\n try:\n answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)\n return name\n except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):\n try:\n name = name.parent()\n except dns.name.NoParent:\n raise NoRootSOA\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475160,"cells":{"repo_name":{"kind":"string","value":"ezequielpereira/Time-Line"},"path":{"kind":"string","value":"autopilot/autopilotlib/instructions/selectmenu.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3493"},"content":{"kind":"string","value":"# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg\n#\n# This file is part of Timeline.\n#\n# Timeline is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Timeline is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Timeline. 
If not, see .\n\n\nimport wx\n\nfrom autopilotlib.instructions.instruction import Instruction\nfrom autopilotlib.app.logger import Logger\nfrom autopilotlib.app.exceptions import NotFoundException\nfrom autopilotlib.app.decorators import Overrides\n\n\nclass SelectMenuInstruction(Instruction):\n \"\"\"\n 0 1 2 3 4 5 6 7\n command object ( arg1 , arg2 [ , arg]* )\n \n command ::= Select\n object ::= Menu | Mnu\n arg ::= STRING | TEXT \n \n Select a menu in the the menu hierarchy, given by the args.\n At least 2 targets must be present.\n \n Example 1: Select menu (Show, Sidebar)\n Example 2: Select menu (Show, \"Balloons on hover\")\n Example 3: Select Menu(File, New, \"File Timeline...\") \n \"\"\" \n \n @Overrides(Instruction) \n def execute(self, manuscript, win):\n manuscript.execute_next_instruction()\n self._select_menu(win)\n \n def _select_menu(self, win):\n try:\n item_id = self._find_menu_item_id(win)\n win.click_menu_item(item_id) \n except NotFoundException:\n Logger.add_error(\"Menu not found\")\n \n def _find_menu_item_id(self, win):\n labels = self.get_all_args()\n menu_bar = self._get_menu_bar(win)\n inx = menu_bar.FindMenu(labels[0])\n menu = menu_bar.GetMenu(inx)\n labels = labels [1:]\n while len(labels) > 0:\n item_id = self._get_menu_item_id(menu, labels[0])\n if len(labels) > 1:\n menu_item = menu_bar.FindItemById(item_id)\n menu = menu_item.GetSubMenu()\n labels = labels [1:]\n return item_id\n\n def _get_menu_bar(self, win):\n menu_bar = win.GetMenuBar()\n if menu_bar is None:\n raise NotFoundException()\n return menu_bar\n\n def _get_menu_item_id(self, menu, label):\n valid_labels = self._get_valid_labels(label)\n for label in valid_labels:\n item_id = menu.FindItem(label)\n if item_id != wx.NOT_FOUND:\n return item_id\n return wx.NOT_FOUND\n \n def _get_valid_labels(self, label):\n valid_labels = [label]\n self._get_elipsis_label(label, valid_labels)\n self._get_accelerator_labels(label, valid_labels)\n return valid_labels \n \n def 
_get_elipsis_label(self, label, alternative_labels):\n alternative_labels.append(label + \"...\")\n \n def _get_accelerator_labels(self, label, alternative_labels):\n for i in range(len(label)):\n alternative_label = label[0:i] + \"&\" + label[i:]\n alternative_labels.append(alternative_label)\n return alternative_labels\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475161,"cells":{"repo_name":{"kind":"string","value":"xbmc/atv2"},"path":{"kind":"string","value":"xbmc/lib/libPython/Python/Tools/scripts/combinerefs.py"},"copies":{"kind":"string","value":"102"},"size":{"kind":"string","value":"4381"},"content":{"kind":"string","value":"#! /usr/bin/env python\n\n\"\"\"\ncombinerefs path\n\nA helper for analyzing PYTHONDUMPREFS output.\n\nWhen the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown\ntime Py_Finalize() prints the list of all live objects twice: first it\nprints the repr() of each object while the interpreter is still fully intact.\nAfter cleaning up everything it can, it prints all remaining live objects\nagain, but the second time just prints their addresses, refcounts, and type\nnames (because the interpreter has been torn down, calling repr methods at\nthis point can get into infinite loops or blow up).\n\nSave all this output into a file, then run this script passing the path to\nthat file. 
The script finds both output chunks, combines them, then prints\na line of output for each object still alive at the end:\n\n address refcnt typename repr\n\naddress is the address of the object, in whatever format the platform C\nproduces for a %p format code.\n\nrefcnt is of the form\n\n \"[\" ref \"]\"\n\nwhen the object's refcount is the same in both PYTHONDUMPREFS output blocks,\nor\n\n \"[\" ref_before \"->\" ref_after \"]\"\n\nif the refcount changed.\n\ntypename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS\noutput block.\n\nrepr is repr(object), extracted from the first PYTHONDUMPREFS output block.\nCAUTION: If object is a container type, it may not actually contain all the\nobjects shown in the repr: the repr was captured from the first output block,\nand some of the containees may have been released since then. For example,\nit's common for the line showing the dict of interned strings to display\nstrings that no longer exist at the end of Py_Finalize; this can be recognized\n(albeit painfully) because such containees don't have a line of their own.\n\nThe objects are listed in allocation order, with most-recently allocated\nprinted first, and the first object allocated printed last.\n\n\nSimple examples:\n\n 00857060 [14] str '__len__'\n\nThe str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS\noutput blocks said there were 14 references to it. This is probably due to\nC modules that intern the string \"__len__\" and keep a reference to it in a\nfile static.\n\n 00857038 [46->5] tuple ()\n\n46-5 = 41 references to the empty tuple were removed by the cleanup actions\nbetween the times PYTHONDUMPREFS produced output.\n\n 00858028 [1025->1456] str ''\n\nThe string '', which is used in dictobject.c to overwrite a real\nkey that gets deleted, grew several hundred references during cleanup. 
It\nsuggests that stuff did get removed from dicts by cleanup, but that the dicts\nthemselves are staying alive for some reason. \"\"\"\n\nimport re\nimport sys\n\n# Generate lines from fileiter. If whilematch is true, continue reading\n# while the regexp object pat matches line. If whilematch is false, lines\n# are read so long as pat doesn't match them. In any case, the first line\n# that doesn't match pat (when whilematch is true), or that does match pat\n# (when whilematch is false), is lost, and fileiter will resume at the line\n# following it.\ndef read(fileiter, pat, whilematch):\n for line in fileiter:\n if bool(pat.match(line)) == whilematch:\n yield line\n else:\n break\n\ndef combine(fname):\n f = file(fname)\n fi = iter(f)\n\n for line in read(fi, re.compile(r'^Remaining objects:$'), False):\n pass\n\n crack = re.compile(r'([a-zA-Z\\d]+) \\[(\\d+)\\] (.*)')\n addr2rc = {}\n addr2guts = {}\n before = 0\n for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):\n m = crack.match(line)\n if m:\n addr, addr2rc[addr], addr2guts[addr] = m.groups()\n before += 1\n else:\n print '??? skipped:', line\n\n after = 0\n for line in read(fi, crack, True):\n after += 1\n m = crack.match(line)\n assert m\n addr, rc, guts = m.groups() # guts is type name here\n if addr not in addr2rc:\n print '??? 
new object created while tearing down:', line.rstrip()\n continue\n print addr,\n if rc == addr2rc[addr]:\n print '[%s]' % rc,\n else:\n print '[%s->%s]' % (addr2rc[addr], rc),\n print guts, addr2guts[addr]\n\n f.close()\n print \"%d objects before, %d after\" % (before, after)\n\nif __name__ == '__main__':\n combine(sys.argv[1])\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475162,"cells":{"repo_name":{"kind":"string","value":"matthew-tucker/mne-python"},"path":{"kind":"string","value":"examples/inverse/plot_read_inverse.py"},"copies":{"kind":"string","value":"42"},"size":{"kind":"string","value":"1384"},"content":{"kind":"string","value":"\"\"\"\n===========================\nReading an inverse operator\n===========================\n\nThe inverse operator's source space is shown in 3D.\n\"\"\"\n# Author: Alexandre Gramfort \n#\n# License: BSD (3-clause)\n\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nfname = data_path\nfname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\n\ninv = read_inverse_operator(fname)\n\nprint(\"Method: %s\" % inv['methods'])\nprint(\"fMRI prior: %s\" % inv['fmri_prior'])\nprint(\"Number of sources: %s\" % inv['nsource'])\nprint(\"Number of channels: %s\" % inv['nchan'])\n\n###############################################################################\n# Show result on 3D source space\nlh_points = inv['src'][0]['rr']\nlh_faces = inv['src'][0]['use_tris']\nrh_points = inv['src'][1]['rr']\nrh_faces = inv['src'][1]['use_tris']\nfrom mayavi import mlab # noqa\n\nmlab.figure(size=(600, 600), bgcolor=(0, 0, 0))\nmesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2],\n lh_faces, colormap='RdBu')\nmesh.module_manager.scalar_lut_manager.reverse_lut = True\n\nmesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2],\n rh_faces, 
colormap='RdBu')\nmesh.module_manager.scalar_lut_manager.reverse_lut = True\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475163,"cells":{"repo_name":{"kind":"string","value":"swapnakrishnan2k/tp-qemu"},"path":{"kind":"string","value":"qemu/tests/nx.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"2652"},"content":{"kind":"string","value":"import os\nimport logging\n\nfrom autotest.client.shared import error\n\nfrom virttest import data_dir\n\n\n@error.context_aware\ndef run(test, params, env):\n \"\"\"\n try to exploit the guest to test whether nx(cpu) bit takes effect.\n\n 1) boot the guest\n 2) cp the exploit prog into the guest\n 3) run the exploit\n\n :param test: QEMU test object\n :param params: Dictionary with the test parameters\n :param env: Dictionary with test environment.\n \"\"\"\n\n vm = env.get_vm(params[\"main_vm\"])\n vm.verify_alive()\n session = vm.wait_for_login(timeout=int(params.get(\"login_timeout\", 360)))\n\n exploit_cmd = params.get(\"exploit_cmd\", \"\")\n if not exploit_cmd or session.cmd_status(\"test -x %s\" % exploit_cmd):\n exploit_file = os.path.join(data_dir.get_deps_dir(), 'nx', 'x64_sc_rdo.c')\n dst_dir = '/tmp'\n\n error.context(\"Copy the Exploit file to guest.\", logging.info)\n vm.copy_files_to(exploit_file, dst_dir)\n\n error.context(\"Build exploit program in guest.\", logging.info)\n build_exploit = \"gcc -o /tmp/nx_exploit /tmp/x64_sc_rdo.c\"\n if session.cmd_status(build_exploit):\n raise error.TestError(\"Failed to build the exploit program\")\n\n exploit_cmd = \"/tmp/nx_exploit\"\n\n error.context(\"Run exploit program in guest.\", logging.info)\n # if nx is enabled (by default), the program failed.\n # segmentation error. 
return value of shell is not zero.\n exec_res = session.cmd_status(exploit_cmd)\n nx_on = params.get('nx_on', 'yes')\n if nx_on == 'yes':\n if exec_res:\n logging.info('NX works good.')\n error.context(\"Using execstack to remove the protection.\",\n logging.info)\n enable_exec = 'execstack -s %s' % exploit_cmd\n if session.cmd_status(enable_exec):\n if session.cmd_status(\"execstack --help\"):\n msg = \"Please make sure guest have execstack command.\"\n raise error.TestError(msg)\n raise error.TestError('Failed to enable the execstack')\n\n if session.cmd_status(exploit_cmd):\n raise error.TestFail('NX is still protecting. Error.')\n else:\n logging.info('NX is disabled as desired. good')\n else:\n raise error.TestFail('Fatal Error: NX does not protect anything!')\n else:\n if exec_res:\n msg = \"qemu fail to disable 'nx' flag or the exploit is corrupted.\"\n raise error.TestError(msg)\n else:\n logging.info('NX is disabled, and this Test Case passed.')\n if session:\n session.close()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475164,"cells":{"repo_name":{"kind":"string","value":"juanmont/one"},"path":{"kind":"string","value":".vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/transforms/frontmatter.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"19456"},"content":{"kind":"string","value":"# $Id: frontmatter.py 7897 2015-05-29 11:48:20Z milde $\n# Author: David Goodger, Ueli Schlaepfer \n# Copyright: This module has been placed in the public domain.\n\n\"\"\"\nTransforms related to the front matter of a document or a section\n(information found before the main text):\n\n- `DocTitle`: Used to transform a lone top level section's title to\n the document title, promote a remaining lone top-level section's\n title to the document subtitle, and determine the document's title\n metadata (document['title']) based on the document title and/or the\n \"title\" setting.\n\n- `SectionSubTitle`: Used to transform a 
lone subsection into a\n subtitle.\n\n- `DocInfo`: Used to transform a bibliographic field list into docinfo\n elements.\n\"\"\"\n\n__docformat__ = 'reStructuredText'\n\nimport re\nfrom docutils import nodes, utils\nfrom docutils.transforms import TransformError, Transform\n\n\nclass TitlePromoter(Transform):\n\n \"\"\"\n Abstract base class for DocTitle and SectionSubTitle transforms.\n \"\"\"\n\n def promote_title(self, node):\n \"\"\"\n Transform the following tree::\n\n \n
\n \n ...\n\n into ::\n\n <node>\n <title>\n ...\n\n `node` is normally a document.\n \"\"\"\n # Type check\n if not isinstance(node, nodes.Element):\n raise TypeError, 'node must be of Element-derived type.'\n\n # `node` must not have a title yet.\n assert not (len(node) and isinstance(node[0], nodes.title))\n section, index = self.candidate_index(node)\n if index is None:\n return None\n\n # Transfer the section's attributes to the node:\n # NOTE: Change second parameter to False to NOT replace\n # attributes that already exist in node with those in\n # section\n # NOTE: Remove third parameter to NOT copy the 'source'\n # attribute from section\n node.update_all_atts_concatenating(section, True, True)\n\n # setup_child is called automatically for all nodes.\n node[:] = (section[:1] # section title\n + node[:index] # everything that was in the\n # node before the section\n + section[1:]) # everything that was in the section\n assert isinstance(node[0], nodes.title)\n return 1\n\n def promote_subtitle(self, node):\n \"\"\"\n Transform the following node tree::\n\n <node>\n <title>\n <section>\n <title>\n ...\n\n into ::\n\n <node>\n <title>\n <subtitle>\n ...\n \"\"\"\n # Type check\n if not isinstance(node, nodes.Element):\n raise TypeError, 'node must be of Element-derived type.'\n\n subsection, index = self.candidate_index(node)\n if index is None:\n return None\n subtitle = nodes.subtitle()\n\n # Transfer the subsection's attributes to the new subtitle\n # NOTE: Change second parameter to False to NOT replace\n # attributes that already exist in node with those in\n # section\n # NOTE: Remove third parameter to NOT copy the 'source'\n # attribute from section\n subtitle.update_all_atts_concatenating(subsection, True, True)\n\n # Transfer the contents of the subsection's title to the\n # subtitle:\n subtitle[:] = subsection[0][:]\n node[:] = (node[:1] # title\n + [subtitle]\n # everything that was before the section:\n + node[1:index]\n # everything that was in 
the subsection:\n + subsection[1:])\n return 1\n\n def candidate_index(self, node):\n \"\"\"\n Find and return the promotion candidate and its index.\n\n Return (None, None) if no valid candidate was found.\n \"\"\"\n index = node.first_child_not_matching_class(\n nodes.PreBibliographic)\n if index is None or len(node) > (index + 1) or \\\n not isinstance(node[index], nodes.section):\n return None, None\n else:\n return node[index], index\n\n\nclass DocTitle(TitlePromoter):\n\n \"\"\"\n In reStructuredText_, there is no way to specify a document title\n and subtitle explicitly. Instead, we can supply the document title\n (and possibly the subtitle as well) implicitly, and use this\n two-step transform to \"raise\" or \"promote\" the title(s) (and their\n corresponding section contents) to the document level.\n\n 1. If the document contains a single top-level section as its\n first non-comment element, the top-level section's title\n becomes the document's title, and the top-level section's\n contents become the document's immediate contents. The lone\n top-level section header must be the first non-comment element\n in the document.\n\n For example, take this input text::\n\n =================\n Top-Level Title\n =================\n\n A paragraph.\n\n Once parsed, it looks like this::\n\n <document>\n <section names=\"top-level title\">\n <title>\n Top-Level Title\n <paragraph>\n A paragraph.\n\n After running the DocTitle transform, we have::\n\n <document names=\"top-level title\">\n <title>\n Top-Level Title\n <paragraph>\n A paragraph.\n\n 2. If step 1 successfully determines the document title, we\n continue by checking for a subtitle.\n\n If the lone top-level section itself contains a single\n second-level section as its first non-comment element, that\n section's title is promoted to the document's subtitle, and\n that section's contents become the document's immediate\n contents. 
Given this input text::\n\n =================\n Top-Level Title\n =================\n\n Second-Level Title\n ~~~~~~~~~~~~~~~~~~\n\n A paragraph.\n\n After parsing and running the Section Promotion transform, the\n result is::\n\n <document names=\"top-level title\">\n <title>\n Top-Level Title\n <subtitle names=\"second-level title\">\n Second-Level Title\n <paragraph>\n A paragraph.\n\n (Note that the implicit hyperlink target generated by the\n \"Second-Level Title\" is preserved on the \"subtitle\" element\n itself.)\n\n Any comment elements occurring before the document title or\n subtitle are accumulated and inserted as the first body elements\n after the title(s).\n\n This transform also sets the document's metadata title\n (document['title']).\n\n .. _reStructuredText: http://docutils.sf.net/rst.html\n \"\"\"\n\n default_priority = 320\n\n def set_metadata(self):\n \"\"\"\n Set document['title'] metadata title from the following\n sources, listed in order of priority:\n\n * Existing document['title'] attribute.\n * \"title\" setting.\n * Document title node (as promoted by promote_title).\n \"\"\"\n if not self.document.hasattr('title'):\n if self.document.settings.title is not None:\n self.document['title'] = self.document.settings.title\n elif len(self.document) and isinstance(self.document[0], nodes.title):\n self.document['title'] = self.document[0].astext()\n\n def apply(self):\n if getattr(self.document.settings, 'doctitle_xform', 1):\n # promote_(sub)title defined in TitlePromoter base class.\n if self.promote_title(self.document):\n # If a title has been promoted, also try to promote a\n # subtitle.\n self.promote_subtitle(self.document)\n # Set document['title'].\n self.set_metadata()\n\n\nclass SectionSubTitle(TitlePromoter):\n\n \"\"\"\n This works like document subtitles, but for sections. 
For example, ::\n\n <section>\n <title>\n Title\n <section>\n <title>\n Subtitle\n ...\n\n is transformed into ::\n\n <section>\n <title>\n Title\n <subtitle>\n Subtitle\n ...\n\n For details refer to the docstring of DocTitle.\n \"\"\"\n\n default_priority = 350\n\n def apply(self):\n if not getattr(self.document.settings, 'sectsubtitle_xform', 1):\n return\n for section in self.document.traverse(nodes.section):\n # On our way through the node tree, we are deleting\n # sections, but we call self.promote_subtitle for those\n # sections nonetheless. To do: Write a test case which\n # shows the problem and discuss on Docutils-develop.\n self.promote_subtitle(section)\n\n\nclass DocInfo(Transform):\n\n \"\"\"\n This transform is specific to the reStructuredText_ markup syntax;\n see \"Bibliographic Fields\" in the `reStructuredText Markup\n Specification`_ for a high-level description. This transform\n should be run *after* the `DocTitle` transform.\n\n Given a field list as the first non-comment element after the\n document title and subtitle (if present), registered bibliographic\n field names are transformed to the corresponding DTD elements,\n becoming child elements of the \"docinfo\" element (except for a\n dedication and/or an abstract, which become \"topic\" elements after\n \"docinfo\").\n\n For example, given this document fragment after parsing::\n\n <document>\n <title>\n Document Title\n <field_list>\n <field>\n <field_name>\n Author\n <field_body>\n <paragraph>\n A. Name\n <field>\n <field_name>\n Status\n <field_body>\n <paragraph>\n $RCSfile$\n ...\n\n After running the bibliographic field list transform, the\n resulting document tree would look like this::\n\n <document>\n <title>\n Document Title\n <docinfo>\n <author>\n A. Name\n <status>\n frontmatter.py\n ...\n\n The \"Status\" field contained an expanded RCS keyword, which is\n normally (but optionally) cleaned up by the transform. 
The sole\n contents of the field body must be a paragraph containing an\n expanded RCS keyword of the form \"$keyword: expansion text $\". Any\n RCS keyword can be processed in any bibliographic field. The\n dollar signs and leading RCS keyword name are removed. Extra\n processing is done for the following RCS keywords:\n\n - \"RCSfile\" expands to the name of the file in the RCS or CVS\n repository, which is the name of the source file with a \",v\"\n suffix appended. The transform will remove the \",v\" suffix.\n\n - \"Date\" expands to the format \"YYYY/MM/DD hh:mm:ss\" (in the UTC\n time zone). The RCS Keywords transform will extract just the\n date itself and transform it to an ISO 8601 format date, as in\n \"2000-12-31\".\n\n (Since the source file for this text is itself stored under CVS,\n we can't show an example of the \"Date\" RCS keyword because we\n can't prevent any RCS keywords used in this explanation from\n being expanded. Only the \"RCSfile\" keyword is stable; its\n expansion text changes only if the file name changes.)\n\n .. _reStructuredText: http://docutils.sf.net/rst.html\n .. 
_reStructuredText Markup Specification:\n http://docutils.sf.net/docs/ref/rst/restructuredtext.html\n \"\"\"\n\n default_priority = 340\n\n biblio_nodes = {\n 'author': nodes.author,\n 'authors': nodes.authors,\n 'organization': nodes.organization,\n 'address': nodes.address,\n 'contact': nodes.contact,\n 'version': nodes.version,\n 'revision': nodes.revision,\n 'status': nodes.status,\n 'date': nodes.date,\n 'copyright': nodes.copyright,\n 'dedication': nodes.topic,\n 'abstract': nodes.topic}\n \"\"\"Canonical field name (lowcased) to node class name mapping for\n bibliographic fields (field_list).\"\"\"\n\n def apply(self):\n if not getattr(self.document.settings, 'docinfo_xform', 1):\n return\n document = self.document\n index = document.first_child_not_matching_class(\n nodes.PreBibliographic)\n if index is None:\n return\n candidate = document[index]\n if isinstance(candidate, nodes.field_list):\n biblioindex = document.first_child_not_matching_class(\n (nodes.Titular, nodes.Decorative))\n nodelist = self.extract_bibliographic(candidate)\n del document[index] # untransformed field list (candidate)\n document[biblioindex:biblioindex] = nodelist\n\n def extract_bibliographic(self, field_list):\n docinfo = nodes.docinfo()\n bibliofields = self.language.bibliographic_fields\n labels = self.language.labels\n topics = {'dedication': None, 'abstract': None}\n for field in field_list:\n try:\n name = field[0][0].astext()\n normedname = nodes.make_id(name)\n if not (len(field) == 2 and normedname in bibliofields\n and self.check_empty_biblio_field(field, name)):\n raise TransformError\n canonical = bibliofields[normedname]\n biblioclass = self.biblio_nodes[canonical]\n if issubclass(biblioclass, nodes.TextElement):\n if not self.check_compound_biblio_field(field, name):\n raise TransformError\n utils.clean_rcs_keywords(\n field[1][0], self.rcs_keyword_substitutions)\n docinfo.append(biblioclass('', '', *field[1][0]))\n elif issubclass(biblioclass, nodes.authors):\n 
self.extract_authors(field, name, docinfo)\n elif issubclass(biblioclass, nodes.topic):\n if topics[canonical]:\n field[-1] += self.document.reporter.warning(\n 'There can only be one \"%s\" field.' % name,\n base_node=field)\n raise TransformError\n title = nodes.title(name, labels[canonical])\n topics[canonical] = biblioclass(\n '', title, classes=[canonical], *field[1].children)\n else:\n docinfo.append(biblioclass('', *field[1].children))\n except TransformError:\n if len(field[-1]) == 1 \\\n and isinstance(field[-1][0], nodes.paragraph):\n utils.clean_rcs_keywords(\n field[-1][0], self.rcs_keyword_substitutions)\n if normedname and normedname not in bibliofields:\n field['classes'].append(normedname)\n docinfo.append(field)\n nodelist = []\n if len(docinfo) != 0:\n nodelist.append(docinfo)\n for name in ('dedication', 'abstract'):\n if topics[name]:\n nodelist.append(topics[name])\n return nodelist\n\n def check_empty_biblio_field(self, field, name):\n if len(field[-1]) < 1:\n field[-1] += self.document.reporter.warning(\n 'Cannot extract empty bibliographic field \"%s\".' % name,\n base_node=field)\n return None\n return 1\n\n def check_compound_biblio_field(self, field, name):\n if len(field[-1]) > 1:\n field[-1] += self.document.reporter.warning(\n 'Cannot extract compound bibliographic field \"%s\".' % name,\n base_node=field)\n return None\n if not isinstance(field[-1][0], nodes.paragraph):\n field[-1] += self.document.reporter.warning(\n 'Cannot extract bibliographic field \"%s\" containing '\n 'anything other than a single paragraph.' 
% name,\n base_node=field)\n return None\n return 1\n\n rcs_keyword_substitutions = [\n (re.compile(r'\\$' r'Date: (\\d\\d\\d\\d)[-/](\\d\\d)[-/](\\d\\d)[ T][\\d:]+'\n r'[^$]* \\$', re.IGNORECASE), r'\\1-\\2-\\3'),\n (re.compile(r'\\$' r'RCSfile: (.+),v \\$', re.IGNORECASE), r'\\1'),\n (re.compile(r'\\$[a-zA-Z]+: (.+) \\$'), r'\\1'),]\n\n def extract_authors(self, field, name, docinfo):\n try:\n if len(field[1]) == 1:\n if isinstance(field[1][0], nodes.paragraph):\n authors = self.authors_from_one_paragraph(field)\n elif isinstance(field[1][0], nodes.bullet_list):\n authors = self.authors_from_bullet_list(field)\n else:\n raise TransformError\n else:\n authors = self.authors_from_paragraphs(field)\n authornodes = [nodes.author('', '', *author)\n for author in authors if author]\n if len(authornodes) >= 1:\n docinfo.append(nodes.authors('', *authornodes))\n else:\n raise TransformError\n except TransformError:\n field[-1] += self.document.reporter.warning(\n 'Bibliographic field \"%s\" incompatible with extraction: '\n 'it must contain either a single paragraph (with authors '\n 'separated by one of \"%s\"), multiple paragraphs (one per '\n 'author), or a bullet list with one paragraph (one author) '\n 'per item.'\n % (name, ''.join(self.language.author_separators)),\n base_node=field)\n raise\n\n def authors_from_one_paragraph(self, field):\n text = field[1][0].astext().strip()\n if not text:\n raise TransformError\n for authorsep in self.language.author_separators:\n authornames = text.split(authorsep)\n if len(authornames) > 1:\n break\n authornames = [author.strip() for author in authornames]\n authors = [[nodes.Text(author)] for author in authornames if author]\n return authors\n\n def authors_from_bullet_list(self, field):\n authors = []\n for item in field[1][0]:\n if len(item) != 1 or not isinstance(item[0], nodes.paragraph):\n raise TransformError\n authors.append(item[0].children)\n if not authors:\n raise TransformError\n return authors\n\n def 
authors_from_paragraphs(self, field):\n for item in field[1]:\n if not isinstance(item, nodes.paragraph):\n raise TransformError\n authors = [item.children for item in field[1]]\n return authors\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475165,"cells":{"repo_name":{"kind":"string","value":"fnp/pylucene"},"path":{"kind":"string","value":"test/test_BooleanPrefixQuery.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2671"},"content":{"kind":"string","value":"# ====================================================================\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ====================================================================\n\nfrom unittest import TestCase, main\nfrom lucene import *\n\n\nclass BooleanPrefixQueryTestCase(TestCase):\n \"\"\"\n Unit tests ported from Java Lucene\n \"\"\"\n\n def getCount(self, r, q):\n\n if BooleanQuery.instance_(q):\n return len(BooleanQuery.cast_(q).getClauses())\n elif ConstantScoreQuery.instance_(q):\n iter = ConstantScoreQuery.cast_(q).getFilter().getDocIdSet(r).iterator()\n count = 0\n while iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS:\n count += 1\n\n return count\n else:\n self.fail(\"unexpected query \" + q)\n\n def testMethod(self):\n\n directory = RAMDirectory()\n categories = [\"food\", \"foodanddrink\", \"foodanddrinkandgoodtimes\",\n \"food and drink\"]\n\n try:\n writer = IndexWriter(directory, WhitespaceAnalyzer(), True,\n 
IndexWriter.MaxFieldLength.LIMITED)\n for category in categories:\n doc = Document()\n doc.add(Field(\"category\", category, Field.Store.YES,\n Field.Index.NOT_ANALYZED))\n writer.addDocument(doc)\n\n writer.close()\n \n reader = IndexReader.open(directory, True)\n query = PrefixQuery(Term(\"category\", \"foo\"))\n rw1 = query.rewrite(reader)\n \n bq = BooleanQuery()\n bq.add(query, BooleanClause.Occur.MUST)\n \n rw2 = bq.rewrite(reader)\n except Exception, e:\n self.fail(e)\n\n self.assertEqual(self.getCount(reader, rw1), self.getCount(reader, rw2),\n \"Number of Clauses Mismatch\")\n\n\nif __name__ == \"__main__\":\n import sys, lucene\n lucene.initVM()\n if '-loop' in sys.argv:\n sys.argv.remove('-loop')\n while True:\n try:\n main()\n except:\n pass\n else:\n main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475166,"cells":{"repo_name":{"kind":"string","value":"cython-testbed/pandas"},"path":{"kind":"string","value":"pandas/tests/extension/base/dtype.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"2874"},"content":{"kind":"string","value":"import warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom .base import BaseExtensionTests\n\n\nclass BaseDtypeTests(BaseExtensionTests):\n \"\"\"Base class for ExtensionDtype classes\"\"\"\n\n def test_name(self, dtype):\n assert isinstance(dtype.name, str)\n\n def test_kind(self, dtype):\n valid = set('biufcmMOSUV')\n if dtype.kind is not None:\n assert dtype.kind in valid\n\n def test_construct_from_string_own_name(self, dtype):\n result = dtype.construct_from_string(dtype.name)\n assert type(result) is type(dtype)\n\n # check OK as classmethod\n result = type(dtype).construct_from_string(dtype.name)\n assert type(result) is type(dtype)\n\n def test_is_dtype_from_name(self, dtype):\n result = type(dtype).is_dtype(dtype.name)\n assert result is True\n\n def test_is_dtype_unboxes_dtype(self, data, dtype):\n assert dtype.is_dtype(data) is True\n\n def 
test_is_dtype_from_self(self, dtype):\n result = type(dtype).is_dtype(dtype)\n assert result is True\n\n def test_is_not_string_type(self, dtype):\n return not pd.api.types.is_string_dtype(dtype)\n\n def test_is_not_object_type(self, dtype):\n return not pd.api.types.is_object_dtype(dtype)\n\n def test_eq_with_str(self, dtype):\n assert dtype == dtype.name\n assert dtype != dtype.name + '-suffix'\n\n def test_eq_with_numpy_object(self, dtype):\n assert dtype != np.dtype('object')\n\n def test_eq_with_self(self, dtype):\n assert dtype == dtype\n assert dtype != object()\n\n def test_array_type(self, data, dtype):\n assert dtype.construct_array_type() is type(data)\n\n def test_check_dtype(self, data):\n dtype = data.dtype\n\n # check equivalency for using .dtypes\n df = pd.DataFrame({'A': pd.Series(data, dtype=dtype),\n 'B': data,\n 'C': 'foo', 'D': 1})\n\n # np.dtype('int64') == 'Int64' == 'int64'\n # so can't distinguish\n if dtype.name == 'Int64':\n expected = pd.Series([True, True, False, True],\n index=list('ABCD'))\n else:\n expected = pd.Series([True, True, False, False],\n index=list('ABCD'))\n\n # XXX: This should probably be *fixed* not ignored.\n # See libops.scalar_compare\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n result = df.dtypes == str(dtype)\n\n self.assert_series_equal(result, expected)\n\n expected = pd.Series([True, True, False, False],\n index=list('ABCD'))\n result = df.dtypes.apply(str) == str(dtype)\n self.assert_series_equal(result, expected)\n\n def test_hashable(self, dtype):\n hash(dtype) # no error\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475167,"cells":{"repo_name":{"kind":"string","value":"andreparames/odoo"},"path":{"kind":"string","value":"addons/website_membership/models/product.py"},"copies":{"kind":"string","value":"338"},"size":{"kind":"string","value":"1264"},"content":{"kind":"string","value":"# -*- coding: utf-8 
-*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\n\nclass product_template(osv.Model):\n _inherit = 'product.template'\n\n _columns = {\n 'website_published': fields.boolean('Available in the website', copy=False),\n }\n _defaults = {\n 'website_published': False,\n }\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475168,"cells":{"repo_name":{"kind":"string","value":"jylaxp/django"},"path":{"kind":"string","value":"django/db/migrations/operations/models.py"},"copies":{"kind":"string","value":"290"},"size":{"kind":"string","value":"21735"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.db.migrations.operations.base import Operation\nfrom django.db.migrations.state import ModelState\nfrom django.db.models.options import normalize_together\nfrom django.utils import six\nfrom django.utils.functional import cached_property\n\n\nclass CreateModel(Operation):\n \"\"\"\n Create a model's table.\n \"\"\"\n\n serialization_expand_args = ['fields', 'options', 
'managers']\n\n def __init__(self, name, fields, options=None, bases=None, managers=None):\n self.name = name\n self.fields = fields\n self.options = options or {}\n self.bases = bases or (models.Model,)\n self.managers = managers or []\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'fields': self.fields,\n }\n if self.options:\n kwargs['options'] = self.options\n if self.bases and self.bases != (models.Model,):\n kwargs['bases'] = self.bases\n if self.managers and self.managers != [('objects', models.Manager())]:\n kwargs['managers'] = self.managers\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n state.add_model(ModelState(\n app_label,\n self.name,\n list(self.fields),\n dict(self.options),\n tuple(self.bases),\n list(self.managers),\n ))\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.create_model(model)\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n model = from_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.delete_model(model)\n\n def describe(self):\n return \"Create %smodel %s\" % (\"proxy \" if self.options.get(\"proxy\", False) else \"\", self.name)\n\n def references_model(self, name, app_label=None):\n strings_to_check = [self.name]\n # Check we didn't inherit from the model\n for base in self.bases:\n if isinstance(base, six.string_types):\n strings_to_check.append(base.split(\".\")[-1])\n # Check we have no FKs/M2Ms with it\n for fname, field in self.fields:\n if field.remote_field:\n if isinstance(field.remote_field.model, six.string_types):\n strings_to_check.append(field.remote_field.model.split(\".\")[-1])\n # Now 
go over all the strings and compare them\n for string in strings_to_check:\n if string.lower() == name.lower():\n return True\n return False\n\n\nclass DeleteModel(Operation):\n \"\"\"\n Drops a model's table.\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n state.remove_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n model = from_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.delete_model(model)\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, model):\n schema_editor.create_model(model)\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def describe(self):\n return \"Delete model %s\" % (self.name, )\n\n\nclass RenameModel(Operation):\n \"\"\"\n Renames a model.\n \"\"\"\n\n def __init__(self, old_name, new_name):\n self.old_name = old_name\n self.new_name = new_name\n\n @cached_property\n def old_name_lower(self):\n return self.old_name.lower()\n\n @cached_property\n def new_name_lower(self):\n return self.new_name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'old_name': self.old_name,\n 'new_name': self.new_name,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n apps = state.apps\n model = apps.get_model(app_label, self.old_name)\n model._meta.apps = apps\n # Get all of the related objects we need to repoint\n all_related_objects = (\n f for f in model._meta.get_fields(include_hidden=True)\n if 
f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)\n )\n # Rename the model\n state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]\n state.models[app_label, self.new_name_lower].name = self.new_name\n state.remove_model(app_label, self.old_name_lower)\n # Repoint the FKs and M2Ms pointing to us\n for related_object in all_related_objects:\n if related_object.model is not model:\n # The model being renamed does not participate in this relation\n # directly. Rather, a superclass does.\n continue\n # Use the new related key for self referential related objects.\n if related_object.related_model == model:\n related_key = (app_label, self.new_name_lower)\n else:\n related_key = (\n related_object.related_model._meta.app_label,\n related_object.related_model._meta.model_name,\n )\n new_fields = []\n for name, field in state.models[related_key].fields:\n if name == related_object.field.name:\n field = field.clone()\n field.remote_field.model = \"%s.%s\" % (app_label, self.new_name)\n new_fields.append((name, field))\n state.models[related_key].fields = new_fields\n state.reload_model(*related_key)\n state.reload_model(app_label, self.new_name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n new_model = to_state.apps.get_model(app_label, self.new_name)\n if self.allow_migrate_model(schema_editor.connection.alias, new_model):\n old_model = from_state.apps.get_model(app_label, self.old_name)\n # Move the main table\n schema_editor.alter_db_table(\n new_model,\n old_model._meta.db_table,\n new_model._meta.db_table,\n )\n # Alter the fields pointing to us\n for related_object in old_model._meta.related_objects:\n if related_object.related_model == old_model:\n model = new_model\n related_key = (app_label, self.new_name_lower)\n else:\n model = related_object.related_model\n related_key = (\n related_object.related_model._meta.app_label,\n 
related_object.related_model._meta.model_name,\n )\n to_field = to_state.apps.get_model(\n *related_key\n )._meta.get_field(related_object.field.name)\n schema_editor.alter_field(\n model,\n related_object.field,\n to_field,\n )\n # Rename M2M fields whose name is based on this model's name.\n fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)\n for (old_field, new_field) in fields:\n # Skip self-referential fields as these are renamed above.\n if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:\n continue\n # Rename the M2M table that's based on this model's name.\n old_m2m_model = old_field.remote_field.through\n new_m2m_model = new_field.remote_field.through\n schema_editor.alter_db_table(\n new_m2m_model,\n old_m2m_model._meta.db_table,\n new_m2m_model._meta.db_table,\n )\n # Rename the column in the M2M table that's based on this\n # model's name.\n schema_editor.alter_field(\n new_m2m_model,\n old_m2m_model._meta.get_field(old_model._meta.model_name),\n new_m2m_model._meta.get_field(new_model._meta.model_name),\n )\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower\n self.new_name, self.old_name = self.old_name, self.new_name\n\n self.database_forwards(app_label, schema_editor, from_state, to_state)\n\n self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower\n self.new_name, self.old_name = self.old_name, self.new_name\n\n def references_model(self, name, app_label=None):\n return (\n name.lower() == self.old_name_lower or\n name.lower() == self.new_name_lower\n )\n\n def describe(self):\n return \"Rename model %s to %s\" % (self.old_name, self.new_name)\n\n\nclass AlterModelTable(Operation):\n \"\"\"\n Renames a model's table\n \"\"\"\n\n def __init__(self, name, table):\n self.name = name\n self.table = table\n\n 
@cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'table': self.table,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n state.models[app_label, self.name_lower].options[\"db_table\"] = self.table\n state.reload_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n new_model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, new_model):\n old_model = from_state.apps.get_model(app_label, self.name)\n schema_editor.alter_db_table(\n new_model,\n old_model._meta.db_table,\n new_model._meta.db_table,\n )\n # Rename M2M fields whose name is based on this model's db_table\n for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):\n if new_field.remote_field.through._meta.auto_created:\n schema_editor.alter_db_table(\n new_field.remote_field.through,\n old_field.remote_field.through._meta.db_table,\n new_field.remote_field.through._meta.db_table,\n )\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n return self.database_forwards(app_label, schema_editor, from_state, to_state)\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def describe(self):\n return \"Rename table for %s to %s\" % (self.name, self.table)\n\n\nclass AlterUniqueTogether(Operation):\n \"\"\"\n Changes the value of unique_together to the target one.\n Input value of unique_together must be a set of tuples.\n \"\"\"\n option_name = \"unique_together\"\n\n def __init__(self, name, unique_together):\n self.name = name\n unique_together = normalize_together(unique_together)\n self.unique_together = set(tuple(cons) for cons in unique_together)\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n 
def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'unique_together': self.unique_together,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n model_state = state.models[app_label, self.name_lower]\n model_state.options[self.option_name] = self.unique_together\n state.reload_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n new_model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, new_model):\n old_model = from_state.apps.get_model(app_label, self.name)\n schema_editor.alter_unique_together(\n new_model,\n getattr(old_model._meta, self.option_name, set()),\n getattr(new_model._meta, self.option_name, set()),\n )\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n return self.database_forwards(app_label, schema_editor, from_state, to_state)\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def references_field(self, model_name, name, app_label=None):\n return (\n self.references_model(model_name, app_label) and\n (\n not self.unique_together or\n any((name in together) for together in self.unique_together)\n )\n )\n\n def describe(self):\n return \"Alter %s for %s (%s constraint(s))\" % (self.option_name, self.name, len(self.unique_together or ''))\n\n\nclass AlterIndexTogether(Operation):\n \"\"\"\n Changes the value of index_together to the target one.\n Input value of index_together must be a set of tuples.\n \"\"\"\n option_name = \"index_together\"\n\n def __init__(self, name, index_together):\n self.name = name\n index_together = normalize_together(index_together)\n self.index_together = set(tuple(cons) for cons in index_together)\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'index_together': 
self.index_together,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n model_state = state.models[app_label, self.name_lower]\n model_state.options[self.option_name] = self.index_together\n state.reload_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n new_model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, new_model):\n old_model = from_state.apps.get_model(app_label, self.name)\n schema_editor.alter_index_together(\n new_model,\n getattr(old_model._meta, self.option_name, set()),\n getattr(new_model._meta, self.option_name, set()),\n )\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n return self.database_forwards(app_label, schema_editor, from_state, to_state)\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def references_field(self, model_name, name, app_label=None):\n return (\n self.references_model(model_name, app_label) and\n (\n not self.index_together or\n any((name in together) for together in self.index_together)\n )\n )\n\n def describe(self):\n return \"Alter %s for %s (%s constraint(s))\" % (self.option_name, self.name, len(self.index_together or ''))\n\n\nclass AlterOrderWithRespectTo(Operation):\n \"\"\"\n Represents a change with the order_with_respect_to option.\n \"\"\"\n\n def __init__(self, name, order_with_respect_to):\n self.name = name\n self.order_with_respect_to = order_with_respect_to\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'order_with_respect_to': self.order_with_respect_to,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n model_state = state.models[app_label, self.name_lower]\n 
model_state.options['order_with_respect_to'] = self.order_with_respect_to\n state.reload_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n to_model = to_state.apps.get_model(app_label, self.name)\n if self.allow_migrate_model(schema_editor.connection.alias, to_model):\n from_model = from_state.apps.get_model(app_label, self.name)\n # Remove a field if we need to\n if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to:\n schema_editor.remove_field(from_model, from_model._meta.get_field(\"_order\"))\n # Add a field if we need to (altering the column is untouched as\n # it's likely a rename)\n elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to:\n field = to_model._meta.get_field(\"_order\")\n if not field.has_default():\n field.default = 0\n schema_editor.add_field(\n from_model,\n field,\n )\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n self.database_forwards(app_label, schema_editor, from_state, to_state)\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def references_field(self, model_name, name, app_label=None):\n return (\n self.references_model(model_name, app_label) and\n (\n self.order_with_respect_to is None or\n name == self.order_with_respect_to\n )\n )\n\n def describe(self):\n return \"Set order_with_respect_to on %s to %s\" % (self.name, self.order_with_respect_to)\n\n\nclass AlterModelOptions(Operation):\n \"\"\"\n Sets new model options that don't directly affect the database schema\n (like verbose_name, permissions, ordering). 
Python code in migrations\n may still need them.\n \"\"\"\n\n # Model options we want to compare and preserve in an AlterModelOptions op\n ALTER_OPTION_KEYS = [\n \"get_latest_by\",\n \"managed\",\n \"ordering\",\n \"permissions\",\n \"default_permissions\",\n \"select_on_save\",\n \"verbose_name\",\n \"verbose_name_plural\",\n ]\n\n def __init__(self, name, options):\n self.name = name\n self.options = options\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n kwargs = {\n 'name': self.name,\n 'options': self.options,\n }\n return (\n self.__class__.__name__,\n [],\n kwargs\n )\n\n def state_forwards(self, app_label, state):\n model_state = state.models[app_label, self.name_lower]\n model_state.options = dict(model_state.options)\n model_state.options.update(self.options)\n for key in self.ALTER_OPTION_KEYS:\n if key not in self.options and key in model_state.options:\n del model_state.options[key]\n state.reload_model(app_label, self.name_lower)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def describe(self):\n return \"Change Meta options on %s\" % (self.name, )\n\n\nclass AlterModelManagers(Operation):\n \"\"\"\n Alters the model's managers\n \"\"\"\n\n serialization_expand_args = ['managers']\n\n def __init__(self, name, managers):\n self.name = name\n self.managers = managers\n\n @cached_property\n def name_lower(self):\n return self.name.lower()\n\n def deconstruct(self):\n return (\n self.__class__.__name__,\n [self.name, self.managers],\n {}\n )\n\n def state_forwards(self, app_label, state):\n model_state = state.models[app_label, self.name_lower]\n model_state.managers = list(self.managers)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n 
pass\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def references_model(self, name, app_label=None):\n return name.lower() == self.name_lower\n\n def describe(self):\n return \"Change managers on %s\" % (self.name, )\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475169,"cells":{"repo_name":{"kind":"string","value":"zanderle/django"},"path":{"kind":"string","value":"tests/template_tests/syntax_tests/test_filter_tag.py"},"copies":{"kind":"string","value":"521"},"size":{"kind":"string","value":"1795"},"content":{"kind":"string","value":"from django.template import TemplateSyntaxError\nfrom django.test import SimpleTestCase\n\nfrom ..utils import setup\n\n\nclass FilterTagTests(SimpleTestCase):\n\n @setup({'filter01': '{% filter upper %}{% endfilter %}'})\n def test_filter01(self):\n output = self.engine.render_to_string('filter01')\n self.assertEqual(output, '')\n\n @setup({'filter02': '{% filter upper %}django{% endfilter %}'})\n def test_filter02(self):\n output = self.engine.render_to_string('filter02')\n self.assertEqual(output, 'DJANGO')\n\n @setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'})\n def test_filter03(self):\n output = self.engine.render_to_string('filter03')\n self.assertEqual(output, 'django')\n\n @setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'})\n def test_filter04(self):\n output = self.engine.render_to_string('filter04', {'remove': 'spam'})\n self.assertEqual(output, 'django')\n\n @setup({'filter05': '{% filter safe %}fail{% endfilter %}'})\n def test_filter05(self):\n with self.assertRaises(TemplateSyntaxError):\n self.engine.get_template('filter05')\n\n @setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'})\n def test_filter05bis(self):\n with self.assertRaises(TemplateSyntaxError):\n self.engine.get_template('filter05bis')\n\n @setup({'filter06': '{% filter escape %}fail{% endfilter %}'})\n def test_filter06(self):\n with 
self.assertRaises(TemplateSyntaxError):\n self.engine.get_template('filter06')\n\n @setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'})\n def test_filter06bis(self):\n with self.assertRaises(TemplateSyntaxError):\n self.engine.get_template('filter06bis')\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475170,"cells":{"repo_name":{"kind":"string","value":"jamesmarva/docker-py"},"path":{"kind":"string","value":"docker/errors.py"},"copies":{"kind":"string","value":"39"},"size":{"kind":"string","value":"2469"},"content":{"kind":"string","value":"# Copyright 2014 dotCloud inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\n\nclass APIError(requests.exceptions.HTTPError):\n def __init__(self, message, response, explanation=None):\n # requests 1.2 supports response as a keyword argument, but\n # requests 1.1 doesn't\n super(APIError, self).__init__(message)\n self.response = response\n\n self.explanation = explanation\n\n if self.explanation is None and response.content:\n self.explanation = response.content.strip()\n\n def __str__(self):\n message = super(APIError, self).__str__()\n\n if self.is_client_error():\n message = '{0} Client Error: {1}'.format(\n self.response.status_code, self.response.reason)\n\n elif self.is_server_error():\n message = '{0} Server Error: {1}'.format(\n self.response.status_code, self.response.reason)\n\n if self.explanation:\n message = '{0} (\"{1}\")'.format(message, 
self.explanation)\n\n return message\n\n def is_client_error(self):\n return 400 <= self.response.status_code < 500\n\n def is_server_error(self):\n return 500 <= self.response.status_code < 600\n\n\nclass DockerException(Exception):\n pass\n\n\nclass NotFound(APIError):\n pass\n\n\nclass InvalidVersion(DockerException):\n pass\n\n\nclass InvalidRepository(DockerException):\n pass\n\n\nclass InvalidConfigFile(DockerException):\n pass\n\n\nclass DeprecatedMethod(DockerException):\n pass\n\n\nclass TLSParameterError(DockerException):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg + (\". TLS configurations should map the Docker CLI \"\n \"client configurations. See \"\n \"http://docs.docker.com/examples/https/ for \"\n \"API details.\")\n\n\nclass NullResource(DockerException, ValueError):\n pass\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475171,"cells":{"repo_name":{"kind":"string","value":"Zanzibar82/script.module.urlresolver"},"path":{"kind":"string","value":"lib/urlresolver/plugins/vidstream.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"2431"},"content":{"kind":"string","value":"\"\"\"\n urlresolver XBMC Addon\n Copyright (C) 2011 t0mm0\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport re\nfrom t0mm0.common.net import Net\nfrom urlresolver.plugnplay.interfaces import UrlResolver\nfrom urlresolver.plugnplay.interfaces import PluginSettings\nfrom urlresolver.plugnplay import Plugin\nfrom urlresolver import common\n\nclass VidstreamResolver(Plugin, UrlResolver, PluginSettings):\n implements = [UrlResolver, PluginSettings]\n name = \"vidstream\"\n domains = [\"vidstream.in\"]\n\n def __init__(self):\n p = self.get_setting('priority') or 100\n self.priority = int(p)\n self.net = Net()\n #e.g. http://vidstream.in/xdfaay6ccwqj\n self.pattern = 'http://((?:www.)?vidstream.in)/(.*)'\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n resp = self.net.http_GET(web_url)\n html = resp.content\n post_url = resp.get_url()\n form_values = {}\n for i in re.finditer('<input.*?name=\"(.*?)\".*?value=\"(.*?)\">', html):\n form_values[i.group(1)] = i.group(2)\n html = self.net.http_POST(post_url, form_data=form_values).content\n\n # get stream url\n pattern = 'file:\\s*\"([^\"]+)\",'\n r = re.search(pattern, html)\n if r:\n return r.group(1)\n else:\n raise UrlResolver.ResolverError('File Not Found or removed')\n\n def get_url(self, host, media_id):\n return 'http://vidstream.in/%s' % (media_id)\n\n def get_host_and_id(self, url):\n r = re.search(self.pattern, url)\n if r:\n return r.groups()\n else:\n return False\n\n def valid_url(self, url, host):\n if self.get_setting('enabled') == 'false': return False\n return re.match(self.pattern, url) or self.name in host\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475172,"cells":{"repo_name":{"kind":"string","value":"varunnaganathan/django"},"path":{"kind":"string","value":"django/utils/dateformat.py"},"copies":{"kind":"string","value":"110"},"size":{"kind":"string","value":"11592"},"content":{"kind":"string","value":"\"\"\"\nPHP date() style date formatting\nSee http://www.php.net/date for format 
strings\n\nUsage:\n>>> import datetime\n>>> d = datetime.datetime.now()\n>>> df = DateFormat(d)\n>>> print(df.format('jS F Y H:i'))\n7th October 2003 11:39\n>>>\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport calendar\nimport datetime\nimport re\nimport time\n\nfrom django.utils import six\nfrom django.utils.dates import (\n MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,\n)\nfrom django.utils.encoding import force_text\nfrom django.utils.timezone import get_default_timezone, is_aware, is_naive\nfrom django.utils.translation import ugettext as _\n\nre_formatchars = re.compile(r'(?<!\\\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')\nre_escaped = re.compile(r'\\\\(.)')\n\n\nclass Formatter(object):\n def format(self, formatstr):\n pieces = []\n for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):\n if i % 2:\n pieces.append(force_text(getattr(self, piece)()))\n elif piece:\n pieces.append(re_escaped.sub(r'\\1', piece))\n return ''.join(pieces)\n\n\nclass TimeFormat(Formatter):\n\n def __init__(self, obj):\n self.data = obj\n self.timezone = None\n\n # We only support timezone when formatting datetime objects,\n # not date objects (timezone information not appropriate),\n # or time objects (against established django policy).\n if isinstance(obj, datetime.datetime):\n if is_naive(obj):\n self.timezone = get_default_timezone()\n else:\n self.timezone = obj.tzinfo\n\n def a(self):\n \"'a.m.' 
or 'p.m.'\"\n if self.data.hour > 11:\n return _('p.m.')\n return _('a.m.')\n\n def A(self):\n \"'AM' or 'PM'\"\n if self.data.hour > 11:\n return _('PM')\n return _('AM')\n\n def B(self):\n \"Swatch Internet time\"\n raise NotImplementedError('may be implemented in a future release')\n\n def e(self):\n \"\"\"\n Timezone name.\n\n If timezone information is not available, this method returns\n an empty string.\n \"\"\"\n if not self.timezone:\n return \"\"\n\n try:\n if hasattr(self.data, 'tzinfo') and self.data.tzinfo:\n # Have to use tzinfo.tzname and not datetime.tzname\n # because datatime.tzname does not expect Unicode\n return self.data.tzinfo.tzname(self.data) or \"\"\n except NotImplementedError:\n pass\n return \"\"\n\n def f(self):\n \"\"\"\n Time, in 12-hour hours and minutes, with minutes left off if they're\n zero.\n Examples: '1', '1:30', '2:05', '2'\n Proprietary extension.\n \"\"\"\n if self.data.minute == 0:\n return self.g()\n return '%s:%s' % (self.g(), self.i())\n\n def g(self):\n \"Hour, 12-hour format without leading zeros; i.e. '1' to '12'\"\n if self.data.hour == 0:\n return 12\n if self.data.hour > 12:\n return self.data.hour - 12\n return self.data.hour\n\n def G(self):\n \"Hour, 24-hour format without leading zeros; i.e. '0' to '23'\"\n return self.data.hour\n\n def h(self):\n \"Hour, 12-hour format; i.e. '01' to '12'\"\n return '%02d' % self.g()\n\n def H(self):\n \"Hour, 24-hour format; i.e. '00' to '23'\"\n return '%02d' % self.G()\n\n def i(self):\n \"Minutes; i.e. '00' to '59'\"\n return '%02d' % self.data.minute\n\n def O(self):\n \"\"\"\n Difference to Greenwich time in hours; e.g. 
'+0200', '-0430'.\n\n If timezone information is not available, this method returns\n an empty string.\n \"\"\"\n if not self.timezone:\n return \"\"\n\n seconds = self.Z()\n if seconds == \"\":\n return \"\"\n sign = '-' if seconds < 0 else '+'\n seconds = abs(seconds)\n return \"%s%02d%02d\" % (sign, seconds // 3600, (seconds // 60) % 60)\n\n def P(self):\n \"\"\"\n Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off\n if they're zero and the strings 'midnight' and 'noon' if appropriate.\n Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'\n Proprietary extension.\n \"\"\"\n if self.data.minute == 0 and self.data.hour == 0:\n return _('midnight')\n if self.data.minute == 0 and self.data.hour == 12:\n return _('noon')\n return '%s %s' % (self.f(), self.a())\n\n def s(self):\n \"Seconds; i.e. '00' to '59'\"\n return '%02d' % self.data.second\n\n def T(self):\n \"\"\"\n Time zone of this machine; e.g. 'EST' or 'MDT'.\n\n If timezone information is not available, this method returns\n an empty string.\n \"\"\"\n if not self.timezone:\n return \"\"\n\n name = None\n try:\n name = self.timezone.tzname(self.data)\n except Exception:\n # pytz raises AmbiguousTimeError during the autumn DST change.\n # This happens mainly when __init__ receives a naive datetime\n # and sets self.timezone = get_default_timezone().\n pass\n if name is None:\n name = self.format('O')\n return six.text_type(name)\n\n def u(self):\n \"Microseconds; i.e. '000000' to '999999'\"\n return '%06d' % self.data.microsecond\n\n def Z(self):\n \"\"\"\n Time zone offset in seconds (i.e. '-43200' to '43200'). 
The offset for\n timezones west of UTC is always negative, and for those east of UTC is\n always positive.\n\n If timezone information is not available, this method returns\n an empty string.\n \"\"\"\n if not self.timezone:\n return \"\"\n\n try:\n offset = self.timezone.utcoffset(self.data)\n except Exception:\n # pytz raises AmbiguousTimeError during the autumn DST change.\n # This happens mainly when __init__ receives a naive datetime\n # and sets self.timezone = get_default_timezone().\n return \"\"\n\n # `offset` is a datetime.timedelta. For negative values (to the west of\n # UTC) only days can be negative (days=-1) and seconds are always\n # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)\n # Positive offsets have days=0\n return offset.days * 86400 + offset.seconds\n\n\nclass DateFormat(TimeFormat):\n year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]\n\n def b(self):\n \"Month, textual, 3 letters, lowercase; e.g. 'jan'\"\n return MONTHS_3[self.data.month]\n\n def c(self):\n \"\"\"\n ISO 8601 Format\n Example : '2008-01-02T10:30:00.000123'\n \"\"\"\n return self.data.isoformat()\n\n def d(self):\n \"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'\"\n return '%02d' % self.data.day\n\n def D(self):\n \"Day of the week, textual, 3 letters; e.g. 'Fri'\"\n return WEEKDAYS_ABBR[self.data.weekday()]\n\n def E(self):\n \"Alternative month names as required by some locales. Proprietary extension.\"\n return MONTHS_ALT[self.data.month]\n\n def F(self):\n \"Month, textual, long; e.g. 
'January'\"\n return MONTHS[self.data.month]\n\n def I(self):\n \"'1' if Daylight Savings Time, '0' otherwise.\"\n try:\n if self.timezone and self.timezone.dst(self.data):\n return '1'\n else:\n return '0'\n except Exception:\n # pytz raises AmbiguousTimeError during the autumn DST change.\n # This happens mainly when __init__ receives a naive datetime\n # and sets self.timezone = get_default_timezone().\n return ''\n\n def j(self):\n \"Day of the month without leading zeros; i.e. '1' to '31'\"\n return self.data.day\n\n def l(self):\n \"Day of the week, textual, long; e.g. 'Friday'\"\n return WEEKDAYS[self.data.weekday()]\n\n def L(self):\n \"Boolean for whether it is a leap year; i.e. True or False\"\n return calendar.isleap(self.data.year)\n\n def m(self):\n \"Month; i.e. '01' to '12'\"\n return '%02d' % self.data.month\n\n def M(self):\n \"Month, textual, 3 letters; e.g. 'Jan'\"\n return MONTHS_3[self.data.month].title()\n\n def n(self):\n \"Month without leading zeros; i.e. '1' to '12'\"\n return self.data.month\n\n def N(self):\n \"Month abbreviation in Associated Press style. Proprietary extension.\"\n return MONTHS_AP[self.data.month]\n\n def o(self):\n \"ISO 8601 year number matching the ISO week number (W)\"\n return self.data.isocalendar()[0]\n\n def r(self):\n \"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'\"\n return self.format('D, j M Y H:i:s O')\n\n def S(self):\n \"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'\"\n if self.data.day in (11, 12, 13): # Special case\n return 'th'\n last = self.data.day % 10\n if last == 1:\n return 'st'\n if last == 2:\n return 'nd'\n if last == 3:\n return 'rd'\n return 'th'\n\n def t(self):\n \"Number of days in the given month; i.e. 
'28' to '31'\"\n return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]\n\n def U(self):\n \"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)\"\n if isinstance(self.data, datetime.datetime) and is_aware(self.data):\n return int(calendar.timegm(self.data.utctimetuple()))\n else:\n return int(time.mktime(self.data.timetuple()))\n\n def w(self):\n \"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)\"\n return (self.data.weekday() + 1) % 7\n\n def W(self):\n \"ISO-8601 week number of year, weeks starting on Monday\"\n # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt\n week_number = None\n jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1\n weekday = self.data.weekday() + 1\n day_of_year = self.z()\n if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:\n if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):\n week_number = 53\n else:\n week_number = 52\n else:\n if calendar.isleap(self.data.year):\n i = 366\n else:\n i = 365\n if (i - day_of_year) < (4 - weekday):\n week_number = 1\n else:\n j = day_of_year + (7 - weekday) + (jan1_weekday - 1)\n week_number = j // 7\n if jan1_weekday > 4:\n week_number -= 1\n return week_number\n\n def y(self):\n \"Year, 2 digits; e.g. '99'\"\n return six.text_type(self.data.year)[2:]\n\n def Y(self):\n \"Year, 4 digits; e.g. '1999'\"\n return self.data.year\n\n def z(self):\n \"Day of the year; i.e. 
'0' to '365'\"\n doy = self.year_days[self.data.month] + self.data.day\n if self.L() and self.data.month > 2:\n doy += 1\n return doy\n\n\ndef format(value, format_string):\n \"Convenience function\"\n df = DateFormat(value)\n return df.format(format_string)\n\n\ndef time_format(value, format_string):\n \"Convenience function\"\n tf = TimeFormat(value)\n return tf.format(format_string)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475173,"cells":{"repo_name":{"kind":"string","value":"teamfx/openjfx-9-dev-rt"},"path":{"kind":"string","value":"modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/find_files_unittest.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"2675"},"content":{"kind":"string","value":"# Copyright (C) 2011 Google Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nimport unittest\n\nfrom webkitpy.common.system.filesystem import FileSystem\nimport find_files\n\n\nclass MockWinFileSystem(object):\n def join(self, *paths):\n return '\\\\'.join(paths)\n\n def normpath(self, path):\n return path.replace('/', '\\\\')\n\n\nclass TestWinNormalize(unittest.TestCase):\n def assert_filesystem_normalizes(self, filesystem):\n self.assertEqual(find_files._normalize(filesystem, \"c:\\\\foo\",\n ['fast/html', 'fast/canvas/*', 'compositing/foo.html']),\n ['c:\\\\foo\\\\fast\\html', 'c:\\\\foo\\\\fast\\canvas\\*', 'c:\\\\foo\\compositing\\\\foo.html'])\n\n def test_mocked_win(self):\n # This tests test_files.normalize, using portable behavior emulating\n # what we think Windows is supposed to do. 
This test will run on all\n # platforms.\n self.assert_filesystem_normalizes(MockWinFileSystem())\n\n def test_win(self):\n # This tests the actual windows platform, to ensure we get the same\n # results that we get in test_mocked_win().\n if not sys.platform.startswith('win'):\n return\n self.assert_filesystem_normalizes(FileSystem())\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475174,"cells":{"repo_name":{"kind":"string","value":"coecms/CMIP5-utils"},"path":{"kind":"string","value":"fetch_step2.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"17182"},"content":{"kind":"string","value":"# Paola Petrelli - paolap@utas.edu.au 4th March 2014\n# Last changed on 26th of March 2014\n# Updates list:\n# 26/03/2014 - output files and table csv file are created after \n# collecting data; calling process_file with multiprocessing \n# module to speed up hash checksum (md5/sha256) \n# 01/04/2014 - exclude the ACCESS and CSIRO models from check\n# 03/09/2014 trying to substitute the google file with a csv table\n# 01/12/2014 script has been divided into two steps, this is first step fetch_step1.py\n# that runs search on ESGF node and can be run interactively, the second step fetch_step2.py should be run in the queue\n# 21/05/2015 comments updated, introduce argparse to manage inputs, added extra argument\n# \"node\" to choose automatically between different nodes: only pcmdi and dkrz (default) are available at the moment\n# 09/02/2016 pmcdi9.llnl.gov changed to pcmdi.llnl.gov, in step2 added extra file path checks to take into account that servers pcmdi3/7/9 are now aims3\n#\n# Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5\n# published files responding to the constraints passed as arguments.\n# The search is run on one of the ESGF node but it searches through all the available\n# nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. 
At least one variable and experiment\n# should be specified but models are optionals. The search is limited to the first 10000 matches,\n# to change this you have to change the pcmdi_url variable in the code.\n# The second step returns 3 files listing: the published files available on raijin (variables_replica.csv),\n# the published files that need downloading and/or updating (variables_to_download.csv),\n# the variable/model/experiment combination not yet published (variables_not_published).\n# Uses md5/sha256 checksum to determine if a file already existing on raijin is exactly the same as the latest published version\n# If you have to parse a big number of files, you can speed up the process by using multithread module \"Pool\"\n# if you're doing this you should run the second step in the queue, which is the reason why the script is split into 2 steps.\n# To do that you can change the threads number from 1 (to run interactively) to the number of cpus you're requesting, in line 340 \n# async_results = Pool(16).map_async(process_file, result)\n# The maximum number of threads depends on the number of cpus you're using, in above example 16 cpus.\n#\n# If the \"table\" option is selected it returns also a table csv file summarising the search results. 
\n#\n# The CMIP5 replica data is stored on raijin.nci.org.au under\n# /g/data1/ua6/unofficial-ESG-replica/tmp/tree\n#\n# Example of how to run on raijin.nci.org.au\n#\n# module load python/2.7.3 (default on raijin)\n# python fetch_step2.py -v ua_Amon tos_Omon -m CCSM4 -e rcp45 -o out -t\n# NB needs python version 2.7 or more recent\n#\n# - the variable argument is passed as variable-name_cmip-table, this avoids confusion if looking for variables from different cmip tables\n# - multiple arguments can be passed to \"-v\", \"-m\", \"-e\";\n# - to pass multiple arguments, declare the option once followed by all the desired values (as above);\n# - default output files root is \"variables\"\n# - you need to pass at least one experiment and one variable, models are optional.\n# - output file is optional, default is \"variables\"\n# - table is optional, default is False\n\nimport sys, argparse\nimport subprocess, re, itertools\nfrom multiprocessing import Pool\nimport os.path as opath # to manage files and dirs\n\n# help functions\ndef VarCmipTable(v):\n if \"_\" not in v:\n raise TypeError(\"String '%s' does not match required format: var_cmip-table, ie tas_Amon\"%(v,))\n else:\n return v\n\ndef parse_input():\n ''' Parse input arguments '''\n parser = argparse.ArgumentParser(description='''Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5\n published files responding to the constraints passed as arguments.\n The search is run on one of the ESGF node but it searches through all the available\n nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. At least one variable and experiment\n should be specified but models are optionals. 
The search is limited to the first 1000 matches,\n to change this you have to change the pcmdi_url variable in the code.''')\n parser.add_argument('-e','--experiment', type=str, nargs=\"*\", help='CMIP5 experiment', required=True)\n parser.add_argument('-m','--model', type=str, nargs=\"*\", help='', required=False)\n parser.add_argument('-v','--variable', type=VarCmipTable, nargs=\"*\", help='combination of CMIP5 variable & cmip_table Ex. tas_Amon', required=True)\n parser.add_argument('-t','--table', action='store_true', default='store_false', help=\"csv table option, default is False\",\n required=False)\n parser.add_argument('-o','--output', type=str, nargs=\"?\", default=\"variables\", help='''output files root, \n default is variables''', required=False)\n return vars(parser.parse_args())\n\n sys.exit()\n\n\ndef assign_constraint():\n ''' Assign default values and input to constraints '''\n global var0, exp0, mod0, table, outfile\n var0 = []\n exp0 = []\n mod0 = []\n outfile = 'variables'\n# assign constraints from arguments list\n args = parse_input()\n var0=args[\"variable\"]\n if args[\"model\"]: mod0=args[\"model\"]\n exp0=args[\"experiment\"]\n table=args[\"table\"]\n outfile=args[\"output\"]\n return\n\n\ndef correct_model(model):\n ''' Correct name of models that have two, to make search work '''\n# list model as dict{dir name : search name}\n models={\"ACCESS1-0\" : \"ACCESS1.0\", \"ACCESS1-3\" : \"ACCESS1.3\",\n \"CESM1-BGC\" : \"CESM1(BGC)\", \"CESM1-CAM5\" : \"CESM1(CAM5)\",\n \"CESM1-CAM5-1-FV2\" : \"CESM1(CAM5.1,FV2)\", \"CESM1-WACCM\" : \"CESM1(WACCM)\",\n \"CESM1-FASTCHEM\" : \"CESM1(FASTCHEM)\", \"bcc-csm1-1\" : \"BCC-CSM1.1\",\n \"bcc-csm1-1-m\" : \"BCC-CSM1.1(m)\", \"inmcm4\" : \"INM-CM4\"} \n# if the current model is one of the dict keys, change name\n if model in models.keys():\n return models[model]\n return model\n\n\ndef tree_exist(furl):\n ''' Return True if file exists in tmp/tree '''\n replica_dir = 
\"/g/data1/ua6/unofficial-ESG-replica/tmp/tree/\"\n tree_path = replica_dir + furl \n return [opath.exists(tree_path),tree_path]\n\n \ndef write_file():\n ''' Write info on file to download or replica output '''\n global info\n files = {\"R\" : orep, \"D\" : odown}\n for item in info.values():\n outfile = files[item[-1]]\n outfile.write(\",\".join(item[0:-1])+\"\\n\")\n\n\ndef file_details(fname):\n ''' Split the filename in variable, MIP code, model, experiment, ensemble (period is excluded) '''\n namebits = fname.replace(\"'\",\"\").split('_')\n if len(namebits) >= 5:\n details = namebits[0:5]\n else:\n details = []\n return details\n\n\ndef find_string(bits,string):\n ''' Returns matching string if found in directory structure '''\n dummy = filter(lambda el: re.findall( string, el), bits)\n if len(dummy) == 0:\n return 'no_version'\n else:\n return dummy[0]\n\n\ndef get_info(fname,path):\n ''' Collect the info on a file form its path return it in a list '''\n version = '[a-z]*201[0-9][0-1][0-9][0-3][0-9]'\n bits = path.split('/')\n finfo = file_details(fname)\n finfo.append(find_string(bits[:-1],version)) \n finfo.append(path) \n return finfo\n\n\n\ndef parse_file(wgetfile,varlist,modlist,exp):\n ''' extract file list from wget file '''\n# open wget file, read content saving to a list of lines and close again\n infile = open(wgetfile,'r')\n lines = infile.readlines()\n infile.close\n# if wget didn't return files print a warning and exit function \n if lines[0] == \"No files were found that matched the query\":\n print lines[0] + \" for \", varlist, modlist, exp\n return False \n else:\n# select only the files lines starting as var_cmortable_model_exp ...\n result=[]\n# if modlist empty add to it a regex string indicating model name\n if len(modlist) > 0:\n comb_constr = itertools.product(*[varlist,modlist])\n filestrs = [\"_\".join(x) for x in comb_constr]\n else:\n filestrs = [var + '_[A-Za-z0-9-.()]*_' for var in varlist] \n for line in lines:\n match = 
[re.search(pat,line) for pat in filestrs]\n if match.count(None) != len(match) and line.find(exp):\n [fname,furl,hash_type,fhash] = line.replace(\"'\",\"\").split()\n if hash_type in [\"SHA256\",\"sha256\",\"md5\",\"MD5\"]:\n result.append([fname, furl.replace(\"http://\",\"\"), fhash, hash_type])\n else: \n print \"Error in parse_file() is selecting the wrong lines!\"\n print line\n sys.exit()\n return result \n \n\ndef check_hash(tree_path,fhash,hash_type):\n ''' Execute md5sum/sha256sum on file on tree and return True,f same as in wget file '''\n hash_cmd=\"md5sum\"\n if hash_type in [\"SHA256\",\"sha256\"]: hash_cmd=\"sha256sum\"\n tree_hash = subprocess.check_output([hash_cmd, tree_path]).split()[0]\n return tree_hash == fhash\n\n\ndef process_file(result):\n ''' Check if file exist on tree and if True check md5/sha265 hash '''\n info = {}\n [fname,furl,fhash,hash_type]=result\n [bool,tree_path]=tree_exist(furl)\n# some servers have updated name: for ex pcmdi9.llnl.gov is now aims3.llnl.gov so we need to substitute and check that too\n print furl, bool\n if not bool and furl[0:14]=='aims3.llnl.gov':\n for num in [3,7,9]:\n other_furl=furl.replace('aims3','pcmdi'+str(num)) \n print \"other_furl \", other_furl\n [bool,tree_path]=tree_exist(other_furl)\n if bool:\n print \"bool after extra check for num \", bool, num\n break\n info[furl] = get_info(fname,tree_path)\n# if file exists in tree compare md5/sha256 with values in wgetfile, else add to update\n if \"ACCESS\" in fname or \"CSIRO\" in fname or (bool and check_hash(tree_path,fhash,hash_type)):\n info[furl].append(\"R\")\n else:\n info[furl][-1] = \"http://\" + furl\n info[furl].append(\"D\")\n return info\n\n\ndef retrieve_info(query_item):\n ''' retrieve items of info related to input query combination '''\n global info\n # info order is: 0-var, 1-mip, 2-mod, 3-exp, 4-ens, 5-ver, 6-fname, 7-status\n var, mip = query_item[0].split(\"_\")\n rows={}\n # add the items in info with matching var,mip,exp to rows 
as dictionaries \n for item in info.values():\n if var == item[0] and mip == item[1] and query_item[-1] == item[3]:\n key = (item[2], item[4], item[5])\n try:\n rows[key].append(item[7])\n except:\n rows[key] = [item[7]]\n# loop through mod_ens_vers combination counting files to download/update\n newrows=[]\n for key in rows.keys():\n ndown = rows[key].count(\"D\") \n status = key[2] + \" \" + str(len(rows[key])) + \" files, \" + str(ndown) + \" to update\"\n newrows.append([tuple(key[0:2]), status])\n return newrows\n\n\ndef result_matrix(querypub,exp0):\n ''' Build a matrix of the results to output to csv table '''\n global gmatrix\n # querypub contains only published combinations\n # initialize dictionary of exp/matrices\n gmatrix = {}\n for exp in exp0:\n # for each var_mip retrieve_info create a dict{var_mip:[[(mod1,ens1), details list][(mod1,ens2), details list],[..]]}\n # they are added to exp_dict and each key will be column header, (mod1,ens1) will indicate row and details will be cell value\n exp_dict={}\n infoexp = [x for x in querypub if x[-1] == exp]\n for item in infoexp:\n exp_dict[item[0]]=retrieve_info(item) \n gmatrix[exp]= exp_dict\n return \n\n\ndef compare_query(var0,mod0,exp0):\n ''' compare the var_mod_exp combinations found with the requested ones '''\n global info, opub\n # for each el. 
of info: join var_mip, transform to tuple, finally convert modified info to set\n info_set = set(map(tuple,[[\"_\".join(x[0:2])] + x[2:-4] for x in info.values()]))\n # create set with all possible combinations of var_mip,model,exp based on constraints\n # if models not specified create a model list based on wget result\n if len(mod0) < 1: mod0 = [x[2] for x in info.values()]\n comb_query = set(itertools.product(*[var0,mod0,exp0]))\n # the difference between two sets gives combinations not published yet\n nopub_set = comb_query.difference(info_set)\n for item in nopub_set:\n opub.write(\",\".join(item) + \"\\n\")\n # write a matrix to pass results to csv table in suitable format\n if table: result_matrix(comb_query.difference(nopub_set),exp0)\n return nopub_set \n\n\ndef write_table(nopub):\n ''' write a csv table to summarise search '''\n global gmatrix\n for exp in exp0:\n # length of dictionary gmatrix[exp] is number of var_mip columns\n # maximum length of list in each dict inside gmatrix[exp] is number of mod/ens rows\n emat = gmatrix[exp]\n klist = emat.keys()\n # check if there are extra variables never published\n evar = list(set( [np[0] for np in nopub if np[0] not in klist if np[-1]==exp ] ))\n # calculate ncol,nrow keeping into account var never published\n ncol = len(klist) +2 + len(evar)\n nrow = max([len(emat[x]) for x in klist]) +1\n # open/create a csv file for each experiment\n try:\n csv = open(exp+\".csv\",\"w\") \n except:\n print \"Can not open file \" + exp + \".csv\" \n csv.write(\" model_ensemble/variable,\" + \",\".join(klist+evar) + \"\\n\") \n # pre-fill all values with \"NP\", leave 1 column and 1 row for headers \n # write first two columns with all (mod,ens) pairs\n col1= [emat[var][i][0] for var in klist for i in range(len(emat[var])) ]\n col1 = list(set(col1))\n col1_sort=sorted(col1)\n # write first column with mod_ens combinations & save row indexes in dict where keys are (mod,ens) combination\n # print col1_sort\n for modens in 
col1_sort:\n csv.write(modens[0] + \"_\" + modens[1]) \n for var in klist:\n line = [item[1].replace(\", \" , \" (\") for item in emat[var] if item[0] == modens]\n if len(line) > 0:\n csv.write(\", \" + \" \".join(line) + \")\")\n else:\n csv.write(\",NP\")\n if len(evar) > 0:\n for var in evar:\n csv.write(\",NP\")\n csv.write(\"\\n\")\n csv.close()\n print \"Data written in table \"\n return\n\ndef main():\n ''' Main program starts here '''\n global opub, odown, orep, info\n# somefile is false starting turns to true if at elast one file found\n somefile=False\n# read inputs and assign constraints\n assign_constraint()\n fdown = outfile + '_to_download.csv'\n frep = outfile + '_replica.csv'\n fpub = outfile + '_not_published.csv'\n# test reading inputs\n print var0\n print exp0\n print mod0\n print fdown\n print frep\n print fpub\n# if one of the output files exists issue a warning an exit\n if opath.isfile(fdown) or opath.isfile(frep) or opath.isfile(fpub):\n print \"Warning: one of the output files exists, exit to not overwrite!\"\n sys.exit() \n info={}\n# loop through experiments, 1st create a wget request for exp, then parse_file \n for exp in exp0:\n wgetfile = \"wget_\" + exp + \".out\"\n result=parse_file(wgetfile,var0,mod0,exp)\n# if found any files matching constraints, process them one by one\n# using multiprocessing Pool to parallelise process_file \n if result:\n async_results = Pool(1).map_async(process_file, result)\n for dinfo in async_results.get():\n info.update(dinfo)\n somefile=True\n print \"Finished checksum for existing files\" \n# if it couldn't find any file for any experiment then exit\n if not somefile: \n sys.exit(\"No files found for any of the experiments, exiting!\") \n# open not published file\n opub=open(fpub, \"w\")\n opub.write(\"var_mip-table, model, experiment\\n\")\n# build all requested combinations and compare to files found\n nopub_set = compare_query(var0,mod0,exp0)\n# write replica and download output files\n# open output 
files and write header\n odown=open(fdown, \"w\")\n odown.write(\"var, mip_table, model, experiment, ensemble, version, file url\\n\")\n orep=open(frep, \"w\")\n orep.write(\"var, mip_table, model, experiment, ensemble, version, filepath\\n\")\n write_file()\n# close all the output files\n odown.close()\n orep.close()\n opub.close()\n print \"Finished to write output files\" \n# if table option create/open spreadsheet\n# if table option write summary table in csv file\n if table: \n write_table(nopub_set)\n\n# check python version and then call main()\nif sys.version_info < ( 2, 7):\n # python too old, kill the script\n sys.exit(\"This script requires Python 2.7 or newer!\")\nmain()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475175,"cells":{"repo_name":{"kind":"string","value":"Dexhub/MTX"},"path":{"kind":"string","value":"src/mem/slicc/ast/OutPortDeclAST.py"},"copies":{"kind":"string","value":"92"},"size":{"kind":"string","value":"2802"},"content":{"kind":"string","value":"# Copyright (c) 1999-2008 Mark D. Hill and David A. 
Wood\n# Copyright (c) 2009 The Hewlett-Packard Development Company\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met: redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer;\n# redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution;\n# neither the name of the copyright holders nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom slicc.ast.DeclAST import DeclAST\nfrom slicc.ast.TypeAST import TypeAST\nfrom slicc.symbols import Var\nfrom slicc.symbols import Type\n\nclass OutPortDeclAST(DeclAST):\n def __init__(self, slicc, ident, msg_type, var_expr, pairs):\n super(OutPortDeclAST, self).__init__(slicc, pairs)\n\n self.ident = ident\n self.msg_type = msg_type\n self.var_expr = var_expr\n self.queue_type = TypeAST(slicc, \"OutPort\")\n\n def __repr__(self):\n return \"[OutPortDecl: %r]\" % self.ident\n\n def generate(self):\n code = self.slicc.codeFormatter(newlines=False)\n\n queue_type = self.var_expr.generate(code)\n if not queue_type.isOutPort:\n self.error(\"The outport queue's type must have the 'outport' \" +\n \"attribute. 
Type '%s' does not have this attribute.\",\n (queue_type))\n\n if not self.symtab.find(self.msg_type.ident, Type):\n self.error(\"The message type '%s' does not exist.\",\n self.msg_type.ident)\n\n var = Var(self.symtab, self.ident, self.location, self.queue_type.type,\n str(code), self.pairs)\n self.symtab.newSymbol(var)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475176,"cells":{"repo_name":{"kind":"string","value":"geomapdev/idea-box"},"path":{"kind":"string","value":"src/idea/tests/editidea_tests.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"6582"},"content":{"kind":"string","value":"from django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom idea import models, views\nfrom idea.forms import IdeaForm, PrivateIdeaForm\nfrom idea.tests.utils import mock_req, random_user, login, create_superuser\nfrom datetime import date\nfrom mock import patch\n\ndef create_idea(user=None):\n if not user:\n user = random_user()\n state = models.State.objects.get(name='Active')\n idea = models.Idea(creator=user, title='Transit subsidy to Mars', \n text='Aliens need assistance.', state=state)\n banner = models.Banner(id=1, title=\"AAAA\", text=\"text1\",\n start_date=date.today())\n banner.save()\n idea.banner = banner\n idea.save()\n idea.tags.add(\"test tag\")\n return idea\n\nclass EditIdeaTest(TestCase):\n fixtures = ['state']\n\n def setUp(self):\n create_superuser()\n\n def test_edit_good_idea(self):\n \"\"\" Test an normal POST submission to edit an idea. 
\"\"\"\n user = login(self)\n idea = create_idea(user=user)\n\n self.assertEquals(models.Idea.objects.all().count(), 1)\n new_title = \"new title\"\n new_summary = \"new summary\"\n new_text = \"new text\"\n new_banner = models.Banner(id=2, title=\"BBB\", text=\"text2\",\n start_date=date.today())\n new_banner.save()\n resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)),\n {'title':new_title,\n 'summary':new_summary,\n 'text':new_text,\n 'banner': new_banner.id})\n self.assertEqual(resp.status_code, 302)\n self.assertIn('detail', resp['Location'])\n self.assertEquals(models.Idea.objects.all().count(), 1)\n\n # ensure editing an idea does not up the vote count\n # vote count is 0 because votes are added in views.add_idea, which is not used in this test\n num_voters = get_user_model().objects.filter(vote__idea__pk=idea.id, vote__vote=1).count()\n self.assertEqual(num_voters, 0)\n\n refresh_idea = models.Idea.objects.get(id=idea.id)\n self.assertEqual(refresh_idea.title, new_title)\n self.assertEqual(refresh_idea.summary, new_summary)\n self.assertEqual(refresh_idea.text, new_text)\n self.assertEqual(refresh_idea.banner, new_banner)\n\n # verify the expected fields remain the same\n self.assertEqual(refresh_idea.tags.count(), 1)\n self.assertEqual(refresh_idea.tags.all()[0].name, \"test tag\")\n self.assertEqual(refresh_idea.creator, idea.creator)\n\n def test_bad_edit_idea(self):\n \"\"\" Test an incomplete POST submission to edit an idea. 
\"\"\"\n user = login(self)\n idea = create_idea(user=user)\n\n resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'text':'new title'})\n self.assertEqual(resp.status_code, 200)\n self.assertIn('This field is required.', resp.content)\n self.assertEquals(models.Idea.objects.all().count(), 1)\n\n refresh_idea = models.Idea.objects.get(id=idea.id)\n self.assertEqual(refresh_idea.title, idea.title)\n self.assertEqual(refresh_idea.banner, idea.banner)\n\n def test_must_be_logged_in(self):\n \"\"\" A user must be logged in to edit an idea. \"\"\"\n user = login(self)\n idea = create_idea(user=user)\n self.client.logout()\n resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text'})\n self.assertEqual(resp.status_code, 302)\n self.assertIn('login', resp['Location'])\n\n def test_edit_ignores_tags(self):\n \"\"\" A user must be logged in to edit an idea. \"\"\"\n\n user = login(self)\n idea = create_idea(user=user)\n resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text', 'tags':'sample, newtag'})\n self.assertEqual(resp.status_code, 302)\n self.assertIn('detail', resp['Location'])\n\n refresh_idea = models.Idea.objects.get(id=idea.id)\n self.assertEqual(refresh_idea.tags.count(), 1)\n self.assertEqual(refresh_idea.tags.all()[0].name, \"test tag\")\n\n @patch('idea.views.render')\n def test_edit_idea_with_private_banner(self, render):\n \"\"\"\n Verify that the private banner field auto-populates properly\n \"\"\"\n user = login(self)\n state = models.State.objects.get(name='Active')\n\n idea1 = models.Idea(creator=user, title='Transit subsidy to Venus', \n text='Aliens need assistance.', state=state)\n banner1 = models.Banner(id=1, title=\"AAAA\", text=\"text1\",\n start_date=date.today(), is_private=True)\n banner1.save()\n idea1.banner = banner1\n idea1.save()\n\n idea2 = 
models.Idea(creator=user, title='Transit subsidy to Venus', \n text='Aliens need assistance.', state=state)\n banner2 = models.Banner(id=2, title=\"BBBB\", text=\"text2\",\n start_date=date.today())\n banner2.save()\n idea2.banner = banner2\n idea2.save()\n\n views.edit_idea(mock_req(user=user), idea1.id)\n context = render.call_args[0][2]\n self.assertTrue('form' in context)\n self.assertTrue(isinstance(context['form'], PrivateIdeaForm))\n banner_field = context['form'].fields['banner']\n selected = context['form'].initial['banner']\n self.assertEqual(banner1.id, selected)\n self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, None)\n self.assertIn(banner1, banner_field._queryset)\n self.assertNotIn(banner2, banner_field._queryset)\n\n views.edit_idea(mock_req(user=user), idea2.id)\n context = render.call_args[0][2]\n self.assertTrue('form' in context)\n self.assertTrue(isinstance(context['form'], IdeaForm))\n self.assertFalse(isinstance(context['form'], PrivateIdeaForm))\n banner_field = context['form'].fields['banner']\n selected = context['form'].initial['banner']\n self.assertEqual(banner2.id, selected)\n self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, 'Select')\n self.assertNotIn(banner1, banner_field._queryset)\n self.assertIn(banner2, banner_field._queryset)\n"},"license":{"kind":"string","value":"cc0-1.0"}}},{"rowIdx":475177,"cells":{"repo_name":{"kind":"string","value":"boyuegame/kbengine"},"path":{"kind":"string","value":"kbe/res/scripts/common/Lib/test/test_bz2.py"},"copies":{"kind":"string","value":"72"},"size":{"kind":"string","value":"32972"},"content":{"kind":"string","value":"from test import support\nfrom test.support import bigmemtest, _4G\n\nimport unittest\nfrom io import BytesIO\nimport os\nimport pickle\nimport random\nimport subprocess\nimport sys\nfrom test.support import unlink\n\ntry:\n import threading\nexcept ImportError:\n threading = None\n\n# Skip tests if the bz2 
module doesn't exist.\nbz2 = support.import_module('bz2')\nfrom bz2 import BZ2File, BZ2Compressor, BZ2Decompressor\n\n\nclass BaseTest(unittest.TestCase):\n \"Base for other testcases.\"\n\n TEXT_LINES = [\n b'root:x:0:0:root:/root:/bin/bash\\n',\n b'bin:x:1:1:bin:/bin:\\n',\n b'daemon:x:2:2:daemon:/sbin:\\n',\n b'adm:x:3:4:adm:/var/adm:\\n',\n b'lp:x:4:7:lp:/var/spool/lpd:\\n',\n b'sync:x:5:0:sync:/sbin:/bin/sync\\n',\n b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\\n',\n b'halt:x:7:0:halt:/sbin:/sbin/halt\\n',\n b'mail:x:8:12:mail:/var/spool/mail:\\n',\n b'news:x:9:13:news:/var/spool/news:\\n',\n b'uucp:x:10:14:uucp:/var/spool/uucp:\\n',\n b'operator:x:11:0:operator:/root:\\n',\n b'games:x:12:100:games:/usr/games:\\n',\n b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\\n',\n b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\\n',\n b'nobody:x:65534:65534:Nobody:/home:\\n',\n b'postfix:x:100:101:postfix:/var/spool/postfix:\\n',\n b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\\n',\n b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\\n',\n b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\\n',\n b'www:x:103:104::/var/www:/bin/false\\n',\n ]\n TEXT = b''.join(TEXT_LINES)\n DATA = 
b'BZh91AY&SY.\\xc8N\\x18\\x00\\x01>_\\x80\\x00\\x10@\\x02\\xff\\xf0\\x01\\x07n\\x00?\\xe7\\xff\\xe00\\x01\\x99\\xaa\\x00\\xc0\\x03F\\x86\\x8c#&\\x83F\\x9a\\x03\\x06\\xa6\\xd0\\xa6\\x93M\\x0fQ\\xa7\\xa8\\x06\\x804hh\\x12$\\x11\\xa4i4\\xf14S\\xd2<Q\\xb5\\x0fH\\xd3\\xd4\\xdd\\xd5\\x87\\xbb\\xf8\\x94\\r\\x8f\\xafI\\x12\\xe1\\xc9\\xf8/E\\x00pu\\x89\\x12]\\xc9\\xbbDL\\nQ\\x0e\\t1\\x12\\xdf\\xa0\\xc0\\x97\\xac2O9\\x89\\x13\\x94\\x0e\\x1c7\\x0ed\\x95I\\x0c\\xaaJ\\xa4\\x18L\\x10\\x05#\\x9c\\xaf\\xba\\xbc/\\x97\\x8a#C\\xc8\\xe1\\x8cW\\xf9\\xe2\\xd0\\xd6M\\xa7\\x8bXa<e\\x84t\\xcbL\\xb3\\xa7\\xd9\\xcd\\xd1\\xcb\\x84.\\xaf\\xb3\\xab\\xab\\xad`n}\\xa0lh\\tE,\\x8eZ\\x15\\x17VH>\\x88\\xe5\\xcd9gd6\\x0b\\n\\xe9\\x9b\\xd5\\x8a\\x99\\xf7\\x08.K\\x8ev\\xfb\\xf7xw\\xbb\\xdf\\xa1\\x92\\xf1\\xdd|/\";\\xa2\\xba\\x9f\\xd5\\xb1#A\\xb6\\xf6\\xb3o\\xc9\\xc5y\\\\\\xebO\\xe7\\x85\\x9a\\xbc\\xb6f8\\x952\\xd5\\xd7\"%\\x89>V,\\xf7\\xa6z\\xe2\\x9f\\xa3\\xdf\\x11\\x11\"\\xd6E)I\\xa9\\x13^\\xca\\xf3r\\xd0\\x03U\\x922\\xf26\\xec\\xb6\\xed\\x8b\\xc3U\\x13\\x9d\\xc5\\x170\\xa4\\xfa^\\x92\\xacDF\\x8a\\x97\\xd6\\x19\\xfe\\xdd\\xb8\\xbd\\x1a\\x9a\\x19\\xa3\\x80ankR\\x8b\\xe5\\xd83]\\xa9\\xc6\\x08\\x82f\\xf6\\xb9\"6l$\\xb8j@\\xc0\\x8a\\xb0l1..\\xbak\\x83ls\\x15\\xbc\\xf4\\xc1\\x13\\xbe\\xf8E\\xb8\\x9d\\r\\xa8\\x9dk\\x84\\xd3n\\xfa\\xacQ\\x07\\xb1%y\\xaav\\xb4\\x08\\xe0z\\x1b\\x16\\xf5\\x04\\xe9\\xcc\\xb9\\x08z\\x1en7.G\\xfc]\\xc9\\x14\\xe1B@\\xbb!8`'\n EMPTY_DATA = b'BZh9\\x17rE8P\\x90\\x00\\x00\\x00\\x00'\n BAD_DATA = b'this is not a valid bzip2 file'\n\n def setUp(self):\n self.filename = support.TESTFN\n\n def tearDown(self):\n if os.path.isfile(self.filename):\n os.unlink(self.filename)\n\n if sys.platform == \"win32\":\n # bunzip2 isn't available to run on Windows.\n def decompress(self, data):\n return bz2.decompress(data)\n else:\n def decompress(self, data):\n pop = subprocess.Popen(\"bunzip2\", shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n 
pop.stdin.write(data)\n pop.stdin.close()\n ret = pop.stdout.read()\n pop.stdout.close()\n if pop.wait() != 0:\n ret = bz2.decompress(data)\n return ret\n\n\nclass BZ2FileTest(BaseTest):\n \"Test the BZ2File class.\"\n\n def createTempFile(self, streams=1, suffix=b\"\"):\n with open(self.filename, \"wb\") as f:\n f.write(self.DATA * streams)\n f.write(suffix)\n\n def testBadArgs(self):\n self.assertRaises(TypeError, BZ2File, 123.456)\n self.assertRaises(ValueError, BZ2File, \"/dev/null\", \"z\")\n self.assertRaises(ValueError, BZ2File, \"/dev/null\", \"rx\")\n self.assertRaises(ValueError, BZ2File, \"/dev/null\", \"rbt\")\n self.assertRaises(ValueError, BZ2File, \"/dev/null\", compresslevel=0)\n self.assertRaises(ValueError, BZ2File, \"/dev/null\", compresslevel=10)\n\n def testRead(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.read, None)\n self.assertEqual(bz2f.read(), self.TEXT)\n\n def testReadBadFile(self):\n self.createTempFile(streams=0, suffix=self.BAD_DATA)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(OSError, bz2f.read)\n\n def testReadMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.read, None)\n self.assertEqual(bz2f.read(), self.TEXT * 5)\n\n def testReadMonkeyMultiStream(self):\n # Test BZ2File.read() on a multi-stream archive where a stream\n # boundary coincides with the end of the raw read buffer.\n buffer_size = bz2._BUFFER_SIZE\n bz2._BUFFER_SIZE = len(self.DATA)\n try:\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.read, None)\n self.assertEqual(bz2f.read(), self.TEXT * 5)\n finally:\n bz2._BUFFER_SIZE = buffer_size\n\n def testReadTrailingJunk(self):\n self.createTempFile(suffix=self.BAD_DATA)\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(bz2f.read(), self.TEXT)\n\n def testReadMultiStreamTrailingJunk(self):\n 
self.createTempFile(streams=5, suffix=self.BAD_DATA)\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(bz2f.read(), self.TEXT * 5)\n\n def testRead0(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.read, None)\n self.assertEqual(bz2f.read(0), b\"\")\n\n def testReadChunk10(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n text = b''\n while True:\n str = bz2f.read(10)\n if not str:\n break\n text += str\n self.assertEqual(text, self.TEXT)\n\n def testReadChunk10MultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n text = b''\n while True:\n str = bz2f.read(10)\n if not str:\n break\n text += str\n self.assertEqual(text, self.TEXT * 5)\n\n def testRead100(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(bz2f.read(100), self.TEXT[:100])\n\n def testPeek(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n pdata = bz2f.peek()\n self.assertNotEqual(len(pdata), 0)\n self.assertTrue(self.TEXT.startswith(pdata))\n self.assertEqual(bz2f.read(), self.TEXT)\n\n def testReadInto(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n n = 128\n b = bytearray(n)\n self.assertEqual(bz2f.readinto(b), n)\n self.assertEqual(b, self.TEXT[:n])\n n = len(self.TEXT) - n\n b = bytearray(len(self.TEXT))\n self.assertEqual(bz2f.readinto(b), n)\n self.assertEqual(b[:n], self.TEXT[-n:])\n\n def testReadLine(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.readline, None)\n for line in self.TEXT_LINES:\n self.assertEqual(bz2f.readline(), line)\n\n def testReadLineMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.readline, None)\n for line in self.TEXT_LINES * 5:\n self.assertEqual(bz2f.readline(), line)\n\n def testReadLines(self):\n self.createTempFile()\n with 
BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.readlines, None)\n self.assertEqual(bz2f.readlines(), self.TEXT_LINES)\n\n def testReadLinesMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.readlines, None)\n self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)\n\n def testIterator(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)\n\n def testIteratorMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)\n\n def testClosedIteratorDeadlock(self):\n # Issue #3309: Iteration on a closed BZ2File should release the lock.\n self.createTempFile()\n bz2f = BZ2File(self.filename)\n bz2f.close()\n self.assertRaises(ValueError, next, bz2f)\n # This call will deadlock if the above call failed to release the lock.\n self.assertRaises(ValueError, bz2f.readlines)\n\n def testWrite(self):\n with BZ2File(self.filename, \"w\") as bz2f:\n self.assertRaises(TypeError, bz2f.write)\n bz2f.write(self.TEXT)\n with open(self.filename, 'rb') as f:\n self.assertEqual(self.decompress(f.read()), self.TEXT)\n\n def testWriteChunks10(self):\n with BZ2File(self.filename, \"w\") as bz2f:\n n = 0\n while True:\n str = self.TEXT[n*10:(n+1)*10]\n if not str:\n break\n bz2f.write(str)\n n += 1\n with open(self.filename, 'rb') as f:\n self.assertEqual(self.decompress(f.read()), self.TEXT)\n\n def testWriteNonDefaultCompressLevel(self):\n expected = bz2.compress(self.TEXT, compresslevel=5)\n with BZ2File(self.filename, \"w\", compresslevel=5) as bz2f:\n bz2f.write(self.TEXT)\n with open(self.filename, \"rb\") as f:\n self.assertEqual(f.read(), expected)\n\n def testWriteLines(self):\n with BZ2File(self.filename, \"w\") as bz2f:\n self.assertRaises(TypeError, bz2f.writelines)\n bz2f.writelines(self.TEXT_LINES)\n # Issue #1535500: Calling 
writelines() on a closed BZ2File\n # should raise an exception.\n self.assertRaises(ValueError, bz2f.writelines, [\"a\"])\n with open(self.filename, 'rb') as f:\n self.assertEqual(self.decompress(f.read()), self.TEXT)\n\n def testWriteMethodsOnReadOnlyFile(self):\n with BZ2File(self.filename, \"w\") as bz2f:\n bz2f.write(b\"abc\")\n\n with BZ2File(self.filename, \"r\") as bz2f:\n self.assertRaises(OSError, bz2f.write, b\"a\")\n self.assertRaises(OSError, bz2f.writelines, [b\"a\"])\n\n def testAppend(self):\n with BZ2File(self.filename, \"w\") as bz2f:\n self.assertRaises(TypeError, bz2f.write)\n bz2f.write(self.TEXT)\n with BZ2File(self.filename, \"a\") as bz2f:\n self.assertRaises(TypeError, bz2f.write)\n bz2f.write(self.TEXT)\n with open(self.filename, 'rb') as f:\n self.assertEqual(self.decompress(f.read()), self.TEXT * 2)\n\n def testSeekForward(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.seek)\n bz2f.seek(150)\n self.assertEqual(bz2f.read(), self.TEXT[150:])\n\n def testSeekForwardAcrossStreams(self):\n self.createTempFile(streams=2)\n with BZ2File(self.filename) as bz2f:\n self.assertRaises(TypeError, bz2f.seek)\n bz2f.seek(len(self.TEXT) + 150)\n self.assertEqual(bz2f.read(), self.TEXT[150:])\n\n def testSeekBackwards(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n bz2f.read(500)\n bz2f.seek(-150, 1)\n self.assertEqual(bz2f.read(), self.TEXT[500-150:])\n\n def testSeekBackwardsAcrossStreams(self):\n self.createTempFile(streams=2)\n with BZ2File(self.filename) as bz2f:\n readto = len(self.TEXT) + 100\n while readto > 0:\n readto -= len(bz2f.read(readto))\n bz2f.seek(-150, 1)\n self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)\n\n def testSeekBackwardsFromEnd(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(-150, 2)\n self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])\n\n def 
testSeekBackwardsFromEndAcrossStreams(self):\n self.createTempFile(streams=2)\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(-1000, 2)\n self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])\n\n def testSeekPostEnd(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(150000)\n self.assertEqual(bz2f.tell(), len(self.TEXT))\n self.assertEqual(bz2f.read(), b\"\")\n\n def testSeekPostEndMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(150000)\n self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)\n self.assertEqual(bz2f.read(), b\"\")\n\n def testSeekPostEndTwice(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(150000)\n bz2f.seek(150000)\n self.assertEqual(bz2f.tell(), len(self.TEXT))\n self.assertEqual(bz2f.read(), b\"\")\n\n def testSeekPostEndTwiceMultiStream(self):\n self.createTempFile(streams=5)\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(150000)\n bz2f.seek(150000)\n self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)\n self.assertEqual(bz2f.read(), b\"\")\n\n def testSeekPreStart(self):\n self.createTempFile()\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(-150)\n self.assertEqual(bz2f.tell(), 0)\n self.assertEqual(bz2f.read(), self.TEXT)\n\n def testSeekPreStartMultiStream(self):\n self.createTempFile(streams=2)\n with BZ2File(self.filename) as bz2f:\n bz2f.seek(-150)\n self.assertEqual(bz2f.tell(), 0)\n self.assertEqual(bz2f.read(), self.TEXT * 2)\n\n def testFileno(self):\n self.createTempFile()\n with open(self.filename, 'rb') as rawf:\n bz2f = BZ2File(rawf)\n try:\n self.assertEqual(bz2f.fileno(), rawf.fileno())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.fileno)\n\n def testSeekable(self):\n bz2f = BZ2File(BytesIO(self.DATA))\n try:\n self.assertTrue(bz2f.seekable())\n bz2f.read()\n self.assertTrue(bz2f.seekable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.seekable)\n\n bz2f = 
BZ2File(BytesIO(), \"w\")\n try:\n self.assertFalse(bz2f.seekable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.seekable)\n\n src = BytesIO(self.DATA)\n src.seekable = lambda: False\n bz2f = BZ2File(src)\n try:\n self.assertFalse(bz2f.seekable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.seekable)\n\n def testReadable(self):\n bz2f = BZ2File(BytesIO(self.DATA))\n try:\n self.assertTrue(bz2f.readable())\n bz2f.read()\n self.assertTrue(bz2f.readable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.readable)\n\n bz2f = BZ2File(BytesIO(), \"w\")\n try:\n self.assertFalse(bz2f.readable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.readable)\n\n def testWritable(self):\n bz2f = BZ2File(BytesIO(self.DATA))\n try:\n self.assertFalse(bz2f.writable())\n bz2f.read()\n self.assertFalse(bz2f.writable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.writable)\n\n bz2f = BZ2File(BytesIO(), \"w\")\n try:\n self.assertTrue(bz2f.writable())\n finally:\n bz2f.close()\n self.assertRaises(ValueError, bz2f.writable)\n\n def testOpenDel(self):\n self.createTempFile()\n for i in range(10000):\n o = BZ2File(self.filename)\n del o\n\n def testOpenNonexistent(self):\n self.assertRaises(OSError, BZ2File, \"/non/existent\")\n\n def testReadlinesNoNewline(self):\n # Issue #1191043: readlines() fails on a file containing no newline.\n data = b'BZh91AY&SY\\xd9b\\x89]\\x00\\x00\\x00\\x03\\x80\\x04\\x00\\x02\\x00\\x0c\\x00 \\x00!\\x9ah3M\\x13<]\\xc9\\x14\\xe1BCe\\x8a%t'\n with open(self.filename, \"wb\") as f:\n f.write(data)\n with BZ2File(self.filename) as bz2f:\n lines = bz2f.readlines()\n self.assertEqual(lines, [b'Test'])\n with BZ2File(self.filename) as bz2f:\n xlines = list(bz2f.readlines())\n self.assertEqual(xlines, [b'Test'])\n\n def testContextProtocol(self):\n f = None\n with BZ2File(self.filename, \"wb\") as f:\n f.write(b\"xxx\")\n f = BZ2File(self.filename, \"rb\")\n f.close()\n try:\n with 
f:\n pass\n except ValueError:\n pass\n else:\n self.fail(\"__enter__ on a closed file didn't raise an exception\")\n try:\n with BZ2File(self.filename, \"wb\") as f:\n 1/0\n except ZeroDivisionError:\n pass\n else:\n self.fail(\"1/0 didn't raise an exception\")\n\n @unittest.skipUnless(threading, 'Threading required for this test.')\n def testThreading(self):\n # Issue #7205: Using a BZ2File from several threads shouldn't deadlock.\n data = b\"1\" * 2**20\n nthreads = 10\n with BZ2File(self.filename, 'wb') as f:\n def comp():\n for i in range(5):\n f.write(data)\n threads = [threading.Thread(target=comp) for i in range(nthreads)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n def testWithoutThreading(self):\n module = support.import_fresh_module(\"bz2\", blocked=(\"threading\",))\n with module.BZ2File(self.filename, \"wb\") as f:\n f.write(b\"abc\")\n with module.BZ2File(self.filename, \"rb\") as f:\n self.assertEqual(f.read(), b\"abc\")\n\n def testMixedIterationAndReads(self):\n self.createTempFile()\n linelen = len(self.TEXT_LINES[0])\n halflen = linelen // 2\n with BZ2File(self.filename) as bz2f:\n bz2f.read(halflen)\n self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])\n self.assertEqual(bz2f.read(), self.TEXT[linelen:])\n with BZ2File(self.filename) as bz2f:\n bz2f.readline()\n self.assertEqual(next(bz2f), self.TEXT_LINES[1])\n self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])\n with BZ2File(self.filename) as bz2f:\n bz2f.readlines()\n self.assertRaises(StopIteration, next, bz2f)\n self.assertEqual(bz2f.readlines(), [])\n\n def testMultiStreamOrdering(self):\n # Test the ordering of streams when reading a multi-stream archive.\n data1 = b\"foo\" * 1000\n data2 = b\"bar\" * 1000\n with BZ2File(self.filename, \"w\") as bz2f:\n bz2f.write(data1)\n with BZ2File(self.filename, \"a\") as bz2f:\n bz2f.write(data2)\n with BZ2File(self.filename) as bz2f:\n self.assertEqual(bz2f.read(), data1 + data2)\n\n def 
testOpenBytesFilename(self):\n str_filename = self.filename\n try:\n bytes_filename = str_filename.encode(\"ascii\")\n except UnicodeEncodeError:\n self.skipTest(\"Temporary file name needs to be ASCII\")\n with BZ2File(bytes_filename, \"wb\") as f:\n f.write(self.DATA)\n with BZ2File(bytes_filename, \"rb\") as f:\n self.assertEqual(f.read(), self.DATA)\n # Sanity check that we are actually operating on the right file.\n with BZ2File(str_filename, \"rb\") as f:\n self.assertEqual(f.read(), self.DATA)\n\n\n # Tests for a BZ2File wrapping another file object:\n\n def testReadBytesIO(self):\n with BytesIO(self.DATA) as bio:\n with BZ2File(bio) as bz2f:\n self.assertRaises(TypeError, bz2f.read, None)\n self.assertEqual(bz2f.read(), self.TEXT)\n self.assertFalse(bio.closed)\n\n def testPeekBytesIO(self):\n with BytesIO(self.DATA) as bio:\n with BZ2File(bio) as bz2f:\n pdata = bz2f.peek()\n self.assertNotEqual(len(pdata), 0)\n self.assertTrue(self.TEXT.startswith(pdata))\n self.assertEqual(bz2f.read(), self.TEXT)\n\n def testWriteBytesIO(self):\n with BytesIO() as bio:\n with BZ2File(bio, \"w\") as bz2f:\n self.assertRaises(TypeError, bz2f.write)\n bz2f.write(self.TEXT)\n self.assertEqual(self.decompress(bio.getvalue()), self.TEXT)\n self.assertFalse(bio.closed)\n\n def testSeekForwardBytesIO(self):\n with BytesIO(self.DATA) as bio:\n with BZ2File(bio) as bz2f:\n self.assertRaises(TypeError, bz2f.seek)\n bz2f.seek(150)\n self.assertEqual(bz2f.read(), self.TEXT[150:])\n\n def testSeekBackwardsBytesIO(self):\n with BytesIO(self.DATA) as bio:\n with BZ2File(bio) as bz2f:\n bz2f.read(500)\n bz2f.seek(-150, 1)\n self.assertEqual(bz2f.read(), self.TEXT[500-150:])\n\n def test_read_truncated(self):\n # Drop the eos_magic field (6 bytes) and CRC (4 bytes).\n truncated = self.DATA[:-10]\n with BZ2File(BytesIO(truncated)) as f:\n self.assertRaises(EOFError, f.read)\n with BZ2File(BytesIO(truncated)) as f:\n self.assertEqual(f.read(len(self.TEXT)), self.TEXT)\n 
self.assertRaises(EOFError, f.read, 1)\n # Incomplete 4-byte file header, and block header of at least 146 bits.\n for i in range(22):\n with BZ2File(BytesIO(truncated[:i])) as f:\n self.assertRaises(EOFError, f.read, 1)\n\n\nclass BZ2CompressorTest(BaseTest):\n def testCompress(self):\n bz2c = BZ2Compressor()\n self.assertRaises(TypeError, bz2c.compress)\n data = bz2c.compress(self.TEXT)\n data += bz2c.flush()\n self.assertEqual(self.decompress(data), self.TEXT)\n\n def testCompressEmptyString(self):\n bz2c = BZ2Compressor()\n data = bz2c.compress(b'')\n data += bz2c.flush()\n self.assertEqual(data, self.EMPTY_DATA)\n\n def testCompressChunks10(self):\n bz2c = BZ2Compressor()\n n = 0\n data = b''\n while True:\n str = self.TEXT[n*10:(n+1)*10]\n if not str:\n break\n data += bz2c.compress(str)\n n += 1\n data += bz2c.flush()\n self.assertEqual(self.decompress(data), self.TEXT)\n\n @bigmemtest(size=_4G + 100, memuse=2)\n def testCompress4G(self, size):\n # \"Test BZ2Compressor.compress()/flush() with >4GiB input\"\n bz2c = BZ2Compressor()\n data = b\"x\" * size\n try:\n compressed = bz2c.compress(data)\n compressed += bz2c.flush()\n finally:\n data = None # Release memory\n data = bz2.decompress(compressed)\n try:\n self.assertEqual(len(data), size)\n self.assertEqual(len(data.strip(b\"x\")), 0)\n finally:\n data = None\n\n def testPickle(self):\n with self.assertRaises(TypeError):\n pickle.dumps(BZ2Compressor())\n\n\nclass BZ2DecompressorTest(BaseTest):\n def test_Constructor(self):\n self.assertRaises(TypeError, BZ2Decompressor, 42)\n\n def testDecompress(self):\n bz2d = BZ2Decompressor()\n self.assertRaises(TypeError, bz2d.decompress)\n text = bz2d.decompress(self.DATA)\n self.assertEqual(text, self.TEXT)\n\n def testDecompressChunks10(self):\n bz2d = BZ2Decompressor()\n text = b''\n n = 0\n while True:\n str = self.DATA[n*10:(n+1)*10]\n if not str:\n break\n text += bz2d.decompress(str)\n n += 1\n self.assertEqual(text, self.TEXT)\n\n def 
testDecompressUnusedData(self):\n bz2d = BZ2Decompressor()\n unused_data = b\"this is unused data\"\n text = bz2d.decompress(self.DATA+unused_data)\n self.assertEqual(text, self.TEXT)\n self.assertEqual(bz2d.unused_data, unused_data)\n\n def testEOFError(self):\n bz2d = BZ2Decompressor()\n text = bz2d.decompress(self.DATA)\n self.assertRaises(EOFError, bz2d.decompress, b\"anything\")\n self.assertRaises(EOFError, bz2d.decompress, b\"\")\n\n @bigmemtest(size=_4G + 100, memuse=3.3)\n def testDecompress4G(self, size):\n # \"Test BZ2Decompressor.decompress() with >4GiB input\"\n blocksize = 10 * 1024 * 1024\n block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')\n try:\n data = block * (size // blocksize + 1)\n compressed = bz2.compress(data)\n bz2d = BZ2Decompressor()\n decompressed = bz2d.decompress(compressed)\n self.assertTrue(decompressed == data)\n finally:\n data = None\n compressed = None\n decompressed = None\n\n def testPickle(self):\n with self.assertRaises(TypeError):\n pickle.dumps(BZ2Decompressor())\n\n\nclass CompressDecompressTest(BaseTest):\n def testCompress(self):\n data = bz2.compress(self.TEXT)\n self.assertEqual(self.decompress(data), self.TEXT)\n\n def testCompressEmptyString(self):\n text = bz2.compress(b'')\n self.assertEqual(text, self.EMPTY_DATA)\n\n def testDecompress(self):\n text = bz2.decompress(self.DATA)\n self.assertEqual(text, self.TEXT)\n\n def testDecompressEmpty(self):\n text = bz2.decompress(b\"\")\n self.assertEqual(text, b\"\")\n\n def testDecompressToEmptyString(self):\n text = bz2.decompress(self.EMPTY_DATA)\n self.assertEqual(text, b'')\n\n def testDecompressIncomplete(self):\n self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])\n\n def testDecompressBadData(self):\n self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)\n\n def testDecompressMultiStream(self):\n text = bz2.decompress(self.DATA * 5)\n self.assertEqual(text, self.TEXT * 5)\n\n def testDecompressTrailingJunk(self):\n text = 
bz2.decompress(self.DATA + self.BAD_DATA)\n self.assertEqual(text, self.TEXT)\n\n def testDecompressMultiStreamTrailingJunk(self):\n text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)\n self.assertEqual(text, self.TEXT * 5)\n\n\nclass OpenTest(BaseTest):\n \"Test the open function.\"\n\n def open(self, *args, **kwargs):\n return bz2.open(*args, **kwargs)\n\n def test_binary_modes(self):\n for mode in (\"wb\", \"xb\"):\n if mode == \"xb\":\n unlink(self.filename)\n with self.open(self.filename, mode) as f:\n f.write(self.TEXT)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read())\n self.assertEqual(file_data, self.TEXT)\n with self.open(self.filename, \"rb\") as f:\n self.assertEqual(f.read(), self.TEXT)\n with self.open(self.filename, \"ab\") as f:\n f.write(self.TEXT)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read())\n self.assertEqual(file_data, self.TEXT * 2)\n\n def test_implicit_binary_modes(self):\n # Test implicit binary modes (no \"b\" or \"t\" in mode string).\n for mode in (\"w\", \"x\"):\n if mode == \"x\":\n unlink(self.filename)\n with self.open(self.filename, mode) as f:\n f.write(self.TEXT)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read())\n self.assertEqual(file_data, self.TEXT)\n with self.open(self.filename, \"r\") as f:\n self.assertEqual(f.read(), self.TEXT)\n with self.open(self.filename, \"a\") as f:\n f.write(self.TEXT)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read())\n self.assertEqual(file_data, self.TEXT * 2)\n\n def test_text_modes(self):\n text = self.TEXT.decode(\"ascii\")\n text_native_eol = text.replace(\"\\n\", os.linesep)\n for mode in (\"wt\", \"xt\"):\n if mode == \"xt\":\n unlink(self.filename)\n with self.open(self.filename, mode) as f:\n f.write(text)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read()).decode(\"ascii\")\n self.assertEqual(file_data, text_native_eol)\n with 
self.open(self.filename, \"rt\") as f:\n self.assertEqual(f.read(), text)\n with self.open(self.filename, \"at\") as f:\n f.write(text)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read()).decode(\"ascii\")\n self.assertEqual(file_data, text_native_eol * 2)\n\n def test_x_mode(self):\n for mode in (\"x\", \"xb\", \"xt\"):\n unlink(self.filename)\n with self.open(self.filename, mode) as f:\n pass\n with self.assertRaises(FileExistsError):\n with self.open(self.filename, mode) as f:\n pass\n\n def test_fileobj(self):\n with self.open(BytesIO(self.DATA), \"r\") as f:\n self.assertEqual(f.read(), self.TEXT)\n with self.open(BytesIO(self.DATA), \"rb\") as f:\n self.assertEqual(f.read(), self.TEXT)\n text = self.TEXT.decode(\"ascii\")\n with self.open(BytesIO(self.DATA), \"rt\") as f:\n self.assertEqual(f.read(), text)\n\n def test_bad_params(self):\n # Test invalid parameter combinations.\n self.assertRaises(ValueError,\n self.open, self.filename, \"wbt\")\n self.assertRaises(ValueError,\n self.open, self.filename, \"xbt\")\n self.assertRaises(ValueError,\n self.open, self.filename, \"rb\", encoding=\"utf-8\")\n self.assertRaises(ValueError,\n self.open, self.filename, \"rb\", errors=\"ignore\")\n self.assertRaises(ValueError,\n self.open, self.filename, \"rb\", newline=\"\\n\")\n\n def test_encoding(self):\n # Test non-default encoding.\n text = self.TEXT.decode(\"ascii\")\n text_native_eol = text.replace(\"\\n\", os.linesep)\n with self.open(self.filename, \"wt\", encoding=\"utf-16-le\") as f:\n f.write(text)\n with open(self.filename, \"rb\") as f:\n file_data = self.decompress(f.read()).decode(\"utf-16-le\")\n self.assertEqual(file_data, text_native_eol)\n with self.open(self.filename, \"rt\", encoding=\"utf-16-le\") as f:\n self.assertEqual(f.read(), text)\n\n def test_encoding_error_handler(self):\n # Test with non-default encoding error handler.\n with self.open(self.filename, \"wb\") as f:\n f.write(b\"foo\\xffbar\")\n with 
self.open(self.filename, \"rt\", encoding=\"ascii\", errors=\"ignore\") \\\n as f:\n self.assertEqual(f.read(), \"foobar\")\n\n def test_newline(self):\n # Test with explicit newline (universal newline mode disabled).\n text = self.TEXT.decode(\"ascii\")\n with self.open(self.filename, \"wt\", newline=\"\\n\") as f:\n f.write(text)\n with self.open(self.filename, \"rt\", newline=\"\\r\") as f:\n self.assertEqual(f.readlines(), [text])\n\n\ndef test_main():\n support.run_unittest(\n BZ2FileTest,\n BZ2CompressorTest,\n BZ2DecompressorTest,\n CompressDecompressTest,\n OpenTest,\n )\n support.reap_children()\n\nif __name__ == '__main__':\n test_main()\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":475178,"cells":{"repo_name":{"kind":"string","value":"chylli/phantomjs"},"path":{"kind":"string","value":"src/qt/qtwebkit/Source/WebCore/inspector/compile-front-end.py"},"copies":{"kind":"string","value":"116"},"size":{"kind":"string","value":"15388"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2012 Google Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. 
nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport os.path\nimport generate_protocol_externs\nimport shutil\nimport sys\nimport tempfile\n\ninspector_path = \"Source/WebCore/inspector\"\ninspector_frontend_path = inspector_path + \"/front-end\"\nprotocol_externs_path = inspector_frontend_path + \"/protocol-externs.js\"\n\ngenerate_protocol_externs.generate_protocol_externs(protocol_externs_path, inspector_path + \"/Inspector.json\")\n\njsmodule_name_prefix = \"jsmodule_\"\nmodules = [\n {\n \"name\": \"common\",\n \"dependencies\": [],\n \"sources\": [\n \"Color.js\",\n \"DOMExtension.js\",\n \"Object.js\",\n \"ParsedURL.js\",\n \"Progress.js\",\n \"Settings.js\",\n \"UIString.js\",\n \"UserMetrics.js\",\n \"utilities.js\",\n ]\n },\n {\n \"name\": \"sdk\",\n \"dependencies\": [\"common\"],\n \"sources\": [\n \"ApplicationCacheModel.js\",\n \"CompilerScriptMapping.js\",\n \"ConsoleModel.js\",\n \"ContentProvider.js\",\n \"ContentProviderBasedProjectDelegate.js\",\n \"ContentProviders.js\",\n \"CookieParser.js\",\n \"CSSMetadata.js\",\n \"CSSStyleModel.js\",\n 
\"BreakpointManager.js\",\n \"Database.js\",\n \"DOMAgent.js\",\n \"DOMStorage.js\",\n \"DebuggerModel.js\",\n \"DebuggerScriptMapping.js\",\n \"FileManager.js\",\n \"FileMapping.js\",\n \"FileSystemMapping.js\",\n \"FileSystemModel.js\",\n \"FileSystemProjectDelegate.js\",\n \"FileUtils.js\",\n \"HAREntry.js\",\n \"IndexedDBModel.js\",\n \"InspectorBackend.js\",\n \"IsolatedFileSystemManager.js\",\n \"IsolatedFileSystem.js\",\n \"Linkifier.js\",\n \"NetworkLog.js\",\n \"NetworkUISourceCodeProvider.js\",\n \"PresentationConsoleMessageHelper.js\",\n \"RuntimeModel.js\",\n \"SASSSourceMapping.js\",\n \"Script.js\",\n \"ScriptFormatter.js\",\n \"ScriptSnippetModel.js\",\n \"SimpleWorkspaceProvider.js\",\n \"SnippetStorage.js\",\n \"SourceMapping.js\",\n \"StylesSourceMapping.js\",\n \"TimelineManager.js\",\n \"RemoteObject.js\",\n \"Resource.js\",\n \"DefaultScriptMapping.js\",\n \"ResourceScriptMapping.js\",\n \"LiveEditSupport.js\",\n \"ResourceTreeModel.js\",\n \"ResourceType.js\",\n \"ResourceUtils.js\",\n \"SourceMap.js\",\n \"NetworkManager.js\",\n \"NetworkRequest.js\",\n \"UISourceCode.js\",\n \"UserAgentSupport.js\",\n \"Workspace.js\",\n \"protocol-externs.js\",\n ]\n },\n {\n \"name\": \"ui\",\n \"dependencies\": [\"common\"],\n \"sources\": [\n \"Checkbox.js\",\n \"ContextMenu.js\",\n \"DOMSyntaxHighlighter.js\",\n \"DataGrid.js\",\n \"DefaultTextEditor.js\",\n \"Dialog.js\",\n \"DockController.js\",\n \"Drawer.js\",\n \"EmptyView.js\",\n \"GoToLineDialog.js\",\n \"HelpScreen.js\",\n \"InspectorView.js\",\n \"KeyboardShortcut.js\",\n \"OverviewGrid.js\",\n \"Panel.js\",\n \"PanelEnablerView.js\",\n \"Placard.js\",\n \"Popover.js\",\n \"ProgressIndicator.js\",\n \"PropertiesSection.js\",\n \"SearchController.js\",\n \"Section.js\",\n \"SidebarPane.js\",\n \"SidebarTreeElement.js\",\n \"ShortcutsScreen.js\",\n \"ShowMoreDataGridNode.js\",\n \"SidebarOverlay.js\",\n \"SoftContextMenu.js\",\n \"SourceTokenizer.js\",\n \"Spectrum.js\",\n \"SplitView.js\",\n 
\"SidebarView.js\",\n \"StatusBarButton.js\",\n \"SuggestBox.js\",\n \"TabbedPane.js\",\n \"TextEditor.js\",\n \"TextEditorHighlighter.js\",\n \"TextEditorModel.js\",\n \"TextPrompt.js\",\n \"TextUtils.js\",\n \"TimelineGrid.js\",\n \"Toolbar.js\",\n \"UIUtils.js\",\n \"View.js\",\n \"ViewportControl.js\",\n \"treeoutline.js\",\n ]\n },\n {\n \"name\": \"components\",\n \"dependencies\": [\"sdk\", \"ui\"],\n \"sources\": [\n \"AdvancedSearchController.js\",\n \"HandlerRegistry.js\",\n \"ConsoleMessage.js\",\n \"CookiesTable.js\",\n \"DOMBreakpointsSidebarPane.js\",\n \"DOMPresentationUtils.js\",\n \"ElementsTreeOutline.js\",\n \"FontView.js\",\n \"ImageView.js\",\n \"NativeBreakpointsSidebarPane.js\",\n \"InspectElementModeController.js\",\n \"ObjectPopoverHelper.js\",\n \"ObjectPropertiesSection.js\",\n \"SourceFrame.js\",\n \"ResourceView.js\",\n ]\n },\n {\n \"name\": \"elements\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"CSSNamedFlowCollectionsView.js\",\n \"CSSNamedFlowView.js\",\n \"ElementsPanel.js\",\n \"ElementsPanelDescriptor.js\",\n \"EventListenersSidebarPane.js\",\n \"MetricsSidebarPane.js\",\n \"PropertiesSidebarPane.js\",\n \"StylesSidebarPane.js\",\n ]\n },\n {\n \"name\": \"network\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"NetworkItemView.js\",\n \"RequestCookiesView.js\",\n \"RequestHeadersView.js\",\n \"RequestHTMLView.js\",\n \"RequestJSONView.js\",\n \"RequestPreviewView.js\",\n \"RequestResponseView.js\",\n \"RequestTimingView.js\",\n \"RequestView.js\",\n \"ResourceWebSocketFrameView.js\",\n \"NetworkPanel.js\",\n \"NetworkPanelDescriptor.js\",\n ]\n },\n {\n \"name\": \"resources\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"ApplicationCacheItemsView.js\",\n \"CookieItemsView.js\",\n \"DatabaseQueryView.js\",\n \"DatabaseTableView.js\",\n \"DirectoryContentView.js\",\n \"DOMStorageItemsView.js\",\n \"FileContentView.js\",\n \"FileSystemView.js\",\n \"IndexedDBViews.js\",\n 
\"ResourcesPanel.js\",\n ]\n },\n {\n \"name\": \"workers\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"WorkerManager.js\",\n ]\n },\n {\n \"name\": \"scripts\",\n \"dependencies\": [\"components\", \"workers\"],\n \"sources\": [\n \"BreakpointsSidebarPane.js\",\n \"CallStackSidebarPane.js\",\n \"FilteredItemSelectionDialog.js\",\n \"JavaScriptSourceFrame.js\",\n \"NavigatorOverlayController.js\",\n \"NavigatorView.js\",\n \"RevisionHistoryView.js\",\n \"ScopeChainSidebarPane.js\",\n \"ScriptsNavigator.js\",\n \"ScriptsPanel.js\",\n \"ScriptsPanelDescriptor.js\",\n \"ScriptsSearchScope.js\",\n \"SnippetJavaScriptSourceFrame.js\",\n \"StyleSheetOutlineDialog.js\",\n \"TabbedEditorContainer.js\",\n \"UISourceCodeFrame.js\",\n \"WatchExpressionsSidebarPane.js\",\n \"WorkersSidebarPane.js\",\n ]\n },\n {\n \"name\": \"console\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"ConsoleView.js\",\n \"ConsolePanel.js\",\n ]\n },\n {\n \"name\": \"timeline\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"DOMCountersGraph.js\",\n \"MemoryStatistics.js\",\n \"NativeMemoryGraph.js\",\n \"TimelineModel.js\",\n \"TimelineOverviewPane.js\",\n \"TimelinePanel.js\",\n \"TimelinePanelDescriptor.js\",\n \"TimelinePresentationModel.js\",\n \"TimelineFrameController.js\"\n ]\n },\n {\n \"name\": \"audits\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"AuditCategories.js\",\n \"AuditController.js\",\n \"AuditFormatters.js\",\n \"AuditLauncherView.js\",\n \"AuditResultView.js\",\n \"AuditRules.js\",\n \"AuditsPanel.js\",\n ]\n },\n {\n \"name\": \"extensions\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"ExtensionAPI.js\",\n \"ExtensionAuditCategory.js\",\n \"ExtensionPanel.js\",\n \"ExtensionRegistryStub.js\",\n \"ExtensionServer.js\",\n \"ExtensionView.js\",\n ]\n },\n {\n \"name\": \"settings\",\n \"dependencies\": [\"components\", \"extensions\"],\n \"sources\": [\n \"SettingsScreen.js\",\n \"OverridesView.js\",\n 
]\n },\n {\n \"name\": \"tests\",\n \"dependencies\": [\"components\"],\n \"sources\": [\n \"TestController.js\",\n ]\n },\n {\n \"name\": \"profiler\",\n \"dependencies\": [\"components\", \"workers\"],\n \"sources\": [\n \"BottomUpProfileDataGridTree.js\",\n \"CPUProfileView.js\",\n \"CSSSelectorProfileView.js\",\n \"FlameChart.js\",\n \"HeapSnapshot.js\",\n \"HeapSnapshotDataGrids.js\",\n \"HeapSnapshotGridNodes.js\",\n \"HeapSnapshotLoader.js\",\n \"HeapSnapshotProxy.js\",\n \"HeapSnapshotView.js\",\n \"HeapSnapshotWorker.js\",\n \"HeapSnapshotWorkerDispatcher.js\",\n \"JSHeapSnapshot.js\",\n \"NativeHeapSnapshot.js\",\n \"ProfileDataGridTree.js\",\n \"ProfilesPanel.js\",\n \"ProfilesPanelDescriptor.js\",\n \"ProfileLauncherView.js\",\n \"TopDownProfileDataGridTree.js\",\n \"CanvasProfileView.js\",\n ]\n },\n {\n \"name\": \"host_stub\",\n \"dependencies\": [\"components\", \"profiler\", \"timeline\"],\n \"sources\": [\n \"InspectorFrontendAPI.js\",\n \"InspectorFrontendHostStub.js\",\n ]\n }\n]\n\nmodules_by_name = {}\nfor module in modules:\n modules_by_name[module[\"name\"]] = module\n\n\ndef dump_module(name, recursively, processed_modules):\n if name in processed_modules:\n return \"\"\n processed_modules[name] = True\n module = modules_by_name[name]\n command = \"\"\n if recursively:\n for dependency in module[\"dependencies\"]:\n command += dump_module(dependency, recursively, processed_modules)\n command += \" \\\\\\n --module \" + jsmodule_name_prefix + module[\"name\"] + \":\"\n command += str(len(module[\"sources\"]))\n firstDependency = True\n for dependency in module[\"dependencies\"]:\n if firstDependency:\n command += \":\"\n else:\n command += \",\"\n firstDependency = False\n command += jsmodule_name_prefix + dependency\n for script in module[\"sources\"]:\n command += \" \\\\\\n --js \" + inspector_frontend_path + \"/\" + script\n return command\n\nmodules_dir = tempfile.mkdtemp()\ncompiler_command = \"java -jar ~/closure/compiler.jar 
--summary_detail_level 3 --compilation_level SIMPLE_OPTIMIZATIONS --warning_level VERBOSE --language_in ECMASCRIPT5 --accept_const_keyword --module_output_path_prefix %s/ \\\\\\n\" % modules_dir\n\nprocess_recursively = len(sys.argv) > 1\nif process_recursively:\n module_name = sys.argv[1]\n if module_name != \"all\":\n modules = []\n for i in range(1, len(sys.argv)):\n modules.append(modules_by_name[sys.argv[i]])\n for module in modules:\n command = compiler_command\n command += \" --externs \" + inspector_frontend_path + \"/externs.js\"\n command += dump_module(module[\"name\"], True, {})\n print \"Compiling \\\"\" + module[\"name\"] + \"\\\"\"\n os.system(command)\nelse:\n command = compiler_command\n command += \" --externs \" + inspector_frontend_path + \"/externs.js\"\n for module in modules:\n command += dump_module(module[\"name\"], False, {})\n os.system(command)\n\nif not process_recursively:\n print \"Compiling InjectedScriptSource.js...\"\n os.system(\"echo \\\"var injectedScriptValue = \\\" > \" + inspector_path + \"/\" + \"InjectedScriptSourceTmp.js\")\n os.system(\"cat \" + inspector_path + \"/\" + \"InjectedScriptSource.js\" + \" >> \" + inspector_path + \"/\" + \"InjectedScriptSourceTmp.js\")\n command = compiler_command\n command += \" --externs \" + inspector_path + \"/\" + \"InjectedScriptExterns.js\" + \" \\\\\\n\"\n command += \" --externs \" + protocol_externs_path + \" \\\\\\n\"\n command += \" --module \" + jsmodule_name_prefix + \"injected_script\" + \":\" + \"1\" + \" \\\\\\n\"\n command += \" --js \" + inspector_path + \"/\" + \"InjectedScriptSourceTmp.js\" + \" \\\\\\n\"\n command += \"\\n\"\n os.system(command)\n os.system(\"rm \" + inspector_path + \"/\" + \"InjectedScriptSourceTmp.js\")\n\n print \"Compiling InjectedScriptCanvasModuleSource.js...\"\n os.system(\"echo \\\"var injectedScriptCanvasModuleValue = \\\" > \" + inspector_path + \"/\" + \"InjectedScriptCanvasModuleSourceTmp.js\")\n os.system(\"cat \" + inspector_path + \"/\" 
+ \"InjectedScriptCanvasModuleSource.js\" + \" >> \" + inspector_path + \"/\" + \"InjectedScriptCanvasModuleSourceTmp.js\")\n command = compiler_command\n command += \" --externs \" + inspector_path + \"/\" + \"InjectedScriptExterns.js\" + \" \\\\\\n\"\n command += \" --externs \" + protocol_externs_path + \" \\\\\\n\"\n command += \" --module \" + jsmodule_name_prefix + \"injected_script\" + \":\" + \"1\" + \" \\\\\\n\"\n command += \" --js \" + inspector_path + \"/\" + \"InjectedScriptCanvasModuleSourceTmp.js\" + \" \\\\\\n\"\n command += \"\\n\"\n os.system(command)\n os.system(\"rm \" + inspector_path + \"/\" + \"InjectedScriptCanvasModuleSourceTmp.js\")\n\nshutil.rmtree(modules_dir)\n#os.system(\"rm \" + protocol_externs_path)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475179,"cells":{"repo_name":{"kind":"string","value":"lxml/lxml"},"path":{"kind":"string","value":"src/lxml/tests/test_relaxng.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8434"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTest cases related to RelaxNG parsing and validation\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nfrom .common_imports import (\n etree, BytesIO, _bytes, HelperTestCase, fileInTestDir, make_doctest, skipif\n)\n\ntry:\n import rnc2rng\nexcept ImportError:\n rnc2rng = None\n\n\nclass ETreeRelaxNGTestCase(HelperTestCase):\n def test_relaxng(self):\n tree_valid = self.parse('<a><b></b></a>')\n tree_invalid = self.parse('<a><c></c></a>')\n schema = self.parse('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <zeroOrMore>\n <element name=\"b\">\n <text />\n </element>\n </zeroOrMore>\n</element>\n''')\n schema = etree.RelaxNG(schema)\n self.assertTrue(schema.validate(tree_valid))\n self.assertFalse(schema.error_log.filter_from_errors())\n\n self.assertFalse(schema.validate(tree_invalid))\n self.assertTrue(schema.error_log.filter_from_errors())\n\n 
self.assertTrue(schema.validate(tree_valid)) # repeat valid\n self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid\n\n def test_relaxng_stringio(self):\n tree_valid = self.parse('<a><b></b></a>')\n tree_invalid = self.parse('<a><c></c></a>')\n schema_file = BytesIO('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <zeroOrMore>\n <element name=\"b\">\n <text />\n </element>\n </zeroOrMore>\n</element>\n''')\n schema = etree.RelaxNG(file=schema_file)\n self.assertTrue(schema.validate(tree_valid))\n self.assertFalse(schema.validate(tree_invalid))\n\n def test_relaxng_elementtree_error(self):\n self.assertRaises(ValueError, etree.RelaxNG, etree.ElementTree())\n\n def test_relaxng_error(self):\n tree_invalid = self.parse('<a><c></c></a>')\n schema = self.parse('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <zeroOrMore>\n <element name=\"b\">\n <text />\n </element>\n </zeroOrMore>\n</element>\n''')\n schema = etree.RelaxNG(schema)\n self.assertFalse(schema.validate(tree_invalid))\n errors = schema.error_log\n self.assertTrue([log for log in errors\n if log.level_name == \"ERROR\"])\n self.assertTrue([log for log in errors\n if \"not expect\" in log.message])\n\n def test_relaxng_generic_error(self):\n tree_invalid = self.parse('''\\\n <test>\n <reference id=\"my-ref\">This is my unique ref.</reference>\n <data ref=\"my-ref\">Valid data</data>\n <data ref=\"myref\">Invalid data</data>\n </test>\n ''')\n schema = self.parse('''\\\n <grammar datatypeLibrary=\"http://www.w3.org/2001/XMLSchema-datatypes\"\n xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <define name=\"by-ref\">\n <data type=\"IDREF\"/>\n </define>\n <start>\n <element name=\"test\">\n <zeroOrMore>\n <element name=\"reference\">\n <attribute name=\"id\">\n <data type=\"ID\"/>\n </attribute>\n <text/>\n </element>\n </zeroOrMore>\n <zeroOrMore>\n <element name=\"data\">\n <attribute name=\"ref\">\n <data type=\"IDREF\"/>\n 
</attribute>\n <text/>\n </element>\n </zeroOrMore>\n </element>\n </start>\n </grammar>\n ''')\n\n schema = etree.RelaxNG(schema)\n self.assertFalse(schema.validate(tree_invalid))\n errors = schema.error_log\n self.assertTrue(errors)\n self.assertTrue([log for log in errors if \"IDREF\" in log.message])\n self.assertTrue([log for log in errors if \"myref\" in log.message])\n\n def test_relaxng_invalid_schema(self):\n schema = self.parse('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <zeroOrMore>\n <element name=\"b\" />\n </zeroOrMore>\n</element>\n''')\n self.assertRaises(etree.RelaxNGParseError,\n etree.RelaxNG, schema)\n\n def test_relaxng_invalid_schema2(self):\n schema = self.parse('''\\\n<grammar xmlns=\"http://relaxng.org/ns/structure/1.0\" />\n''')\n self.assertRaises(etree.RelaxNGParseError,\n etree.RelaxNG, schema)\n\n def test_relaxng_invalid_schema3(self):\n schema = self.parse('''\\\n<grammar xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <define name=\"test\">\n <element name=\"test\"/>\n </define>\n</grammar>\n''')\n self.assertRaises(etree.RelaxNGParseError,\n etree.RelaxNG, schema)\n\n def test_relaxng_invalid_schema4(self):\n # segfault\n schema = self.parse('''\\\n<element name=\"a\" xmlns=\"mynamespace\" />\n''')\n self.assertRaises(etree.RelaxNGParseError,\n etree.RelaxNG, schema)\n\n def test_relaxng_include(self):\n # this will only work if we access the file through path or\n # file object..\n f = open(fileInTestDir('test1.rng'), 'rb')\n try:\n schema = etree.RelaxNG(file=f)\n finally:\n f.close()\n\n def test_relaxng_shortcut(self):\n tree_valid = self.parse('<a><b></b></a>')\n tree_invalid = self.parse('<a><c></c></a>')\n schema = self.parse('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <zeroOrMore>\n <element name=\"b\">\n <text />\n </element>\n </zeroOrMore>\n</element>\n''')\n self.assertTrue(tree_valid.relaxng(schema))\n self.assertFalse(tree_invalid.relaxng(schema))\n\n 
def test_multiple_elementrees(self):\n tree = self.parse('<a><b>B</b><c>C</c></a>')\n schema = etree.RelaxNG( self.parse('''\\\n<element name=\"a\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <element name=\"b\">\n <text />\n </element>\n <element name=\"c\">\n <text />\n </element>\n</element>\n''') )\n self.assertTrue(schema.validate(tree))\n self.assertFalse(schema.error_log.filter_from_errors())\n\n self.assertTrue(schema.validate(tree)) # repeat valid\n self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid\n\n schema = etree.RelaxNG( self.parse('''\\\n<element name=\"b\" xmlns=\"http://relaxng.org/ns/structure/1.0\">\n <text />\n</element>\n''') )\n c_tree = etree.ElementTree(tree.getroot()[1])\n self.assertEqual(self._rootstring(c_tree), _bytes('<c>C</c>'))\n self.assertFalse(schema.validate(c_tree))\n self.assertTrue(schema.error_log.filter_from_errors())\n\n b_tree = etree.ElementTree(tree.getroot()[0])\n self.assertEqual(self._rootstring(b_tree), _bytes('<b>B</b>'))\n self.assertTrue(schema.validate(b_tree))\n self.assertFalse(schema.error_log.filter_from_errors())\n\n\nclass RelaxNGCompactTestCase(HelperTestCase):\n\n pytestmark = skipif('rnc2rng is None')\n\n def test_relaxng_compact(self):\n tree_valid = self.parse('<a><b>B</b><c>C</c></a>')\n tree_invalid = self.parse('<a><b></b></a>')\n schema = etree.RelaxNG(file=fileInTestDir('test.rnc'))\n self.assertTrue(schema.validate(tree_valid))\n self.assertFalse(schema.validate(tree_invalid))\n\n def test_relaxng_compact_file_obj(self):\n with open(fileInTestDir('test.rnc'), 'r') as f:\n schema = etree.RelaxNG(file=f)\n\n tree_valid = self.parse('<a><b>B</b><c>C</c></a>')\n tree_invalid = self.parse('<a><b></b></a>')\n self.assertTrue(schema.validate(tree_valid))\n self.assertFalse(schema.validate(tree_invalid))\n\n def test_relaxng_compact_str(self):\n tree_valid = self.parse('<a><b>B</b></a>')\n tree_invalid = self.parse('<a><b>X</b></a>')\n rnc_str = 'element a { element b { \"B\" } 
}'\n schema = etree.RelaxNG.from_rnc_string(rnc_str)\n self.assertTrue(schema.validate(tree_valid))\n self.assertFalse(schema.validate(tree_invalid))\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTests([unittest.makeSuite(ETreeRelaxNGTestCase)])\n suite.addTests(\n [make_doctest('../../../doc/validation.txt')])\n if rnc2rng is not None:\n suite.addTests([unittest.makeSuite(RelaxNGCompactTestCase)])\n return suite\n\nif __name__ == '__main__':\n print('to test use test.py %s' % __file__)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475180,"cells":{"repo_name":{"kind":"string","value":"robertwb/incubator-beam"},"path":{"kind":"string","value":"sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query1.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"1608"},"content":{"kind":"string","value":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Nexmark Query 1: Convert bid prices from dollars to euros.\n\nThe Nexmark suite is a series of queries (streaming pipelines) performed\non a simulation of auction events.\n\nThis query converts bid prices from dollars to euros.\nIt illustrates a simple map.\n\"\"\"\n# pytype: skip-file\n\nimport apache_beam as beam\nfrom apache_beam.testing.benchmarks.nexmark.models import nexmark_model\nfrom apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util\n\nUSD_TO_EURO = 0.89\n\n\ndef load(events, metadata=None, pipeline_options=None):\n return (\n events\n | nexmark_query_util.JustBids()\n | 'ConvertToEuro' >> beam.Map(\n lambda bid: nexmark_model.Bid(\n bid.auction,\n bid.bidder,\n bid.price * USD_TO_EURO,\n bid.date_time,\n bid.extra)))\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475181,"cells":{"repo_name":{"kind":"string","value":"jiaweizhou/kubernetes"},"path":{"kind":"string","value":"cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py"},"copies":{"kind":"string","value":"213"},"size":{"kind":"string","value":"4138"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright 2015 The Kubernetes Authors All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on 
an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shlex\nimport subprocess\nfrom path import path\n\n\ndef run(command, shell=False):\n \"\"\" A convience method for executing all the commands. \"\"\"\n print(command)\n if shell is False:\n command = shlex.split(command)\n output = subprocess.check_output(command, shell=shell)\n print(output)\n return output\n\n\nclass KubernetesInstaller():\n \"\"\"\n This class contains the logic needed to install kuberentes binary files.\n \"\"\"\n\n def __init__(self, arch, version, output_dir):\n \"\"\" Gather the required variables for the install. \"\"\"\n # The kubernetes-master charm needs certain commands to be aliased.\n self.aliases = {'kube-apiserver': 'apiserver',\n 'kube-controller-manager': 'controller-manager',\n 'kube-proxy': 'kube-proxy',\n 'kube-scheduler': 'scheduler',\n 'kubectl': 'kubectl',\n 'kubelet': 'kubelet'}\n self.arch = arch\n self.version = version\n self.output_dir = path(output_dir)\n\n def build(self, branch):\n \"\"\" Build kubernetes from a github repository using the Makefile. 
\"\"\"\n # Remove any old build artifacts.\n make_clean = 'make clean'\n run(make_clean)\n # Always checkout the master to get the latest repository information.\n git_checkout_cmd = 'git checkout master'\n run(git_checkout_cmd)\n # When checking out a tag, delete the old branch (not master).\n if branch != 'master':\n git_drop_branch = 'git branch -D {0}'.format(self.version)\n print(git_drop_branch)\n rc = subprocess.call(git_drop_branch.split())\n if rc != 0:\n print('returned: %d' % rc)\n # Make sure the git repository is up-to-date.\n git_fetch = 'git fetch origin {0}'.format(branch)\n run(git_fetch)\n\n if branch == 'master':\n git_reset = 'git reset --hard origin/master'\n run(git_reset)\n else:\n # Checkout a branch of kubernetes so the repo is correct.\n checkout = 'git checkout -b {0} {1}'.format(self.version, branch)\n run(checkout)\n\n # Create an environment with the path to the GO binaries included.\n go_path = ('/usr/local/go/bin', os.environ.get('PATH', ''))\n go_env = os.environ.copy()\n go_env['PATH'] = ':'.join(go_path)\n print(go_env['PATH'])\n\n # Compile the binaries with the make command using the WHAT variable.\n make_what = \"make all WHAT='cmd/kube-apiserver cmd/kubectl \"\\\n \"cmd/kube-controller-manager plugin/cmd/kube-scheduler \"\\\n \"cmd/kubelet cmd/kube-proxy'\"\n print(make_what)\n rc = subprocess.call(shlex.split(make_what), env=go_env)\n\n def install(self, install_dir=path('/usr/local/bin')):\n \"\"\" Install kubernetes binary files from the output directory. 
\"\"\"\n\n if not install_dir.isdir():\n install_dir.makedirs_p()\n\n # Create the symbolic links to the real kubernetes binaries.\n for key, value in self.aliases.iteritems():\n target = self.output_dir / key\n if target.exists():\n link = install_dir / value\n if link.exists():\n link.remove()\n target.symlink(link)\n else:\n print('Error target file {0} does not exist.'.format(target))\n exit(1)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475182,"cells":{"repo_name":{"kind":"string","value":"loopCM/chromium"},"path":{"kind":"string","value":"media/tools/constrained_network_server/cn.py"},"copies":{"kind":"string","value":"186"},"size":{"kind":"string","value":"4311"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"A script for configuring constraint networks.\n\nSets up a constrained network configuration on a specific port. Traffic on this\nport will be redirected to another local server port.\n\nThe configuration includes bandwidth, latency, and packet loss.\n\"\"\"\n\nimport collections\nimport logging\nimport optparse\nimport traffic_control\n\n# Default logging is ERROR. 
Use --verbose to enable DEBUG logging.\n_DEFAULT_LOG_LEVEL = logging.ERROR\n\nDispatcher = collections.namedtuple('Dispatcher', ['dispatch', 'requires_ports',\n 'desc'])\n\n# Map of command names to traffic_control functions.\nCOMMANDS = {\n # Adds a new constrained network configuration.\n 'add': Dispatcher(traffic_control.CreateConstrainedPort,\n requires_ports=True, desc='Add a new constrained port.'),\n\n # Deletes an existing constrained network configuration.\n 'del': Dispatcher(traffic_control.DeleteConstrainedPort,\n requires_ports=True, desc='Delete a constrained port.'),\n\n # Deletes all constrained network configurations.\n 'teardown': Dispatcher(traffic_control.TearDown,\n requires_ports=False,\n desc='Teardown all constrained ports.')\n}\n\n\ndef _ParseArgs():\n \"\"\"Define and parse command-line arguments.\n\n Returns:\n tuple as (command, configuration):\n command: one of the possible commands to setup, delete or teardown the\n constrained network.\n configuration: a map of constrained network properties to their values.\n \"\"\"\n parser = optparse.OptionParser()\n\n indent_first = parser.formatter.indent_increment\n opt_width = parser.formatter.help_position - indent_first\n\n cmd_usage = []\n for s in COMMANDS:\n cmd_usage.append('%*s%-*s%s' %\n (indent_first, '', opt_width, s, COMMANDS[s].desc))\n\n parser.usage = ('usage: %%prog {%s} [options]\\n\\n%s' %\n ('|'.join(COMMANDS.keys()), '\\n'.join(cmd_usage)))\n\n parser.add_option('--port', type='int',\n help='The port to apply traffic control constraints to.')\n parser.add_option('--server-port', type='int',\n help='Port to forward traffic on --port to.')\n parser.add_option('--bandwidth', type='int',\n help='Bandwidth of the network in kbit/s.')\n parser.add_option('--latency', type='int',\n help=('Latency (delay) added to each outgoing packet in '\n 'ms.'))\n parser.add_option('--loss', type='int',\n help='Packet-loss percentage on outgoing packets. 
')\n parser.add_option('--interface', type='string',\n help=('Interface to setup constraints on. Use \"lo\" for a '\n 'local client.'))\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n default=False, help='Turn on verbose output.')\n options, args = parser.parse_args()\n\n _SetLogger(options.verbose)\n\n # Check a valid command was entered\n if not args or args[0].lower() not in COMMANDS:\n parser.error('Please specify a command {%s}.' % '|'.join(COMMANDS.keys()))\n user_cmd = args[0].lower()\n\n # Check if required options are available\n if COMMANDS[user_cmd].requires_ports:\n if not (options.port and options.server_port):\n parser.error('Please provide port and server-port values.')\n\n config = {\n 'port': options.port,\n 'server_port': options.server_port,\n 'interface': options.interface,\n 'latency': options.latency,\n 'bandwidth': options.bandwidth,\n 'loss': options.loss\n }\n return user_cmd, config\n\n\ndef _SetLogger(verbose):\n log_level = _DEFAULT_LOG_LEVEL\n if verbose:\n log_level = logging.DEBUG\n logging.basicConfig(level=log_level, format='%(message)s')\n\n\ndef Main():\n \"\"\"Get the command and configuration of the network to set up.\"\"\"\n user_cmd, config = _ParseArgs()\n\n try:\n COMMANDS[user_cmd].dispatch(config)\n except traffic_control.TrafficControlError as e:\n logging.error('Error: %s\\n\\nOutput: %s', e.msg, e.error)\n\n\nif __name__ == '__main__':\n Main()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475183,"cells":{"repo_name":{"kind":"string","value":"muntasirsyed/intellij-community"},"path":{"kind":"string","value":"plugins/hg4idea/testData/bin/hgext/largefiles/wirestore.py"},"copies":{"kind":"string","value":"97"},"size":{"kind":"string","value":"1336"},"content":{"kind":"string","value":"# Copyright 2010-2011 Fog Creek Software\n#\n# This software may be used and distributed according to the terms of the\n# GNU General Public License version 2 or any later 
version.\n\n'''largefile store working over Mercurial's wire protocol'''\n\nimport lfutil\nimport remotestore\n\nclass wirestore(remotestore.remotestore):\n def __init__(self, ui, repo, remote):\n cap = remote.capable('largefiles')\n if not cap:\n raise lfutil.storeprotonotcapable([])\n storetypes = cap.split(',')\n if 'serve' not in storetypes:\n raise lfutil.storeprotonotcapable(storetypes)\n self.remote = remote\n super(wirestore, self).__init__(ui, repo, remote.url())\n\n def _put(self, hash, fd):\n return self.remote.putlfile(hash, fd)\n\n def _get(self, hash):\n return self.remote.getlfile(hash)\n\n def _stat(self, hashes):\n '''For each hash, return 0 if it is available, other values if not.\n It is usually 2 if the largefile is missing, but might be 1 the server\n has a corrupted copy.'''\n batch = self.remote.batch()\n futures = {}\n for hash in hashes:\n futures[hash] = batch.statlfile(hash)\n batch.submit()\n retval = {}\n for hash in hashes:\n retval[hash] = futures[hash].value\n return retval\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475184,"cells":{"repo_name":{"kind":"string","value":"komsas/OpenUpgrade"},"path":{"kind":"string","value":"addons/crm/wizard/crm_phonecall_to_phonecall.py"},"copies":{"kind":"string","value":"40"},"size":{"kind":"string","value":"4562"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR 
A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n\nimport time\n\nclass crm_phonecall2phonecall(osv.osv_memory):\n _name = 'crm.phonecall2phonecall'\n _description = 'Phonecall To Phonecall'\n\n _columns = {\n 'name' : fields.char('Call summary', size=64, required=True, select=1),\n 'user_id' : fields.many2one('res.users',\"Assign To\"),\n 'contact_name':fields.char('Contact', size=64),\n 'phone':fields.char('Phone', size=64),\n 'categ_id': fields.many2one('crm.case.categ', 'Category', \\\n domain=\"['|',('section_id','=',False),('section_id','=',section_id),\\\n ('object_id.model', '=', 'crm.phonecall')]\"), \n 'date': fields.datetime('Date'),\n 'section_id':fields.many2one('crm.case.section','Sales Team'),\n 'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True),\n 'partner_id' : fields.many2one('res.partner', \"Partner\"),\n 'note':fields.text('Note')\n }\n\n\n def action_cancel(self, cr, uid, ids, context=None):\n \"\"\"\n Closes Phonecall to Phonecall form\n \"\"\"\n return {'type':'ir.actions.act_window_close'}\n\n def action_schedule(self, cr, uid, ids, context=None):\n value = {}\n if context is None:\n context = {}\n phonecall = self.pool.get('crm.phonecall')\n phonecall_ids = context and context.get('active_ids') or []\n for this in self.browse(cr, uid, ids, context=context):\n phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \\\n this.user_id and this.user_id.id or False, \\\n this.section_id and this.section_id.id or False, \\\n this.categ_id and this.categ_id.id or False, \\\n action=this.action, 
context=context)\n\n return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context)\n \n def default_get(self, cr, uid, fields, context=None):\n \"\"\"\n This function gets default values\n \n \"\"\"\n res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)\n record_id = context and context.get('active_id', False) or False\n res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})\n if record_id:\n phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)\n\n categ_id = False\n data_obj = self.pool.get('ir.model.data')\n try:\n res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')\n categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id\n except ValueError:\n pass\n\n if 'name' in fields:\n res.update({'name': phonecall.name})\n if 'user_id' in fields:\n res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})\n if 'date' in fields:\n res.update({'date': False})\n if 'section_id' in fields:\n res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False})\n if 'categ_id' in fields:\n res.update({'categ_id': categ_id})\n if 'partner_id' in fields:\n res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})\n return res\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475185,"cells":{"repo_name":{"kind":"string","value":"fernandoacorreia/DjangoWAWSLogging"},"path":{"kind":"string","value":"DjangoWAWSLogging/env/Lib/site-packages/django/contrib/flatpages/tests/views.py"},"copies":{"kind":"string","value":"77"},"size":{"kind":"string","value":"6226"},"content":{"kind":"string","value":"import os\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.flatpages.models import FlatPage\nfrom django.test import TestCase\n\nclass 
FlatpageViewTests(TestCase):\n fixtures = ['sample_flatpages']\n urls = 'django.contrib.flatpages.tests.urls'\n\n def setUp(self):\n self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES\n flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'\n if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class)\n self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS\n settings.TEMPLATE_DIRS = (\n os.path.join(\n os.path.dirname(__file__),\n 'templates'\n ),\n )\n self.old_LOGIN_URL = settings.LOGIN_URL\n settings.LOGIN_URL = '/accounts/login/'\n\n def tearDown(self):\n settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES\n settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS\n settings.LOGIN_URL = self.old_LOGIN_URL\n\n def test_view_flatpage(self):\n \"A flatpage can be served through a view\"\n response = self.client.get('/flatpage_root/flatpage/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<p>Isn't it flat!</p>\")\n\n def test_view_non_existent_flatpage(self):\n \"A non-existent flatpage raises 404 when served through a view\"\n response = self.client.get('/flatpage_root/no_such_flatpage/')\n self.assertEqual(response.status_code, 404)\n\n def test_view_authenticated_flatpage(self):\n \"A flatpage served through a view can require authentication\"\n response = self.client.get('/flatpage_root/sekrit/')\n self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')\n User.objects.create_user('testuser', 'test@example.com', 's3krit')\n self.client.login(username='testuser',password='s3krit')\n response = self.client.get('/flatpage_root/sekrit/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<p>Isn't it sekrit!</p>\")\n\n def test_fallback_flatpage(self):\n \"A fallback flatpage won't be served if the middleware is disabled\"\n response = 
self.client.get('/flatpage/')\n self.assertEqual(response.status_code, 404)\n\n def test_fallback_non_existent_flatpage(self):\n \"A non-existent flatpage won't be served if the fallback middlware is disabled\"\n response = self.client.get('/no_such_flatpage/')\n self.assertEqual(response.status_code, 404)\n\n def test_view_flatpage_special_chars(self):\n \"A flatpage with special chars in the URL can be served through a view\"\n fp = FlatPage.objects.create(\n url=\"/some.very_special~chars-here/\",\n title=\"A very special page\",\n content=\"Isn't it special!\",\n enable_comments=False,\n registration_required=False,\n )\n fp.sites.add(settings.SITE_ID)\n\n response = self.client.get('/flatpage_root/some.very_special~chars-here/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"<p>Isn't it special!</p>\")\n\n\nclass FlatpageViewAppendSlashTests(TestCase):\n fixtures = ['sample_flatpages']\n urls = 'django.contrib.flatpages.tests.urls'\n\n def setUp(self):\n self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES\n flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'\n if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class)\n self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS\n settings.TEMPLATE_DIRS = (\n os.path.join(\n os.path.dirname(__file__),\n 'templates'\n ),\n )\n self.old_LOGIN_URL = settings.LOGIN_URL\n settings.LOGIN_URL = '/accounts/login/'\n self.old_APPEND_SLASH = settings.APPEND_SLASH\n settings.APPEND_SLASH = True\n\n def tearDown(self):\n settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES\n settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS\n settings.LOGIN_URL = self.old_LOGIN_URL\n settings.APPEND_SLASH = self.old_APPEND_SLASH\n\n def test_redirect_view_flatpage(self):\n \"A flatpage can be served through a view and should add a slash\"\n response = 
self.client.get('/flatpage_root/flatpage')\n self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)\n\n def test_redirect_view_non_existent_flatpage(self):\n \"A non-existent flatpage raises 404 when served through a view and should not add a slash\"\n response = self.client.get('/flatpage_root/no_such_flatpage')\n self.assertEqual(response.status_code, 404)\n\n def test_redirect_fallback_flatpage(self):\n \"A fallback flatpage won't be served if the middleware is disabled and should not add a slash\"\n response = self.client.get('/flatpage')\n self.assertEqual(response.status_code, 404)\n\n def test_redirect_fallback_non_existent_flatpage(self):\n \"A non-existent flatpage won't be served if the fallback middlware is disabled and should not add a slash\"\n response = self.client.get('/no_such_flatpage')\n self.assertEqual(response.status_code, 404)\n\n def test_redirect_view_flatpage_special_chars(self):\n \"A flatpage with special chars in the URL can be served through a view and should add a slash\"\n fp = FlatPage.objects.create(\n url=\"/some.very_special~chars-here/\",\n title=\"A very special page\",\n content=\"Isn't it special!\",\n enable_comments=False,\n registration_required=False,\n )\n fp.sites.add(1)\n\n response = self.client.get('/flatpage_root/some.very_special~chars-here')\n self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475186,"cells":{"repo_name":{"kind":"string","value":"turbokongen/home-assistant"},"path":{"kind":"string","value":"tests/components/nx584/test_binary_sensor.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"7175"},"content":{"kind":"string","value":"\"\"\"The tests for the nx584 sensor platform.\"\"\"\nfrom unittest import mock\n\nfrom nx584 import client as nx584_client\nimport pytest\nimport requests\n\nfrom homeassistant.components.nx584 import binary_sensor as nx584\nfrom 
homeassistant.setup import async_setup_component\n\n\nclass StopMe(Exception):\n \"\"\"Stop helper.\"\"\"\n\n\n@pytest.fixture\ndef fake_zones():\n \"\"\"Fixture for fake zones.\n\n Returns:\n list: List of fake zones\n \"\"\"\n return [\n {\"name\": \"front\", \"number\": 1},\n {\"name\": \"back\", \"number\": 2},\n {\"name\": \"inside\", \"number\": 3},\n ]\n\n\n@pytest.fixture\ndef client(fake_zones):\n \"\"\"Fixture for client.\n\n Args:\n fake_zones (list): Fixture of fake zones\n\n Yields:\n MagicMock: Client Mock\n \"\"\"\n with mock.patch.object(nx584_client, \"Client\") as _mock_client:\n client = nx584_client.Client.return_value\n client.list_zones.return_value = fake_zones\n client.get_version.return_value = \"1.1\"\n\n yield _mock_client\n\n\n@pytest.mark.usefixtures(\"client\")\n@mock.patch(\"homeassistant.components.nx584.binary_sensor.NX584Watcher\")\n@mock.patch(\"homeassistant.components.nx584.binary_sensor.NX584ZoneSensor\")\ndef test_nx584_sensor_setup_defaults(mock_nx, mock_watcher, hass, fake_zones):\n \"\"\"Test the setup with no configuration.\"\"\"\n add_entities = mock.MagicMock()\n config = {\n \"host\": nx584.DEFAULT_HOST,\n \"port\": nx584.DEFAULT_PORT,\n \"exclude_zones\": [],\n \"zone_types\": {},\n }\n assert nx584.setup_platform(hass, config, add_entities)\n mock_nx.assert_has_calls([mock.call(zone, \"opening\") for zone in fake_zones])\n assert add_entities.called\n assert nx584_client.Client.call_count == 1\n assert nx584_client.Client.call_args == mock.call(\"http://localhost:5007\")\n\n\n@pytest.mark.usefixtures(\"client\")\n@mock.patch(\"homeassistant.components.nx584.binary_sensor.NX584Watcher\")\n@mock.patch(\"homeassistant.components.nx584.binary_sensor.NX584ZoneSensor\")\ndef test_nx584_sensor_setup_full_config(mock_nx, mock_watcher, hass, fake_zones):\n \"\"\"Test the setup with full configuration.\"\"\"\n config = {\n \"host\": \"foo\",\n \"port\": 123,\n \"exclude_zones\": [2],\n \"zone_types\": {3: \"motion\"},\n }\n 
add_entities = mock.MagicMock()\n assert nx584.setup_platform(hass, config, add_entities)\n mock_nx.assert_has_calls(\n [\n mock.call(fake_zones[0], \"opening\"),\n mock.call(fake_zones[2], \"motion\"),\n ]\n )\n assert add_entities.called\n assert nx584_client.Client.call_count == 1\n assert nx584_client.Client.call_args == mock.call(\"http://foo:123\")\n assert mock_watcher.called\n\n\nasync def _test_assert_graceful_fail(hass, config):\n \"\"\"Test the failing.\"\"\"\n assert not await async_setup_component(hass, \"nx584\", config)\n\n\n@pytest.mark.usefixtures(\"client\")\n@pytest.mark.parametrize(\n \"config\",\n [\n ({\"exclude_zones\": [\"a\"]}),\n ({\"zone_types\": {\"a\": \"b\"}}),\n ({\"zone_types\": {1: \"notatype\"}}),\n ({\"zone_types\": {\"notazone\": \"motion\"}}),\n ],\n)\nasync def test_nx584_sensor_setup_bad_config(hass, config):\n \"\"\"Test the setup with bad configuration.\"\"\"\n await _test_assert_graceful_fail(hass, config)\n\n\n@pytest.mark.usefixtures(\"client\")\n@pytest.mark.parametrize(\n \"exception_type\",\n [\n pytest.param(requests.exceptions.ConnectionError, id=\"connect_failed\"),\n pytest.param(IndexError, id=\"no_partitions\"),\n ],\n)\nasync def test_nx584_sensor_setup_with_exceptions(hass, exception_type):\n \"\"\"Test the setup handles exceptions.\"\"\"\n nx584_client.Client.return_value.list_zones.side_effect = exception_type\n await _test_assert_graceful_fail(hass, {})\n\n\n@pytest.mark.usefixtures(\"client\")\nasync def test_nx584_sensor_setup_version_too_old(hass):\n \"\"\"Test if version is too old.\"\"\"\n nx584_client.Client.return_value.get_version.return_value = \"1.0\"\n await _test_assert_graceful_fail(hass, {})\n\n\n@pytest.mark.usefixtures(\"client\")\ndef test_nx584_sensor_setup_no_zones(hass):\n \"\"\"Test the setup with no zones.\"\"\"\n nx584_client.Client.return_value.list_zones.return_value = []\n add_entities = mock.MagicMock()\n assert nx584.setup_platform(hass, {}, add_entities)\n assert not 
add_entities.called\n\n\ndef test_nx584_zone_sensor_normal():\n \"\"\"Test for the NX584 zone sensor.\"\"\"\n zone = {\"number\": 1, \"name\": \"foo\", \"state\": True}\n sensor = nx584.NX584ZoneSensor(zone, \"motion\")\n assert \"foo\" == sensor.name\n assert not sensor.should_poll\n assert sensor.is_on\n assert sensor.device_state_attributes[\"zone_number\"] == 1\n\n zone[\"state\"] = False\n assert not sensor.is_on\n\n\n@mock.patch.object(nx584.NX584ZoneSensor, \"schedule_update_ha_state\")\ndef test_nx584_watcher_process_zone_event(mock_update):\n \"\"\"Test the processing of zone events.\"\"\"\n zone1 = {\"number\": 1, \"name\": \"foo\", \"state\": True}\n zone2 = {\"number\": 2, \"name\": \"bar\", \"state\": True}\n zones = {\n 1: nx584.NX584ZoneSensor(zone1, \"motion\"),\n 2: nx584.NX584ZoneSensor(zone2, \"motion\"),\n }\n watcher = nx584.NX584Watcher(None, zones)\n watcher._process_zone_event({\"zone\": 1, \"zone_state\": False})\n assert not zone1[\"state\"]\n assert mock_update.call_count == 1\n\n\n@mock.patch.object(nx584.NX584ZoneSensor, \"schedule_update_ha_state\")\ndef test_nx584_watcher_process_zone_event_missing_zone(mock_update):\n \"\"\"Test the processing of zone events with missing zones.\"\"\"\n watcher = nx584.NX584Watcher(None, {})\n watcher._process_zone_event({\"zone\": 1, \"zone_state\": False})\n assert not mock_update.called\n\n\ndef test_nx584_watcher_run_with_zone_events():\n \"\"\"Test the zone events.\"\"\"\n empty_me = [1, 2]\n\n def fake_get_events():\n \"\"\"Return nothing twice, then some events.\"\"\"\n if empty_me:\n empty_me.pop()\n else:\n return fake_events\n\n client = mock.MagicMock()\n fake_events = [\n {\"zone\": 1, \"zone_state\": True, \"type\": \"zone_status\"},\n {\"zone\": 2, \"foo\": False},\n ]\n client.get_events.side_effect = fake_get_events\n watcher = nx584.NX584Watcher(client, {})\n\n @mock.patch.object(watcher, \"_process_zone_event\")\n def run(fake_process):\n \"\"\"Run a fake process.\"\"\"\n 
fake_process.side_effect = StopMe\n with pytest.raises(StopMe):\n watcher._run()\n assert fake_process.call_count == 1\n assert fake_process.call_args == mock.call(fake_events[0])\n\n run()\n assert 3 == client.get_events.call_count\n\n\n@mock.patch(\"time.sleep\")\ndef test_nx584_watcher_run_retries_failures(mock_sleep):\n \"\"\"Test the retries with failures.\"\"\"\n empty_me = [1, 2]\n\n def fake_run():\n \"\"\"Fake runner.\"\"\"\n if empty_me:\n empty_me.pop()\n raise requests.exceptions.ConnectionError()\n raise StopMe()\n\n watcher = nx584.NX584Watcher(None, {})\n with mock.patch.object(watcher, \"_run\") as mock_inner:\n mock_inner.side_effect = fake_run\n with pytest.raises(StopMe):\n watcher.run()\n assert 3 == mock_inner.call_count\n mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475187,"cells":{"repo_name":{"kind":"string","value":"GabrielNicolasAvellaneda/chemlab"},"path":{"kind":"string","value":"chemlab/db/toxnetdb.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"2107"},"content":{"kind":"string","value":"'''Database for toxnet'''\nfrom .base import AbstractDB, EntryNotFound\n\n# Python 2-3 compatibility\ntry:\n from urllib.parse import quote_plus\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import quote_plus\n from urllib2 import urlopen\n\nimport re\n\nclass ToxNetDB(AbstractDB):\n def __init__(self):\n self.baseurl = 'http://toxgate.nlm.nih.gov'\n\n\n def get(self, feature, query):\n searchurl = self.baseurl + '/cgi-bin/sis/search/x?dbs+hsdb:%s'%quote_plus(query)\n result = urlopen(searchurl).read()\n \n try:\n result= str(result, 'utf-8')\n except TypeError:\n pass\n \n if not result:\n raise EntryNotFound()\n \n #print result\n firstresult = re.findall(r'\\<Id>(.*?)\\</Id>', result)[0].split()[0]\n\n retrieveurl = self.baseurl + '/cgi-bin/sis/search/r?dbs+hsdb:@term+@DOCNO+%s'%firstresult\n result = 
urlopen(retrieveurl).read()\n \n try:\n result = str(result, 'utf-8')\n except TypeError:\n pass\n \n tocregex = 'SRC=\"(.*?)\"'\n basesearch = re.findall(tocregex, result)[0]\n basesearch = ':'.join(basesearch.split(':')[:-1])\n \n if feature == 'boiling point':\n bprequest = urlopen(self.baseurl + basesearch + ':bp').read()\n # Massaging this request is not easy\n \n try: # python3\n bprequest = str(bprequest, 'utf-8')\n except TypeError:\n pass\n \n res = re.findall(r\">\\s*(.*?)\\s*deg C\", bprequest)\n #print res\n return float(res[0])\n \n if feature == 'melting point':\n bprequest = urlopen(self.baseurl + basesearch + ':mp').read()\n try: # python3\n bprequest = str(bprequest, 'utf-8')\n except TypeError:\n pass\n \n # Massaging this request is not easy\n res = re.findall(r\">\\s*(.*?)\\s*deg C\", bprequest)\n return float(res[0])\n \n "},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475188,"cells":{"repo_name":{"kind":"string","value":"akozumpl/dnf"},"path":{"kind":"string","value":"tests/test_commands.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"39804"},"content":{"kind":"string","value":"# Copyright (C) 2012-2014 Red Hat, Inc.\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# the GNU General Public License v.2, or (at your option) any later version.\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY expressed or implied, including the implied warranties of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details. You should have received a copy of the\n# GNU General Public License along with this program; if not, write to the\n# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the\n# source code or documentation are not subject to the GNU General Public\n# License and may only be used or replicated with the express permission of\n# Red Hat, Inc.\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom tests import support\nfrom tests.support import mock\n\nimport dnf.cli.commands\nimport dnf.cli.commands.group\nimport dnf.cli.commands.install\nimport dnf.cli.commands.reinstall\nimport dnf.cli.commands.upgrade\nimport dnf.repo\nimport itertools\nimport logging\nimport tests.support\nimport unittest\n\nlogger = logging.getLogger('dnf')\n\n\nclass CommandsCliTest(support.TestCase):\n def setUp(self):\n self.base = support.MockBase()\n self.cli = self.base.mock_cli()\n\n def test_erase_configure(self):\n erase_cmd = dnf.cli.commands.EraseCommand(self.cli)\n erase_cmd.configure([])\n self.assertTrue(self.cli.demands.allow_erasing)\n\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n def test_history_get_error_output_rollback_transactioncheckerror(self):\n \"\"\"Test get_error_output with the history rollback and a TransactionCheckError.\"\"\"\n cmd = dnf.cli.commands.HistoryCommand(self.cli)\n self.base.basecmd = 'history'\n self.base.extcmds = ('rollback', '1')\n\n lines = cmd.get_error_output(dnf.exceptions.TransactionCheckError())\n\n self.assertEqual(\n lines,\n ('Cannot rollback transaction 1, doing so would result in an '\n 'inconsistent package database.',))\n\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n def test_history_get_error_output_undo_transactioncheckerror(self):\n \"\"\"Test get_error_output with the history undo and a TransactionCheckError.\"\"\"\n cmd = dnf.cli.commands.HistoryCommand(self.cli)\n self.base.basecmd = 'history'\n self.base.extcmds = ('undo', '1')\n\n lines = cmd.get_error_output(dnf.exceptions.TransactionCheckError())\n\n self.assertEqual(\n lines,\n ('Cannot undo 
transaction 1, doing so would result in an '\n 'inconsistent package database.',))\n\n @staticmethod\n @mock.patch('dnf.Base.fill_sack')\n def _do_makecache(cmd, fill_sack):\n return cmd.run(['timer'])\n\n def assertLastInfo(self, logger, msg):\n self.assertEqual(logger.info.mock_calls[-1],\n mock.call(msg))\n\n @mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger)\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.util.on_ac_power', return_value=True)\n def test_makecache_timer(self, _on_ac_power, logger):\n cmd = dnf.cli.commands.MakeCacheCommand(self.cli)\n\n self.base.conf.metadata_timer_sync = 0\n self.assertFalse(self._do_makecache(cmd))\n self.assertLastInfo(logger, u'Metadata timer caching disabled.')\n\n self.base.conf.metadata_timer_sync = 5 # resync after 5 seconds\n self.base._persistor.since_last_makecache = mock.Mock(return_value=3)\n self.assertFalse(self._do_makecache(cmd))\n self.assertLastInfo(logger, u'Metadata cache refreshed recently.')\n\n self.base._persistor.since_last_makecache = mock.Mock(return_value=10)\n self.base._sack = 'nonempty'\n r = support.MockRepo(\"glimpse\", None)\n self.base.repos.add(r)\n\n # regular case 1: metadata is already expired:\n r.metadata_expire_in = mock.Mock(return_value=(False, 0))\n r.sync_strategy = dnf.repo.SYNC_TRY_CACHE\n self.assertTrue(self._do_makecache(cmd))\n self.assertLastInfo(logger, u'Metadata cache created.')\n self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED)\n\n # regular case 2: metadata is cached and will expire later than\n # metadata_timer_sync:\n r.metadata_expire_in = mock.Mock(return_value=(True, 100))\n r.sync_strategy = dnf.repo.SYNC_TRY_CACHE\n self.assertTrue(self._do_makecache(cmd))\n self.assertLastInfo(logger, u'Metadata cache created.')\n self.assertEqual(r.sync_strategy, dnf.repo.SYNC_TRY_CACHE)\n\n # regular case 3: metadata is cached but will eqpire before\n # metadata_timer_sync:\n 
r.metadata_expire_in = mock.Mock(return_value=(True, 4))\n r.sync_strategy = dnf.repo.SYNC_TRY_CACHE\n self.assertTrue(self._do_makecache(cmd))\n self.assertLastInfo(logger, u'Metadata cache created.')\n self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED)\n\n @mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger)\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.util.on_ac_power', return_value=False)\n def test_makecache_timer_battery(self, _on_ac_power, logger):\n cmd = dnf.cli.commands.MakeCacheCommand(self.cli)\n self.base.conf.metadata_timer_sync = 5\n\n self.assertFalse(self._do_makecache(cmd))\n msg = u'Metadata timer caching disabled when running on a battery.'\n self.assertLastInfo(logger, msg)\n\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.util.on_ac_power', return_value=None)\n def test_makecache_timer_battery2(self, _on_ac_power):\n cmd = dnf.cli.commands.MakeCacheCommand(self.cli)\n self.base.conf.metadata_timer_sync = 5\n self.assertTrue(self._do_makecache(cmd))\n\nclass CommandTest(support.TestCase):\n def test_canonical(self):\n cmd = dnf.cli.commands.upgrade.UpgradeCommand(None)\n (base, ext) = cmd.canonical(['update', 'cracker', 'filling'])\n self.assertEqual(base, 'upgrade')\n self.assertEqual(ext, ['cracker', 'filling'])\n\nclass EraseCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.EraseCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(EraseCommandTest, self).setUp()\n base = support.BaseCliStub()\n base.init_sack()\n self.cmd = dnf.cli.commands.EraseCommand(base.mock_cli())\n\n def test_run(self):\n \"\"\"Test whether the package is installed.\"\"\"\n self.cmd.run(['pepper'])\n\n self.assertResult(\n self.cmd.base,\n self.cmd.base.sack.query().installed().filter(name__neq='pepper'))\n\n @mock.patch('dnf.cli.commands._', 
dnf.pycomp.NullTranslations().ugettext)\n def test_run_notfound(self):\n \"\"\"Test whether it fails if the package cannot be found.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error, self.cmd.run, ['non-existent'])\n\n self.assertEqual(stdout.getvalue(),\n 'No match for argument: non-existent\\n')\n self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed())\n\nclass InstallCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.install.InstallCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(InstallCommandTest, self).setUp()\n base = support.BaseCliStub('main')\n base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH)\n base.init_sack()\n self._cmd = dnf.cli.commands.install.InstallCommand(base.mock_cli())\n\n def test_configure(self):\n cli = self._cmd.cli\n self._cmd.configure([])\n self.assertFalse(cli.demands.allow_erasing)\n self.assertTrue(cli.demands.sack_activation)\n\n def test_run_group(self):\n \"\"\"Test whether a group is installed.\"\"\"\n self._cmd.run(['@Solid Ground'])\n\n base = self._cmd.cli.base\n self.assertResult(base, itertools.chain(\n base.sack.query().installed(),\n dnf.subject.Subject('trampoline').get_best_query(base.sack)))\n\n @mock.patch('dnf.cli.commands.install._',\n dnf.pycomp.NullTranslations().ugettext)\n def test_run_group_notfound(self):\n \"\"\"Test whether it fails if the group cannot be found.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error,\n self._cmd.run, ['@non-existent'])\n\n self.assertEqual(stdout.getvalue(),\n \"Warning: Group 'non-existent' does not exist.\\n\")\n self.assertResult(self._cmd.cli.base,\n self._cmd.cli.base.sack.query().installed())\n\n def test_run_package(self):\n \"\"\"Test whether a package is installed.\"\"\"\n 
self._cmd.run(['lotus'])\n\n base = self._cmd.cli.base\n self.assertResult(base, itertools.chain(\n base.sack.query().installed(),\n dnf.subject.Subject('lotus.x86_64').get_best_query(base.sack)))\n\n @mock.patch('dnf.cli.commands.install._',\n dnf.pycomp.NullTranslations().ugettext)\n def test_run_package_notfound(self):\n \"\"\"Test whether it fails if the package cannot be found.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error,\n self._cmd.run, ['non-existent'])\n\n self.assertEqual(stdout.getvalue(),\n 'No package non-existent available.\\n')\n self.assertResult(self._cmd.cli.base,\n self._cmd.cli.base.sack.query().installed())\n\nclass ReinstallCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.ReinstallCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(ReinstallCommandTest, self).setUp()\n base = support.BaseCliStub('main')\n base.init_sack()\n self._cmd = dnf.cli.commands.reinstall.ReinstallCommand(base.mock_cli())\n\n def test_run(self):\n \"\"\"Test whether the package is installed.\"\"\"\n self._cmd.run(['pepper'])\n\n base = self._cmd.cli.base\n self.assertResult(base, itertools.chain(\n base.sack.query().installed().filter(name__neq='pepper'),\n dnf.subject.Subject('pepper.x86_64').get_best_query(base.sack)\n .available()))\n\n @mock.patch('dnf.cli.commands.reinstall._',\n dnf.pycomp.NullTranslations().ugettext)\n def test_run_notinstalled(self):\n \"\"\"Test whether it fails if the package is not installed.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['lotus'])\n\n self.assertEqual(stdout.getvalue(), 'No match for argument: lotus\\n')\n self.assertResult(self._cmd.cli.base,\n self._cmd.cli.base.sack.query().installed())\n\n @mock.patch('dnf.cli.commands.reinstall._',\n 
dnf.pycomp.NullTranslations().ugettext)\n def test_run_notavailable(self):\n \"\"\"Test whether it fails if the package is not available.\"\"\"\n base = self._cmd.cli.base\n holes_query = dnf.subject.Subject('hole').get_best_query(base.sack)\n for pkg in holes_query.installed():\n self._cmd.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self._cmd.base.yumdb.get_package(pkg).from_repo = 'unknown'\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['hole'])\n\n self.assertEqual(\n stdout.getvalue(),\n 'Installed package hole-1-1.x86_64 (from unknown) not available.\\n')\n self.assertResult(base, base.sack.query().installed())\n\nclass RepoPkgsCommandTest(unittest.TestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsCommandTest, self).setUp()\n cli = support.BaseCliStub().mock_cli()\n self.cmd = dnf.cli.commands.RepoPkgsCommand(cli)\n\n def test_configure_badargs(self):\n \"\"\"Test whether the method does not fail even in case of wrong args.\"\"\"\n self.cmd.configure([])\n\nclass RepoPkgsCheckUpdateSubCommandTest(unittest.TestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsCheckUpdateSubCommandTest, self).setUp()\n base = support.BaseCliStub('main', 'updates', 'third_party')\n self.cli = base.mock_cli()\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n def test(self):\n \"\"\"Test whether only upgrades in the repository are listed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('updates', [])\n\n self.assertEqual(\n stdout.getvalue(),\n u'\\n'\n u'hole.x86_64 1-2'\n u' updates\\n'\n u'hole.x86_64 
2-1'\n u' updates\\n'\n u'pepper.x86_64 20-1'\n u' updates\\n'\n u'Obsoleting Packages\\n'\n u'hole.i686 2-1'\n u' updates\\n'\n u' tour.noarch 5-0'\n u' @System\\n'\n u'hole.x86_64 2-1'\n u' updates\\n'\n u' tour.noarch 5-0'\n u' @System\\n')\n self.assertEqual(self.cli.demands.success_exit_status, 100)\n\n def test_not_found(self):\n \"\"\"Test whether exit code differs if updates are not found.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli)\n cmd.run_on_repo('main', [])\n self.assertNotEqual(self.cli.demands.success_exit_status, 100)\n\nclass RepoPkgsInfoSubCommandTest(unittest.TestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.InfoSubCommand`` class.\"\"\"\n\n AVAILABLE_TITLE = u'Available Packages\\n'\n\n HOLE_I686_INFO = (u'Name : hole\\n'\n u'Arch : i686\\n'\n u'Epoch : 0\\n'\n u'Version : 2\\n'\n u'Release : 1\\n'\n u'Size : 0.0 \\n'\n u'Repo : updates\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n'\n u'\\n')\n\n HOLE_X86_64_INFO = (u'Name : hole\\n'\n u'Arch : x86_64\\n'\n u'Epoch : 0\\n'\n u'Version : 2\\n'\n u'Release : 1\\n'\n u'Size : 0.0 \\n'\n u'Repo : updates\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n\\n')\n\n INSTALLED_TITLE = u'Installed Packages\\n'\n\n PEPPER_SYSTEM_INFO = (u'Name : pepper\\n'\n u'Arch : x86_64\\n'\n u'Epoch : 0\\n'\n u'Version : 20\\n'\n u'Release : 0\\n'\n u'Size : 0.0 \\n'\n u'Repo : @System\\n'\n u'From repo : main\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n\\n')\n\n PEPPER_UPDATES_INFO = (u'Name : pepper\\n'\n u'Arch : x86_64\\n'\n u'Epoch : 0\\n'\n u'Version : 20\\n'\n u'Release : 1\\n'\n u'Size : 0.0 \\n'\n u'Repo : updates\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n\\n')\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsInfoSubCommandTest, self).setUp()\n base = support.BaseCliStub('main', 'updates', 'third_party')\n base.conf.recent = 7\n self.cli = base.mock_cli()\n\n 
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_all(self):\n \"\"\"Test whether only packages related to the repository are listed.\"\"\"\n for pkg in self.cli.base.sack.query().installed().filter(name='pepper'):\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = 'main'\n\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('main', ['all', '*p*'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((\n self.INSTALLED_TITLE,\n self.PEPPER_SYSTEM_INFO,\n self.AVAILABLE_TITLE,\n u'Name : pepper\\n'\n u'Arch : src\\n'\n u'Epoch : 0\\n'\n u'Version : 20\\n'\n u'Release : 0\\n'\n u'Size : 0.0 \\n'\n u'Repo : main\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n'\n u'\\n',\n u'Name : trampoline\\n'\n u'Arch : noarch\\n'\n u'Epoch : 0\\n'\n u'Version : 2.1\\n'\n u'Release : 1\\n'\n u'Size : 0.0 \\n'\n u'Repo : main\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n'\n u'\\n')))\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_available(self):\n \"\"\"Test whether only packages in the repository are listed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('updates', ['available'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((\n self.AVAILABLE_TITLE,\n self.HOLE_I686_INFO,\n self.HOLE_X86_64_INFO,\n self.PEPPER_UPDATES_INFO)))\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_extras(self):\n \"\"\"Test whether only extras installed from the repository are 
listed.\"\"\"\n for pkg in self.cli.base.sack.query().installed().filter(name='tour'):\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = 'unknown'\n\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('unknown', ['extras'])\n\n self.assertEqual(\n stdout.getvalue(),\n u'Extra Packages\\n'\n u'Name : tour\\n'\n u'Arch : noarch\\n'\n u'Epoch : 0\\n'\n u'Version : 5\\n'\n u'Release : 0\\n'\n u'Size : 0.0 \\n'\n u'Repo : @System\\n'\n u'From repo : unknown\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n\\n')\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_installed(self):\n \"\"\"Test whether only packages installed from the repository are listed.\"\"\"\n for pkg in self.cli.base.sack.query().installed().filter(name='pepper'):\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = 'main'\n\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('main', ['installed'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((self.INSTALLED_TITLE, self.PEPPER_SYSTEM_INFO)))\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_obsoletes(self):\n \"\"\"Test whether only obsoletes in the repository are listed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('updates', ['obsoletes'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((\n u'Obsoleting Packages\\n',\n self.HOLE_I686_INFO,\n self.HOLE_X86_64_INFO)))\n\n 
@mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_recent(self):\n \"\"\"Test whether only packages in the repository are listed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with mock.patch('time.time', return_value=0), \\\n support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('updates', ['recent'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((\n u'Recently Added Packages\\n',\n self.HOLE_I686_INFO,\n self.HOLE_X86_64_INFO,\n self.PEPPER_UPDATES_INFO)))\n\n @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext)\n @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext)\n def test_info_upgrades(self):\n \"\"\"Test whether only upgrades in the repository are listed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli)\n with support.patch_std_streams() as (stdout, _):\n cmd.run_on_repo('updates', ['upgrades'])\n\n self.assertEqual(\n stdout.getvalue(),\n ''.join((\n u'Upgraded Packages\\n'\n u'Name : hole\\n'\n u'Arch : x86_64\\n'\n u'Epoch : 0\\n'\n u'Version : 1\\n'\n u'Release : 2\\n'\n u'Size : 0.0 \\n'\n u'Repo : updates\\n'\n u'Summary : \\n'\n u'License : \\n'\n u'Description : \\n'\n u'\\n',\n self.HOLE_X86_64_INFO,\n self.PEPPER_UPDATES_INFO)))\n\nclass RepoPkgsInstallSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.InstallSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsInstallSubCommandTest, self).setUp()\n base = support.BaseCliStub('main', 'third_party')\n base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH)\n base.repos['third_party'].enablegroups = False\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n \"\"\"Test whether all packages from the repository are installed.\"\"\"\n cmd = 
dnf.cli.commands.RepoPkgsCommand.InstallSubCommand(self.cli)\n cmd.run_on_repo('third_party', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='hole'),\n self.cli.base.sack.query().available().filter(reponame='third_party',\n arch='x86_64')))\n\nclass RepoPkgsMoveToSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsMoveToSubCommandTest, self).setUp()\n base = support.BaseCliStub('distro', 'main')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n \"\"\"Test whether only packages in the repository are installed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand(self.cli)\n cmd.run_on_repo('distro', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='tour'),\n dnf.subject.Subject('tour-5-0').get_best_query(self.cli.base.sack)\n .available()))\n\nclass RepoPkgsReinstallOldSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsReinstallOldSubCommandTest, self).setUp()\n base = support.BaseCliStub('main')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n \"\"\"Test whether all packages from the repository are reinstalled.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n reponame = 'main' if pkg.name != 'pepper' else 'non-main'\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = reponame\n\n cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand(self.cli)\n cmd.run_on_repo('main', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n 
self.cli.base.sack.query().installed().filter(name__neq='librita'),\n dnf.subject.Subject('librita.i686').get_best_query(self.cli.base.sack)\n .installed(),\n dnf.subject.Subject('librita').get_best_query(self.cli.base.sack)\n .available()))\n\nclass RepoPkgsReinstallSubCommandTest(unittest.TestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsReinstallSubCommandTest, self).setUp()\n self.cli = support.BaseCliStub('main').mock_cli()\n\n self.mock = mock.Mock()\n old_run_patcher = mock.patch(\n 'dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand.run_on_repo',\n self.mock.reinstall_old_run)\n move_run_patcher = mock.patch(\n 'dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand.run_on_repo',\n self.mock.move_to_run)\n\n old_run_patcher.start()\n self.addCleanup(old_run_patcher.stop)\n move_run_patcher.start()\n self.addCleanup(move_run_patcher.stop)\n\n def test_all_fails(self):\n \"\"\"Test whether it fails if everything fails.\"\"\"\n self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test')\n self.mock.move_to_run.side_effect = dnf.exceptions.Error('test')\n\n cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)\n self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'main', [])\n\n self.assertEqual(self.mock.mock_calls,\n [mock.call.reinstall_old_run('main', []),\n mock.call.move_to_run('main', [])])\n\n def test_all_moveto(self):\n \"\"\"Test whether reinstall-old is called first and move-to next.\"\"\"\n self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test')\n\n cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)\n cmd.run_on_repo('main', [])\n\n self.assertEqual(self.mock.mock_calls,\n [mock.call.reinstall_old_run('main', []),\n mock.call.move_to_run('main', [])])\n\n def test_all_reinstallold(self):\n \"\"\"Test whether only reinstall-old is called.\"\"\"\n cmd = 
dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli)\n cmd.run_on_repo('main', [])\n\n self.assertEqual(self.mock.mock_calls,\n [mock.call.reinstall_old_run('main', [])])\n\nclass RepoPkgsRemoveOrDistroSyncSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``RemoveOrDistroSyncSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsRemoveOrDistroSyncSubCommandTest, self).setUp()\n self.cli = support.BaseCliStub('distro').mock_cli()\n self.cli.base.init_sack()\n\n def test_run_on_repo_spec_sync(self):\n \"\"\"Test running with a package which can be synchronized.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n data = support.RPMDBAdditionalDataPackageStub()\n data.from_repo = 'non-distro' if pkg.name == 'pepper' else 'distro'\n self.cli.base.yumdb.db[str(pkg)] = data\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(\n self.cli)\n cmd.run_on_repo('non-distro', ['pepper'])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='pepper'),\n dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack)\n .available()))\n\n def test_run_on_repo_spec_remove(self):\n \"\"\"Test running with a package which must be removed.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n data = support.RPMDBAdditionalDataPackageStub()\n data.from_repo = 'non-distro' if pkg.name == 'hole' else 'distro'\n self.cli.base.yumdb.db[str(pkg)] = data\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(\n self.cli)\n cmd.run_on_repo('non-distro', ['hole'])\n\n self.assertResult(\n self.cli.base,\n self.cli.base.sack.query().installed().filter(name__neq='hole'))\n\n def test_run_on_repo_all(self):\n \"\"\"Test running without a package specification.\"\"\"\n nondist = {'pepper', 'hole'}\n for pkg in self.cli.base.sack.query().installed():\n data = support.RPMDBAdditionalDataPackageStub()\n data.from_repo = 
'non-distro' if pkg.name in nondist else 'distro'\n self.cli.base.yumdb.db[str(pkg)] = data\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(\n self.cli)\n cmd.run_on_repo('non-distro', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='pepper')\n .filter(name__neq='hole'),\n dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack)\n .available()))\n\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n def test_run_on_repo_spec_notinstalled(self):\n \"\"\"Test running with a package which is not installed.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(\n self.cli)\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error,\n cmd.run_on_repo, 'non-distro', ['not-installed'])\n\n self.assertIn('No match for argument: not-installed\\n', stdout.getvalue(),\n 'mismatch not logged')\n\n @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext)\n def test_run_on_repo_all_notinstalled(self):\n \"\"\"Test running with a repository from which nothing is installed.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand(\n self.cli)\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error,\n cmd.run_on_repo, 'non-distro', [])\n\n self.assertIn('No package installed from the repository.\\n',\n stdout.getvalue(), 'mismatch not logged')\n\nclass RepoPkgsRemoveOrReinstallSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsRemoveOrReinstallSubCommandTest, self).setUp()\n base = support.BaseCliStub('distro')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def 
test_all_not_installed(self):\n \"\"\"Test whether it fails if no package is installed from the repository.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(\n self.cli)\n self.assertRaises(dnf.exceptions.Error,\n cmd.run_on_repo, 'non-distro', [])\n\n self.assertResult(self.cli.base, self.cli.base.sack.query().installed())\n\n def test_all_reinstall(self):\n \"\"\"Test whether all packages from the repository are reinstalled.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n reponame = 'distro' if pkg.name != 'tour' else 'non-distro'\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = reponame\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(\n self.cli)\n cmd.run_on_repo('non-distro', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='tour'),\n dnf.subject.Subject('tour').get_best_query(self.cli.base.sack)\n .available()))\n\n def test_all_remove(self):\n \"\"\"Test whether all packages from the repository are removed.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n reponame = 'distro' if pkg.name != 'hole' else 'non-distro'\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = reponame\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand(\n self.cli)\n cmd.run_on_repo('non-distro', [])\n\n self.assertResult(\n self.cli.base,\n self.cli.base.sack.query().installed().filter(name__neq='hole'))\n\nclass RepoPkgsRemoveSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsRemoveSubCommandTest, self).setUp()\n base = support.BaseCliStub('main')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n 
\"\"\"Test whether only packages from the repository are removed.\"\"\"\n for pkg in self.cli.base.sack.query().installed():\n reponame = 'main' if pkg.name == 'pepper' else 'non-main'\n self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub()\n self.cli.base.yumdb.get_package(pkg).from_repo = reponame\n\n cmd = dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand(self.cli)\n cmd.run_on_repo('main', [])\n\n self.assertResult(\n self.cli.base,\n self.cli.base.sack.query().installed().filter(name__neq='pepper'))\n\nclass RepoPkgsUpgradeSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsUpgradeSubCommandTest, self).setUp()\n base = support.BaseCliStub('updates', 'third_party')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n \"\"\"Test whether all packages from the repository are installed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand(self.cli)\n cmd.run_on_repo('third_party', [])\n\n self.assertResult(self.cli.base, itertools.chain(\n self.cli.base.sack.query().installed().filter(name__neq='hole'),\n self.cli.base.sack.query().upgrades().filter(reponame='third_party',\n arch='x86_64')))\n\nclass RepoPkgsUpgradeToSubCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(RepoPkgsUpgradeToSubCommandTest, self).setUp()\n base = support.BaseCliStub('updates', 'third_party')\n base.init_sack()\n self.cli = base.mock_cli()\n\n def test_all(self):\n \"\"\"Test whether the package from the repository is installed.\"\"\"\n cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand(self.cli)\n cmd.run_on_repo('updates', ['hole-1-2'])\n\n self.assertResult(self.cli.base, itertools.chain(\n 
self.cli.base.sack.query().installed().filter(name__neq='hole'),\n dnf.subject.Subject('hole-1-2.x86_64').get_best_query(self.cli.base.sack)\n .filter(reponame='updates')))\n\nclass UpgradeCommandTest(support.ResultTestCase):\n\n \"\"\"Tests of ``dnf.cli.commands.upgrade.UpgradeCommand`` class.\"\"\"\n\n def setUp(self):\n \"\"\"Prepare the test fixture.\"\"\"\n super(UpgradeCommandTest, self).setUp()\n base = support.BaseCliStub('updates')\n base.init_sack()\n self.cmd = dnf.cli.commands.upgrade.UpgradeCommand(base.mock_cli())\n\n def test_run(self):\n \"\"\"Test whether a package is updated.\"\"\"\n self.cmd.run(['pepper'])\n\n self.assertResult(self.cmd.base, itertools.chain(\n self.cmd.base.sack.query().installed().filter(name__neq='pepper'),\n self.cmd.base.sack.query().upgrades().filter(name='pepper')))\n\n @mock.patch('dnf.cli.commands.upgrade._',\n dnf.pycomp.NullTranslations().ugettext)\n def test_updatePkgs_notfound(self):\n \"\"\"Test whether it fails if the package cannot be found.\"\"\"\n stdout = dnf.pycomp.StringIO()\n\n with support.wiretap_logs('dnf', logging.INFO, stdout):\n self.assertRaises(dnf.exceptions.Error,\n self.cmd.run, ['non-existent'])\n\n self.assertEqual(stdout.getvalue(),\n 'No match for argument: non-existent\\n')\n self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed())\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475189,"cells":{"repo_name":{"kind":"string","value":"jrief/easy-thumbnails"},"path":{"kind":"string","value":"easy_thumbnails/tests/fields.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3792"},"content":{"kind":"string","value":"import os\n\nfrom django.core.files.base import ContentFile\nfrom django.db import models\n\nfrom easy_thumbnails import test\nfrom easy_thumbnails.fields import ThumbnailerField, ThumbnailerImageField\nfrom easy_thumbnails.exceptions import InvalidImageFormatError\n\n\nclass TestModel(models.Model):\n avatar = 
ThumbnailerField(upload_to='avatars')\n picture = ThumbnailerImageField(upload_to='pictures',\n resize_source=dict(size=(10, 10)))\n\n\nclass ThumbnailerFieldTest(test.BaseTest):\n def setUp(self):\n super(ThumbnailerFieldTest, self).setUp()\n self.storage = test.TemporaryStorage()\n # Save a test image.\n self.create_image(self.storage, 'avatars/avatar.jpg')\n # Set the test model to use the current temporary storage.\n TestModel._meta.get_field('avatar').storage = self.storage\n TestModel._meta.get_field('avatar').thumbnail_storage = self.storage\n\n def tearDown(self):\n self.storage.delete_temporary_storage()\n super(ThumbnailerFieldTest, self).tearDown()\n\n def test_generate_thumbnail(self):\n instance = TestModel(avatar='avatars/avatar.jpg')\n thumb = instance.avatar.generate_thumbnail({'size': (300, 300)})\n self.assertEqual((thumb.width, thumb.height), (300, 225))\n\n def test_generate_thumbnail_type_error(self):\n text_file = ContentFile(\"Lorem ipsum dolor sit amet. Not an image.\")\n self.storage.save('avatars/invalid.jpg', text_file)\n instance = TestModel(avatar='avatars/invalid.jpg')\n generate = lambda: instance.avatar.generate_thumbnail(\n {'size': (300, 300)})\n self.assertRaises(InvalidImageFormatError, generate)\n\n def test_delete(self):\n instance = TestModel(avatar='avatars/avatar.jpg')\n source_path = instance.avatar.path\n thumb_paths = (\n instance.avatar.get_thumbnail({'size': (300, 300)}).path,\n instance.avatar.get_thumbnail({'size': (200, 200)}).path,\n instance.avatar.get_thumbnail({'size': (100, 100)}).path,\n )\n self.assertTrue(os.path.exists(source_path))\n for path in thumb_paths:\n self.assertTrue(os.path.exists(path))\n instance.avatar.delete(save=False)\n self.assertFalse(os.path.exists(source_path))\n for path in thumb_paths:\n self.assertFalse(os.path.exists(path))\n\n def test_delete_thumbnails(self):\n instance = TestModel(avatar='avatars/avatar.jpg')\n source_path = instance.avatar.path\n thumb_paths = (\n 
instance.avatar.get_thumbnail({'size': (300, 300)}).path,\n instance.avatar.get_thumbnail({'size': (200, 200)}).path,\n instance.avatar.get_thumbnail({'size': (100, 100)}).path,\n )\n self.assertTrue(os.path.exists(source_path))\n for path in thumb_paths:\n self.assertTrue(os.path.exists(path))\n instance.avatar.delete_thumbnails()\n self.assertTrue(os.path.exists(source_path))\n for path in thumb_paths:\n self.assertFalse(os.path.exists(path))\n\n def test_get_thumbnails(self):\n instance = TestModel(avatar='avatars/avatar.jpg')\n instance.avatar.get_thumbnail({'size': (300, 300)})\n instance.avatar.get_thumbnail({'size': (200, 200)})\n self.assertEqual(len(list(instance.avatar.get_thumbnails())), 2)\n\n def test_saving_image_field_with_resize_source(self):\n # Ensure that saving ThumbnailerImageField with resize_source enabled\n # using instance.field.save() does not fail\n instance = TestModel(avatar='avatars/avatar.jpg')\n instance.picture.save(\n 'file.jpg', ContentFile(instance.avatar.file.read()), save=False)\n self.assertEqual(instance.picture.width, 10)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475190,"cells":{"repo_name":{"kind":"string","value":"watson-developer-cloud/discovery-starter-kit"},"path":{"kind":"string","value":"server/python/server.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4953"},"content":{"kind":"string","value":"import os\nimport sys\nimport json\nfrom helpers import get_constants, get_questions\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_sslify import SSLify\nfrom flask_cors import CORS\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\nfrom requests.exceptions import HTTPError\nfrom dotenv import load_dotenv, find_dotenv\nimport watson_developer_cloud.natural_language_understanding.features.v1 as features # noqa\nfrom watson_developer_cloud import DiscoveryV1, NaturalLanguageUnderstandingV1\nimport 
metrics_tracker_client\n\ntry:\n load_dotenv(find_dotenv())\nexcept IOError:\n print('warning: no .env file loaded')\n\n# Emit Bluemix deployment event if not a demo deploy\nif not(os.getenv('DEMO_DEPLOY')):\n metrics_tracker_client.track()\n\napp = Flask(\n __name__,\n static_folder=\"../../client/knowledge_base_search/build/static\",\n template_folder=\"../../client/knowledge_base_search/build\"\n )\n\n# force SSL\nsslify = SSLify(app)\n\n# Limit requests\nlimiter = Limiter(\n app,\n key_func=get_remote_address,\n default_limits=['240 per minute', '4 per second'],\n headers_enabled=True\n)\n\nCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n# Discovery\ndiscovery = DiscoveryV1(\n url=os.getenv('DISCOVERY_URL'),\n username=os.getenv('DISCOVERY_USERNAME'),\n password=os.getenv('DISCOVERY_PASSWORD'),\n version=\"2016-12-01\"\n )\n\n# NLU\nnlu = NaturalLanguageUnderstandingV1(\n url=os.getenv('NLU_URL'),\n username=os.getenv('NLU_USERNAME'),\n password=os.getenv('NLU_PASSWORD'),\n version=\"2017-02-27\"\n )\n\n\"\"\"\nretrieve the following:\n{\n environment_id: env_id,\n collection_id: {\n passages: passages_id,\n regular: regular_id,\n trained: trained_id\n }\n}\n\"\"\"\nconstants = get_constants(\n discovery,\n passages_name=os.getenv(\n 'DISCOVERY_PASSAGES_COLLECTION_NAME',\n 'knowledge_base_regular'\n ),\n regular_name=os.getenv(\n 'DISCOVERY_REGULAR_COLLECTION_NAME',\n 'knowledge_base_regular'\n ),\n trained_name=os.getenv(\n 'DISCOVERY_TRAINED_COLLECTION_NAME',\n 'knowledge_base_trained'\n )\n )\ntry:\n total_questions = int(os.getenv('DISCOVERY_QUESTION_COUNT', 5000))\nexcept ValueError:\n sys.exit('DISCOVERY_QUESTION_COUNT not an integer, terminating...')\n\npassages_question_cache = get_questions(\n discovery=discovery,\n constants=constants,\n question_count=total_questions,\n feature_type='passages')\ntrained_question_cache = get_questions(\n discovery=discovery,\n constants=constants,\n question_count=total_questions,\n 
feature_type='trained')\n\n\n@app.route('/')\n@limiter.exempt\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/query/<collection_type>', methods=['POST'])\ndef query(collection_type):\n query_options = json.loads(request.data)\n query_options['return'] = 'text'\n\n if collection_type == 'passages':\n query_options['passages'] = True\n\n # retrieve more results for regular so that we can compare original rank\n if collection_type == 'regular':\n query_options['count'] = 100\n\n return jsonify(\n discovery.query(\n environment_id=constants['environment_id'],\n collection_id=constants['collection_id'][collection_type],\n query_options=query_options\n )\n )\n\n\n@app.route('/api/questions/<feature_type>', methods=['GET'])\ndef questions(feature_type):\n if feature_type == 'passages':\n return jsonify(passages_question_cache)\n else:\n return jsonify(trained_question_cache)\n\n\n@app.errorhandler(429)\ndef ratelimit_handler(e):\n return jsonify(\n error=\"API Rate Limit exceeded: %s\" % e.description,\n code=429), 429\n\n\n@app.errorhandler(Exception)\ndef handle_error(e):\n code = 500\n error = 'Error processing the request'\n if isinstance(e, HTTPError):\n code = e.code\n error = str(e.message)\n\n return jsonify(error=error, code=code), code\n\n\nif __name__ == '__main__':\n # If we are in the Bluemix environment\n PRODUCTION = True if os.getenv('VCAP_APPLICATION') else False\n # set port to 0.0.0.0, otherwise set it to localhost (127.0.0.1)\n HOST = '0.0.0.0' if PRODUCTION else '127.0.0.1'\n # Get port from the Bluemix environment, or default to 5000\n PORT_NUMBER = int(os.getenv('PORT', '5000'))\n\n app.run(host=HOST, port=PORT_NUMBER, 
debug=not(PRODUCTION))\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475191,"cells":{"repo_name":{"kind":"string","value":"jordanemedlock/psychtruths"},"path":{"kind":"string","value":"temboo/Library/Microsoft/OAuth/RefreshToken.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"4495"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n###############################################################################\n#\n# RefreshToken\n# Retrieves a new refresh token and access token by exchanging the refresh token that is associated with the expired access token.\n#\n# Python versions 2.6, 2.7, 3.x\n#\n# Copyright 2014, Temboo Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n# either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n#\n#\n###############################################################################\n\nfrom temboo.core.choreography import Choreography\nfrom temboo.core.choreography import InputSet\nfrom temboo.core.choreography import ResultSet\nfrom temboo.core.choreography import ChoreographyExecution\n\nimport json\n\nclass RefreshToken(Choreography):\n\n def __init__(self, temboo_session):\n \"\"\"\n Create a new instance of the RefreshToken Choreo. 
A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n \"\"\"\n super(RefreshToken, self).__init__(temboo_session, '/Library/Microsoft/OAuth/RefreshToken')\n\n\n def new_input_set(self):\n return RefreshTokenInputSet()\n\n def _make_result_set(self, result, path):\n return RefreshTokenResultSet(result, path)\n\n def _make_execution(self, session, exec_id, path):\n return RefreshTokenChoreographyExecution(session, exec_id, path)\n\nclass RefreshTokenInputSet(InputSet):\n \"\"\"\n An InputSet with methods appropriate for specifying the inputs to the RefreshToken\n Choreo. The InputSet object is used to specify input parameters when executing this Choreo.\n \"\"\"\n def set_ClientID(self, value):\n \"\"\"\n Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Microsoft after registering your application.)\n \"\"\"\n super(RefreshTokenInputSet, self)._set_input('ClientID', value)\n def set_ClientSecret(self, value):\n \"\"\"\n Set the value of the ClientSecret input for this Choreo. ((required, string) The Client Secret provided by Microsoft after registering your application.)\n \"\"\"\n super(RefreshTokenInputSet, self)._set_input('ClientSecret', value)\n def set_RefreshToken(self, value):\n \"\"\"\n Set the value of the RefreshToken input for this Choreo. ((required, string) An OAuth Refresh Token used to generate a new access token when the original token is expired.)\n \"\"\"\n super(RefreshTokenInputSet, self)._set_input('RefreshToken', value)\n def set_Resource(self, value):\n \"\"\"\n Set the value of the Resource input for this Choreo. ((conditional, string) The App ID URI of the web API (secured resource). 
See Choreo notes for details.)\n \"\"\"\n super(RefreshTokenInputSet, self)._set_input('Resource', value)\n\nclass RefreshTokenResultSet(ResultSet):\n \"\"\"\n A ResultSet with methods tailored to the values returned by the RefreshToken Choreo.\n The ResultSet object is used to retrieve the results of a Choreo execution.\n \"\"\"\n\n def getJSONFromString(self, str):\n return json.loads(str)\n\n def get_Response(self):\n \"\"\"\n Retrieve the value for the \"Response\" output from this Choreo execution. ((json) The response from Microsoft.)\n \"\"\"\n return self._output.get('Response', None)\n def get_Expires(self):\n \"\"\"\n Retrieve the value for the \"Expires\" output from this Choreo execution. ((integer) The remaining lifetime of the short-lived access token.)\n \"\"\"\n return self._output.get('Expires', None)\n def get_NewRefreshToken(self):\n \"\"\"\n Retrieve the value for the \"NewRefreshToken\" output from this Choreo execution. ((string) The new Refresh Token which can be used the next time your app needs to get a new Access Token.)\n \"\"\"\n return self._output.get('NewRefreshToken', None)\n\nclass RefreshTokenChoreographyExecution(ChoreographyExecution):\n\n def _make_result_set(self, response, path):\n return RefreshTokenResultSet(response, path)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475192,"cells":{"repo_name":{"kind":"string","value":"huahbo/src"},"path":{"kind":"string","value":"book/Recipes/m1d.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"10916"},"content":{"kind":"string","value":"from rsf.proj import *\nfrom decimal import *\n\n\n# --- User set --- # \n\n# model\nmodel = {\n 'X' : 2000, # meter\n 'dx': 10.0,\n 'dt': 0.001,\n 'SelT': 0.25, # selected time for snapshot show \n 'snpintvl': 1.0, # nterval of snapshot output\n 'size' : 8, # FD order\n 'frqcut' : 1.0,\n 'pml' : 240,\n }\n\n# source & receiver\nsrp = {\n 'bgn' : 0.1, # s, time of maximum ricker\n 'frq' : 10.0, # source 
domain frequence\n 'srcmms' : 'n', # MMS\n 'inject': 'n', # if y, inject; if n, Initiate conditon\n 'slx' : 1000.0, # source location (x), meter\n 'gdep' : 800 # receiver location (z), meter\n }\n\n# ------------------------------------------------------------------------------\ndef mgraph(fin, title):\n Result(fin,\n '''\n put label1=\"Depth\" unit1=\"m\" |\n transp plane=23 |\n graph screenratio=0.5 title=\"%s\"\n '''%str(title))\n\n# ------------------------------------------------------------------------------\ndef setpar(mdl, srp):\n dx = mdl['dx']\n dt = mdl['dt']\n \n objpar = {\n 'vel' : mdl['vel'],\n 'dvel': mdl['dvel'],\n 'den' : mdl['den'],\n 'nx' : mdl['X']/dx+1,\n 'SelT': mdl['SelT'],\n 'nt' : int(Decimal(str(mdl['T']))/Decimal(str(dt)))+1,\n 'snt' : int(Decimal(str(mdl['SelT']))/Decimal(str(dt))/ \\\n Decimal(str(mdl['snpintvl']))),\n 'snpi': mdl['snpintvl'], #snap interval\n 'dt' : dt,\n 'iwdt': dt*1000, #dt for iwave\n 'dx' : dx,\n 'dxhf': 0.5*dx,\n 'ox' : 0.0,\n 'ot' : 0.0,\n # source \n 'frq' : srp['frq'],\n 'wavfrq': srp['frq']/3.0,\n 'bgnp' : srp['bgn']/dt+1,\n 'slx' : srp['slx'],\n 'spx' : srp['slx']/dx+1,\n 'gdep' : srp['gdep'],\n 'gp' : int(srp['gdep']/dx+0.5),\n 'srcmms': srp['srcmms'], # MMS\n 'inject': srp['inject'], # if y, inject; if n, Initiate conditon\n # fd\n 'size' : mdl['size'],\n 'fdsize': mdl['size']/2,\n 'frqcut': mdl['frqcut'],\n 'pml' : mdl['pml'],\n 'bd' : mdl['pml']+int((mdl['size']+1)/2)\n }\n return objpar\n\ndef buildmodel(par, denname, velname, dvelname, denval, velval, dvelval):\n name = {\n 'den' : denname,\n 'vel' : velname,\n 'dvel': dvelname\n }\n \n value = {\n 'den' : denval,\n 'vel' : velval,\n 'dvel': dvelval\n }\n \n label = {\n 'den': 'Density',\n 'vel': 'Velocity',\n 'dvel': 'Velocity'\n }\n \n unit = {\n 'den': 'lg/m\\^3\\_',\n 'vel': 'm/s',\n 'dvel': 'm/s'\n }\n\n for m in ['den','vel','dvel']:\n Flow(name[m],None,\n '''\n spike d1=%(dx)g n1=%(nx)d o1=0.0\n label1=Depth unit1=m | \n '''%par + '''\n math 
output=\"%s\" \n '''%value[m])\n pml = name[m]+'_pml'\n pmlt = name[m]+'_pmlt'\n pmlb = name[m]+'_pmlb'\n Flow(pmlt, name[m], 'window n1=1 f1= 0 |spray axis=1 n=%(bd)d' %par)\n Flow(pmlb, name[m], 'window n1=1 f1=-1 |spray axis=1 n=%(bd)d' %par)\n Flow(pml,[pmlt, name[m], pmlb],'cat ${SOURCES[1]} ${SOURCES[2]} axis=1')\n for m in ['den','vel','dvel']:\n Flow(name[m]+'hf',None,\n '''\n spike d1=%(dx)g n1=%(nx)d o1=%(dxhf)g\n label1=Depth unit1=m | \n '''%par + '''\n math output=\"%s\" \n '''%value[m])\n\n\ndef buildic(par, ic):\n Flow(ic,None,\n '''\n spike n1=%(nx)d d1=%(dx)g k1=%(spx)d| \n ricker1 frequency=%(wavfrq)g | \n scale axis=1 |\n put lable1=\"Depth\" unit1=\"m\" label2=\"Amplitude\" unit2=\"\"\n '''%par)\n\n\ndef buildsrcp(par, srcp):\n Flow(srcp, None,\n '''\n spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g |\n ricker1 frequency=%(frq)g | \n scale axis=1 |math output=input*400\n '''%par)\n\ndef buildsrcd(par, srcd, prefix, subfix):\n _pf = str(prefix) \n sf_ = str(subfix)\n spike = '%sspike%s' %(_pf, sf_)\n ricker = '%sricker%s' %(_pf, sf_)\n \n Flow(spike,None, \n '''\n spike n1=%(nx)d n2=%(nt)d d1=%(dx)g d2=%(dt)g \n k1=%(spx)d k2=1 \n '''%par)\n \n Flow(ricker,None,\n '''\n spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g | \n ricker1 frequency=%(frq)g |scale axis=1 \n '''%par)\n\n Flow(srcd,[spike,ricker],\n '''\n convft other=${SOURCES[1]} axis=2 |\n window n2=%(nt)d | math output=input*400\n '''%par)\n\ndef buildmms(par, mms, psrc, vsrc, pint, vint, vel, dvel, den, velhf, dvelhf, denhf):\n #beta = 2*3.14159265359*par['frq']\n alpha = 2*3.1415926*par['frq']/4.0\n alpha = alpha*alpha\n Flow([mms, psrc, vsrc, pint, vint], [vel, dvel, den, velhf, dvelhf, denhf],\n '''\n sfmms1dexp nt=%d dt=%g slx=%g alpha=%g \n dvel=${SOURCES[1]} den=${SOURCES[2]}\n presrc=${TARGETS[1]} velsrc=${TARGETS[2]}\n preinit=${TARGETS[3]} velinit=${TARGETS[4]} \n velhf=${SOURCES[3]} dvelhf=${SOURCES[4]} denhf=${SOURCES[5]}|\n put label1=\"Depth\" unit1=\"km\" label2=\"Time\" unit2=\"s\"\n 
'''%(par['nt'],par['dt'],par['slx'],alpha))\n \n\n# ------------------------------------------------------------------------------\n\ndef lrmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix):\n _pf = str(prefix)\n sf_ = str(suffix)\n \n fft = '%sfft%s' %(_pf, sf_)\n rt = '%srt%s' %(_pf, sf_)\n lt = '%slt%s' %(_pf, sf_)\n \n Flow(fft, vel, 'fft1')\n Flow([rt, lt], [vel, fft], \n '''\n isolrsg1 seed=2010 dt=%(dt)g fft=${SOURCES[1]} left=${TARGETS[1]}\n '''%par)\n if (mmsfiles == {}):\n Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic],\n '''\n sfsglr1 verb=y rec=${TARGETS[1]} \n left=${SOURCES[1]} right=${SOURCES[2]} \n vel=${SOURCES[3]} den=${SOURCES[4]} \n fft=${SOURCES[5]} ic=${SOURCES[6]}\n gdep=%(gdep)g slx=%(slx)g\n inject=%(inject)s srcmms=%(srcmms)s \n '''%par)\n else :\n psrc = mmsfiles['presrc']\n vsrc = mmsfiles['velsrc']\n pint = mmsfiles['preinit']\n vint = mmsfiles['velinit']\n Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic,\n psrc, vsrc, pint, vint],\n '''\n sfsglr1 verb=y \n rec=${TARGETS[1]} \n left=${SOURCES[1]} right=${SOURCES[2]} \n vel=${SOURCES[3]} den=${SOURCES[4]} \n fft=${SOURCES[5]} ic=${SOURCES[6]}\n presrc=${SOURCES[7]} velsrc=${SOURCES[8]}\n preinit=${SOURCES[9]} velinit=${SOURCES[10]}\n gdep=%(gdep)g slx=%(slx)g\n inject=%(inject)s srcmms=%(srcmms)s \n '''%par)\n \ndef lfdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix):\n _pf = str(prefix)\n sf_ = str(suffix)\n G = '%sG%s' %(_pf, sf_)\n sx = '%ssx%s' %(_pf, sf_)\n\n Flow([G,sx],vel,\n '''\n sfsglfdc1 dt=%(dt)g eps=0.00001 npk=20 seed=2012\n sx=${TARGETS[1]} size=%(size)d wavnumcut=%(frqcut)g\n ''' %par)\n \n if mmsfiles == {}:\n Flow([fwf, frec], [src, ic, vel, den, G, sx], \n '''\n sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]}\n vel=${SOURCES[2]} den=${SOURCES[3]} \n G=${SOURCES[4]} sx=${SOURCES[5]}\n pmld0=20 \n gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d\n inject=%(inject)s srcmms=%(srcmms)s\n verb=y snapinter=1 \n ''' %par)\n \n else:\n psrc = 
mmsfiles['presrc']\n vsrc = mmsfiles['velsrc']\n pint = mmsfiles['preinit']\n vint = mmsfiles['velinit']\n Flow([fwf, frec], [src, ic, vel, den, G, sx,\n psrc, vsrc, pint, vint], \n '''\n sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]}\n vel=${SOURCES[2]} den=${SOURCES[3]} \n G=${SOURCES[4]} sx=${SOURCES[5]}\n presrc=${SOURCES[6]} velsrc=${SOURCES[7]}\n preinit=${SOURCES[8]} velinit=${SOURCES[9]}\n pmld0=20 \n gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d\n inject=%(inject)s srcmms=%(srcmms)s\n verb=y snapinter=1 \n ''' %par)\n\ndef fdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par):\n if (mmsfiles == {}):\n Flow([fwf, frec], [src, ic, vel, den], \n '''\n sfsgfd1 ic=${SOURCES[1]}\n vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]}\n pmld0=20 size=%(fdsize)d \n gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d\n inject=%(inject)s \n verb=y snapinter=1 \n ''' %par )\n else :\n psrc = mmsfiles['presrc']\n vsrc = mmsfiles['velsrc']\n pint = mmsfiles['preinit']\n vint = mmsfiles['velinit']\n Flow([fwf, frec], [src, ic, vel, den, psrc, vsrc, pint, vint], \n '''\n sfsgfd1 ic=${SOURCES[1]}\n vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]}\n presrc=${SOURCES[4]} velsrc=${SOURCES[5]}\n preinit=${SOURCES[6]} velinit=${SOURCES[7]}\n pmld0=20 size=%(fdsize)d \n gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d\n inject=%(inject)s srcmms=%(srcmms)s\n verb=y snapinter=1 \n ''' %par )\n \n# ------------------------------------------------------------------------------\n\ndef analyticslt(fout, par, vel, prefix, subfix):\n _pf = str(prefix)\n sf_ = str(subfix)\n \n spx = par['spx']\n selt= par['SelT']\n dx = par['dx']\n \n leftp = spx - round(vel*selt/dx)\n rightp = spx + round(vel*selt/dx)\n \n left = '%sleft%s' %(_pf, sf_)\n right= '%sright%s'%(_pf, sf_)\n \n for fi in [left, right]:\n p = (leftp, rightp)[fi==right]\n Flow(fi,None,\n '''\n spike n1=%d d1=%g k1=%d| \n ricker1 frequency=%g | math output=\"input\"\n '''%(par['nx'],par['dx'],p,par['wavfrq']))\n\n Flow(fout,[left,right],\n 
'''\n math t=${SOURCES[1]} output=\"input+t\" | \n scale axis=2 | scale rscale=0.5 |\n put label1=\"Distance\" unit1=\"km\"\n ''')\n \n \n\n\n\n\n\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475193,"cells":{"repo_name":{"kind":"string","value":"stewartsmith/bzr"},"path":{"kind":"string","value":"bzrlib/tests/http_utils.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"20784"},"content":{"kind":"string","value":"# Copyright (C) 2005-2011 Canonical Ltd\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\nfrom cStringIO import StringIO\nimport re\nimport urllib2\n\n\nfrom bzrlib import (\n errors,\n osutils,\n tests,\n transport,\n )\nfrom bzrlib.smart import (\n medium,\n )\nfrom bzrlib.tests import http_server\nfrom bzrlib.transport import chroot\n\n\nclass HTTPServerWithSmarts(http_server.HttpServer):\n \"\"\"HTTPServerWithSmarts extends the HttpServer with POST methods that will\n trigger a smart server to execute with a transport rooted at the rootdir of\n the HTTP server.\n \"\"\"\n\n def __init__(self, protocol_version=None):\n http_server.HttpServer.__init__(self, SmartRequestHandler,\n protocol_version=protocol_version)\n\n\nclass SmartRequestHandler(http_server.TestingHTTPRequestHandler):\n \"\"\"Extend TestingHTTPRequestHandler to support smart client 
POSTs.\n\n XXX: This duplicates a fair bit of the logic in bzrlib.transport.http.wsgi.\n \"\"\"\n\n def do_POST(self):\n \"\"\"Hand the request off to a smart server instance.\"\"\"\n backing = transport.get_transport_from_path(\n self.server.test_case_server._home_dir)\n chroot_server = chroot.ChrootServer(backing)\n chroot_server.start_server()\n try:\n t = transport.get_transport_from_url(chroot_server.get_url())\n self.do_POST_inner(t)\n finally:\n chroot_server.stop_server()\n\n def do_POST_inner(self, chrooted_transport):\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/octet-stream\")\n if not self.path.endswith('.bzr/smart'):\n raise AssertionError(\n 'POST to path not ending in .bzr/smart: %r' % (self.path,))\n t = chrooted_transport.clone(self.path[:-len('.bzr/smart')])\n # if this fails, we should return 400 bad request, but failure is\n # failure for now - RBC 20060919\n data_length = int(self.headers['Content-Length'])\n # TODO: We might like to support streaming responses. 
1.0 allows no\n # Content-length in this case, so for integrity we should perform our\n # own chunking within the stream.\n # 1.1 allows chunked responses, and in this case we could chunk using\n # the HTTP chunking as this will allow HTTP persistence safely, even if\n # we have to stop early due to error, but we would also have to use the\n # HTTP trailer facility which may not be widely available.\n request_bytes = self.rfile.read(data_length)\n protocol_factory, unused_bytes = medium._get_protocol_factory_for_bytes(\n request_bytes)\n out_buffer = StringIO()\n smart_protocol_request = protocol_factory(t, out_buffer.write, '/')\n # Perhaps there should be a SmartServerHTTPMedium that takes care of\n # feeding the bytes in the http request to the smart_protocol_request,\n # but for now it's simpler to just feed the bytes directly.\n smart_protocol_request.accept_bytes(unused_bytes)\n if not (smart_protocol_request.next_read_size() == 0):\n raise errors.SmartProtocolError(\n \"not finished reading, but all data sent to protocol.\")\n self.send_header(\"Content-Length\", str(len(out_buffer.getvalue())))\n self.end_headers()\n self.wfile.write(out_buffer.getvalue())\n\n\nclass TestCaseWithWebserver(tests.TestCaseWithTransport):\n \"\"\"A support class that provides readonly urls that are http://.\n\n This is done by forcing the readonly server to be an http\n one. 
This will currently fail if the primary transport is not\n backed by regular disk files.\n \"\"\"\n\n # These attributes can be overriden or parametrized by daughter clasess if\n # needed, but must exist so that the create_transport_readonly_server()\n # method (or any method creating an http(s) server) can propagate it.\n _protocol_version = None\n _url_protocol = 'http'\n\n def setUp(self):\n super(TestCaseWithWebserver, self).setUp()\n self.transport_readonly_server = http_server.HttpServer\n\n def create_transport_readonly_server(self):\n server = self.transport_readonly_server(\n protocol_version=self._protocol_version)\n server._url_protocol = self._url_protocol\n return server\n\n\nclass TestCaseWithTwoWebservers(TestCaseWithWebserver):\n \"\"\"A support class providing readonly urls on two servers that are http://.\n\n We set up two webservers to allows various tests involving\n proxies or redirections from one server to the other.\n \"\"\"\n def setUp(self):\n super(TestCaseWithTwoWebservers, self).setUp()\n self.transport_secondary_server = http_server.HttpServer\n self.__secondary_server = None\n\n def create_transport_secondary_server(self):\n \"\"\"Create a transport server from class defined at init.\n\n This is mostly a hook for daughter classes.\n \"\"\"\n server = self.transport_secondary_server(\n protocol_version=self._protocol_version)\n server._url_protocol = self._url_protocol\n return server\n\n def get_secondary_server(self):\n \"\"\"Get the server instance for the secondary transport.\"\"\"\n if self.__secondary_server is None:\n self.__secondary_server = self.create_transport_secondary_server()\n self.start_server(self.__secondary_server)\n return self.__secondary_server\n\n def get_secondary_url(self, relpath=None):\n base = self.get_secondary_server().get_url()\n return self._adjust_url(base, relpath)\n\n def get_secondary_transport(self, relpath=None):\n t = transport.get_transport_from_url(self.get_secondary_url(relpath))\n 
self.assertTrue(t.is_readonly())\n return t\n\n\nclass ProxyServer(http_server.HttpServer):\n \"\"\"A proxy test server for http transports.\"\"\"\n\n proxy_requests = True\n\n\nclass RedirectRequestHandler(http_server.TestingHTTPRequestHandler):\n \"\"\"Redirect all request to the specified server\"\"\"\n\n def parse_request(self):\n \"\"\"Redirect a single HTTP request to another host\"\"\"\n valid = http_server.TestingHTTPRequestHandler.parse_request(self)\n if valid:\n tcs = self.server.test_case_server\n code, target = tcs.is_redirected(self.path)\n if code is not None and target is not None:\n # Redirect as instructed\n self.send_response(code)\n self.send_header('Location', target)\n # We do not send a body\n self.send_header('Content-Length', '0')\n self.end_headers()\n return False # The job is done\n else:\n # We leave the parent class serve the request\n pass\n return valid\n\n\nclass HTTPServerRedirecting(http_server.HttpServer):\n \"\"\"An HttpServer redirecting to another server \"\"\"\n\n def __init__(self, request_handler=RedirectRequestHandler,\n protocol_version=None):\n http_server.HttpServer.__init__(self, request_handler,\n protocol_version=protocol_version)\n # redirections is a list of tuples (source, target, code)\n # - source is a regexp for the paths requested\n # - target is a replacement for re.sub describing where\n # the request will be redirected\n # - code is the http error code associated to the\n # redirection (301 permanent, 302 temporarry, etc\n self.redirections = []\n\n def redirect_to(self, host, port):\n \"\"\"Redirect all requests to a specific host:port\"\"\"\n self.redirections = [('(.*)',\n r'http://%s:%s\\1' % (host, port) ,\n 301)]\n\n def is_redirected(self, path):\n \"\"\"Is the path redirected by this server.\n\n :param path: the requested relative path\n\n :returns: a tuple (code, target) if a matching\n redirection is found, (None, None) otherwise.\n \"\"\"\n code = None\n target = None\n for (rsource, rtarget, 
rcode) in self.redirections:\n target, match = re.subn(rsource, rtarget, path)\n if match:\n code = rcode\n break # The first match wins\n else:\n target = None\n return code, target\n\n\nclass TestCaseWithRedirectedWebserver(TestCaseWithTwoWebservers):\n \"\"\"A support class providing redirections from one server to another.\n\n We set up two webservers to allows various tests involving\n redirections.\n The 'old' server is redirected to the 'new' server.\n \"\"\"\n\n def setUp(self):\n super(TestCaseWithRedirectedWebserver, self).setUp()\n # The redirections will point to the new server\n self.new_server = self.get_readonly_server()\n # The requests to the old server will be redirected to the new server\n self.old_server = self.get_secondary_server()\n\n def create_transport_secondary_server(self):\n \"\"\"Create the secondary server redirecting to the primary server\"\"\"\n new = self.get_readonly_server()\n redirecting = HTTPServerRedirecting(\n protocol_version=self._protocol_version)\n redirecting.redirect_to(new.host, new.port)\n redirecting._url_protocol = self._url_protocol\n return redirecting\n\n def get_old_url(self, relpath=None):\n base = self.old_server.get_url()\n return self._adjust_url(base, relpath)\n\n def get_old_transport(self, relpath=None):\n t = transport.get_transport_from_url(self.get_old_url(relpath))\n self.assertTrue(t.is_readonly())\n return t\n\n def get_new_url(self, relpath=None):\n base = self.new_server.get_url()\n return self._adjust_url(base, relpath)\n\n def get_new_transport(self, relpath=None):\n t = transport.get_transport_from_url(self.get_new_url(relpath))\n self.assertTrue(t.is_readonly())\n return t\n\n\nclass AuthRequestHandler(http_server.TestingHTTPRequestHandler):\n \"\"\"Requires an authentication to process requests.\n\n This is intended to be used with a server that always and\n only use one authentication scheme (implemented by daughter\n classes).\n \"\"\"\n\n # The following attributes should be defined in 
the server\n # - auth_header_sent: the header name sent to require auth\n # - auth_header_recv: the header received containing auth\n # - auth_error_code: the error code to indicate auth required\n\n def _require_authentication(self):\n # Note that we must update test_case_server *before*\n # sending the error or the client may try to read it\n # before we have sent the whole error back.\n tcs = self.server.test_case_server\n tcs.auth_required_errors += 1\n self.send_response(tcs.auth_error_code)\n self.send_header_auth_reqed()\n # We do not send a body\n self.send_header('Content-Length', '0')\n self.end_headers()\n return\n\n def do_GET(self):\n if self.authorized():\n return http_server.TestingHTTPRequestHandler.do_GET(self)\n else:\n return self._require_authentication()\n\n def do_HEAD(self):\n if self.authorized():\n return http_server.TestingHTTPRequestHandler.do_HEAD(self)\n else:\n return self._require_authentication()\n\n\nclass BasicAuthRequestHandler(AuthRequestHandler):\n \"\"\"Implements the basic authentication of a request\"\"\"\n\n def authorized(self):\n tcs = self.server.test_case_server\n if tcs.auth_scheme != 'basic':\n return False\n\n auth_header = self.headers.get(tcs.auth_header_recv, None)\n if auth_header:\n scheme, raw_auth = auth_header.split(' ', 1)\n if scheme.lower() == tcs.auth_scheme:\n user, password = raw_auth.decode('base64').split(':')\n return tcs.authorized(user, password)\n\n return False\n\n def send_header_auth_reqed(self):\n tcs = self.server.test_case_server\n self.send_header(tcs.auth_header_sent,\n 'Basic realm=\"%s\"' % tcs.auth_realm)\n\n\n# FIXME: We could send an Authentication-Info header too when\n# the authentication is succesful\n\nclass DigestAuthRequestHandler(AuthRequestHandler):\n \"\"\"Implements the digest authentication of a request.\n\n We need persistence for some attributes and that can't be\n achieved here since we get instantiated for each request. 
We\n rely on the DigestAuthServer to take care of them.\n \"\"\"\n\n def authorized(self):\n tcs = self.server.test_case_server\n\n auth_header = self.headers.get(tcs.auth_header_recv, None)\n if auth_header is None:\n return False\n scheme, auth = auth_header.split(None, 1)\n if scheme.lower() == tcs.auth_scheme:\n auth_dict = urllib2.parse_keqv_list(urllib2.parse_http_list(auth))\n\n return tcs.digest_authorized(auth_dict, self.command)\n\n return False\n\n def send_header_auth_reqed(self):\n tcs = self.server.test_case_server\n header = 'Digest realm=\"%s\", ' % tcs.auth_realm\n header += 'nonce=\"%s\", algorithm=\"%s\", qop=\"auth\"' % (tcs.auth_nonce,\n 'MD5')\n self.send_header(tcs.auth_header_sent,header)\n\n\nclass DigestAndBasicAuthRequestHandler(DigestAuthRequestHandler):\n \"\"\"Implements a digest and basic authentication of a request.\n\n I.e. the server proposes both schemes and the client should choose the best\n one it can handle, which, in that case, should be digest, the only scheme\n accepted here.\n \"\"\"\n\n def send_header_auth_reqed(self):\n tcs = self.server.test_case_server\n self.send_header(tcs.auth_header_sent,\n 'Basic realm=\"%s\"' % tcs.auth_realm)\n header = 'Digest realm=\"%s\", ' % tcs.auth_realm\n header += 'nonce=\"%s\", algorithm=\"%s\", qop=\"auth\"' % (tcs.auth_nonce,\n 'MD5')\n self.send_header(tcs.auth_header_sent,header)\n\n\nclass AuthServer(http_server.HttpServer):\n \"\"\"Extends HttpServer with a dictionary of passwords.\n\n This is used as a base class for various schemes which should\n all use or redefined the associated AuthRequestHandler.\n\n Note that no users are defined by default, so add_user should\n be called before issuing the first request.\n \"\"\"\n\n # The following attributes should be set dy daughter classes\n # and are used by AuthRequestHandler.\n auth_header_sent = None\n auth_header_recv = None\n auth_error_code = None\n auth_realm = \"Thou should not pass\"\n\n def __init__(self, request_handler, 
auth_scheme,\n protocol_version=None):\n http_server.HttpServer.__init__(self, request_handler,\n protocol_version=protocol_version)\n self.auth_scheme = auth_scheme\n self.password_of = {}\n self.auth_required_errors = 0\n\n def add_user(self, user, password):\n \"\"\"Declare a user with an associated password.\n\n password can be empty, use an empty string ('') in that\n case, not None.\n \"\"\"\n self.password_of[user] = password\n\n def authorized(self, user, password):\n \"\"\"Check that the given user provided the right password\"\"\"\n expected_password = self.password_of.get(user, None)\n return expected_password is not None and password == expected_password\n\n\n# FIXME: There is some code duplication with\n# _urllib2_wrappers.py.DigestAuthHandler. If that duplication\n# grows, it may require a refactoring. Also, we don't implement\n# SHA algorithm nor MD5-sess here, but that does not seem worth\n# it.\nclass DigestAuthServer(AuthServer):\n \"\"\"A digest authentication server\"\"\"\n\n auth_nonce = 'now!'\n\n def __init__(self, request_handler, auth_scheme,\n protocol_version=None):\n AuthServer.__init__(self, request_handler, auth_scheme,\n protocol_version=protocol_version)\n\n def digest_authorized(self, auth, command):\n nonce = auth['nonce']\n if nonce != self.auth_nonce:\n return False\n realm = auth['realm']\n if realm != self.auth_realm:\n return False\n user = auth['username']\n if not self.password_of.has_key(user):\n return False\n algorithm= auth['algorithm']\n if algorithm != 'MD5':\n return False\n qop = auth['qop']\n if qop != 'auth':\n return False\n\n password = self.password_of[user]\n\n # Recalculate the response_digest to compare with the one\n # sent by the client\n A1 = '%s:%s:%s' % (user, realm, password)\n A2 = '%s:%s' % (command, auth['uri'])\n\n H = lambda x: osutils.md5(x).hexdigest()\n KD = lambda secret, data: H(\"%s:%s\" % (secret, data))\n\n nonce_count = int(auth['nc'], 16)\n\n ncvalue = '%08x' % nonce_count\n\n cnonce = 
auth['cnonce']\n noncebit = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, H(A2))\n response_digest = KD(H(A1), noncebit)\n\n return response_digest == auth['response']\n\n\nclass HTTPAuthServer(AuthServer):\n \"\"\"An HTTP server requiring authentication\"\"\"\n\n def init_http_auth(self):\n self.auth_header_sent = 'WWW-Authenticate'\n self.auth_header_recv = 'Authorization'\n self.auth_error_code = 401\n\n\nclass ProxyAuthServer(AuthServer):\n \"\"\"A proxy server requiring authentication\"\"\"\n\n def init_proxy_auth(self):\n self.proxy_requests = True\n self.auth_header_sent = 'Proxy-Authenticate'\n self.auth_header_recv = 'Proxy-Authorization'\n self.auth_error_code = 407\n\n\nclass HTTPBasicAuthServer(HTTPAuthServer):\n \"\"\"An HTTP server requiring basic authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n HTTPAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',\n protocol_version=protocol_version)\n self.init_http_auth()\n\n\nclass HTTPDigestAuthServer(DigestAuthServer, HTTPAuthServer):\n \"\"\"An HTTP server requiring digest authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n DigestAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',\n protocol_version=protocol_version)\n self.init_http_auth()\n\n\nclass HTTPBasicAndDigestAuthServer(DigestAuthServer, HTTPAuthServer):\n \"\"\"An HTTP server requiring basic or digest authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,\n 'basicdigest',\n protocol_version=protocol_version)\n self.init_http_auth()\n # We really accept Digest only\n self.auth_scheme = 'digest'\n\n\nclass ProxyBasicAuthServer(ProxyAuthServer):\n \"\"\"A proxy server requiring basic authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n ProxyAuthServer.__init__(self, BasicAuthRequestHandler, 'basic',\n protocol_version=protocol_version)\n self.init_proxy_auth()\n\n\nclass 
ProxyDigestAuthServer(DigestAuthServer, ProxyAuthServer):\n \"\"\"A proxy server requiring basic authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n ProxyAuthServer.__init__(self, DigestAuthRequestHandler, 'digest',\n protocol_version=protocol_version)\n self.init_proxy_auth()\n\n\nclass ProxyBasicAndDigestAuthServer(DigestAuthServer, ProxyAuthServer):\n \"\"\"An proxy server requiring basic or digest authentication\"\"\"\n\n def __init__(self, protocol_version=None):\n DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler,\n 'basicdigest',\n protocol_version=protocol_version)\n self.init_proxy_auth()\n # We really accept Digest only\n self.auth_scheme = 'digest'\n\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475194,"cells":{"repo_name":{"kind":"string","value":"radicalbit/ambari"},"path":{"kind":"string","value":"ambari-common/src/main/python/ambari_commons/get_ambari_version.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"1589"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. 
You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport os\nimport ConfigParser\nfrom resource_management.core.logger import Logger\n\n\"\"\"\nreturns the ambari version on an agent host\n\"\"\"\ndef get_ambari_version_agent():\n ambari_version = None\n AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'\n if os.path.exists(AMBARI_AGENT_CONF):\n try:\n ambari_agent_config = ConfigParser.RawConfigParser()\n ambari_agent_config.read(AMBARI_AGENT_CONF)\n data_dir = ambari_agent_config.get('agent', 'prefix')\n ver_file = os.path.join(data_dir, 'version')\n with open(ver_file, \"r\") as f:\n ambari_version = f.read().strip()\n except Exception, e:\n Logger.info('Unable to determine ambari version from the agent version file.')\n Logger.debug('Exception: %s' % str(e))\n pass\n pass\n return ambari_version\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475195,"cells":{"repo_name":{"kind":"string","value":"abomyi/django"},"path":{"kind":"string","value":"tests/shortcuts/views.py"},"copies":{"kind":"string","value":"87"},"size":{"kind":"string","value":"2274"},"content":{"kind":"string","value":"from django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\n\n\ndef render_to_response_view(request):\n return render_to_response('shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n })\n\n\ndef render_to_response_view_with_multiple_templates(request):\n return render_to_response([\n 'shortcuts/no_such_template.html',\n 'shortcuts/render_test.html',\n ], {\n 'foo': 'FOO',\n 'bar': 'BAR',\n })\n\n\ndef 
render_to_response_view_with_content_type(request):\n return render_to_response('shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n }, content_type='application/x-rendertest')\n\n\ndef render_to_response_view_with_status(request):\n return render_to_response('shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n }, status=403)\n\n\ndef render_to_response_view_with_using(request):\n using = request.GET.get('using')\n return render_to_response('shortcuts/using.html', using=using)\n\n\ndef context_processor(request):\n return {'bar': 'context processor output'}\n\n\ndef render_to_response_with_context_instance_misuse(request):\n context_instance = RequestContext(request, {}, processors=[context_processor])\n # Incorrect -- context_instance should be passed as a keyword argument.\n return render_to_response('shortcuts/render_test.html', context_instance)\n\n\ndef render_view(request):\n return render(request, 'shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n })\n\n\ndef render_view_with_multiple_templates(request):\n return render(request, [\n 'shortcuts/no_such_template.html',\n 'shortcuts/render_test.html',\n ], {\n 'foo': 'FOO',\n 'bar': 'BAR',\n })\n\n\ndef render_view_with_content_type(request):\n return render(request, 'shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n }, content_type='application/x-rendertest')\n\n\ndef render_view_with_status(request):\n return render(request, 'shortcuts/render_test.html', {\n 'foo': 'FOO',\n 'bar': 'BAR',\n }, status=403)\n\n\ndef render_view_with_using(request):\n using = request.GET.get('using')\n return render(request, 'shortcuts/using.html', 
using=using)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475196,"cells":{"repo_name":{"kind":"string","value":"wisechengyi/pants"},"path":{"kind":"string","value":"tests/python/pants_test/repo_scripts/test_git_hooks.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"8029"},"content":{"kind":"string","value":"# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport datetime\nimport os\nimport shutil\nimport subprocess\nimport unittest\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import Optional, Sequence\n\nfrom pants.testutil.git_util import initialize_repo\nfrom pants.util.contextutil import temporary_dir\nfrom pants.util.dirutil import safe_file_dump, safe_mkdir_for\n\n\nclass PreCommitHookTest(unittest.TestCase):\n @contextmanager\n def _create_tiny_git_repo(self, *, copy_files: Optional[Sequence[Path]] = None):\n with temporary_dir() as gitdir, temporary_dir() as worktree:\n # A tiny little fake git repo we will set up. 
initialize_repo() requires at least one file.\n Path(worktree, \"README\").touch()\n # The contextmanager interface is only necessary if an explicit gitdir is not provided.\n with initialize_repo(worktree, gitdir=gitdir) as git:\n if copy_files is not None:\n for fp in copy_files:\n new_fp = Path(worktree, fp)\n safe_mkdir_for(str(new_fp))\n shutil.copy(fp, new_fp)\n yield git, worktree, gitdir\n\n def _assert_subprocess_error(self, worktree, cmd, expected_excerpt):\n result = subprocess.run(\n cmd, cwd=worktree, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\"utf-8\",\n )\n self.assertNotEqual(0, result.returncode)\n self.assertIn(expected_excerpt, f\"{result.stdout}\\n{result.stderr}\")\n\n def _assert_subprocess_success(self, worktree, cmd, **kwargs):\n self.assertEqual(0, subprocess.check_call(cmd, cwd=worktree, **kwargs))\n\n def _assert_subprocess_success_with_output(self, worktree, cmd, full_expected_output):\n stdout = subprocess.run(\n cmd, cwd=worktree, check=True, stdout=subprocess.PIPE, encoding=\"utf-8\"\n ).stdout\n self.assertEqual(full_expected_output, stdout)\n\n def test_check_packages(self):\n package_check_script = \"build-support/bin/check_packages.sh\"\n with self._create_tiny_git_repo(copy_files=[Path(package_check_script)]) as (\n _,\n worktree,\n _,\n ):\n init_py_path = os.path.join(worktree, \"subdir/__init__.py\")\n\n # Check that an invalid __init__.py errors.\n safe_file_dump(init_py_path, \"asdf\")\n self._assert_subprocess_error(\n worktree,\n [package_check_script, \"subdir\"],\n \"\"\"\\\nERROR: All '__init__.py' files should be empty or else only contain a namespace\ndeclaration, but the following contain code:\n---\nsubdir/__init__.py\n\"\"\",\n )\n\n # Check that a valid empty __init__.py succeeds.\n safe_file_dump(init_py_path, \"\")\n self._assert_subprocess_success(worktree, [package_check_script, \"subdir\"])\n\n # Check that a valid __init__.py with `pkg_resources` setup succeeds.\n safe_file_dump(init_py_path, 
'__import__(\"pkg_resources\").declare_namespace(__name__)')\n self._assert_subprocess_success(worktree, [package_check_script, \"subdir\"])\n\n # TODO: consider testing the degree to which copies (-C) and moves (-M) are detected by making\n # some small edits to a file, then moving it, and seeing if it is detected as a new file! That's\n # more testing git functionality, but since it's not clear how this is measured, it could be\n # useful if correctly detecting copies and moves ever becomes a concern.\n def test_added_files_correctly_detected(self):\n get_added_files_script = \"build-support/bin/get_added_files.sh\"\n with self._create_tiny_git_repo(copy_files=[Path(get_added_files_script)]) as (\n git,\n worktree,\n _,\n ):\n # Create a new file.\n new_file = os.path.join(worktree, \"wow.txt\")\n safe_file_dump(new_file, \"\")\n # Stage the file.\n rel_new_file = os.path.relpath(new_file, worktree)\n git.add(rel_new_file)\n self._assert_subprocess_success_with_output(\n worktree,\n [get_added_files_script],\n # This should be the only entry in the index, and it is a newly added file.\n full_expected_output=f\"{rel_new_file}\\n\",\n )\n\n def test_check_headers(self):\n header_check_script = \"build-support/bin/check_header.py\"\n cur_year_num = datetime.datetime.now().year\n cur_year = str(cur_year_num)\n with self._create_tiny_git_repo(\n copy_files=[Path(header_check_script), \"build-support/bin/common.py\"]\n ) as (_, worktree, _):\n new_py_path = os.path.join(worktree, \"subdir/file.py\")\n\n def assert_header_check(added_files, expected_excerpt):\n self._assert_subprocess_error(\n worktree=worktree,\n cmd=[header_check_script, \"subdir\", \"--files-added\"] + added_files,\n expected_excerpt=expected_excerpt,\n )\n\n # Check that a file with an empty header fails.\n safe_file_dump(new_py_path, \"\")\n assert_header_check(\n added_files=[], expected_excerpt=\"subdir/file.py: missing the expected header\"\n )\n\n # Check that a file with a random header 
fails.\n safe_file_dump(new_py_path, \"asdf\")\n assert_header_check(\n added_files=[], expected_excerpt=\"subdir/file.py: missing the expected header\"\n )\n\n # Check that a file with a typo in the header fails\n safe_file_dump(\n new_py_path,\n dedent(\n f\"\"\"\\\n # Copyright {cur_year} Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the MIT License, Version 3.3 (see LICENSE).\n \n \"\"\"\n ),\n )\n assert_header_check(\n added_files=[],\n expected_excerpt=\"subdir/file.py: header does not match the expected header\",\n )\n\n # Check that a file without a valid copyright year fails.\n safe_file_dump(\n new_py_path,\n dedent(\n \"\"\"\\\n # Copyright YYYY Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n \"\"\"\n ),\n )\n assert_header_check(\n added_files=[],\n expected_excerpt=(\n r\"subdir/file.py: copyright year must match '20\\d\\d' (was YYYY): \"\n f\"current year is {cur_year}\"\n ),\n )\n\n # Check that a newly added file must have the current year.\n last_year = str(cur_year_num - 1)\n safe_file_dump(\n new_py_path,\n dedent(\n f\"\"\"\\\n # Copyright {last_year} Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n \"\"\"\n ),\n )\n rel_new_py_path = os.path.relpath(new_py_path, worktree)\n assert_header_check(\n added_files=[rel_new_py_path],\n expected_excerpt=f\"subdir/file.py: copyright year must be {cur_year} (was {last_year})\",\n )\n\n # Check that a file isn't checked against the current year if it is not passed as an\n # arg to the script.\n # Use the same file as last time, with last year's copyright date.\n self._assert_subprocess_success(worktree, [header_check_script, 
\"subdir\"])\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475197,"cells":{"repo_name":{"kind":"string","value":"apbard/scipy"},"path":{"kind":"string","value":"scipy/sparse/linalg/dsolve/tests/test_linsolve.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"24049"},"content":{"kind":"string","value":"from __future__ import division, print_function, absolute_import\n\nimport threading\n\nimport numpy as np\nfrom numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix\nimport numpy.random as random\nfrom numpy.testing import (\n assert_array_almost_equal, assert_raises, assert_almost_equal,\n assert_equal, assert_array_equal, assert_, assert_allclose,\n assert_warns)\nimport pytest\n\nfrom scipy._lib._numpy_compat import assert_raises_regex\n\nimport scipy.linalg\nfrom scipy.linalg import norm, inv\nfrom scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,\n csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)\nfrom scipy.sparse.linalg import SuperLU\nfrom scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,\n MatrixRankWarning, _superlu, spsolve_triangular, factorized)\n\nfrom scipy._lib._numpy_compat import suppress_warnings\n\n\nsup_sparse_efficiency = suppress_warnings()\nsup_sparse_efficiency.filter(SparseEfficiencyWarning)\n\n# scikits.umfpack is not a SciPy dependency but it is optionally used in\n# dsolve, so check whether it's available\ntry:\n import scikits.umfpack as umfpack\n has_umfpack = True\nexcept ImportError:\n has_umfpack = False\n\ndef toarray(a):\n if isspmatrix(a):\n return a.toarray()\n else:\n return a\n\n\nclass TestFactorized(object):\n def setup_method(self):\n n = 5\n d = arange(n) + 1\n self.n = n\n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()\n random.seed(1234)\n\n def _check_singular(self):\n A = csc_matrix((5,5), dtype='d')\n b = ones(5)\n assert_array_almost_equal(0. 
* b, factorized(A)(b))\n\n def _check_non_singular(self):\n # Make a diagonal dominant, to make sure it is not singular\n n = 5\n a = csc_matrix(random.rand(n, n))\n b = ones(n)\n\n expected = splu(a).solve(b)\n assert_array_almost_equal(factorized(a)(b), expected)\n\n def test_singular_without_umfpack(self):\n use_solver(useUmfpack=False)\n assert_raises_regex(RuntimeError, \"Factor is exactly singular\", self._check_singular)\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_singular_with_umfpack(self):\n use_solver(useUmfpack=True)\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"divide by zero encountered in double_scalars\")\n assert_warns(umfpack.UmfpackWarning, self._check_singular)\n\n def test_non_singular_without_umfpack(self):\n use_solver(useUmfpack=False)\n self._check_non_singular()\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_non_singular_with_umfpack(self):\n use_solver(useUmfpack=True)\n self._check_non_singular()\n\n def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):\n use_solver(useUmfpack=False)\n assert_raises_regex(ValueError, \"can only factor square matrices\",\n factorized, self.A[:,:4])\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_factorizes_nonsquare_matrix_with_umfpack(self):\n use_solver(useUmfpack=True)\n # does not raise\n factorized(self.A[:,:4])\n\n def test_call_with_incorrectly_sized_matrix_without_umfpack(self):\n use_solver(useUmfpack=False)\n solve = factorized(self.A)\n b = random.rand(4)\n B = random.rand(4, 3)\n BB = random.rand(self.n, 3, 9)\n\n assert_raises_regex(ValueError, \"is of incompatible size\", solve, b)\n assert_raises_regex(ValueError, \"is of incompatible size\", solve, B)\n assert_raises_regex(ValueError, \"object too deep for desired array\", solve, BB)\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def 
test_call_with_incorrectly_sized_matrix_with_umfpack(self):\n use_solver(useUmfpack=True)\n solve = factorized(self.A)\n b = random.rand(4)\n B = random.rand(4, 3)\n BB = random.rand(self.n, 3, 9)\n\n # does not raise\n solve(b)\n assert_raises_regex(ValueError, \"object too deep for desired array\", solve, B)\n assert_raises_regex(ValueError, \"object too deep for desired array\", solve, BB)\n\n def test_call_with_cast_to_complex_without_umfpack(self):\n use_solver(useUmfpack=False)\n solve = factorized(self.A)\n b = random.rand(4)\n for t in [np.complex64, np.complex128]:\n assert_raises_regex(TypeError, \"Cannot cast array data\", solve,\n b.astype(t))\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_call_with_cast_to_complex_with_umfpack(self):\n use_solver(useUmfpack=True)\n solve = factorized(self.A)\n b = random.rand(4)\n for t in [np.complex64, np.complex128]:\n assert_warns(np.ComplexWarning, solve, b.astype(t))\n\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_assume_sorted_indices_flag(self):\n # a sparse matrix with unsorted indices\n unsorted_inds = np.array([2, 0, 1, 0])\n data = np.array([10, 16, 5, 0.4])\n indptr = np.array([0, 1, 2, 4])\n A = csc_matrix((data, unsorted_inds, indptr), (3, 3))\n b = ones(3)\n\n # should raise when incorrectly assuming indices are sorted\n use_solver(useUmfpack=True, assumeSortedIndices=True)\n assert_raises_regex(RuntimeError, \"UMFPACK_ERROR_invalid_matrix\", factorized, A)\n\n # should sort indices and succeed when not assuming indices are sorted\n use_solver(useUmfpack=True, assumeSortedIndices=False)\n expected = splu(A.copy()).solve(b)\n\n assert_equal(A.has_sorted_indices, 0)\n assert_array_almost_equal(factorized(A)(b), expected)\n assert_equal(A.has_sorted_indices, 1)\n\n\nclass TestLinsolve(object):\n def setup_method(self):\n use_solver(useUmfpack=False)\n\n def test_singular(self):\n A = csc_matrix((5,5), dtype='d')\n b = array([1, 2, 
3, 4, 5],dtype='d')\n with suppress_warnings() as sup:\n sup.filter(MatrixRankWarning, \"Matrix is exactly singular\")\n x = spsolve(A, b)\n assert_(not np.isfinite(x).any())\n\n def test_singular_gh_3312(self):\n # \"Bad\" test case that leads SuperLU to call LAPACK with invalid\n # arguments. Check that it fails moderately gracefully.\n ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)\n v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])\n A = csc_matrix((v, ij.T), shape=(20, 20))\n b = np.arange(20)\n\n try:\n # should either raise a runtimeerror or return value\n # appropriate for singular input\n x = spsolve(A, b)\n assert_(not np.isfinite(x).any())\n except RuntimeError:\n pass\n\n def test_twodiags(self):\n A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)\n b = array([1, 2, 3, 4, 5])\n\n # condition number of A\n cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)\n\n for t in ['f','d','F','D']:\n eps = finfo(t).eps # floating point epsilon\n b = b.astype(t)\n\n for format in ['csc','csr']:\n Asp = A.astype(t).asformat(format)\n\n x = spsolve(Asp,b)\n\n assert_(norm(b - Asp*x) < 10 * cond_A * eps)\n\n def test_bvector_smoketest(self):\n Adense = matrix([[0., 1., 1.],\n [1., 0., 1.],\n [0., 0., 1.]])\n As = csc_matrix(Adense)\n random.seed(1234)\n x = random.randn(3)\n b = As*x\n x2 = spsolve(As, b)\n\n assert_array_almost_equal(x, x2)\n\n def test_bmatrix_smoketest(self):\n Adense = matrix([[0., 1., 1.],\n [1., 0., 1.],\n [0., 0., 1.]])\n As = csc_matrix(Adense)\n random.seed(1234)\n x = random.randn(3, 4)\n Bdense = As.dot(x)\n Bs = csc_matrix(Bdense)\n x2 = spsolve(As, Bs)\n assert_array_almost_equal(x, x2.todense())\n\n @sup_sparse_efficiency\n def test_non_square(self):\n # A is not square.\n A = ones((3, 4))\n b = ones((4, 1))\n assert_raises(ValueError, spsolve, A, b)\n # A2 and b2 have incompatible shapes.\n A2 = csc_matrix(eye(3))\n b2 = array([1.0, 2.0])\n assert_raises(ValueError, spsolve, A2, b2)\n\n 
@sup_sparse_efficiency\n def test_example_comparison(self):\n row = array([0,0,1,2,2,2])\n col = array([0,2,2,0,1,2])\n data = array([1,2,3,-4,5,6])\n sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)\n M = sM.todense()\n\n row = array([0,0,1,1,0,0])\n col = array([0,2,1,1,0,0])\n data = array([1,1,1,1,1,1])\n sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)\n N = sN.todense()\n\n sX = spsolve(sM, sN)\n X = scipy.linalg.solve(M, N)\n\n assert_array_almost_equal(X, sX.todense())\n\n @sup_sparse_efficiency\n @pytest.mark.skipif(not has_umfpack, reason=\"umfpack not available\")\n def test_shape_compatibility(self):\n use_solver(useUmfpack=True)\n A = csc_matrix([[1., 0], [0, 2]])\n bs = [\n [1, 6],\n array([1, 6]),\n [[1], [6]],\n array([[1], [6]]),\n csc_matrix([[1], [6]]),\n csr_matrix([[1], [6]]),\n dok_matrix([[1], [6]]),\n bsr_matrix([[1], [6]]),\n array([[1., 2., 3.], [6., 8., 10.]]),\n csc_matrix([[1., 2., 3.], [6., 8., 10.]]),\n csr_matrix([[1., 2., 3.], [6., 8., 10.]]),\n dok_matrix([[1., 2., 3.], [6., 8., 10.]]),\n bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),\n ]\n\n for b in bs:\n x = np.linalg.solve(A.toarray(), toarray(b))\n for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:\n x1 = spsolve(spmattype(A), b, use_umfpack=True)\n x2 = spsolve(spmattype(A), b, use_umfpack=False)\n\n # check solution\n if x.ndim == 2 and x.shape[1] == 1:\n # interprets also these as \"vectors\"\n x = x.ravel()\n\n assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))\n assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))\n\n # dense vs. 
sparse output (\"vectors\" are always dense)\n if isspmatrix(b) and x.ndim > 1:\n assert_(isspmatrix(x1), repr((b, spmattype, 1)))\n assert_(isspmatrix(x2), repr((b, spmattype, 2)))\n else:\n assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))\n assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))\n\n # check output shape\n if x.ndim == 1:\n # \"vector\"\n assert_equal(x1.shape, (A.shape[1],))\n assert_equal(x2.shape, (A.shape[1],))\n else:\n # \"matrix\"\n assert_equal(x1.shape, x.shape)\n assert_equal(x2.shape, x.shape)\n\n A = csc_matrix((3, 3))\n b = csc_matrix((1, 3))\n assert_raises(ValueError, spsolve, A, b)\n\n @sup_sparse_efficiency\n def test_ndarray_support(self):\n A = array([[1., 2.], [2., 0.]])\n x = array([[1., 1.], [0.5, -0.5]])\n b = array([[2., 0.], [2., 2.]])\n\n assert_array_almost_equal(x, spsolve(A, b))\n\n def test_gssv_badinput(self):\n N = 10\n d = arange(N) + 1.0\n A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)\n\n for spmatrix in (csc_matrix, csr_matrix):\n A = spmatrix(A)\n b = np.arange(N)\n\n def not_c_contig(x):\n return x.repeat(2)[::2]\n\n def not_1dim(x):\n return x[:,None]\n\n def bad_type(x):\n return x.astype(bool)\n\n def too_short(x):\n return x[:-1]\n\n badops = [not_c_contig, not_1dim, bad_type, too_short]\n\n for badop in badops:\n msg = \"%r %r\" % (spmatrix, badop)\n # Not C-contiguous\n assert_raises((ValueError, TypeError), _superlu.gssv,\n N, A.nnz, badop(A.data), A.indices, A.indptr,\n b, int(spmatrix == csc_matrix), err_msg=msg)\n assert_raises((ValueError, TypeError), _superlu.gssv,\n N, A.nnz, A.data, badop(A.indices), A.indptr,\n b, int(spmatrix == csc_matrix), err_msg=msg)\n assert_raises((ValueError, TypeError), _superlu.gssv,\n N, A.nnz, A.data, A.indices, badop(A.indptr),\n b, int(spmatrix == csc_matrix), err_msg=msg)\n\n def test_sparsity_preservation(self):\n ident = csc_matrix([\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n b = csc_matrix([\n [0, 1],\n [1, 0],\n [0, 0]])\n x = spsolve(ident, 
b)\n assert_equal(ident.nnz, 3)\n assert_equal(b.nnz, 2)\n assert_equal(x.nnz, 2)\n assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)\n\n def test_dtype_cast(self):\n A_real = scipy.sparse.csr_matrix([[1, 2, 0],\n [0, 0, 3],\n [4, 0, 5]])\n A_complex = scipy.sparse.csr_matrix([[1, 2, 0],\n [0, 0, 3],\n [4, 0, 5 + 1j]])\n b_real = np.array([1,1,1])\n b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])\n x = spsolve(A_real, b_real)\n assert_(np.issubdtype(x.dtype, np.floating))\n x = spsolve(A_real, b_complex)\n assert_(np.issubdtype(x.dtype, np.complexfloating))\n x = spsolve(A_complex, b_real)\n assert_(np.issubdtype(x.dtype, np.complexfloating))\n x = spsolve(A_complex, b_complex)\n assert_(np.issubdtype(x.dtype, np.complexfloating))\n\n\nclass TestSplu(object):\n def setup_method(self):\n use_solver(useUmfpack=False)\n n = 40\n d = arange(n) + 1\n self.n = n\n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)\n random.seed(1234)\n\n def _smoketest(self, spxlu, check, dtype):\n if np.issubdtype(dtype, np.complexfloating):\n A = self.A + 1j*self.A.T\n else:\n A = self.A\n\n A = A.astype(dtype)\n lu = spxlu(A)\n\n rng = random.RandomState(1234)\n\n # Input shapes\n for k in [None, 1, 2, self.n, self.n+2]:\n msg = \"k=%r\" % (k,)\n\n if k is None:\n b = rng.rand(self.n)\n else:\n b = rng.rand(self.n, k)\n\n if np.issubdtype(dtype, np.complexfloating):\n b = b + 1j*rng.rand(*b.shape)\n b = b.astype(dtype)\n\n x = lu.solve(b)\n check(A, b, x, msg)\n\n x = lu.solve(b, 'T')\n check(A.T, b, x, msg)\n\n x = lu.solve(b, 'H')\n check(A.T.conj(), b, x, msg)\n\n @sup_sparse_efficiency\n def test_splu_smoketest(self):\n self._internal_test_splu_smoketest()\n\n def _internal_test_splu_smoketest(self):\n # Check that splu works at all\n def check(A, b, x, msg=\"\"):\n eps = np.finfo(A.dtype).eps\n r = A * x\n assert_(abs(r - b).max() < 1e3*eps, msg)\n\n self._smoketest(splu, check, np.float32)\n self._smoketest(splu, check, np.float64)\n self._smoketest(splu, check, 
np.complex64)\n self._smoketest(splu, check, np.complex128)\n\n @sup_sparse_efficiency\n def test_spilu_smoketest(self):\n self._internal_test_spilu_smoketest()\n\n def _internal_test_spilu_smoketest(self):\n errors = []\n\n def check(A, b, x, msg=\"\"):\n r = A * x\n err = abs(r - b).max()\n assert_(err < 1e-2, msg)\n if b.dtype in (np.float64, np.complex128):\n errors.append(err)\n\n self._smoketest(spilu, check, np.float32)\n self._smoketest(spilu, check, np.float64)\n self._smoketest(spilu, check, np.complex64)\n self._smoketest(spilu, check, np.complex128)\n\n assert_(max(errors) > 1e-5)\n\n @sup_sparse_efficiency\n def test_spilu_drop_rule(self):\n # Test passing in the drop_rule argument to spilu.\n A = identity(2)\n\n rules = [\n b'basic,area'.decode('ascii'), # unicode\n b'basic,area', # ascii\n [b'basic', b'area'.decode('ascii')]\n ]\n for rule in rules:\n # Argument should be accepted\n assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))\n\n def test_splu_nnz0(self):\n A = csc_matrix((5,5), dtype='d')\n assert_raises(RuntimeError, splu, A)\n\n def test_spilu_nnz0(self):\n A = csc_matrix((5,5), dtype='d')\n assert_raises(RuntimeError, spilu, A)\n\n def test_splu_basic(self):\n # Test basic splu functionality.\n n = 30\n rng = random.RandomState(12)\n a = rng.rand(n, n)\n a[a < 0.95] = 0\n # First test with a singular matrix\n a[:, 0] = 0\n a_ = csc_matrix(a)\n # Matrix is exactly singular\n assert_raises(RuntimeError, splu, a_)\n\n # Make a diagonal dominant, to make sure it is not singular\n a += 4*eye(n)\n a_ = csc_matrix(a)\n lu = splu(a_)\n b = ones(n)\n x = lu.solve(b)\n assert_almost_equal(dot(a, x), b)\n\n def test_splu_perm(self):\n # Test the permutation vectors exposed by splu.\n n = 30\n a = random.random((n, n))\n a[a < 0.95] = 0\n # Make a diagonal dominant, to make sure it is not singular\n a += 4*eye(n)\n a_ = csc_matrix(a)\n lu = splu(a_)\n # Check that the permutation indices do belong to [0, n-1].\n for perm in (lu.perm_r, 
lu.perm_c):\n assert_(all(perm > -1))\n assert_(all(perm < n))\n assert_equal(len(unique(perm)), len(perm))\n\n # Now make a symmetric, and test that the two permutation vectors are\n # the same\n # Note: a += a.T relies on undefined behavior.\n a = a + a.T\n a_ = csc_matrix(a)\n lu = splu(a_)\n assert_array_equal(lu.perm_r, lu.perm_c)\n\n def test_lu_refcount(self):\n # Test that we are keeping track of the reference count with splu.\n n = 30\n a = random.random((n, n))\n a[a < 0.95] = 0\n # Make a diagonal dominant, to make sure it is not singular\n a += 4*eye(n)\n a_ = csc_matrix(a)\n lu = splu(a_)\n\n # And now test that we don't have a refcount bug\n import sys\n rc = sys.getrefcount(lu)\n for attr in ('perm_r', 'perm_c'):\n perm = getattr(lu, attr)\n assert_equal(sys.getrefcount(lu), rc + 1)\n del perm\n assert_equal(sys.getrefcount(lu), rc)\n\n def test_bad_inputs(self):\n A = self.A.tocsc()\n\n assert_raises(ValueError, splu, A[:,:4])\n assert_raises(ValueError, spilu, A[:,:4])\n\n for lu in [splu(A), spilu(A)]:\n b = random.rand(42)\n B = random.rand(42, 3)\n BB = random.rand(self.n, 3, 9)\n assert_raises(ValueError, lu.solve, b)\n assert_raises(ValueError, lu.solve, B)\n assert_raises(ValueError, lu.solve, BB)\n assert_raises(TypeError, lu.solve,\n b.astype(np.complex64))\n assert_raises(TypeError, lu.solve,\n b.astype(np.complex128))\n\n @sup_sparse_efficiency\n def test_superlu_dlamch_i386_nan(self):\n # SuperLU 4.3 calls some functions returning floats without\n # declaring them. On i386@linux call convention, this fails to\n # clear floating point registers after call. 
As a result, NaN\n # can appear in the next floating point operation made.\n #\n # Here's a test case that triggered the issue.\n n = 8\n d = np.arange(n) + 1\n A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)\n A = A.astype(np.float32)\n spilu(A)\n A = A + 1j*A\n B = A.A\n assert_(not np.isnan(B).any())\n\n @sup_sparse_efficiency\n def test_lu_attr(self):\n\n def check(dtype, complex_2=False):\n A = self.A.astype(dtype)\n\n if complex_2:\n A = A + 1j*A.T\n\n n = A.shape[0]\n lu = splu(A)\n\n # Check that the decomposition is as advertized\n\n Pc = np.zeros((n, n))\n Pc[np.arange(n), lu.perm_c] = 1\n\n Pr = np.zeros((n, n))\n Pr[lu.perm_r, np.arange(n)] = 1\n\n Ad = A.toarray()\n lhs = Pr.dot(Ad).dot(Pc)\n rhs = (lu.L * lu.U).toarray()\n\n eps = np.finfo(dtype).eps\n\n assert_allclose(lhs, rhs, atol=100*eps)\n\n check(np.float32)\n check(np.float64)\n check(np.complex64)\n check(np.complex128)\n check(np.complex64, True)\n check(np.complex128, True)\n\n @sup_sparse_efficiency\n def test_threads_parallel(self):\n oks = []\n\n def worker():\n try:\n self.test_splu_basic()\n self._internal_test_splu_smoketest()\n self._internal_test_spilu_smoketest()\n oks.append(True)\n except:\n pass\n\n threads = [threading.Thread(target=worker)\n for k in range(20)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n assert_equal(len(oks), 20)\n\n\nclass TestSpsolveTriangular(object):\n def setup_method(self):\n use_solver(useUmfpack=False)\n\n def test_singular(self):\n n = 5\n A = csr_matrix((n, n))\n b = np.arange(n)\n for lower in (True, False):\n assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)\n\n @sup_sparse_efficiency\n def test_bad_shape(self):\n # A is not square.\n A = np.zeros((3, 4))\n b = ones((4, 1))\n assert_raises(ValueError, spsolve_triangular, A, b)\n # A2 and b2 have incompatible shapes.\n A2 = csr_matrix(eye(3))\n b2 = array([1.0, 2.0])\n assert_raises(ValueError, spsolve_triangular, A2, b2)\n\n 
@sup_sparse_efficiency\n def test_input_types(self):\n A = array([[1., 0.], [1., 2.]])\n b = array([[2., 0.], [2., 2.]])\n for matrix_type in (array, csc_matrix, csr_matrix):\n x = spsolve_triangular(matrix_type(A), b, lower=True)\n assert_array_almost_equal(A.dot(x), b)\n\n @sup_sparse_efficiency\n def test_random(self):\n def random_triangle_matrix(n, lower=True):\n A = scipy.sparse.random(n, n, density=0.1, format='coo')\n if lower:\n A = scipy.sparse.tril(A)\n else:\n A = scipy.sparse.triu(A)\n A = A.tocsr(copy=False)\n for i in range(n):\n A[i, i] = np.random.rand() + 1\n return A\n\n np.random.seed(1234)\n for lower in (True, False):\n for n in (10, 10**2, 10**3):\n A = random_triangle_matrix(n, lower=lower)\n for m in (1, 10):\n for b in (np.random.rand(n, m),\n np.random.randint(-9, 9, (n, m)),\n np.random.randint(-9, 9, (n, m)) +\n np.random.randint(-9, 9, (n, m)) * 1j):\n x = spsolve_triangular(A, b, lower=lower)\n assert_array_almost_equal(A.dot(x), b)\n\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475198,"cells":{"repo_name":{"kind":"string","value":"microcom/odoo"},"path":{"kind":"string","value":"addons/survey/wizard/survey_email_compose_message.py"},"copies":{"kind":"string","value":"29"},"size":{"kind":"string","value":"10120"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom openerp.osv import osv\nfrom openerp.osv import fields\nfrom openerp.tools.translate import _\nfrom datetime import datetime\nfrom openerp.exceptions import UserError\n\nimport re\nimport uuid\nimport urlparse\n\nemails_split = re.compile(r\"[;,\\n\\r]+\")\n\n\nclass survey_mail_compose_message(osv.TransientModel):\n _name = 'survey.mail.compose.message'\n _inherit = 'mail.compose.message'\n _description = 'Email composition wizard for Survey'\n _log_access = True\n\n def _get_public_url(self, cr, uid, ids, name, arg, context=None):\n res = dict((id, 0) for id in ids)\n survey_obj = self.pool.get('survey.survey')\n for wizard in self.browse(cr, uid, ids, context=context):\n res[wizard.id] = wizard.survey_id.public_url\n return res\n\n def _get_public_url_html(self, cr, uid, ids, name, arg, context=None):\n \"\"\" Compute if the message is unread by the current user \"\"\"\n urls = self._get_public_url(cr, uid, ids, name, arg, context=context)\n for key, url in urls.items():\n urls[key] = '<a href=\"%s\">%s</a>' % (url, _(\"Click here to start survey\"))\n return urls\n\n _columns = {\n 'survey_id': fields.many2one('survey.survey', 'Survey', required=True),\n 'public': fields.selection([('public_link', 'Share the public web link to your audience.'),\n ('email_public_link', 'Send by email the public web link to your audience.'),\n ('email_private', 'Send private invitation to your audience (only one response per recipient and per invitation).')],\n string='Share options', required=True),\n 'public_url': fields.function(_get_public_url, string=\"Public url\", type=\"char\"),\n 'public_url_html': fields.function(_get_public_url_html, string=\"Public HTML web link\", type=\"char\"),\n 'partner_ids': fields.many2many('res.partner',\n 'survey_mail_compose_message_res_partner_rel',\n 'wizard_id', 'partner_id', 'Existing contacts'),\n 'attachment_ids': fields.many2many('ir.attachment',\n 
'survey_mail_compose_message_ir_attachments_rel',\n 'wizard_id', 'attachment_id', 'Attachments'),\n 'multi_email': fields.text(string='List of emails', help=\"This list of emails of recipients will not converted in contacts. Emails separated by commas, semicolons or newline.\"),\n 'date_deadline': fields.date(string=\"Deadline to which the invitation to respond is valid\", help=\"Deadline to which the invitation to respond for this survey is valid. If the field is empty, the invitation is still valid.\"),\n }\n\n _defaults = {\n 'public': 'public_link',\n 'survey_id': lambda self, cr, uid, ctx={}: ctx.get('model') == 'survey.survey' and ctx.get('res_id') or None\n }\n\n def default_get(self, cr, uid, fields, context=None):\n res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)\n if context.get('active_model') == 'res.partner' and context.get('active_ids'):\n res.update({'partner_ids': context.get('active_ids')})\n return res\n\n def onchange_multi_email(self, cr, uid, ids, multi_email, context=None):\n emails = list(set(emails_split.split(multi_email or \"\")))\n emails_checked = []\n error_message = \"\"\n for email in emails:\n email = email.strip()\n if email:\n if not re.search(r\"^[^@]+@[^@]+$\", email):\n error_message += \"\\n'%s'\" % email\n else:\n emails_checked.append(email)\n if error_message:\n raise UserError(_(\"One email at least is incorrect: %s\") % error_message)\n\n emails_checked.sort()\n values = {'multi_email': '\\n'.join(emails_checked)}\n return {'value': values}\n\n def onchange_survey_id(self, cr, uid, ids, survey_id, context=None):\n \"\"\" Compute if the message is unread by the current user. 
\"\"\"\n if survey_id:\n survey = self.pool.get('survey.survey').browse(cr, uid, survey_id, context=context)\n return {\n 'value': {\n 'subject': survey.title,\n 'public_url': survey.public_url,\n 'public_url_html': '<a href=\"%s\">%s</a>' % (survey.public_url, _(\"Click here to take survey\")),\n }}\n else:\n txt = _(\"Please select a survey\")\n return {\n 'value': {\n 'public_url': txt,\n 'public_url_html': txt,\n }}\n\n #------------------------------------------------------\n # Wizard validation and send\n #------------------------------------------------------\n\n def send_mail(self, cr, uid, ids, auto_commit=False, context=None):\n \"\"\" Process the wizard content and proceed with sending the related\n email(s), rendering any template patterns on the fly if needed \"\"\"\n if context is None:\n context = {}\n\n survey_response_obj = self.pool.get('survey.user_input')\n partner_obj = self.pool.get('res.partner')\n mail_mail_obj = self.pool.get('mail.mail')\n try:\n model, anonymous_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'portal', 'group_anonymous')\n except ValueError:\n anonymous_id = None\n\n def create_response_and_send_mail(wizard, token, partner_id, email):\n \"\"\" Create one mail by recipients and replace __URL__ by link with identification token \"\"\"\n #set url\n url = wizard.survey_id.public_url\n\n url = urlparse.urlparse(url).path[1:] # dirty hack to avoid incorrect urls\n\n if token:\n url = url + '/' + token\n\n # post the message\n values = {\n 'model': None,\n 'res_id': None,\n 'subject': wizard.subject,\n 'body': wizard.body.replace(\"__URL__\", url),\n 'body_html': wizard.body.replace(\"__URL__\", url),\n 'parent_id': None,\n 'attachment_ids': wizard.attachment_ids and [(6, 0, wizard.attachment_ids.ids)] or None,\n 'email_from': wizard.email_from or None,\n 'auto_delete': True,\n }\n if partner_id:\n values['recipient_ids'] = [(4, partner_id)]\n else:\n values['email_to'] = email\n mail_id = 
mail_mail_obj.create(cr, uid, values, context=context)\n mail_mail_obj.send(cr, uid, [mail_id], context=context)\n\n def create_token(wizard, partner_id, email):\n if context.get(\"survey_resent_token\"):\n response_ids = survey_response_obj.search(cr, uid, [('survey_id', '=', wizard.survey_id.id), ('state', 'in', ['new', 'skip']), '|', ('partner_id', '=', partner_id), ('email', '=', email)], context=context)\n if response_ids:\n return survey_response_obj.read(cr, uid, response_ids, ['token'], context=context)[0]['token']\n if wizard.public != 'email_private':\n return None\n else:\n token = uuid.uuid4().__str__()\n # create response with token\n survey_response_obj.create(cr, uid, {\n 'survey_id': wizard.survey_id.id,\n 'deadline': wizard.date_deadline,\n 'date_create': datetime.now(),\n 'type': 'link',\n 'state': 'new',\n 'token': token,\n 'partner_id': partner_id,\n 'email': email},\n context=context)\n return token\n\n for wizard in self.browse(cr, uid, ids, context=context):\n # check if __URL__ is in the text\n if wizard.body.find(\"__URL__\") < 0:\n raise UserError(_(\"The content of the text don't contain '__URL__'. 
\\\n __URL__ is automaticaly converted into the special url of the survey.\"))\n\n if not wizard.multi_email and not wizard.partner_ids and (context.get('default_partner_ids') or context.get('default_multi_email')):\n wizard.multi_email = context.get('default_multi_email')\n wizard.partner_ids = context.get('default_partner_ids')\n\n # quick check of email list\n emails_list = []\n if wizard.multi_email:\n emails = list(set(emails_split.split(wizard.multi_email)) - set([partner.email for partner in wizard.partner_ids]))\n for email in emails:\n email = email.strip()\n if re.search(r\"^[^@]+@[^@]+$\", email):\n emails_list.append(email)\n\n # remove public anonymous access\n partner_list = []\n for partner in wizard.partner_ids:\n if not anonymous_id or not partner.user_ids or anonymous_id not in [x.id for x in partner.user_ids[0].groups_id]:\n partner_list.append({'id': partner.id, 'email': partner.email})\n\n if not len(emails_list) and not len(partner_list):\n if wizard.model == 'res.partner' and wizard.res_id:\n return False\n raise UserError(_(\"Please enter at least one valid recipient.\"))\n\n for email in emails_list:\n partner_id = partner_obj.search(cr, uid, [('email', '=', email)], context=context)\n partner_id = partner_id and partner_id[0] or None\n token = create_token(wizard, partner_id, email)\n create_response_and_send_mail(wizard, token, partner_id, email)\n\n for partner in partner_list:\n token = create_token(wizard, partner['id'], partner['email'])\n create_response_and_send_mail(wizard, token, partner['id'], partner['email'])\n\n return {'type': 'ir.actions.act_window_close'}\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475199,"cells":{"repo_name":{"kind":"string","value":"freephys/python_ase"},"path":{"kind":"string","value":"ase/transport/calculators.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"11688"},"content":{"kind":"string","value":"import numpy as np\n\nfrom numpy import linalg\nfrom 
ase.transport.selfenergy import LeadSelfEnergy, BoxProbe\nfrom ase.transport.greenfunction import GreenFunction\nfrom ase.transport.tools import subdiagonalize, cutcoupling, tri2full, dagger\n\n\nclass TransportCalculator:\n \"\"\"Determine transport properties of device sandwiched between\n semi-infinite leads using nonequillibrium Green function methods.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Bla Bla XXX\n \n energies is the energy grid on which the transport properties\n should be determined.\n \n h1 (h2) is a matrix representation of the Hamiltonian of two\n principal layers of the left (right) lead, and the coupling\n between such layers.\n \n h is a matrix representation of the Hamiltonian of the\n scattering region. This must include at least one lead\n principal layer on each side. The coupling in (out) of the\n scattering region is by default assumed to be identical to the\n coupling between left (right) principal layers. However,\n these couplings can also be specified explicitly through hc1\n and hc2.\n \n s, s1, and s2 are the overlap matrices corresponding to h, h1,\n and h2. Default is the identity operator. 
sc1 and sc2 are the\n overlap matrices corresponding to the optional couplings hc1\n and hc2.\n \n align_bf specifies the principal layer basis index used to\n align the fermi levels of the lead and scattering regions.\n \"\"\"\n \n # The default values for all extra keywords\n self.input_parameters = {'energies': None,\n 'h': None,\n 'h1': None,\n 'h2': None,\n 's': None,\n 's1': None,\n 's2': None,\n 'hc1': None,\n 'hc2': None,\n 'sc1': None,\n 'sc2': None,\n 'box': None,\n 'align_bf': None,\n 'eta1': 1e-3,\n 'eta2': 1e-3,\n 'eta': 1e-3,\n 'logfile': None, # '-',\n 'eigenchannels': 0,\n 'dos': False,\n 'pdos': [],\n }\n self.initialized = False # Changed Hamiltonians?\n self.uptodate = False # Changed energy grid?\n self.set(**kwargs)\n\n def set(self, **kwargs):\n for key in kwargs:\n if key in ['h', 'h1', 'h2', 'hc1', 'hc2',\n 's', 's1', 's2', 'sc1', 'sc2',\n 'eta', 'eta1', 'eta2', 'align_bf', 'box']:\n self.initialized = False\n self.uptodate = False\n break\n elif key in ['energies', 'eigenchannels', 'dos', 'pdos']:\n self.uptodate = False\n elif key not in self.input_parameters:\n raise KeyError, '\\'%s\\' not a vaild keyword' % key\n\n self.input_parameters.update(kwargs)\n log = self.input_parameters['logfile']\n if log is None:\n class Trash:\n def write(self, s):\n pass\n def flush(self):\n pass\n self.log = Trash()\n elif log == '-':\n from sys import stdout\n self.log = stdout\n elif 'logfile' in kwargs:\n self.log = open(log, 'w')\n\n def initialize(self):\n if self.initialized:\n return\n\n print >> self.log, '# Initializing calculator...'\n\n p = self.input_parameters\n if p['s1'] == None:\n p['s1'] = np.identity(len(p['h1']))\n if p['s2'] == None:\n p['s2'] = np.identity(len(p['h2']))\n if p['s'] == None:\n p['s'] = np.identity(len(p['h']))\n \n h_mm = p['h']\n s_mm = p['s']\n pl1 = len(p['h1']) / 2\n pl2 = len(p['h2']) / 2\n h1_ii = p['h1'][:pl1, :pl1]\n h1_ij = p['h1'][:pl1, pl1:2 * pl1]\n s1_ii = p['s1'][:pl1, :pl1]\n s1_ij = p['s1'][:pl1, pl1:2 
* pl1]\n h2_ii = p['h2'][:pl2, :pl2]\n h2_ij = p['h2'][pl2: 2 * pl2, :pl2]\n s2_ii = p['s2'][:pl2, :pl2]\n s2_ij = p['s2'][pl2: 2 * pl2, :pl2]\n \n if p['hc1'] is None:\n nbf = len(h_mm)\n h1_im = np.zeros((pl1, nbf), complex)\n s1_im = np.zeros((pl1, nbf), complex)\n h1_im[:pl1, :pl1] = h1_ij\n s1_im[:pl1, :pl1] = s1_ij\n else:\n h1_im = p['hc1']\n if p['sc1'] is not None:\n s1_im = p['sc1']\n else:\n s1_im = np.zeros(h1_im.shape, complex)\n\n if p['hc2'] is None:\n h2_im = np.zeros((pl2, nbf), complex)\n s2_im = np.zeros((pl2, nbf), complex)\n h2_im[-pl2:, -pl2:] = h2_ij\n s2_im[-pl2:, -pl2:] = s2_ij\n else:\n h2_im = p['hc2']\n if p['sc2'] is not None:\n s2_im[:] = p['sc2']\n else:\n s2_im = np.zeros(h2_im.shape, complex)\n\n align_bf = p['align_bf']\n if align_bf != None:\n diff = (h_mm[align_bf, align_bf] - h1_ii[align_bf, align_bf]) \\\n / s_mm[align_bf, align_bf]\n print >> self.log, '# Aligning scat. H to left lead H. diff=', diff\n h_mm -= diff * s_mm\n\n #setup lead self-energies\n self.selfenergies = [LeadSelfEnergy((h1_ii, s1_ii), \n (h1_ij, s1_ij),\n (h1_im, s1_im),\n p['eta1']),\n LeadSelfEnergy((h2_ii, s2_ii), \n (h2_ij, s2_ij),\n (h2_im, s2_im),\n p['eta2'])]\n box = p['box']\n if box is not None:\n print 'Using box probe!'\n self.selfenergies.append(\n BoxProbe(eta=box[0], a=box[1], b=box[2], energies=box[3],\n S=s_mm, T=0.3))\n \n #setup scattering green function\n self.greenfunction = GreenFunction(selfenergies=self.selfenergies,\n H=h_mm,\n S=s_mm,\n eta=p['eta'])\n\n self.initialized = True\n \n def update(self):\n if self.uptodate:\n return\n \n p = self.input_parameters\n self.energies = p['energies']\n nepts = len(self.energies)\n nchan = p['eigenchannels']\n pdos = p['pdos']\n self.T_e = np.empty(nepts)\n if p['dos']:\n self.dos_e = np.empty(nepts)\n if pdos != []:\n self.pdos_ne = np.empty((len(pdos), nepts))\n if nchan > 0:\n self.eigenchannels_ne = np.empty((nchan, nepts))\n\n for e, energy in enumerate(self.energies):\n Ginv_mm = 
self.greenfunction.retarded(energy, inverse=True)\n lambda1_mm = self.selfenergies[0].get_lambda(energy)\n lambda2_mm = self.selfenergies[1].get_lambda(energy)\n a_mm = linalg.solve(Ginv_mm, lambda1_mm)\n b_mm = linalg.solve(dagger(Ginv_mm), lambda2_mm)\n T_mm = np.dot(a_mm, b_mm)\n if nchan > 0:\n t_n = linalg.eigvals(T_mm).real\n self.eigenchannels_ne[:, e] = np.sort(t_n)[-nchan:]\n self.T_e[e] = np.sum(t_n)\n else:\n self.T_e[e] = np.trace(T_mm).real\n\n print >> self.log, energy, self.T_e[e]\n self.log.flush()\n\n if p['dos']:\n self.dos_e[e] = self.greenfunction.dos(energy)\n\n if pdos != []:\n self.pdos_ne[:, e] = np.take(self.greenfunction.pdos(energy),\n pdos)\n \n self.uptodate = True\n\n def print_pl_convergence(self):\n self.initialize()\n pl1 = len(self.input_parameters['h1']) / 2\n \n h_ii = self.selfenergies[0].h_ii\n s_ii = self.selfenergies[0].s_ii\n ha_ii = self.greenfunction.H[:pl1, :pl1]\n sa_ii = self.greenfunction.S[:pl1, :pl1]\n c1 = np.abs(h_ii - ha_ii).max()\n c2 = np.abs(s_ii - sa_ii).max()\n print 'Conv (h,s)=%.2e, %2.e' % (c1, c2)\n\n def plot_pl_convergence(self):\n self.initialize()\n pl1 = len(self.input_parameters['h1']) / 2 \n hlead = self.selfenergies[0].h_ii.real.diagonal()\n hprincipal = self.greenfunction.H.real.diagonal[:pl1]\n\n import pylab as pl\n pl.plot(hlead, label='lead')\n pl.plot(hprincipal, label='principal layer')\n pl.axis('tight')\n pl.show()\n\n def get_transmission(self):\n self.initialize()\n self.update()\n return self.T_e\n\n def get_dos(self):\n self.initialize()\n self.update()\n return self.dos_e\n\n def get_eigenchannels(self, n=None):\n \"\"\"Get ``n`` first eigenchannels.\"\"\"\n self.initialize()\n self.update()\n if n is None:\n n = self.input_parameters['eigenchannels']\n return self.eigenchannels_ne[:n]\n\n def get_pdos(self):\n self.initialize()\n self.update()\n return self.pdos_ne\n\n def subdiagonalize_bfs(self, bfs):\n self.initialize()\n bfs = np.array(bfs)\n p = self.input_parameters\n h_pp = 
p['h']\n s_pp = p['s']\n ht_pp, st_pp, c_pp, e_p = subdiagonalize(h_pp, s_pp, bfs)\n c_pp = np.take(c_pp, bfs, axis=0)\n c_pp = np.take(c_pp, bfs, axis=1)\n return ht_pp, st_pp, e_p, c_pp\n\n def cutcoupling_bfs(self, bfs):\n self.initialize()\n bfs = np.array(bfs)\n p = self.input_parameters\n h_pp = p['h'].copy()\n s_pp = p['s'].copy()\n cutcoupling(h_pp, s_pp, bfs)\n return h_pp, s_pp\n \n def get_left_channels(self, energy, nchan=1):\n self.initialize()\n g_s_ii = self.greenfunction.retarded(energy)\n lambda_l_ii = self.selfenergies[0].get_lambda(energy)\n lambda_r_ii = self.selfenergies[1].get_lambda(energy)\n\n if self.greenfunction.S is None:\n s_s_qsrt_ii = s_s_isqrt = np.identity(len(g_s_ii))\n else:\n s_mm = self.greenfunction.S\n s_s_i, s_s_ii = linalg.eig(s_mm)\n s_s_i = np.abs(s_s_i)\n s_s_sqrt_i = np.sqrt(s_s_i) # sqrt of eigenvalues \n s_s_sqrt_ii = np.dot(s_s_ii * s_s_sqrt_i, dagger(s_s_ii))\n s_s_isqrt_ii = np.dot(s_s_ii / s_s_sqrt_i, dagger(s_s_ii))\n\n lambdab_r_ii = np.dot(np.dot(s_s_isqrt_ii, lambda_r_ii),s_s_isqrt_ii)\n a_l_ii = np.dot(np.dot(g_s_ii, lambda_l_ii), dagger(g_s_ii))\n ab_l_ii = np.dot(np.dot(s_s_sqrt_ii, a_l_ii), s_s_sqrt_ii)\n lambda_i, u_ii = linalg.eig(ab_l_ii)\n ut_ii = np.sqrt(lambda_i / (2.0 * np.pi)) * u_ii\n m_ii = 2 * np.pi * np.dot(np.dot(dagger(ut_ii), lambdab_r_ii),ut_ii)\n T_i,c_in = linalg.eig(m_ii)\n T_i = np.abs(T_i)\n \n channels = np.argsort(-T_i)[:nchan]\n c_in = np.take(c_in, channels, axis=1)\n T_n = np.take(T_i, channels)\n v_in = np.dot(np.dot(s_s_isqrt_ii, ut_ii), c_in)\n\n return T_n, 
v_in\n"},"license":{"kind":"string","value":"gpl-3.0"}}}],"truncated":false,"partial":true},"paginationData":{"pageIndex":4751,"numItemsPerPage":100,"numTotalItems":477249,"offset":475100,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NzE4ODMxMywic3ViIjoiL2RhdGFzZXRzL3RyYW5zZm9ybWVyc2Jvb2svY29kZXBhcnJvdCIsImV4cCI6MTc1NzE5MTkxMywiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.UM45BHl9-Oz0h7TqG3Y4_tKFTHfl3Nn0N3ppeGMrVH7Hh9QP2DkPvmObRsBe82w0MgBF4Jx6XDJJzjMXsKxzCA","displayUrls":true},"discussionsStats":{"closed":0,"open":2,"total":2},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}"><div><header class="bg-linear-to-t border-b border-gray-100 pt-4 xl:pt-0 from-purple-500/8 dark:from-purple-500/20 to-white to-70% dark:to-gray-950"><div class="mx-4 relative flex flex-col xl:flex-row"><h1 class="flex flex-wrap items-center max-md:leading-tight gap-y-1 text-lg xl:flex-none"><a href="/datasets" class="group flex items-center"><svg class="sm:mr-1 -mr-1 text-gray-400" style="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 25 25"><ellipse cx="12.5" cy="5" fill="currentColor" fill-opacity="0.25" rx="7.5" ry="2"></ellipse><path d="M12.5 15C16.6421 15 20 14.1046 20 13V20C20 21.1046 16.6421 22 12.5 22C8.35786 22 5 21.1046 5 20V13C5 14.1046 8.35786 15 12.5 15Z" fill="currentColor" opacity="0.5"></path><path d="M12.5 7C16.6421 7 20 6.10457 20 5V11.5C20 12.6046 16.6421 13.5 12.5 13.5C8.35786 13.5 5 12.6046 5 11.5V5C5 6.10457 8.35786 7 12.5 7Z" fill="currentColor" opacity="0.5"></path><path d="M5.23628 12C5.08204 12.1598 5 12.8273 5 13C5 14.1046 8.35786 15 12.5 15C16.6421 15 20 14.1046 20 13C20 12.8273 19.918 12.1598 19.7637 12C18.9311 12.8626 15.9947 13.5 12.5 13.5C9.0053 13.5 6.06886 
12.8626 5.23628 12Z" fill="currentColor"></path></svg> <span class="mr-2.5 font-semibold text-gray-400 group-hover:text-gray-500 max-sm:hidden">Datasets:</span></a> <hr class="mx-1.5 h-2 translate-y-px rounded-sm border-r dark:border-gray-600 sm:hidden"> <div class="group flex flex-none items-center"><div class="relative mr-1 flex items-center"> <span class="inline-block "><span class="contents"><a href="/transformersbook" class="text-gray-400 hover:text-blue-600"><img alt="" class="size-3.5 rounded-sm flex-none" src="https://aifasthub.com/avatars/v1/production/uploads/1644084257624-5f0c746619cb630495b814fd.jpeg" crossorigin="anonymous"></a></span> </span></div> <span class="inline-block "><span class="contents"><a href="/transformersbook" class="text-gray-400 hover:text-blue-600">transformersbook</a></span> </span> <div class="mx-0.5 text-gray-300">/</div></div> <div class="max-w-full xl:flex xl:min-w-0 xl:flex-nowrap xl:items-center xl:gap-x-1"><a class="break-words font-mono font-semibold hover:text-blue-600 text-[1.07rem] xl:truncate" href="/datasets/transformersbook/codeparrot">codeparrot</a> <button class="text-xs mr-3 focus:outline-hidden inline-flex cursor-pointer items-center text-sm mx-0.5 text-gray-600 " title="Copy dataset name to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> </button></div> <div class="inline-flex items-center overflow-hidden whitespace-nowrap rounded-md border bg-white text-sm leading-none text-gray-500 mr-2"><button class="relative flex items-center overflow-hidden from-red-50 to-transparent 
dark:from-red-900 px-1.5 py-1 hover:bg-linear-to-t focus:outline-hidden" title="Like"><svg class="left-1.5 absolute" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" fill="currentColor"><path d="M22.45,6a5.47,5.47,0,0,1,3.91,1.64,5.7,5.7,0,0,1,0,8L16,26.13,5.64,15.64a5.7,5.7,0,0,1,0-8,5.48,5.48,0,0,1,7.82,0L16,10.24l2.53-2.58A5.44,5.44,0,0,1,22.45,6m0-2a7.47,7.47,0,0,0-5.34,2.24L16,7.36,14.89,6.24a7.49,7.49,0,0,0-10.68,0,7.72,7.72,0,0,0,0,10.82L16,29,27.79,17.06a7.72,7.72,0,0,0,0-10.82A7.49,7.49,0,0,0,22.45,4Z"></path></svg> <span class="ml-4 pl-0.5 ">like</span></button> <button class="focus:outline-hidden flex items-center border-l px-1.5 py-1 text-gray-400 hover:bg-gray-50 focus:bg-gray-100 dark:hover:bg-gray-900 dark:focus:bg-gray-800" title="See users who liked this repository">60</button></div> <div class="relative flex items-center gap-1.5 "><div class="mr-2 inline-flex h-6 items-center overflow-hidden whitespace-nowrap rounded-md border text-sm text-gray-500"><button class="focus:outline-hidden relative flex h-full max-w-56 items-center gap-1.5 overflow-hidden px-1.5 hover:bg-gray-50 focus:bg-gray-100 dark:hover:bg-gray-900 dark:focus:bg-gray-800" type="button" ><div class="flex h-full flex-1 items-center justify-center ">Follow</div> <img alt="" class="rounded-xs size-3 flex-none" src="https://aifasthub.com/avatars/v1/production/uploads/1644084257624-5f0c746619cb630495b814fd.jpeg"> <span class="truncate">Natural Language Processing with Transformers</span></button> <button class="focus:outline-hidden flex h-full items-center border-l pl-1.5 pr-1.5 text-gray-400 hover:bg-gray-50 focus:bg-gray-100 dark:hover:bg-gray-900 dark:focus:bg-gray-800" title="Show Natural Language Processing with Transformers's followers" type="button">116</button></div> </div> </h1> <div class="flex flex-col-reverse gap-x-2 
sm:flex-row sm:items-center sm:justify-between xl:ml-auto"><div class="-mb-px flex h-12 items-center overflow-x-auto overflow-y-hidden "> <a class="tab-alternate" href="/datasets/transformersbook/codeparrot"><svg class="mr-1.5 text-gray-400 flex-none" style="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" fill="currentColor"></path></svg> Dataset card </a><a class="tab-alternate active" href="/datasets/transformersbook/codeparrot/viewer/"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill="currentColor" d="M2.5 2h7a1 1 0 0 1 1 1v6a1 1 0 0 1-1 1h-7a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1Zm0 2v2h3V4h-3Zm4 0v2h3V4h-3Zm-4 3v2h3V7h-3Zm4 0v2h3V7h-3Z"></path></svg> Data Studio </a><a class="tab-alternate" href="/datasets/transformersbook/codeparrot/tree/main"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-tertiary" d="M21 19h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-8h-8a1 1 0 0 1 0-2h8a1 
1 0 0 1 0 2zm0 4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M9 19a1 1 0 0 1-1-1V6a1 1 0 0 1 2 0v12a1 1 0 0 1-1 1zm-6-4.333a1 1 0 0 1-.64-1.769L3.438 12l-1.078-.898a1 1 0 0 1 1.28-1.538l2 1.667a1 1 0 0 1 0 1.538l-2 1.667a.999.999 0 0 1-.64.231z" fill="currentColor"></path></svg> <span class="xl:hidden">Files</span> <span class="hidden xl:inline">Files and versions</span> <span class="inline-block "><span class="contents"><div slot="anchor" class="shadow-purple-500/10 ml-2 inline-flex -translate-y-px items-center gap-0.5 rounded-md border bg-white px-1 py-0.5 align-middle text-xs font-semibold leading-none text-gray-800 shadow-sm dark:border-gray-700 dark:bg-gradient-to-b dark:from-gray-925 dark:to-gray-925 dark:text-gray-300"><svg class="size-3 " xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill-rule="evenodd" clip-rule="evenodd" d="M6.14 3.64 5.1 4.92 2.98 2.28h2.06l1.1 1.36Zm0 4.72-1.1 1.36H2.98l2.13-2.64 1.03 1.28Zm4.9 1.36L8.03 6l3-3.72H8.96L5.97 6l3 3.72h2.06Z" fill="#7875FF"></path><path d="M4.24 6 2.6 8.03.97 6 2.6 3.97 4.24 6Z" fill="#FF7F41" opacity="1"></path></svg> <span>xet</span> </div></span> </span> </a><a class="tab-alternate" href="/datasets/transformersbook/codeparrot/discussions"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 
14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" 
fill="#FF9D00"></path><path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path></svg> Community <div class="ml-1.5 flex h-4 min-w-[1rem] items-center justify-center rounded px-1 text-xs leading-none shadow-sm bg-black text-white dark:bg-gray-800 dark:text-gray-200">2</div> </a></div> </div></div></header> </div> <div class="flex flex-col w-full"> <div class="flex h-full flex-1"> <div class="flex flex-1 flex-col overflow-hidden " style="height: calc(100vh - 48px)"><div class="flex flex-col overflow-hidden h-full "> <div class="flex flex-1 flex-col overflow-hidden "><div class="flex flex-1 flex-col overflow-hidden"><div class="flex min-h-0 flex-1"><div class="flex flex-1 flex-col overflow-hidden"><div class="md:shadow-xs dark:border-gray-800 md:my-4 md:ml-4 md:rounded-lg md:border flex min-w-0 flex-wrap "><div class="flex min-w-0 flex-1 flex-wrap"><div class="grid flex-1 grid-cols-1 overflow-hidden text-sm md:grid-cols-2 md:place-content-center md:rounded-lg"><label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-950 md:border-r md:border-r-0 hidden" title="default"><span class="text-gray-500">Subset (1)</span> <div class="flex items-center 
whitespace-nowrap"><span class="truncate">default</span> <span class="mx-2 text-gray-500">·</span> <span class="text-gray-500">~18.7M rows (showing the first 477k)</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Subset (1)"><option value="default" selected>default (~18.7M rows, showing the first 477k)</option></optgroup></select></label> <label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-900 md:border-r md:border-r" title="train"><div class="text-gray-500">Split (1)</div> <div class="flex items-center overflow-hidden whitespace-nowrap"><span class="truncate">train</span> <span class="mx-2 text-gray-500">·</span> <span class="text-gray-500">~18.7M rows (showing the first 477k)</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Split (1)"><option value="train" selected>train (~18.7M rows, showing the first 477k)</option></optgroup></select></label></div></div> <div class="hidden flex-none flex-col items-center gap-0.5 border-l px-1 md:flex justify-end"> <span class="inline-block "><span class="contents"><div slot="anchor"><button class="group text-gray-500 hover:text-gray-700" aria-label="Hide sidepanel"><div class="rounded-xs flex size-4 items-center justify-center border border-gray-400 bg-gray-100 hover:border-gray-600 hover:bg-blue-50 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-700 dark:group-hover:border-gray-400"><div class="float-left h-full w-[65%]"></div> <div 
class="float-right h-full w-[35%] bg-gray-400 group-hover:bg-gray-600 dark:bg-gray-600 dark:group-hover:bg-gray-400"></div></div></button></div></span> </span> <div class="relative "> <button class="btn px-0.5 py-0.5 " type="button"> <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="p-0.5" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><circle cx="16" cy="7" r="3" fill="currentColor"></circle><circle cx="16" cy="16" r="3" fill="currentColor"></circle><circle cx="16" cy="25" r="3" fill="currentColor"></circle></svg> </button> </div></div></div> <div class="flex min-h-0 flex-1 flex-col border dark:border-gray-800 md:mb-4 md:ml-4 md:rounded-lg"> <div class="bg-linear-to-r text-smd relative flex items-center dark:border-gray-900 dark:bg-gray-950 false rounded-t-lg [&:has(:focus)]:from-gray-50 [&:has(:focus)]:to-transparent [&:has(:focus)]:to-20% dark:[&:has(:focus)]:from-gray-900"><form class="flex-1"><svg class="absolute left-3 top-1/2 transform -translate-y-1/2 pointer-events-none text-gray-400" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M30 28.59L22.45 21A11 11 0 1 0 21 22.45L28.59 30zM5 14a9 9 0 1 1 9 9a9 9 0 0 1-9-9z" fill="currentColor"></path></svg> <input disabled class="outline-hidden h-9 w-full border-none bg-transparent px-1 pl-9 pr-3 placeholder:text-gray-400 " placeholder="Search this dataset" dir="auto"></form> <div class="flex items-center gap-2 px-2 py-1"><button type="button" class="hover:bg-yellow-200/70 flex items-center gap-1 rounded-md border border-yellow-200 bg-yellow-100 pl-0.5 pr-1 text-[.8rem] leading-normal text-gray-700 dark:border-orange-500/25 dark:bg-orange-500/20 dark:text-gray-300 dark:hover:brightness-110 md:hidden"><div class="rounded-sm bg-yellow-300 px-1 
font-mono text-[.7rem] font-bold text-black dark:bg-yellow-700 dark:text-gray-200">SQL </div> Console </button></div></div> <div class="flex flex-1 flex-col overflow-hidden min-h-64 flex w-full flex-col border-t md:rounded-b-lg md:shadow-lg"> <div class="flex-1 relative overflow-auto"><table class="w-full table-auto rounded-lg font-mono text-xs text-gray-900"><thead class="shadow-xs sticky left-0 right-0 top-0 z-1 bg-white align-top"><tr class="space-y-54 h-full min-w-fit divide-x border-b text-left"><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">repo_name <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="17.17997224375034" width="11.2" height="12.820027756249662" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" 
x="13.2" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="17.834998107528435" width="11.2" height="12.165001892471567" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="24.515437160932176" width="11.2" height="5.484562839067823" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" 
height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">5</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">100</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">path <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path 
d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="7.078747978308439" width="11.2" height="22.92125202169156" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="23.94988107696699" width="11.2" height="6.050118923033013" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="26" width="11.2" height="4" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect 
class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">4</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">375</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">copies <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg 
class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">classes</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><defs><clipPath id="rounded-bar"><rect x="0" y="0" width="130" height="8" rx="4"></rect></clipPath><pattern id="hatching" patternUnits="userSpaceOnUse" patternTransform="rotate(-45)" height="1" width="5"><line y1="0" class="stroke-gray-400 dark:stroke-gray-500/80" stroke-width="3" y2="1" x1="2" x2="2"></line></pattern><pattern id="hatching-faded" patternUnits="userSpaceOnUse" patternTransform="rotate(-45)" height="1" width="5"><line y1="0" class="stroke-gray-100 dark:stroke-gray-500/20" stroke-width="3" y2="1" x1="2" x2="2"></line></pattern></defs><g height="8" style="transform: translateY(20px)" clip-path="url(#rounded-bar)"><g style="transform: scaleX(1.0153846153846153) translateX(-1px)"><g><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="1" y="0" width="23.54733482940771" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="26.54733482940771" y="0" width="7.474970088989185" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 
dark:fill-indigo-600/80" x="36.0223049183969" y="0" width="3.372436610658168" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="41.39474152905507" y="0" width="1.980228350399896" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="45.374969879454966" y="0" width="1.35372101355896" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="48.728690893013926" y="0" width="0.8525151440862109" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="51.58120603710014" y="0" width="0.4569983383935847" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="54.03820437549372" y="0" width="0.14701340390446038" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="56.18521777939818" y="0" width="0.06447787213802458" height="8" fill-opacity="1"></rect></g></g></g><g style="transform: scaleX(1.0153846153846153) translateX(-1px)"><g><rect class="fill-white cursor-pointer" x="0" y="0" width="25.54733482940771" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="25.54733482940771" y="0" width="9.474970088989185" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="35.0223049183969" y="0" width="5.372436610658168" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="40.39474152905507" y="0" width="3.980228350399896" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="44.374969879454966" y="0" width="3.35372101355896" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="47.728690893013926" y="0" width="2.852515144086211" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="50.58120603710014" y="0" width="2.4569983383935847" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="53.03820437549372" y="0" width="2.1470134039044604" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="55.18521777939818" y="0" width="2.0644778721380246" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="57.24969565153621" y="0" width="1.6853047361021187" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="58.935000387638325" y="0" width="1.476650553484659" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="60.411650941122986" y="0" width="1.4608516728164962" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="61.87250261393948" y="0" width="1.4532246269766935" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="63.32572724091617" y="0" width="1.3399085173567677" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="64.66563575827294" y="0" width="1.3170273798373595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="65.9826631381103" y="0" width="1.1301647567621933" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="67.1128278948725" y="0" width="1.1048320687942772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="68.21765996366678" y="0" width="1.0119455462452516" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="69.22960550991203" y="0" width="1.0094939957967435" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="70.23909950570878" y="0" width="0.8912747852798015" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="71.13037429098858" y="0" width="0.8588598404606401" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="71.98923413144922" y="0" width="0.840881803838248" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="72.83011593528747" y="0" 
width="0.8327099690098879" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="73.66282590429735" y="0" width="0.7351927400581248" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="74.39801864435547" y="0" width="0.7109496300673234" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="75.10896827442279" y="0" width="0.6861617310879646" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="75.79513000551076" y="0" width="0.6646425660399498" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="76.4597725715507" y="0" width="0.6292312817837229" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="77.08900385333442" y="0" width="0.6172459240354615" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="77.70624977736988" y="0" width="0.5987230984245122" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="78.30497287579439" y="0" width="0.5959991534817254" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="78.90097202927612" y="0" width="0.5793830893307267" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="79.48035511860685" y="0" width="0.5706664655138094" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="80.05102158412066" y="0" width="0.5412478601317132" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="80.59226944425238" y="0" width="0.5298072913720091" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="81.12207673562439" y="0" width="0.5276281354177799" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="81.64970487104218" y="0" width="0.4976647410471264" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="82.1473696120893" y="0" width="0.49739234655284764" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="82.64476195864215" y="0" width="0.493851218127225" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="83.13861317676937" y="0" width="0.4826830438617996" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="83.62129622063117" y="0" width="0.4794143099304556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="84.10071053056163" y="0" width="0.47097008060781687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="84.57168061116944" y="0" width="0.46524979622796486" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="85.0369304073974" y="0" width="0.46061908982522753" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="85.49754949722264" y="0" width="0.45816753937671945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="85.95571703659935" y="0" width="0.4562607779167688" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="86.41197781451612" y="0" width="0.4486337320769661" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="86.86061154659309" y="0" width="0.44590978713417945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="87.30652133372728" y="0" width="0.4437306311799501" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="87.75025196490722" y="0" width="0.425752594557558" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="88.17600455946479" y="0" width="0.41785315422347663" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="88.59385771368827" y="0" width="0.4148568147864113" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="89.00871452847468" y="0" width="0.39633398917546186" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="89.40504851765014" y="0" width="0.38707257636998715" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="89.79212109402013" y="0" width="0.38707257636998715" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="90.17919367039012" y="0" width="0.3805351085072991" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="90.55972877889742" y="0" width="0.36146749390779237" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="90.92119627280522" y="0" width="0.356019604022219" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="91.27721587682744" y="0" width="0.353568053573711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="91.63078393040115" y="0" width="0.34757537469958033" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="91.97835930510072" y="0" width="0.32796297111151623" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="92.30632227621224" y="0" width="0.30807817302917345" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="92.61440044924142" y="0" width="0.29854436572942006" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="92.91294481497084" y="0" width="0.2958204207866334" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="93.20876523575748" y="0" width="0.283835063038372" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="93.49260029879585" y="0" width="0.28274548506125735" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="93.7753457838571" y="0" width="0.2802939346127493" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="94.05563971846985" y="0" width="0.2729392832672253" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="94.32857900173707" y="0" width="0.2664018154045372" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="94.5949808171416" y="0" width="0.2653122374274226" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="94.86029305456903" y="0" width="0.25714040259906257" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="95.11743345716809" y="0" width="0.24951335675925984" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="95.36694681392736" y="0" width="0.24951335675925984" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="95.61646017068662" y="0" width="0.2448826503565225" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="95.86134282104314" y="0" width="0.24270349440229314" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="96.10404631544543" y="0" width="0.24243109990801448" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="96.34647741535345" y="0" width="0.23997954945950647" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="96.58645696481295" y="0" width="0.23997954945950647" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="96.82643651427246" y="0" width="0.23861757698811312" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="97.06505409126058" y="0" width="0.23834518249383446" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="97.30339927375441" y="0" width="0.2369832100224411" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="97.54038248377685" y="0" width="0.23398687058537576" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="97.77436935436222" y="0" width="0.22826658620552376" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="98.00263594056774" y="0" width="0.2220015128371144" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="98.22463745340485" y="0" width="0.2220015128371144" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="98.44663896624196" y="0" width="0.2217291183428357" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="98.6683680845848" y="0" width="0.21737080643437703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="98.88573889101917" y="0" width="0.21709841194009835" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="99.10283730295927" y="0" width="0.21464686149159035" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="99.31748416445086" y="0" width="0.21274010003163968" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="99.5302242644825" y="0" width="0.20947136610029565" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="99.7396956305828" y="0" width="0.20783699913462364" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="99.94753262971743" y="0" width="0.20565784318039432" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="100.15319047289782" y="0" width="0.20456826520327961" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="100.3577587381011" y="0" width="0.20293389823760763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="100.5606926363387" y="0" width="0.2021167147547716" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="100.76280935109347" y="0" width="0.1996651643062636" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="100.96247451539973" y="0" width="0.19748600835203425" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="101.15996052375176" y="0" width="0.19476206340924757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="101.354722587161" y="0" width="0.1868626230751662" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="101.54158521023618" y="0" width="0.18604543959233022" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="101.72763064982851" y="0" width="0.18304910015526488" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="101.91067974998377" y="0" width="0.1827767056609862" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.09345645564476" y="0" width="0.18086994420103553" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.27432639984579" y="0" width="0.17950797172964217" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.45383437157544" y="0" width="0.17406008184406882" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.62789445341951" y="0" width="0.1726981093726755" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.80059256279219" y="0" width="0.1724257148783968" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="102.97301827767059" y="0" width="0.17215332038411812" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="103.1451715980547" y="0" width="0.16534345802715145" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="103.31051505608185" y="0" width="0.16207472409580742" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="103.47258978017766" y="0" width="0.16125754061297143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="103.63384732079062" y="0" width="0.16098514611869277" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="103.79483246690931" y="0" width="0.15935077915302073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="103.95418324606233" y="0" width="0.15826120117590609" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.11244444723823" y="0" width="0.15689922870451273" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.26934367594275" y="0" width="0.15580965072739805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.42515332667014" y="0" width="0.15254091679605405" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.5776942434662" y="0" width="0.15008936634754605" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.72778360981374" y="0" width="0.14464147646197267" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="104.87242508627571" y="0" width="0.14436908196769402" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.01679416824341" y="0" width="0.14164513702490733" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.15843930526832" y="0" width="0.13646964163361264" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.29490894690193" y="0" width="0.13619724713933395" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.43110619404126" y="0" width="0.13483527466794062" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.5659414687092" y="0" width="0.13456288017366197" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.70050434888286" y="0" width="0.1342904856793833" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="105.83479483456225" y="0" width="0.1334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="105.9682681367588" y="0" width="0.13020456826520327" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.098472705024" y="0" width="0.1258462563567446" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.22431896138073" y="0" width="0.1255738618624659" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.3498928232432" y="0" width="0.1255738618624659" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.47546668510566" y="0" width="0.12530146736818726" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.60076815247385" y="0" width="0.12530146736818726" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.72606961984204" y="0" width="0.12393949489679393" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.85000911473882" y="0" width="0.12312231141395791" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="106.97313142615278" y="0" width="0.12284991691967924" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.09598134307245" y="0" width="0.12148794444828591" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.21746928752074" y="0" width="0.12094315545972856" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.33841244298047" y="0" width="0.1198535774826139" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.45826602046309" y="0" width="0.11876399950549923" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.57703001996859" y="0" width="0.11794681602266321" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.69497683599126" y="0" width="0.1146780820913192" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="107.80965491808257" y="0" width="0.11195413714853253" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.9216090552311" y="0" width="0.10759582524007384" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.02920488047117" y="0" width="0.10732343074579517" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.13652831121696" y="0" width="0.10732343074579517" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.24385174196276" y="0" width="0.10677864175723784" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.35063038371999" y="0" width="0.10623385276868051" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.45686423648867" y="0" width="0.10596145827440183" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.56282569476308" y="0" width="0.10514427479156582" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.66796996955465" y="0" width="0.10514427479156582" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.77311424434622" y="0" width="0.10405469681445116" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.87716894116068" y="0" width="0.10378230232017249" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="108.98095124348085" y="0" width="0.10242032984877915" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.08337157332963" y="0" width="0.10133075187166449" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.1847023252013" y="0" width="0.09969638490599247" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.2843987101073" y="0" width="0.0994239904117138" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="109.383822700519" y="0" width="0.09561046749181244" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.47943316801081" y="0" width="0.09506567850325512" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.57449884651408" y="0" width="0.09152455007763244" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.66602339659171" y="0" width="0.09152455007763244" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.75754794666935" y="0" width="0.09043497210051776" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.84798291876987" y="0" width="0.0901625776062391" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="109.93814549637611" y="0" width="0.0901625776062391" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.02830807398234" y="0" width="0.08989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.1181982570943" y="0" width="0.08852821064056708" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.20672646773487" y="0" width="0.08852821064056708" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.29525467837543" y="0" width="0.0874386326634524" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.38269331103888" y="0" width="0.08662144918061641" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.46931476021949" y="0" width="0.0849870822149444" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.55430184243444" y="0" width="0.08471468772066573" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.6390165301551" y="0" width="0.0841698987321084" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="110.72318642888722" y="0" width="0.0841698987321084" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.80735632761933" y="0" width="0.08362510974355106" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.89098143736288" y="0" width="0.08335271524927239" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="110.97433415261216" y="0" width="0.08308032075499372" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.05741447336715" y="0" width="0.08253553176643638" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.13995000513358" y="0" width="0.08253553176643638" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.22248553690002" y="0" width="0.08090116480076437" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.30338670170079" y="0" width="0.08062877030648571" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.38401547200728" y="0" width="0.08035637581220705" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.46437184781948" y="0" width="0.08008398131792838" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.5444558291374" y="0" width="0.07953919232937104" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.62399502146678" y="0" width="0.07953919232937104" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.70353421379616" y="0" width="0.07899440334081372" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.78252861713698" y="0" width="0.07844961435225636" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="111.86097823148923" y="0" width="0.0773600363751417" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="111.93833826786437" y="0" width="0.0773600363751417" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.0156983042395" y="0" width="0.07708764188086303" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.09278594612037" y="0" width="0.07599806390374836" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.16878401002413" y="0" width="0.07599806390374836" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.24478207392788" y="0" width="0.07463609143235501" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.31941816536023" y="0" width="0.07463609143235501" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.39405425679259" y="0" width="0.07436369693807636" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.46841795373066" y="0" width="0.07436369693807636" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.54278165066874" y="0" width="0.07436369693807636" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.61714534760682" y="0" width="0.07409130244379769" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.69123665005063" y="0" width="0.07409130244379769" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.76532795249443" y="0" width="0.07409130244379769" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.83941925493824" y="0" width="0.07327411896096167" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.9126933738992" y="0" width="0.07218454098384701" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="112.98487791488304" y="0" width="0.07191214648956834" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="113.05679006137261" y="0" width="0.071367357501011" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.12815741887363" y="0" width="0.07109496300673233" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.19925238188036" y="0" width="0.07082256851245367" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.27007495039281" y="0" width="0.07082256851245367" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.34089751890527" y="0" width="0.07000538502961766" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.41090290393488" y="0" width="0.07000538502961766" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.4809082889645" y="0" width="0.06973299053533899" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.55064127949984" y="0" width="0.06946059604106034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.6201018755409" y="0" width="0.06946059604106034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.68956247158195" y="0" width="0.06918820154678167" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.75875067312873" y="0" width="0.06864341255822431" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.82739408568696" y="0" width="0.06837101806394565" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.8957651037509" y="0" width="0.06755383458110965" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="113.96331893833201" y="0" width="0.06755383458110965" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.03087277291311" y="0" width="0.06755383458110965" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="114.09842660749422" y="0" width="0.06728144008683098" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.16570804758105" y="0" width="0.06646425660399498" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.23217230418504" y="0" width="0.06646425660399498" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.29863656078903" y="0" width="0.06619186210971631" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.36482842289875" y="0" width="0.06564707312115897" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.43047549601991" y="0" width="0.0653746786268803" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.49585017464679" y="0" width="0.06482988963832298" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.56068006428511" y="0" width="0.06455749514404431" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.62523755942915" y="0" width="0.06455749514404431" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.6897950545732" y="0" width="0.06428510064976564" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.75408015522297" y="0" width="0.06401270615548697" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.81809286137845" y="0" width="0.06346791716692964" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.88156077854538" y="0" width="0.06319552267265097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="114.94475630121804" y="0" width="0.06210594469553629" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.00686224591357" y="0" width="0.06183355020125763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="115.06869579611482" y="0" width="0.06183355020125763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.13052934631608" y="0" width="0.06183355020125763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.19236289651734" y="0" width="0.061561155706978954" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.25392405222432" y="0" width="0.06101636671842162" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.31494041894274" y="0" width="0.06101636671842162" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.37595678566116" y="0" width="0.06047157772986428" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.43642836339103" y="0" width="0.06047157772986428" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.4968999411209" y="0" width="0.06047157772986428" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.55737151885077" y="0" width="0.06019918323558562" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.61757070208635" y="0" width="0.06019918323558562" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.67776988532194" y="0" width="0.05965439424702828" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.73742427956897" y="0" width="0.05910960525847094" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.79653388482744" y="0" width="0.05856481626991361" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.85509870109735" y="0" width="0.05802002728135627" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="115.9131187283787" y="0" width="0.057747632787077605" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="115.97086636116579" y="0" width="0.057747632787077605" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.02861399395287" y="0" width="0.057747632787077605" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.08636162673996" y="0" width="0.057475238292798936" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.14383686503275" y="0" width="0.057475238292798936" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.20131210332555" y="0" width="0.05665805480996293" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.2579701581355" y="0" width="0.05638566031568427" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.31435581845119" y="0" width="0.0561132658214056" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.3704690842726" y="0" width="0.05584087132712693" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.42630995559972" y="0" width="0.05556847683284826" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.48187843243257" y="0" width="0.05529608233856959" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.53717451477114" y="0" width="0.05475129335001226" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.59192580812115" y="0" width="0.05447889885573359" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.64640470697688" y="0" width="0.05420650436145492" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.70061121133834" y="0" width="0.05420650436145492" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.7548177156998" y="0" width="0.053934109867176255" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="116.80875182556697" y="0" width="0.05338932087861892" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.8621411464456" y="0" width="0.053116926384340256" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.91525807282993" y="0" width="0.053116926384340256" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="116.96837499921426" y="0" width="0.05284453189006158" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.02121953110432" y="0" width="0.05229974290150425" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.07351927400583" y="0" width="0.05202734840722558" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.12554662241305" y="0" width="0.05202734840722558" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.17757397082028" y="0" width="0.051754953912946905" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.22932892473322" y="0" width="0.051754953912946905" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.28108387864617" y="0" width="0.051754953912946905" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.33283883255912" y="0" width="0.05148255941866824" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.38432139197779" y="0" width="0.051210164924389574" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.43553155690218" y="0" width="0.051210164924389574" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.48674172182656" y="0" width="0.050665375935832244" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.5374070977624" y="0" width="0.04984819245299624" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="117.5872552902154" y="0" width="0.04957579795871757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.63683108817412" y="0" width="0.04930340346443889" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.68613449163855" y="0" width="0.04903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.73516550060872" y="0" width="0.04875861447588156" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.7839241150846" y="0" width="0.04848621998160289" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.8324103350662" y="0" width="0.04821382548732423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.88062416055352" y="0" width="0.047941430993045556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.92856559154657" y="0" width="0.047941430993045556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="117.97650702253962" y="0" width="0.047941430993045556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.02444845353267" y="0" width="0.047396642004488225" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.07184509553716" y="0" width="0.047124247510209556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.11896934304737" y="0" width="0.047124247510209556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.16609359055758" y="0" width="0.047124247510209556" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.21321783806779" y="0" width="0.04657945852165222" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.25979729658944" y="0" width="0.04657945852165222" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="118.30637675511109" y="0" width="0.04630706402737355" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.35268381913846" y="0" width="0.04630706402737355" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.39899088316584" y="0" width="0.04630706402737355" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.44529794719321" y="0" width="0.04630706402737355" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.49160501122059" y="0" width="0.04603466953309488" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.53763968075368" y="0" width="0.04603466953309488" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.58367435028678" y="0" width="0.04548988054453754" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.62916423083132" y="0" width="0.04521748605025888" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.67438171688158" y="0" width="0.04494509155598021" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.71932680843756" y="0" width="0.04494509155598021" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.76427189999353" y="0" width="0.04494509155598021" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.8092169915495" y="0" width="0.04494509155598021" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.85416208310548" y="0" width="0.04494509155598021" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.89910717466145" y="0" width="0.044672697061701544" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="118.94377987172315" y="0" width="0.044672697061701544" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="118.98845256878485" y="0" width="0.044672697061701544" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.03312526584655" y="0" width="0.04440030256742288" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.07752556841398" y="0" width="0.044127908073144206" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.12165347648713" y="0" width="0.04385551357886554" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.16550899006599" y="0" width="0.043583119084586876" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.20909210915057" y="0" width="0.04331072459030821" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.25240283374089" y="0" width="0.04331072459030821" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.2957135583312" y="0" width="0.04331072459030821" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.33902428292151" y="0" width="0.04276593560175087" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.38179021852326" y="0" width="0.0424935411074722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.42428375963074" y="0" width="0.0424935411074722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.46677730073822" y="0" width="0.0424935411074722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.50927084184569" y="0" width="0.0424935411074722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.55176438295317" y="0" width="0.04222114661319353" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.59398552956635" y="0" width="0.04222114661319353" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="119.63620667617954" y="0" width="0.04194875211891487" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.67815542829845" y="0" width="0.04194875211891487" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.72010418041737" y="0" width="0.041131568636078863" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.76123574905344" y="0" width="0.041131568636078863" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.80236731768952" y="0" width="0.040859174141800195" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.84322649183132" y="0" width="0.040859174141800195" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.88408566597312" y="0" width="0.04058677964752152" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.92467244562064" y="0" width="0.04058677964752152" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="119.96525922526817" y="0" width="0.04031438515324286" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.00557361042141" y="0" width="0.04031438515324286" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.04588799557465" y="0" width="0.04031438515324286" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.08620238072788" y="0" width="0.04031438515324286" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.12651676588112" y="0" width="0.04004199065896419" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.16655875654008" y="0" width="0.04004199065896419" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.20660074719905" y="0" width="0.03922480717612818" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="120.24582555437517" y="0" width="0.03922480717612818" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.2850503615513" y="0" width="0.03922480717612818" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.32427516872743" y="0" width="0.03922480717612818" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.36349997590355" y="0" width="0.03895241268184951" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.4024523885854" y="0" width="0.03895241268184951" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.44140480126725" y="0" width="0.03868001818757085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.48008481945483" y="0" width="0.03868001818757085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.51876483764241" y="0" width="0.03868001818757085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.55744485582998" y="0" width="0.03840762369329218" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.59585247952327" y="0" width="0.03840762369329218" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.63426010321656" y="0" width="0.03840762369329218" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.67266772690985" y="0" width="0.038135229199013514" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.71080295610886" y="0" width="0.037862834704734845" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.7486657908136" y="0" width="0.03731804571617751" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.78598383652978" y="0" width="0.037045651221898845" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="120.82302948775168" y="0" width="0.037045651221898845" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.86007513897358" y="0" width="0.037045651221898845" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.89712079019549" y="0" width="0.037045651221898845" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.93416644141739" y="0" width="0.03677325672762017" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.970939698145" y="0" width="0.03677325672762017" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.00771295487262" y="0" width="0.03677325672762017" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.04448621160023" y="0" width="0.0365008622333415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.08098707383357" y="0" width="0.0365008622333415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.11748793606691" y="0" width="0.03622846773906284" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.15371640380597" y="0" width="0.03622846773906284" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.18994487154504" y="0" width="0.03622846773906284" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.2261733392841" y="0" width="0.03595607324478417" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.26212941252889" y="0" width="0.03595607324478417" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.29808548577368" y="0" width="0.03595607324478417" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.33404155901847" y="0" width="0.0356836787505055" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="121.36972523776898" y="0" width="0.0356836787505055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.40540891651948" y="0" width="0.0356836787505055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.44109259526998" y="0" width="0.0356836787505055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.47677627402048" y="0" width="0.035138889761948164" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.51191516378243" y="0" width="0.035138889761948164" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.54705405354439" y="0" width="0.035138889761948164" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.58219294330634" y="0" width="0.035138889761948164" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.61733183306829" y="0" width="0.034866495267669495" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.65219832833596" y="0" width="0.034866495267669495" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.68706482360362" y="0" width="0.034866495267669495" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.72193131887128" y="0" width="0.03459410077339083" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.75652541964467" y="0" width="0.03459410077339083" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.79111952041806" y="0" width="0.03432170627911216" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.82544122669718" y="0" width="0.03432170627911216" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.8597629329763" y="0" width="0.03432170627911216" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="121.89408463925541" y="0" width="0.03404931178483349" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.92813395104024" y="0" width="0.03377691729055483" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.96191086833079" y="0" width="0.03377691729055483" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="121.99568778562134" y="0" width="0.03350452279627616" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.02919230841762" y="0" width="0.03350452279627616" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.0626968312139" y="0" width="0.03350452279627616" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.09620135401018" y="0" width="0.03350452279627616" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.12970587680645" y="0" width="0.03295973380771882" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.16266561061417" y="0" width="0.03295973380771882" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.19562534442188" y="0" width="0.03295973380771882" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.2285850782296" y="0" width="0.03268733931344015" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.26127241754304" y="0" width="0.03268733931344015" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.29395975685648" y="0" width="0.03268733931344015" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.32664709616992" y="0" width="0.03268733931344015" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.35933443548336" y="0" width="0.03241494481916149" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="122.39174938030253" y="0" width="0.03241494481916149" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.4241643251217" y="0" width="0.03241494481916149" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.45657926994086" y="0" width="0.03214255032488282" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.48872182026574" y="0" width="0.03214255032488282" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.52086437059062" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.55273452642122" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.58460468225182" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.61647483808242" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.64834499391303" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.68021514974363" y="0" width="0.03187015583060415" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.71208530557423" y="0" width="0.03159776133632548" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.74368306691056" y="0" width="0.03159776133632548" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.77528082824689" y="0" width="0.031325366842046815" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.80660619508893" y="0" width="0.031325366842046815" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.83793156193097" y="0" width="0.031052972347768146" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="122.86898453427874" y="0" width="0.031052972347768146" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.9000375066265" y="0" width="0.03050818335921081" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.93054568998572" y="0" width="0.03023578886493214" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.96078147885065" y="0" width="0.03023578886493214" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.99101726771558" y="0" width="0.03023578886493214" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.0212530565805" y="0" width="0.03023578886493214" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.05148884544543" y="0" width="0.029963394370653474" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.08145223981609" y="0" width="0.02969099987637481" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.11114323969247" y="0" width="0.02969099987637481" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.14083423956885" y="0" width="0.02969099987637481" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.17052523944523" y="0" width="0.02969099987637481" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.2002162393216" y="0" width="0.029418605382096136" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.2296348447037" y="0" width="0.02914621088781747" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.25878105559151" y="0" width="0.02914621088781747" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.28792726647933" y="0" width="0.02914621088781747" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="123.31707347736715" y="0" width="0.028873816393538802" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.34594729376069" y="0" width="0.028601421899260137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.37454871565994" y="0" width="0.028601421899260137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.4031501375592" y="0" width="0.0280566329107028" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.4312067704699" y="0" width="0.02778423841642413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.45899100888633" y="0" width="0.02778423841642413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.48677524730276" y="0" width="0.027511843922145462" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.5142870912249" y="0" width="0.027511843922145462" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.54179893514704" y="0" width="0.027511843922145462" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.56931077906918" y="0" width="0.027511843922145462" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.59682262299133" y="0" width="0.027511843922145462" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.62433446691347" y="0" width="0.026967054933588128" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.65130152184706" y="0" width="0.026967054933588128" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.67826857678065" y="0" width="0.02642226594503079" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.70469084272568" y="0" width="0.02642226594503079" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="123.73111310867071" y="0" width="0.026149871450752125" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.75726298012147" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.78314045707793" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.8090179340344" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.83489541099087" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.86077288794733" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.8866503649038" y="0" width="0.025877476956473452" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.91252784186027" y="0" width="0.025332687967916122" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.93786052982819" y="0" width="0.02506029347363745" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.96292082330183" y="0" width="0.024787898979358784" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="123.98770872228118" y="0" width="0.024787898979358784" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.01249662126054" y="0" width="0.024515504485080115" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.03701212574562" y="0" width="0.024515504485080115" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.0615276302307" y="0" width="0.024515504485080115" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.08604313471578" y="0" width="0.024515504485080115" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.11055863920086" y="0" width="0.024515504485080115" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.13507414368594" y="0" width="0.024243109990801447" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.15931725367675" y="0" width="0.024243109990801447" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.18356036366755" y="0" width="0.023970715496522778" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.20753107916407" y="0" width="0.023970715496522778" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.23150179466059" y="0" width="0.023970715496522778" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.2554725101571" y="0" width="0.023698321002244113" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.27917083115935" y="0" width="0.023698321002244113" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.30286915216159" y="0" width="0.023425926507965444" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.32629507866956" y="0" width="0.023425926507965444" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.34972100517753" y="0" width="0.023425926507965444" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.3731469316855" y="0" width="0.023425926507965444" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.39657285819347" y="0" width="0.023153532013686775" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.41972639020715" y="0" width="0.023153532013686775" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.44287992222083" y="0" width="0.023153532013686775" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.4660334542345" y="0" width="0.02288113751940811" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.48891459175391" y="0" width="0.02288113751940811" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.51179572927332" y="0" width="0.02288113751940811" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.53467686679272" y="0" width="0.02288113751940811" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.55755800431213" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.58016674733726" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.60277549036239" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.62538423338752" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.64799297641265" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.67060171943778" y="0" width="0.02260874302512944" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.69321046246291" y="0" width="0.022336348530850772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.71554681099377" y="0" width="0.022336348530850772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.73788315952463" y="0" width="0.022336348530850772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.76021950805548" y="0" width="0.022336348530850772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.78255585658634" y="0" width="0.022336348530850772" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.8048922051172" y="0" width="0.022336348530850772" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.82722855364806" y="0" width="0.022063954036572103" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.84929250768462" y="0" width="0.022063954036572103" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.87135646172119" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.89314802126349" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.91493958080578" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.93673114034807" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.95852269989037" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.98031425943266" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.00210581897495" y="0" width="0.021791559542293438" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.02389737851725" y="0" width="0.021519165048014766" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.04541654356527" y="0" width="0.021519165048014766" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.06693570861329" y="0" width="0.0212467705537361" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.08818247916702" y="0" width="0.0212467705537361" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.10942924972075" y="0" 
width="0.020974376059457435" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.1304036257802" y="0" width="0.020974376059457435" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.15137800183966" y="0" width="0.020701981565178763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.17207998340484" y="0" width="0.020701981565178763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.19278196497002" y="0" width="0.020701981565178763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.2134839465352" y="0" width="0.020701981565178763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.23418592810039" y="0" width="0.020701981565178763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.25488790966557" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.27531749673646" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.29574708380736" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.31617667087825" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.33660625794914" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.35703584502004" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.37746543209093" y="0" width="0.020429587070900097" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.39789501916182" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="125.41805221173844" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.43820940431506" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.45836659689168" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.4785237894683" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.49868098204492" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.51883817462154" y="0" width="0.02015719257662143" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.53899536719815" y="0" width="0.01988479808234276" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.5588801652805" y="0" width="0.01961240358806409" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.57849256886857" y="0" width="0.01961240358806409" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.59810497245664" y="0" width="0.01961240358806409" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.61771737604471" y="0" width="0.01961240358806409" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.63732977963278" y="0" width="0.019340009093785426" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.65666978872656" y="0" width="0.019340009093785426" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.67600979782034" y="0" width="0.019340009093785426" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.69534980691412" y="0" width="0.019340009093785426" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="125.7146898160079" y="0" width="0.019340009093785426" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.73402982510169" y="0" width="0.019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.7530974397012" y="0" width="0.019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.7721650543007" y="0" width="0.019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.79123266890021" y="0" width="0.019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.81030028349971" y="0" width="0.018795220105228088" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.82909550360495" y="0" width="0.018795220105228088" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.84789072371018" y="0" width="0.018522825610949423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.86641354932112" y="0" width="0.018522825610949423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.88493637493207" y="0" width="0.018522825610949423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.90345920054301" y="0" width="0.018522825610949423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.92198202615396" y="0" width="0.018522825610949423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.9405048517649" y="0" width="0.01825043111667075" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.95875528288157" y="0" width="0.01825043111667075" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="125.97700571399824" y="0" width="0.01825043111667075" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="125.99525614511491" y="0" width="0.01825043111667075" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.01350657623158" y="0" width="0.01825043111667075" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.03175700734825" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.04973504397064" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.06771308059304" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.08569111721543" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.10366915383783" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.12164719046022" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.13962522708262" y="0" width="0.017978036622392085" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.15760326370501" y="0" width="0.017705642128113416" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.17530890583312" y="0" width="0.017705642128113416" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.19301454796123" y="0" width="0.017705642128113416" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.21072019008933" y="0" width="0.017705642128113416" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.22842583221744" y="0" width="0.017433247633834748" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.24585907985127" y="0" width="0.017433247633834748" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="126.2632923274851" y="0" width="0.017433247633834748" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.28072557511894" y="0" width="0.017433247633834748" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.29815882275277" y="0" width="0.01716085313955608" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.31531967589233" y="0" width="0.01716085313955608" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.33248052903188" y="0" width="0.01716085313955608" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.34964138217144" y="0" width="0.01716085313955608" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.366802235311" y="0" width="0.016888458645277413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.38369069395628" y="0" width="0.016888458645277413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.40057915260157" y="0" width="0.016888458645277413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.41746761124685" y="0" width="0.016888458645277413" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.43435606989213" y="0" width="0.016616064150998745" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.45097213404313" y="0" width="0.016616064150998745" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.46758819819412" y="0" width="0.016616064150998745" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.48420426234512" y="0" width="0.016616064150998745" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.50082032649611" y="0" width="0.016616064150998745" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.5174363906471" y="0" width="0.016343669656720076" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.53378006030383" y="0" width="0.016343669656720076" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.55012372996055" y="0" width="0.016343669656720076" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.56646739961727" y="0" width="0.016343669656720076" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.58281106927399" y="0" width="0.016343669656720076" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.59915473893071" y="0" width="0.01607127516244141" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.61522601409315" y="0" width="0.01607127516244141" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.6312972892556" y="0" width="0.01579888066816274" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.64709616992376" y="0" width="0.01579888066816274" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.66289505059191" y="0" width="0.01579888066816274" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.67869393126007" y="0" width="0.01579888066816274" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.69449281192823" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.71001929810211" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.725545784276" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.74107227044988" y="0" width="0.015526486173884073" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.75659875662376" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.77212524279764" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.78765172897153" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.80317821514541" y="0" width="0.015526486173884073" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.81870470131929" y="0" width="0.015254091679605406" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.8339587929989" y="0" width="0.015254091679605406" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.84921288467851" y="0" width="0.014981697185326737" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.86419458186383" y="0" width="0.014981697185326737" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.87917627904915" y="0" width="0.014981697185326737" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.89415797623447" y="0" width="0.014709302691048068" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.90886727892551" y="0" width="0.014709302691048068" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.92357658161656" y="0" width="0.014709302691048068" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.9382858843076" y="0" width="0.014709302691048068" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.95299518699865" y="0" width="0.014709302691048068" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.9677044896897" y="0" width="0.014436908196769401" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.98214139788647" y="0" width="0.014436908196769401" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.99657830608324" y="0" width="0.014436908196769401" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.01101521428001" y="0" width="0.014436908196769401" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.02545212247678" y="0" width="0.014436908196769401" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.03988903067355" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.05405354437605" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.06821805807854" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.08238257178104" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.09654708548354" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.11071159918603" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.12487611288853" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.13904062659103" y="0" width="0.014164513702490732" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.15320514029352" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.16709725950173" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.18098937870994" y="0" 
width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.19488149791815" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.20877361712635" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.22266573633456" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.23655785554277" y="0" width="0.013892119208212065" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.25044997475098" y="0" width="0.013619724713933398" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.26406969946491" y="0" width="0.013619724713933398" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.27768942417885" y="0" width="0.013619724713933398" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.29130914889278" y="0" width="0.013619724713933398" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.30492887360671" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.31827620382637" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.33162353404603" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.34497086426569" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.35831819448535" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.37166552470501" y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.38501285492467" 
y="0" width="0.01334733021965473" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.39836018514433" y="0" width="0.013074935725376062" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.4114351208697" y="0" width="0.013074935725376062" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.42451005659507" y="0" width="0.013074935725376062" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.43758499232044" y="0" width="0.013074935725376062" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.45065992804581" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.4634624692769" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.476265010508" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.4890675517391" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.5018700929702" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.51467263420129" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.52747517543239" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.54027771666348" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.55308025789458" y="0" width="0.012802541231097394" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.56588279912567" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="127.5784129458625" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.59094309259932" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.60347323933614" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.61600338607296" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.62853353280978" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.6410636795466" y="0" width="0.012530146736818725" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.65359382628343" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.66585157852596" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.67810933076849" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.69036708301103" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.70262483525356" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.71488258749609" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.72714033973863" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.73939809198116" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.75165584422369" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="127.76391359646622" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.77617134870876" y="0" width="0.012257752242540058" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.78842910095129" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.80041445869955" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.81239981644781" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.82438517419607" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.83637053194433" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.84835588969258" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.86034124744084" y="0" width="0.011985357748261389" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.8723266051891" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.88403956844309" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.89575253169707" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.90746549495105" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.91917845820504" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.93089142145902" y="0" width="0.011712963253982722" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="127.94260438471301" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.95404495347272" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.96548552223243" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.97692609099214" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.98836665975185" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="127.99980722851156" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.01124779727127" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.02268836603096" y="0" width="0.011440568759704055" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.03412893479066" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.0452971090561" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.05646528332153" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.06763345758696" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.0788016318524" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.08996980611784" y="0" width="0.011168174265425386" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.10113798038327" y="0" width="0.010895779771146719" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.11203376015442" y="0" width="0.010895779771146719" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.12292953992556" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.13355292520242" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.14417631047928" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.15479969575614" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.165423081033" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.17604646630986" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.18666985158671" y="0" width="0.01062338527686805" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.19729323686357" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.20764422764617" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.21799521842877" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.22834620921137" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.23869719999396" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.24904819077656" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.25939918155916" y="0" width="0.010350990782589381" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.26975017234176" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.28010116312436" y="0" width="0.010350990782589381" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.29045215390695" y="0" width="0.010078596288310714" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.30053075019526" y="0" width="0.009806201794032045" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.31033695198929" y="0" width="0.009806201794032045" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.3201431537833" y="0" width="0.009806201794032045" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.32994935557733" y="0" width="0.009806201794032045" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.33975555737135" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.3492893646711" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.35882317197087" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.36835697927063" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.3778907865704" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.38742459387015" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.3969584011699" y="0" width="0.009533807299753378" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.40649220846967" y="0" 
width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.41575362127514" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.42501503408062" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.4342764468861" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.44353785969156" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.45279927249703" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.4620606853025" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.47132209810798" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.48058351091345" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.48984492371892" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.4991063365244" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.50836774932986" y="0" width="0.009261412805474711" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.51762916213534" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.52661818044652" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.5356071987577" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.5445962170689" 
y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.55358523538007" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.56257425369125" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.57156327200244" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.58055229031362" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.5895413086248" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.598530326936" y="0" width="0.008989018311196043" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.60751934524717" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.6162359690641" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.62495259288102" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.63366921669794" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.64238584051486" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.65110246433179" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.6598190881487" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.66853571196563" y="0" width="0.008716623816917374" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="128.67725233578255" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.6856965651052" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.69414079442782" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.70258502375046" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.7110292530731" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.71947348239573" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.72791771171836" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.736361941041" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.74480617036363" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.75325039968627" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.7616946290089" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.77013885833153" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.77858308765417" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.7870273169768" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.79547154629944" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="128.80391577562207" y="0" width="0.008444229322638707" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.8123600049447" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.82053183977305" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.8287036746014" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.83687550942975" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.8450473442581" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.85321917908644" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.86139101391478" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.86956284874313" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.87773468357148" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.88590651839982" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.89407835322817" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.9022501880565" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.91042202288486" y="0" width="0.008171834828360038" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.9185938577132" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="128.9264932980473" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.93439273838138" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.94229217871546" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.95019161904955" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.95809105938363" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.96599049971772" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.9738899400518" y="0" width="0.00789944033408137" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.9817893803859" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.9894164262257" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.99704347206549" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.00467051790528" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.01229756374508" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.01992460958488" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.02755165542467" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.03517870126447" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="129.04280574710427" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.05043279294406" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.05805983878386" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.06568688462366" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.07331393046346" y="0" width="0.007627045839802703" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.08094097630325" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.0882956276488" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.09565027899433" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.10300493033986" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.1103595816854" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.11771423303094" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.12506888437647" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.132423535722" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.13977818706755" y="0" width="0.007354651345524034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.14713283841309" y="0" width="0.007082256851245366" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.15421509526433" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.16129735211558" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.16837960896683" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.17546186581808" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.18254412266933" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.18962637952058" y="0" width="0.007082256851245366" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.19670863637182" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.20351849872878" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.21032836108574" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.2171382234427" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.22394808579966" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.23075794815662" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.23756781051358" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.24437767287054" y="0" width="0.006809862356966699" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.2511875352275" y="0" width="0.006809862356966699" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.25799739758446" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.26453486544716" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.27107233330986" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.27760980117256" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.28414726903526" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.29068473689796" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.29722220476066" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.30375967262336" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.31029714048606" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.31683460834876" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.32337207621146" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.32990954407416" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.33644701193685" y="0" width="0.006537467862688031" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.34298447979955" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.34924955316797" y="0" 
width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.35551462653638" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.3617796999048" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.3680447732732" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.3743098466416" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.38057492001002" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.38683999337843" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.39310506674684" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.39937014011525" y="0" width="0.006265073368409362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.40563521348366" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.41162789235779" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.4176205712319" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.42361325010603" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.42960592898015" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.43559860785427" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="129.4415912867284" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.44758396560252" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.45357664447664" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.45956932335076" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.46556200222489" y="0" width="0.0059926788741306945" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.471554681099" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.47727496547887" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.48299524985873" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.4887155342386" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.49443581861846" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.50015610299832" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.50587638737818" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.51159667175804" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.5173169561379" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.52303724051777" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="129.52875752489763" y="0" width="0.005720284379852027" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.5344778092775" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.53992569916306" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.54537358904864" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.5508214789342" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.55626936881978" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.56171725870536" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.56716514859093" y="0" width="0.0054478898855733595" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.5726130384765" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.5777885338678" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.58296402925907" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.58813952465036" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.59331502004164" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.59849051543293" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.6036660108242" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect 
class="fill-white cursor-pointer" x="129.6088415062155" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.61401700160678" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.61919249699807" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.62436799238935" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.62954348778064" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.63471898317192" y="0" width="0.005175495391294691" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.6398944785632" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.64479757946023" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.64970068035726" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.65460378125428" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.6595068821513" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.66440998304833" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.66931308394535" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.67421618484238" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.6791192857394" y="0" width="0.004903100897016023" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.68402238663643" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.68892548753345" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.69382858843048" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.6987316893275" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.70363479022453" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.70853789112155" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.71344099201858" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7183440929156" y="0" width="0.004903100897016023" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.72324719381263" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.72787790021536" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7325086066181" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.73713931302083" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.74177001942357" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7464007258263" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.75103143222904" y="0" width="0.004630706402737356" 
height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.75566213863178" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7602928450345" y="0" width="0.004630706402737356" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.76492355143725" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7692818633457" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.77364017525414" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7779984871626" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.78235679907104" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.7867151109795" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.79107342288793" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.79543173479638" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.79979004670483" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.80414835861328" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.80850667052172" y="0" width="0.004358311908458687" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.81286498243017" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.81695089984436" y="0" 
width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.82103681725854" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.82512273467273" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.82920865208692" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8332945695011" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8373804869153" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.84146640432948" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.84555232174367" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.84963823915785" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.85372415657204" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.85781007398623" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.86189599140042" y="0" width="0.004085917414180019" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8659819088146" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8697954317345" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8736089546544" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" 
x="129.8774224775743" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8812360004942" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8850495234141" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.888863046334" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8926765692539" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.8964900921738" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9003036150937" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9041171380136" y="0" width="0.0038135229199013515" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9079306609335" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9114717893591" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9150129177847" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.91855404621032" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.92209517463593" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.92563630306154" y="0" width="0.003541128425622683" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.92917743148715" y="0" width="0.0032687339313440156" height="28" fill-opacity="0"></rect><rect class="fill-white 
cursor-pointer" x="129.9324461654185" y="0" width="0.0032687339313440156" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.93571489934985" y="0" width="0.0032687339313440156" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9389836332812" y="0" width="0.0032687339313440156" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.94225236721255" y="0" width="0.0032687339313440156" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9455211011439" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.94851744058096" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.95151378001802" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.95451011945508" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.95750645889214" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9605027983292" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.96349913776626" y="0" width="0.0029963394370653472" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.96649547720332" y="0" width="0.0027239449427866797" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9692194221461" y="0" width="0.0027239449427866797" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.97194336708887" y="0" width="0.0027239449427866797" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.97466731203164" y="0" width="0.0024515504485080114" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.97711886248015" y="0" width="0.0024515504485080114" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.97957041292867" y="0" width="0.0024515504485080114" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.98202196337718" y="0" width="0.0021791559542293434" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9842011193314" y="0" width="0.0021791559542293434" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.98638027528563" y="0" width="0.0021791559542293434" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.98855943123985" y="0" width="0.0019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9904661926998" y="0" width="0.0019067614599506757" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.99237295415978" y="0" width="0.0016343669656720078" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.99400732112545" y="0" width="0.0016343669656720078" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.99564168809113" y="0" width="0.0013619724713933399" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9970036605625" y="0" width="0.0013619724713933399" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.9983656330339" y="0" width="0.0008171834828360039" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.99918281651674" y="0" width="0.0008171834828360039" height="28" fill-opacity="0"></rect></g></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 max-w-full overflow-hidden text-ellipsis whitespace-nowrap">991 
values</div></div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">size <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="31" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="33" y="17.42678695350451" width="31" height="12.57321304649549" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="31" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="99" y="25" 
width="31" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="33" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="32" y="0" width="33" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="33" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="98" y="0" width="33" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">4</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">7</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">content <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 
hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" 
y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">666</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">1M</div> </div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th><th class="h-full max-w-sm p-2 text-left relative w-auto"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">license <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] 
w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['·']">classes</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><defs><clipPath id="rounded-bar"><rect x="0" y="0" width="130" height="8" rx="4"></rect></clipPath><pattern id="hatching" patternUnits="userSpaceOnUse" patternTransform="rotate(-45)" height="1" width="5"><line y1="0" class="stroke-gray-400 dark:stroke-gray-500/80" stroke-width="3" y2="1" x1="2" x2="2"></line></pattern><pattern id="hatching-faded" patternUnits="userSpaceOnUse" patternTransform="rotate(-45)" height="1" width="5"><line y1="0" class="stroke-gray-100 dark:stroke-gray-500/20" stroke-width="3" y2="1" x1="2" x2="2"></line></pattern></defs><g height="8" style="transform: translateY(20px)" clip-path="url(#rounded-bar)"><g style="transform: scaleX(1.0153846153846153) translateX(-1px)"><g><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="1" y="0" width="25.82727674652016" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="28.82727674652016" y="0" width="20.86642821671706" height="8" 
fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="51.693704963237224" y="0" width="20.71388729992101" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="74.40759226315824" y="0" width="19.212176452962712" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="95.61976871612094" y="0" width="11.357136421448763" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="108.9769051375697" y="0" width="10.617585369482178" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="121.59449050705189" y="0" width="0.0339696887788139" height="8" fill-opacity="1"></rect><rect class="fill-indigo-500 dark:fill-indigo-600/80" x="123.6284601958307" y="0" width="0.018987991593486964" height="8" fill-opacity="1"></rect></g></g></g><g style="transform: scaleX(1.0153846153846153) translateX(-1px)"><g><rect class="fill-white cursor-pointer" x="0" y="0" width="27.82727674652016" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="27.82727674652016" y="0" width="22.86642821671706" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="50.693704963237224" y="0" width="22.71388729992101" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="73.40759226315824" y="0" width="21.212176452962712" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="94.61976871612094" y="0" width="13.357136421448763" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="107.9769051375697" y="0" width="12.617585369482178" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="120.59449050705189" y="0" width="2.033969688778814" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="122.6284601958307" y="0" width="2.018987991593487" height="28" 
fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="124.64744818742419" y="0" width="1.9966516430626362" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="126.64409983048682" y="0" width="1.5412080486287034" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="128.18530787911553" y="0" width="1.1081008027256212" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.29340868184116" y="0" width="0.23834518249383446" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.531753864335" y="0" width="0.1999375588005423" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.73169142313554" y="0" width="0.18495586161521554" height="28" fill-opacity="0"></rect><rect class="fill-white cursor-pointer" x="129.91664728475075" y="0" width="0.08335271524927239" height="28" fill-opacity="0"></rect></g></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 max-w-full overflow-hidden text-ellipsis whitespace-nowrap">15 values</div></div></div></div></div> <div class="absolute right-0 top-0 z-10 h-full w-1 cursor-col-resize hover:bg-indigo-100 active:bg-indigo-500 dark:hover:bg-indigo-800 dark:active:bg-indigo-600/80"><div class="absolute right-0 top-0 h-full w-1"></div> </div> </th></tr></thead> <tbody class="h-16 overflow-scroll"><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475100"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Peddle/hue</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apps/sqoop/src/sqoop/urls.py</span></div> 
</div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">33</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2308</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from django.conf.urls import patterns, url urlpatterns = patterns('sqoop.views', url(r'^$', 'app', name='index') ) urlpatterns += patterns('sqoop.api', url(r'^api/autocomplete/databases/?$', 'autocomplete', name='autocomplete_databases'), url(r'^api/autocomplete/databases/(?P<database>.+)/tables/?$', 'autocomplete', name='autocomplete_tables'), url(r'^api/autocomplete/databases/(?P<database>.+)/tables/(?P<table>.+)/columns/?$', 'autocomplete', name='autocomplete_fields'), url(r'^api/driver/?$', 'driver', name='driver'), url(r'^api/connectors', 'connectors', name='connectors'), url(r'^api/connectors/(?P<connector_id>\d+)/?$', 'connector', name='connector'), url(r'^api/links/?$', 'links', name='links'), url(r'^api/links/(?P<link_id>\d+)/?$', 'link', name='link'), url(r'^api/links/(?P<link_id>\d+)/clone/?$', 'link_clone', name='link_clone'), url(r'^api/links/(?P<link_id>\d+)/delete/?$', 'link_delete', name='link_delete'), url(r'^api/jobs/?$', 'jobs', name='jobs'), url(r'^api/jobs/(?P<job_id>\d+)/?$', 'job', name='job'), url(r'^api/jobs/(?P<job_id>\d+)/clone/?$', 'job_clone', name='job_clone'), url(r'^api/jobs/(?P<job_id>\d+)/delete/?$', 'job_delete', name='job_delete'), url(r'^api/jobs/(?P<job_id>\d+)/start/?$', 'job_start', name='job_start'), url(r'^api/jobs/(?P<job_id>\d+)/stop/?$', 'job_stop', name='job_stop'), url(r'^api/jobs/(?P<job_id>\d+)/status/?$', 'job_status', name='job_status'), url(r'^api/submissions/?$', 'submissions', name='submissions') ) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475101"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">fangxingli/hue</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/management/commands/openid_cleanup.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">45</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1691</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># django-openid-auth - OpenID integration for django.contrib.auth # # Copyright (C) 2009-2013 Canonical Ltd. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from django.core.management.base import NoArgsCommand from django_openid_auth.store import DjangoOpenIDStore class Command(NoArgsCommand): help = 'Clean up stale OpenID associations and nonces' def handle_noargs(self, **options): store = DjangoOpenIDStore() store.cleanup() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475102"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Vishluck/sympy</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">sympy/simplify/tests/test_cse.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">47</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">12493</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" 
dir="auto"> <div> <span class="block ">import itertools from sympy import (Add, Pow, Symbol, exp, sqrt, symbols, sympify, cse, Matrix, S, cos, sin, Eq, Function, Tuple, RootOf, IndexedBase, Idx, Piecewise, O) from sympy.simplify.cse_opts import sub_pre, sub_post from sympy.functions.special.hyper import meijerg from sympy.simplify import cse_main, cse_opts from sympy.utilities.pytest import XFAIL, raises from sympy.matrices import (eye, SparseMatrix, MutableDenseMatrix, MutableSparseMatrix, ImmutableDenseMatrix, ImmutableSparseMatrix) from sympy.core.compatibility import range w, x, y, z = symbols('w,x,y,z') x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = symbols('x:13') def test_numbered_symbols(): ns = cse_main.numbered_symbols(prefix='y') assert list(itertools.islice( ns, 0, 10)) == [Symbol('y%s' % i) for i in range(0, 10)] ns = cse_main.numbered_symbols(prefix='y') assert list(itertools.islice( ns, 10, 20)) == [Symbol('y%s' % i) for i in range(10, 20)] ns = cse_main.numbered_symbols() assert list(itertools.islice( ns, 0, 10)) == [Symbol('x%s' % i) for i in range(0, 10)] # Dummy "optimization" functions for testing. def opt1(expr): return expr + y def opt2(expr): return expr*z def test_preprocess_for_cse(): assert cse_main.preprocess_for_cse(x, [(opt1, None)]) == x + y assert cse_main.preprocess_for_cse(x, [(None, opt1)]) == x assert cse_main.preprocess_for_cse(x, [(None, None)]) == x assert cse_main.preprocess_for_cse(x, [(opt1, opt2)]) == x + y assert cse_main.preprocess_for_cse( x, [(opt1, None), (opt2, None)]) == (x + y)*z def test_postprocess_for_cse(): assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y assert cse_main.postprocess_for_cse(x, [(None, None)]) == x assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z # Note the reverse order of application. 
assert cse_main.postprocess_for_cse( x, [(None, opt1), (None, opt2)]) == x*z + y def test_cse_single(): # Simple substitution. e = Add(Pow(x + y, 2), sqrt(x + y)) substs, reduced = cse([e]) assert substs == [(x0, x + y)] assert reduced == [sqrt(x0) + x0**2] def test_cse_single2(): # Simple substitution, test for being able to pass the expression directly e = Add(Pow(x + y, 2), sqrt(x + y)) substs, reduced = cse(e) assert substs == [(x0, x + y)] assert reduced == [sqrt(x0) + x0**2] substs, reduced = cse(Matrix([[1]])) assert isinstance(reduced[0], Matrix) def test_cse_not_possible(): # No substitution possible. e = Add(x, y) substs, reduced = cse([e]) assert substs == [] assert reduced == [x + y] # issue 6329 eq = (meijerg((1, 2), (y, 4), (5,), [], x) + meijerg((1, 3), (y, 4), (5,), [], x)) assert cse(eq) == ([], [eq]) def test_nested_substitution(): # Substitution within a substitution. e = Add(Pow(w*x + y, 2), sqrt(w*x + y)) substs, reduced = cse([e]) assert substs == [(x0, w*x + y)] assert reduced == [sqrt(x0) + x0**2] def test_subtraction_opt(): # Make sure subtraction is optimized. 
e = (x - y)*(z - y) + exp((x - y)*(z - y)) substs, reduced = cse( [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) assert substs == [(x0, (x - y)*(y - z))] assert reduced == [-x0 + exp(-x0)] e = -(x - y)*(z - y) + exp(-(x - y)*(z - y)) substs, reduced = cse( [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) assert substs == [(x0, (x - y)*(y - z))] assert reduced == [x0 + exp(x0)] # issue 4077 n = -1 + 1/x e = n/x/(-n)**2 - 1/n/x assert cse(e, optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) == \ ([], [0]) def test_multiple_expressions(): e1 = (x + y)*z e2 = (x + y)*w substs, reduced = cse([e1, e2]) assert substs == [(x0, x + y)] assert reduced == [x0*z, x0*w] l = [w*x*y + z, w*y] substs, reduced = cse(l) rsubsts, _ = cse(reversed(l)) assert substs == rsubsts assert reduced == [z + x*x0, x0] l = [w*x*y, w*x*y + z, w*y] substs, reduced = cse(l) rsubsts, _ = cse(reversed(l)) assert substs == rsubsts assert reduced == [x1, x1 + z, x0] l = [(x - z)*(y - z), x - z, y - z] substs, reduced = cse(l) rsubsts, _ = cse(reversed(l)) assert substs == [(x0, -z), (x1, x + x0), (x2, x0 + y)] assert rsubsts == [(x0, -z), (x1, x0 + y), (x2, x + x0)] assert reduced == [x1*x2, x1, x2] l = [w*y + w + x + y + z, w*x*y] assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0]) assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0]) assert cse([x + y, x + z]) == ([], [x + y, x + z]) assert cse([x*y, z + x*y, x*y*z + 3]) == \ ([(x0, x*y)], [x0, z + x0, 3 + x0*z]) @XFAIL # CSE of non-commutative Mul terms is disabled def test_non_commutative_cse(): A, B, C = symbols('A B C', commutative=False) l = [A*B*C, A*C] assert cse(l) == ([], l) l = [A*B*C, A*B] assert cse(l) == ([(x0, A*B)], [x0*C, x0]) # Test if CSE of non-commutative Mul terms is disabled def test_bypass_non_commutatives(): A, B, C = symbols('A B C', commutative=False) l = [A*B*C, A*C] assert cse(l) == ([], l) l = [A*B*C, A*B] assert cse(l) == ([], l) l = [B*C, A*B*C] assert cse(l) == ([], l) 
@XFAIL # CSE fails when replacing non-commutative sub-expressions def test_non_commutative_order(): A, B, C = symbols('A B C', commutative=False) x0 = symbols('x0', commutative=False) l = [B+C, A*(B+C)] assert cse(l) == ([(x0, B+C)], [x0, A*x0]) @XFAIL def test_powers(): assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0]) def test_issue_4498(): assert cse(w/(x - y) + z/(y - x), optimizations='basic') == \ ([], [(w - z)/(x - y)]) def test_issue_4020(): assert cse(x**5 + x**4 + x**3 + x**2, optimizations='basic') \ == ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)]) def test_issue_4203(): assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0]) def test_issue_6263(): e = Eq(x*(-x + 1) + x*(x - 1), 0) assert cse(e, optimizations='basic') == ([], [True]) def test_dont_cse_tuples(): from sympy import Subs f = Function("f") g = Function("g") name_val, (expr,) = cse( Subs(f(x, y), (x, y), (0, 1)) + Subs(g(x, y), (x, y), (0, 1))) assert name_val == [] assert expr == (Subs(f(x, y), (x, y), (0, 1)) + Subs(g(x, y), (x, y), (0, 1))) name_val, (expr,) = cse( Subs(f(x, y), (x, y), (0, x + y)) + Subs(g(x, y), (x, y), (0, x + y))) assert name_val == [(x0, x + y)] assert expr == Subs(f(x, y), (x, y), (0, x0)) + \ Subs(g(x, y), (x, y), (0, x0)) def test_pow_invpow(): assert cse(1/x**2 + x**2) == \ ([(x0, x**2)], [x0 + 1/x0]) assert cse(x**2 + (1 + 1/x**2)/x**2) == \ ([(x0, x**2), (x1, 1/x0)], [x0 + x1*(x1 + 1)]) assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \ ([(x0, x**2), (x1, 1/x0)], [x0*(x1 + 1) + x1]) assert cse(cos(1/x**2) + sin(1/x**2)) == \ ([(x0, x**(-2))], [sin(x0) + cos(x0)]) assert cse(cos(x**2) + sin(x**2)) == \ ([(x0, x**2)], [sin(x0) + cos(x0)]) assert cse(y/(2 + x**2) + z/x**2/y) == \ ([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)]) assert cse(exp(x**2) + x**2*cos(1/x**2)) == \ ([(x0, x**2)], [x0*cos(1/x0) + exp(x0)]) assert cse((1 + 1/x**2)/x**2) == \ ([(x0, x**(-2))], [x0*(x0 + 1)]) assert cse(x**(2*y) + x**(-2*y)) == \ ([(x0, x**(2*y))], [x0 + 1/x0]) def test_postprocess(): eq = 
(x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1)) assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)], postprocess=cse_main.cse_separate) == \ [[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)], [x0 + exp(x0/x1) + cos(x1), z - 2, x0*x2]] def test_issue_4499(): # previously, this gave 16 constants from sympy.abc import a, b B = Function('B') G = Function('G') t = Tuple(* (a, a + S(1)/2, 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a - b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b, sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1, sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1), (sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, S(1)/2, z/2, -b + 1, -2*a + b, -2*a)) c = cse(t) ans = ( [(x0, 2*a), (x1, -b), (x2, x1 + 1), (x3, x0 + x2), (x4, sqrt(z)), (x5, B(x0 + x1, x4)), (x6, G(b)), (x7, G(x3)), (x8, -x0), (x9, (x4/2)**(x8 + 1)), (x10, x6*x7*x9*B(b - 1, x4)), (x11, x6*x7*x9*B(b, x4)), (x12, B(x3, x4))], [(a, a + S(1)/2, x0, b, x3, x10*x5, x11*x4*x5, x10*x12*x4, x11*x12, 1, 0, S(1)/2, z/2, x2, b + x8, x8)]) assert ans == c def test_issue_6169(): r = RootOf(x**6 - 4*x**5 - 2, 1) assert cse(r) == ([], [r]) # and a check that the right thing is done with the new # mechanism assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y def test_cse_Indexed(): len_y = 5 y = IndexedBase('y', shape=(len_y,)) x = IndexedBase('x', shape=(len_y,)) Dy = IndexedBase('Dy', shape=(len_y-1,)) i = Idx('i', len_y-1) expr1 = (y[i+1]-y[i])/(x[i+1]-x[i]) expr2 = 1/(x[i+1]-x[i]) replacements, reduced_exprs = cse([expr1, expr2]) assert len(replacements) > 0 @XFAIL def test_cse_MatrixSymbol(): from sympy import MatrixSymbol A = MatrixSymbol('A', 3, 3) y = MatrixSymbol('y', 3, 1) expr1 = (A.T*A).I * A * y expr2 = (A.T*A) * A * y replacements, reduced_exprs = cse([expr1, expr2]) assert len(replacements) > 0 def test_Piecewise(): f = Piecewise((-z + x*y, Eq(y, 0)), 
(-z - x*y, True)) ans = cse(f) actual_ans = ([(x0, -z), (x1, x*y)], [Piecewise((x0+x1, Eq(y, 0)), (x0 - x1, True))]) assert ans == actual_ans def test_ignore_order_terms(): eq = exp(x).series(x,0,3) + sin(y+x**3) - 1 assert cse(eq) == ([], [sin(x**3 + y) + x + x**2/2 + O(x**3)]) def test_name_conflict(): z1 = x0 + y z2 = x2 + x3 l = [cos(z1) + z1, cos(z2) + z2, x0 + x2] substs, reduced = cse(l) assert [e.subs(reversed(substs)) for e in reduced] == l def test_name_conflict_cust_symbols(): z1 = x0 + y z2 = x2 + x3 l = [cos(z1) + z1, cos(z2) + z2, x0 + x2] substs, reduced = cse(l, symbols("x:10")) assert [e.subs(reversed(substs)) for e in reduced] == l def test_symbols_exhausted_error(): l = cos(x+y)+x+y+cos(w+y)+sin(w+y) sym = [x, y, z] with raises(ValueError) as excinfo: cse(l, symbols=sym) def test_issue_7840(): # daveknippers' example C393 = sympify( \ 'Piecewise((C391 - 1.65, C390 < 0.5), (Piecewise((C391 - 1.65, \ C391 > 2.35), (C392, True)), True))' ) C391 = sympify( \ 'Piecewise((2.05*C390**(-1.03), C390 < 0.5), (2.5*C390**(-0.625), True))' ) C393 = C393.subs('C391',C391) # simple substitution sub = {} sub['C390'] = 0.703451854 sub['C392'] = 1.01417794 ss_answer = C393.subs(sub) # cse substitutions,new_eqn = cse(C393) for pair in substitutions: sub[pair[0].name] = pair[1].subs(sub) cse_answer = new_eqn[0].subs(sub) # both methods should be the same assert ss_answer == cse_answer # GitRay's example expr = sympify( "Piecewise((Symbol('ON'), Equality(Symbol('mode'), Symbol('ON'))), \ (Piecewise((Piecewise((Symbol('OFF'), StrictLessThan(Symbol('x'), \ Symbol('threshold'))), (Symbol('ON'), S.true)), Equality(Symbol('mode'), \ Symbol('AUTO'))), (Symbol('OFF'), S.true)), S.true))" ) substitutions, new_eqn = cse(expr) # this Piecewise should be exactly the same assert new_eqn[0] == expr # there should not be any replacements assert len(substitutions) < 1 def test_issue_8891(): for cls in (MutableDenseMatrix, MutableSparseMatrix, ImmutableDenseMatrix, 
ImmutableSparseMatrix): m = cls(2, 2, [x + y, 0, 0, 0]) res = cse([x + y, m]) ans = ([(x0, x + y)], [x0, cls([[x0, 0], [0, 0]])]) assert res == ans assert isinstance(res[1][-1], cls) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475103"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ritchyteam/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">openerp/addons/test_workflow/tests/test_workflow.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">392</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6232</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- import openerp from openerp import SUPERUSER_ID from openerp.tests import common class test_workflows(common.TransactionCase): def check_activities(self, model_name, i, names): """ Check that the record i has workitems in the given activity names. """ instance = self.registry('workflow.instance') workitem = self.registry('workflow.workitem') # Given the workflow instance associated to the record ... 
instance_id = instance.search( self.cr, SUPERUSER_ID, [('res_type', '=', model_name), ('res_id', '=', i)]) self.assertTrue( instance_id, 'A workflow instance is expected.') # ... get all its workitems ... workitem_ids = workitem.search( self.cr, SUPERUSER_ID, [('inst_id', '=', instance_id[0])]) self.assertTrue( workitem_ids, 'The workflow instance should have workitems.') # ... and check the activity the are in against the provided names. workitem_records = workitem.browse( self.cr, SUPERUSER_ID, workitem_ids) self.assertEqual( sorted([item.act_id.name for item in workitem_records]), sorted(names)) def check_value(self, model_name, i, value): """ Check that the record i has the given value. """ model = self.registry(model_name) record = model.read(self.cr, SUPERUSER_ID, [i], ['value'])[0] self.assertEqual(record['value'], value) def test_workflow(self): model = self.registry('test.workflow.model') trigger = self.registry('test.workflow.trigger') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) # a -> b is just a signal. model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b') self.check_activities(model._name, i, ['b']) # b -> c is a trigger (which is False), # so we remain in the b activity. model.trigger(self.cr, SUPERUSER_ID) self.check_activities(model._name, i, ['b']) # b -> c is a trigger (which is set to True). # so we go in c when the trigger is called. 
trigger.write(self.cr, SUPERUSER_ID, [1], {'value': True}) model.trigger(self.cr, SUPERUSER_ID) self.check_activities(model._name, i, ['c']) self.assertEqual( True, True) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_a(self): model = self.registry('test.workflow.model.a') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 0) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_b(self): model = self.registry('test.workflow.model.b') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 1) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_c(self): model = self.registry('test.workflow.model.c') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 0) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_d(self): model = self.registry('test.workflow.model.d') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 1) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_e(self): model = self.registry('test.workflow.model.e') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['b']) self.check_value(model._name, i, 2) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_f(self): model = self.registry('test.workflow.model.f') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 1) model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b') self.check_activities(model._name, i, ['b']) self.check_value(model._name, i, 2) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_g(self): model = self.registry('test.workflow.model.g') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 1) 
model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_h(self): model = self.registry('test.workflow.model.h') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['b', 'c']) self.check_value(model._name, i, 2) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_i(self): model = self.registry('test.workflow.model.i') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['b']) self.check_value(model._name, i, 2) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_j(self): model = self.registry('test.workflow.model.j') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['a']) self.check_value(model._name, i, 1) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_k(self): model = self.registry('test.workflow.model.k') i = model.create(self.cr, SUPERUSER_ID, {}) # Non-determinisitic: can be b or c # self.check_activities(model._name, i, ['b']) # self.check_activities(model._name, i, ['c']) self.check_value(model._name, i, 2) model.unlink(self.cr, SUPERUSER_ID, [i]) def test_workflow_l(self): model = self.registry('test.workflow.model.l') i = model.create(self.cr, SUPERUSER_ID, {}) self.check_activities(model._name, i, ['c', 'c', 'd']) self.check_value(model._name, i, 3) model.unlink(self.cr, SUPERUSER_ID, [i]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475104"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tequa/ammisoft</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div 
class="" dir="auto"> <div> <span class="block ">ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/idlelib/rpc.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">20150</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""RPC Implementation, originally written for the Python Idle IDE For security reasons, GvR requested that Idle's Python execution server process connect to the Idle process, which listens for the connection. Since Idle has only one client per server, this was not a limitation. +---------------------------------+ +-------------+ | SocketServer.BaseRequestHandler | | SocketIO | +---------------------------------+ +-------------+ ^ | register() | | | unregister()| | +-------------+ | ^ ^ | | | | + -------------------+ | | | | +-------------------------+ +-----------------+ | RPCHandler | | RPCClient | | [attribute of RPCServer]| | | +-------------------------+ +-----------------+ The RPCServer handler class is expected to provide register/unregister methods. RPCHandler inherits the mix-in class SocketIO, which provides these methods. See the Idle run.main() docstring for further information on how this was accomplished in Idle. 
""" import sys import os import socket import select import SocketServer import struct import cPickle as pickle import threading import Queue import traceback import copy_reg import types import marshal def unpickle_code(ms): co = marshal.loads(ms) assert isinstance(co, types.CodeType) return co def pickle_code(co): assert isinstance(co, types.CodeType) ms = marshal.dumps(co) return unpickle_code, (ms,) # XXX KBK 24Aug02 function pickling capability not used in Idle # def unpickle_function(ms): # return ms # def pickle_function(fn): # assert isinstance(fn, type.FunctionType) # return repr(fn) copy_reg.pickle(types.CodeType, pickle_code, unpickle_code) # copy_reg.pickle(types.FunctionType, pickle_function, unpickle_function) BUFSIZE = 8*1024 LOCALHOST = '127.0.0.1' class RPCServer(SocketServer.TCPServer): def __init__(self, addr, handlerclass=None): if handlerclass is None: handlerclass = RPCHandler SocketServer.TCPServer.__init__(self, addr, handlerclass) def server_bind(self): "Override TCPServer method, no bind() phase for connecting entity" pass def server_activate(self): """Override TCPServer method, connect() instead of listen() Due to the reversed connection, self.server_address is actually the address of the Idle Client to which we are connecting. """ self.socket.connect(self.server_address) def get_request(self): "Override TCPServer method, return already connected socket" return self.socket, self.server_address def handle_error(self, request, client_address): """Override TCPServer method Error message goes to __stderr__. No error message if exiting normally or socket raised EOF. Other exceptions not handled in server code will cause os._exit. """ try: raise except SystemExit: raise except: erf = sys.__stderr__ print>>erf, '\n' + '-'*40 print>>erf, 'Unhandled server exception!' 
print>>erf, 'Thread: %s' % threading.currentThread().getName() print>>erf, 'Client Address: ', client_address print>>erf, 'Request: ', repr(request) traceback.print_exc(file=erf) print>>erf, '\n*** Unrecoverable, server exiting!' print>>erf, '-'*40 os._exit(0) #----------------- end class RPCServer -------------------- objecttable = {} request_queue = Queue.Queue(0) response_queue = Queue.Queue(0) class SocketIO(object): nextseq = 0 def __init__(self, sock, objtable=None, debugging=None): self.sockthread = threading.currentThread() if debugging is not None: self.debugging = debugging self.sock = sock if objtable is None: objtable = objecttable self.objtable = objtable self.responses = {} self.cvars = {} def close(self): sock = self.sock self.sock = None if sock is not None: sock.close() def exithook(self): "override for specific exit action" os._exit(0) def debug(self, *args): if not self.debugging: return s = self.location + " " + str(threading.currentThread().getName()) for a in args: s = s + " " + str(a) print>>sys.__stderr__, s def register(self, oid, object): self.objtable[oid] = object def unregister(self, oid): try: del self.objtable[oid] except KeyError: pass def localcall(self, seq, request): self.debug("localcall:", request) try: how, (oid, methodname, args, kwargs) = request except TypeError: return ("ERROR", "Bad request format") if oid not in self.objtable: return ("ERROR", "Unknown object id: %r" % (oid,)) obj = self.objtable[oid] if methodname == "__methods__": methods = {} _getmethods(obj, methods) return ("OK", methods) if methodname == "__attributes__": attributes = {} _getattributes(obj, attributes) return ("OK", attributes) if not hasattr(obj, methodname): return ("ERROR", "Unsupported method name: %r" % (methodname,)) method = getattr(obj, methodname) try: if how == 'CALL': ret = method(*args, **kwargs) if isinstance(ret, RemoteObject): ret = remoteref(ret) return ("OK", ret) elif how == 'QUEUE': request_queue.put((seq, (method, args, kwargs))) 
return("QUEUED", None) else: return ("ERROR", "Unsupported message type: %s" % how) except SystemExit: raise except socket.error: raise except: msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\ " Object: %s \n Method: %s \n Args: %s\n" print>>sys.__stderr__, msg % (oid, method, args) traceback.print_exc(file=sys.__stderr__) return ("EXCEPTION", None) def remotecall(self, oid, methodname, args, kwargs): self.debug("remotecall:asynccall: ", oid, methodname) seq = self.asynccall(oid, methodname, args, kwargs) return self.asyncreturn(seq) def remotequeue(self, oid, methodname, args, kwargs): self.debug("remotequeue:asyncqueue: ", oid, methodname) seq = self.asyncqueue(oid, methodname, args, kwargs) return self.asyncreturn(seq) def asynccall(self, oid, methodname, args, kwargs): request = ("CALL", (oid, methodname, args, kwargs)) seq = self.newseq() if threading.currentThread() != self.sockthread: cvar = threading.Condition() self.cvars[seq] = cvar self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs) self.putmessage((seq, request)) return seq def asyncqueue(self, oid, methodname, args, kwargs): request = ("QUEUE", (oid, methodname, args, kwargs)) seq = self.newseq() if threading.currentThread() != self.sockthread: cvar = threading.Condition() self.cvars[seq] = cvar self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs) self.putmessage((seq, request)) return seq def asyncreturn(self, seq): self.debug("asyncreturn:%d:call getresponse(): " % seq) response = self.getresponse(seq, wait=0.05) self.debug(("asyncreturn:%d:response: " % seq), response) return self.decoderesponse(response) def decoderesponse(self, response): how, what = response if how == "OK": return what if how == "QUEUED": return None if how == "EXCEPTION": self.debug("decoderesponse: EXCEPTION") return None if how == "EOF": self.debug("decoderesponse: EOF") self.decode_interrupthook() return None if how == "ERROR": self.debug("decoderesponse: Internal ERROR:", what) raise 
RuntimeError, what raise SystemError, (how, what) def decode_interrupthook(self): "" raise EOFError def mainloop(self): """Listen on socket until I/O not ready or EOF pollresponse() will loop looking for seq number None, which never comes, and exit on EOFError. """ try: self.getresponse(myseq=None, wait=0.05) except EOFError: self.debug("mainloop:return") return def getresponse(self, myseq, wait): response = self._getresponse(myseq, wait) if response is not None: how, what = response if how == "OK": response = how, self._proxify(what) return response def _proxify(self, obj): if isinstance(obj, RemoteProxy): return RPCProxy(self, obj.oid) if isinstance(obj, types.ListType): return map(self._proxify, obj) # XXX Check for other types -- not currently needed return obj def _getresponse(self, myseq, wait): self.debug("_getresponse:myseq:", myseq) if threading.currentThread() is self.sockthread: # this thread does all reading of requests or responses while 1: response = self.pollresponse(myseq, wait) if response is not None: return response else: # wait for notification from socket handling thread cvar = self.cvars[myseq] cvar.acquire() while myseq not in self.responses: cvar.wait() response = self.responses[myseq] self.debug("_getresponse:%s: thread woke up: response: %s" % (myseq, response)) del self.responses[myseq] del self.cvars[myseq] cvar.release() return response def newseq(self): self.nextseq = seq = self.nextseq + 2 return seq def putmessage(self, message): self.debug("putmessage:%d:" % message[0]) try: s = pickle.dumps(message) except pickle.PicklingError: print >>sys.__stderr__, "Cannot pickle:", repr(message) raise s = struct.pack("<i", len(s)) + s while len(s) > 0: try: r, w, x = select.select([], [self.sock], []) n = self.sock.send(s[:BUFSIZE]) except (AttributeError, TypeError): raise IOError, "socket no longer exists" s = s[n:] buffer = "" bufneed = 4 bufstate = 0 # meaning: 0 => reading count; 1 => reading data def pollpacket(self, wait): self._stage0() 
if len(self.buffer) < self.bufneed: r, w, x = select.select([self.sock.fileno()], [], [], wait) if len(r) == 0: return None try: s = self.sock.recv(BUFSIZE) except socket.error: raise EOFError if len(s) == 0: raise EOFError self.buffer += s self._stage0() return self._stage1() def _stage0(self): if self.bufstate == 0 and len(self.buffer) >= 4: s = self.buffer[:4] self.buffer = self.buffer[4:] self.bufneed = struct.unpack("<i", s)[0] self.bufstate = 1 def _stage1(self): if self.bufstate == 1 and len(self.buffer) >= self.bufneed: packet = self.buffer[:self.bufneed] self.buffer = self.buffer[self.bufneed:] self.bufneed = 4 self.bufstate = 0 return packet def pollmessage(self, wait): packet = self.pollpacket(wait) if packet is None: return None try: message = pickle.loads(packet) except pickle.UnpicklingError: print >>sys.__stderr__, "-----------------------" print >>sys.__stderr__, "cannot unpickle packet:", repr(packet) traceback.print_stack(file=sys.__stderr__) print >>sys.__stderr__, "-----------------------" raise return message def pollresponse(self, myseq, wait): """Handle messages received on the socket. Some messages received may be asynchronous 'call' or 'queue' requests, and some may be responses for other threads. 'call' requests are passed to self.localcall() with the expectation of immediate execution, during which time the socket is not serviced. 'queue' requests are used for tasks (which may block or hang) to be processed in a different thread. These requests are fed into request_queue by self.localcall(). Responses to queued requests are taken from response_queue and sent across the link with the associated sequence numbers. Messages in the queues are (sequence_number, request/response) tuples and code using this module removing messages from the request_queue is responsible for returning the correct sequence number in the response_queue. 
pollresponse() will loop until a response message with the myseq sequence number is received, and will save other responses in self.responses and notify the owning thread. """ while 1: # send queued response if there is one available try: qmsg = response_queue.get(0) except Queue.Empty: pass else: seq, response = qmsg message = (seq, ('OK', response)) self.putmessage(message) # poll for message on link try: message = self.pollmessage(wait) if message is None: # socket not ready return None except EOFError: self.handle_EOF() return None except AttributeError: return None seq, resq = message how = resq[0] self.debug("pollresponse:%d:myseq:%s" % (seq, myseq)) # process or queue a request if how in ("CALL", "QUEUE"): self.debug("pollresponse:%d:localcall:call:" % seq) response = self.localcall(seq, resq) self.debug("pollresponse:%d:localcall:response:%s" % (seq, response)) if how == "CALL": self.putmessage((seq, response)) elif how == "QUEUE": # don't acknowledge the 'queue' request! pass continue # return if completed message transaction elif seq == myseq: return resq # must be a response for a different thread: else: cv = self.cvars.get(seq, None) # response involving unknown sequence number is discarded, # probably intended for prior incarnation of server if cv is not None: cv.acquire() self.responses[seq] = resq cv.notify() cv.release() continue def handle_EOF(self): "action taken upon link being closed by peer" self.EOFhook() self.debug("handle_EOF") for key in self.cvars: cv = self.cvars[key] cv.acquire() self.responses[key] = ('EOF', None) cv.notify() cv.release() # call our (possibly overridden) exit function self.exithook() def EOFhook(self): "Classes using rpc client/server can override to augment EOF action" pass #----------------- end class SocketIO -------------------- class RemoteObject(object): # Token mix-in class pass def remoteref(obj): oid = id(obj) objecttable[oid] = obj return RemoteProxy(oid) class RemoteProxy(object): def __init__(self, oid): 
self.oid = oid class RPCHandler(SocketServer.BaseRequestHandler, SocketIO): debugging = False location = "#S" # Server def __init__(self, sock, addr, svr): svr.current_handler = self ## cgt xxx SocketIO.__init__(self, sock) SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr) def handle(self): "handle() method required by SocketServer" self.mainloop() def get_remote_proxy(self, oid): return RPCProxy(self, oid) class RPCClient(SocketIO): debugging = False location = "#C" # Client nextseq = 1 # Requests coming from the client are odd numbered def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM): self.listening_sock = socket.socket(family, type) self.listening_sock.bind(address) self.listening_sock.listen(1) def accept(self): working_sock, address = self.listening_sock.accept() if self.debugging: print>>sys.__stderr__, "****** Connection request from ", address if address[0] == LOCALHOST: SocketIO.__init__(self, working_sock) else: print>>sys.__stderr__, "** Invalid host: ", address raise socket.error def get_remote_proxy(self, oid): return RPCProxy(self, oid) class RPCProxy(object): __methods = None __attributes = None def __init__(self, sockio, oid): self.sockio = sockio self.oid = oid def __getattr__(self, name): if self.__methods is None: self.__getmethods() if self.__methods.get(name): return MethodProxy(self.sockio, self.oid, name) if self.__attributes is None: self.__getattributes() if name in self.__attributes: value = self.sockio.remotecall(self.oid, '__getattribute__', (name,), {}) return value else: raise AttributeError, name def __getattributes(self): self.__attributes = self.sockio.remotecall(self.oid, "__attributes__", (), {}) def __getmethods(self): self.__methods = self.sockio.remotecall(self.oid, "__methods__", (), {}) def _getmethods(obj, methods): # Helper to get a list of methods from an object # Adds names to dictionary argument 'methods' for name in dir(obj): attr = getattr(obj, name) if hasattr(attr, 
'__call__'): methods[name] = 1 if type(obj) == types.InstanceType: _getmethods(obj.__class__, methods) if type(obj) == types.ClassType: for super in obj.__bases__: _getmethods(super, methods) def _getattributes(obj, attributes): for name in dir(obj): attr = getattr(obj, name) if not hasattr(attr, '__call__'): attributes[name] = 1 class MethodProxy(object): def __init__(self, sockio, oid, name): self.sockio = sockio self.oid = oid self.name = name def __call__(self, *args, **kwargs): value = self.sockio.remotecall(self.oid, self.name, args, kwargs) return value # XXX KBK 09Sep03 We need a proper unit test for this module. Previously # existing test code was removed at Rev 1.27 (r34098). </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475105"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gemalto/pycryptoki</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">pycryptoki/cryptoki/ck_defs.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">25581</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Structure & PKCS11-specific definitions. 
""" from ctypes import CFUNCTYPE, Structure from pycryptoki.cryptoki.c_defs import * from pycryptoki.cryptoki.helpers import struct_def # values for unnamed enumeration CK_MECHANISM_TYPE = CK_ULONG CK_MECHANISM_TYPE_PTR = POINTER(CK_MECHANISM_TYPE) CK_USER_TYPE = CK_ULONG CK_SESSION_HANDLE = CK_ULONG CK_SESSION_HANDLE_PTR = POINTER(CK_SESSION_HANDLE) CK_OBJECT_HANDLE = CK_ULONG CK_OBJECT_HANDLE_PTR = POINTER(CK_OBJECT_HANDLE) CK_STATE = CK_ULONG CK_OBJECT_CLASS = CK_ULONG CK_OBJECT_CLASS_PTR = POINTER(CK_OBJECT_CLASS) CK_HW_FEATURE_TYPE = CK_ULONG CK_KEY_TYPE = CK_ULONG CK_CERTIFICATE_TYPE = CK_ULONG CK_ATTRIBUTE_TYPE = CK_ULONG class CK_MECHANISM(Structure): pass class CK_ATTRIBUTE(Structure): pass CK_MECHANISM_PTR = POINTER(CK_MECHANISM) CK_ATTRIBUTE_PTR = POINTER(CK_ATTRIBUTE) class CK_AES_GCM_PARAMS(Structure): pass struct_def( CK_AES_GCM_PARAMS, [ ("pIv", CK_BYTE_PTR), ("ulIvLen", CK_ULONG), ("ulIvBits", CK_ULONG), ("pAAD", CK_BYTE_PTR), ("ulAADLen", CK_ULONG), ("ulTagBits", CK_ULONG), ], ) CK_AES_GCM_PARAMS_PTR = CK_AES_GCM_PARAMS class CK_XOR_BASE_DATA_KDF_PARAMS(Structure): pass CK_EC_KDF_TYPE = CK_ULONG struct_def( CK_XOR_BASE_DATA_KDF_PARAMS, [("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR)], ) CK_XOR_BASE_DATA_KDF_PARAMS_PTR = POINTER(CK_XOR_BASE_DATA_KDF_PARAMS) class CK_AES_XTS_PARAMS(Structure): pass struct_def(CK_AES_XTS_PARAMS, [("hTweakKey", CK_OBJECT_HANDLE), ("cb", CK_BYTE * 16)]) CK_AES_XTS_PARAMS_PTR = POINTER(CK_AES_XTS_PARAMS) CK_EC_DH_PRIMITIVE = CK_ULONG CK_EC_ENC_SCHEME = CK_ULONG CK_EC_MAC_SCHEME = CK_ULONG class CK_ECIES_PARAMS(Structure): pass struct_def( CK_ECIES_PARAMS, [ ("dhPrimitive", CK_EC_DH_PRIMITIVE), ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen1", CK_ULONG), ("pSharedData1", CK_BYTE_PTR), ("encScheme", CK_EC_ENC_SCHEME), ("ulEncKeyLenInBits", CK_ULONG), ("macScheme", CK_EC_MAC_SCHEME), ("ulMacKeyLenInBits", CK_ULONG), ("ulMacLenInBits", CK_ULONG), ("ulSharedDataLen2", CK_ULONG), 
("pSharedData2", CK_BYTE_PTR), ], ) CK_ECIES_PARAMS_PTR = POINTER(CK_ECIES_PARAMS) CK_KDF_PRF_TYPE = CK_ULONG CK_KDF_PRF_ENCODING_SCHEME = CK_ULONG class CK_KDF_PRF_PARAMS(Structure): pass struct_def( CK_KDF_PRF_PARAMS, [ ("prfType", CK_KDF_PRF_TYPE), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pContext", CK_BYTE_PTR), ("ulContextLen", CK_ULONG), ("ulCounter", CK_ULONG), ("ulEncodingScheme", CK_KDF_PRF_ENCODING_SCHEME), ], ) CK_PRF_KDF_PARAMS = CK_KDF_PRF_PARAMS CK_KDF_PRF_PARAMS_PTR = POINTER(CK_PRF_KDF_PARAMS) class CK_AES_CTR_PARAMS(Structure): pass CK_SEED_CTR_PARAMS = CK_AES_CTR_PARAMS CK_SEED_CTR_PARAMS_PTR = POINTER(CK_SEED_CTR_PARAMS) CK_ARIA_CTR_PARAMS = CK_AES_CTR_PARAMS CK_ARIA_CTR_PARAMS_PTR = POINTER(CK_ARIA_CTR_PARAMS) class CK_DES_CTR_PARAMS(Structure): pass struct_def(CK_DES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 8)]) CK_DES_CTR_PARAMS_PTR = POINTER(CK_DES_CTR_PARAMS) CK_AES_GMAC_PARAMS = CK_AES_GCM_PARAMS CK_AES_GMAC_PARAMS_PTR = POINTER(CK_AES_GMAC_PARAMS) class HSM_STATS_PARAMS(Structure): pass struct_def( HSM_STATS_PARAMS, [("ulId", CK_ULONG), ("ulHighValue", CK_ULONG), ("ulLowValue", CK_ULONG)] ) class CA_ROLE_STATE(Structure): pass struct_def( CA_ROLE_STATE, [ ("flags", CK_BYTE), ("loginAttemptsLeft", CK_BYTE), ("primaryAuthMech", CK_BYTE), ("secondaryAuthMech", CK_BYTE), ], ) class CA_MOFN_GENERATION(Structure): pass struct_def( CA_MOFN_GENERATION, [("ulWeight", CK_ULONG), ("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)], ) CA_MOFN_GENERATION_PTR = POINTER(CA_MOFN_GENERATION) class CA_MOFN_ACTIVATION(Structure): pass struct_def(CA_MOFN_ACTIVATION, [("pVector", CK_BYTE_PTR), ("ulVectorLen", CK_ULONG)]) CA_MOFN_ACTIVATION_PTR = POINTER(CA_MOFN_ACTIVATION) class CA_M_OF_N_STATUS(Structure): pass struct_def( CA_M_OF_N_STATUS, [ ("ulID", CK_ULONG), ("ulM", CK_ULONG), ("ulN", CK_ULONG), ("ulSecretSize", CK_ULONG), ("ulFlag", CK_ULONG), ], ) CA_MOFN_STATUS = CA_M_OF_N_STATUS CA_MOFN_STATUS_PTR = 
POINTER(CA_MOFN_STATUS) CKCA_MODULE_ID = CK_ULONG CKCA_MODULE_ID_PTR = POINTER(CKCA_MODULE_ID) class CKCA_MODULE_INFO(Structure): pass class CK_VERSION(Structure): pass struct_def(CK_VERSION, [("major", CK_BYTE), ("minor", CK_BYTE)]) struct_def( CKCA_MODULE_INFO, [ ("ulModuleSize", CK_ULONG), ("developerName", CK_CHAR * 32), ("moduleDescription", CK_CHAR * 32), ("moduleVersion", CK_VERSION), ], ) CKCA_MODULE_INFO_PTR = POINTER(CKCA_MODULE_INFO) class CK_HA_MEMBER(Structure): pass struct_def(CK_HA_MEMBER, [("memberSerial", CK_CHAR * 20), ("memberStatus", CK_RV)]) class CK_HA_STATUS(Structure): pass struct_def( CK_HA_STATUS, [("groupSerial", CK_CHAR * 20), ("memberList", CK_HA_MEMBER * 32), ("listSize", CK_ULONG)], ) CK_HA_MEMBER_PTR = POINTER(CK_HA_MEMBER) CK_HA_STATE_PTR = POINTER(CK_HA_STATUS) CKA_SIM_AUTH_FORM = CK_ULONG class CT_Token(Structure): pass struct_def(CT_Token, []) CT_TokenHndle = POINTER(CT_Token) class CK_AES_CBC_PAD_EXTRACT_PARAMS(Structure): pass struct_def( CK_AES_CBC_PAD_EXTRACT_PARAMS, [ ("ulType", CK_ULONG), ("ulHandle", CK_ULONG), ("ulDeleteAfterExtract", CK_ULONG), ("pBuffer", CK_BYTE_PTR), ("pulBufferLen", CK_ULONG_PTR), ("ulStorage", CK_ULONG), ("pedId", CK_ULONG), ("pbFileName", CK_BYTE_PTR), ("ctxID", CK_ULONG), ], ) CK_AES_CBC_PAD_EXTRACT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_EXTRACT_PARAMS) class CK_AES_CBC_PAD_INSERT_PARAMS(Structure): pass struct_def( CK_AES_CBC_PAD_INSERT_PARAMS, [ ("ulStorageType", CK_ULONG), ("ulContainerState", CK_ULONG), ("pBuffer", CK_BYTE_PTR), ("ulBufferLen", CK_ULONG), ("pulType", CK_ULONG_PTR), ("pulHandle", CK_ULONG_PTR), ("ulStorage", CK_ULONG), ("pedId", CK_ULONG), ("pbFileName", CK_BYTE_PTR), ("ctxID", CK_ULONG), ], ) CK_AES_CBC_PAD_INSERT_PARAMS_PTR = POINTER(CK_AES_CBC_PAD_INSERT_PARAMS) class CK_CLUSTER_STATE(Structure): pass struct_def(CK_CLUSTER_STATE, [("bMembers", CK_BYTE * 32 * 8), ("ulMemberStatus", CK_ULONG * 8)]) CK_CLUSTER_STATE_PTR = POINTER(CK_CLUSTER_STATE) class 
CK_LKM_TOKEN_ID_S(Structure): pass struct_def(CK_LKM_TOKEN_ID_S, [("id", CK_BYTE * 20)]) CK_LKM_TOKEN_ID = CK_LKM_TOKEN_ID_S CK_LKM_TOKEN_ID_PTR = POINTER(CK_LKM_TOKEN_ID) class CK_UTILIZATION_COUNTER(Structure): pass struct_def( CK_UTILIZATION_COUNTER, [ ("ullSerialNumber", CK_ULONGLONG), ("label", CK_CHAR * 66), ("ulBindId", CK_ULONG), ("ulCounterId", CK_ULONG), ("ullCount", CK_ULONGLONG), ], ) CK_UTILIZATION_COUNTER_PTR = POINTER(CK_UTILIZATION_COUNTER) # pka class CK_KEY_STATUS(Structure): pass struct_def( CK_KEY_STATUS, [ ("flags", CK_BYTE), ("failedAuthCountLimit", CK_BYTE), ("reserved1", CK_BYTE), ("reserved2", CK_BYTE), ], ) class CK_FUNCTION_LIST(Structure): pass class CK_INFO(Structure): pass CK_INFO_PTR = POINTER(CK_INFO) class CK_SLOT_INFO(Structure): pass CK_SLOT_INFO_PTR = POINTER(CK_SLOT_INFO) class CK_TOKEN_INFO(Structure): pass CK_TOKEN_INFO_PTR = POINTER(CK_TOKEN_INFO) class CK_MECHANISM_INFO(Structure): pass CK_MECHANISM_INFO_PTR = POINTER(CK_MECHANISM_INFO) class CK_SESSION_INFO(Structure): pass CK_SESSION_INFO_PTR = POINTER(CK_SESSION_INFO) CK_VERSION_PTR = POINTER(CK_VERSION) struct_def( CK_INFO, [ ("cryptokiVersion", CK_VERSION), ("manufacturerID", CK_UTF8CHAR * 32), ("flags", CK_FLAGS), ("libraryDescription", CK_UTF8CHAR * 32), ("libraryVersion", CK_VERSION), ], ) struct_def( CK_SLOT_INFO, [ ("slotDescription", CK_UTF8CHAR * 64), ("manufacturerID", CK_UTF8CHAR * 32), ("flags", CK_FLAGS), ("hardwareVersion", CK_VERSION), ("firmwareVersion", CK_VERSION), ], ) struct_def( CK_TOKEN_INFO, [ ("label", CK_UTF8CHAR * 32), ("manufacturerID", CK_UTF8CHAR * 32), ("model", CK_UTF8CHAR * 16), ("serialNumber", CK_CHAR * 16), ("flags", CK_FLAGS), ("usMaxSessionCount", CK_ULONG), ("usSessionCount", CK_ULONG), ("usMaxRwSessionCount", CK_ULONG), ("usRwSessionCount", CK_ULONG), ("usMaxPinLen", CK_ULONG), ("usMinPinLen", CK_ULONG), ("ulTotalPublicMemory", CK_ULONG), ("ulFreePublicMemory", CK_ULONG), ("ulTotalPrivateMemory", CK_ULONG), ("ulFreePrivateMemory", 
CK_ULONG), ("hardwareVersion", CK_VERSION), ("firmwareVersion", CK_VERSION), ("utcTime", CK_CHAR * 16), ], ) struct_def( CK_SESSION_INFO, [("slotID", CK_SLOT_ID), ("state", CK_STATE), ("flags", CK_FLAGS), ("usDeviceError", CK_ULONG)], ) struct_def( CK_ATTRIBUTE, [("type", CK_ATTRIBUTE_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)] ) class CK_DATE(Structure): pass struct_def(CK_DATE, [("year", CK_CHAR * 4), ("month", CK_CHAR * 2), ("day", CK_CHAR * 2)]) struct_def( CK_MECHANISM, [("mechanism", CK_MECHANISM_TYPE), ("pParameter", CK_VOID_PTR), ("usParameterLen", CK_ULONG)], ) struct_def( CK_MECHANISM_INFO, [("ulMinKeySize", CK_ULONG), ("ulMaxKeySize", CK_ULONG), ("flags", CK_FLAGS)] ) CK_CREATEMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR_PTR) CK_DESTROYMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) CK_LOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) CK_UNLOCKMUTEX = CFUNCTYPE(CK_RV, CK_VOID_PTR) class CK_C_INITIALIZE_ARGS(Structure): pass struct_def( CK_C_INITIALIZE_ARGS, [ ("CreateMutex", CK_CREATEMUTEX), ("DestroyMutex", CK_DESTROYMUTEX), ("LockMutex", CK_LOCKMUTEX), ("UnlockMutex", CK_UNLOCKMUTEX), ("flags", CK_FLAGS), ("pReserved", CK_VOID_PTR), ], ) CK_C_INITIALIZE_ARGS_PTR = POINTER(CK_C_INITIALIZE_ARGS) CK_RSA_PKCS_MGF_TYPE = CK_ULONG CK_RSA_PKCS_MGF_TYPE_PTR = POINTER(CK_RSA_PKCS_MGF_TYPE) CK_RSA_PKCS_OAEP_SOURCE_TYPE = CK_ULONG CK_RSA_PKCS_OAEP_SOURCE_TYPE_PTR = POINTER(CK_RSA_PKCS_OAEP_SOURCE_TYPE) class CK_RSA_PKCS_OAEP_PARAMS(Structure): pass struct_def( CK_RSA_PKCS_OAEP_PARAMS, [ ("hashAlg", CK_MECHANISM_TYPE), ("mgf", CK_RSA_PKCS_MGF_TYPE), ("source", CK_RSA_PKCS_OAEP_SOURCE_TYPE), ("pSourceData", CK_VOID_PTR), ("ulSourceDataLen", CK_ULONG), ], ) CK_RSA_PKCS_OAEP_PARAMS_PTR = POINTER(CK_RSA_PKCS_OAEP_PARAMS) class CK_RSA_PKCS_PSS_PARAMS(Structure): pass struct_def( CK_RSA_PKCS_PSS_PARAMS, [("hashAlg", CK_MECHANISM_TYPE), ("mgf", CK_RSA_PKCS_MGF_TYPE), ("usSaltLen", CK_ULONG)], ) CK_RSA_PKCS_PSS_PARAMS_PTR = POINTER(CK_RSA_PKCS_PSS_PARAMS) class 
CK_ECDH1_DERIVE_PARAMS(Structure): pass struct_def( CK_ECDH1_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_ECDH1_DERIVE_PARAMS_PTR = POINTER(CK_ECDH1_DERIVE_PARAMS) class CK_ECDH2_DERIVE_PARAMS(Structure): pass struct_def( CK_ECDH2_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ], ) CK_ECDH2_DERIVE_PARAMS_PTR = POINTER(CK_ECDH2_DERIVE_PARAMS) class CK_ECMQV_DERIVE_PARAMS(Structure): pass struct_def( CK_ECMQV_DERIVE_PARAMS, [ ("kdf", CK_EC_KDF_TYPE), ("ulSharedDataLen", CK_ULONG), ("pSharedData", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ("publicKey", CK_OBJECT_HANDLE), ], ) CK_ECMQV_DERIVE_PARAMS_PTR = POINTER(CK_ECMQV_DERIVE_PARAMS) CK_X9_42_DH_KDF_TYPE = CK_ULONG CK_X9_42_DH_KDF_TYPE_PTR = POINTER(CK_X9_42_DH_KDF_TYPE) class CK_X9_42_DH1_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_DH1_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), ("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_X9_42_DH1_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_DH1_DERIVE_PARAMS) class CK_X9_42_DH2_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_DH2_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), ("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ], ) CK_X9_42_DH2_DERIVE_PARAMS_PTR = 
POINTER(CK_X9_42_DH2_DERIVE_PARAMS) class CK_X9_42_MQV_DERIVE_PARAMS(Structure): pass struct_def( CK_X9_42_MQV_DERIVE_PARAMS, [ ("kdf", CK_X9_42_DH_KDF_TYPE), ("ulOtherInfoLen", CK_ULONG), ("pOtherInfo", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPrivateDataLen", CK_ULONG), ("hPrivateData", CK_OBJECT_HANDLE), ("ulPublicDataLen2", CK_ULONG), ("pPublicData2", CK_BYTE_PTR), ("publicKey", CK_OBJECT_HANDLE), ], ) CK_X9_42_MQV_DERIVE_PARAMS_PTR = POINTER(CK_X9_42_MQV_DERIVE_PARAMS) class CK_KEA_DERIVE_PARAMS(Structure): pass struct_def( CK_KEA_DERIVE_PARAMS, [ ("isSender", CK_BBOOL), ("ulRandomLen", CK_ULONG), ("pRandomA", CK_BYTE_PTR), ("pRandomB", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ], ) CK_KEA_DERIVE_PARAMS_PTR = POINTER(CK_KEA_DERIVE_PARAMS) CK_RC2_PARAMS = CK_ULONG CK_RC2_PARAMS_PTR = POINTER(CK_RC2_PARAMS) class CK_RC2_CBC_PARAMS(Structure): pass struct_def(CK_RC2_CBC_PARAMS, [("usEffectiveBits", CK_ULONG), ("iv", CK_BYTE * 8)]) CK_RC2_CBC_PARAMS_PTR = POINTER(CK_RC2_CBC_PARAMS) class CK_RC2_MAC_GENERAL_PARAMS(Structure): pass struct_def(CK_RC2_MAC_GENERAL_PARAMS, [("usEffectiveBits", CK_ULONG), ("ulMacLength", CK_ULONG)]) CK_RC2_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC2_MAC_GENERAL_PARAMS) class CK_RC5_PARAMS(Structure): pass struct_def(CK_RC5_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG)]) CK_RC5_PARAMS_PTR = POINTER(CK_RC5_PARAMS) class CK_RC5_CBC_PARAMS(Structure): pass struct_def( CK_RC5_CBC_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("pIv", CK_BYTE_PTR), ("ulIvLen", CK_ULONG)], ) CK_RC5_CBC_PARAMS_PTR = POINTER(CK_RC5_CBC_PARAMS) class CK_RC5_MAC_GENERAL_PARAMS(Structure): pass struct_def( CK_RC5_MAC_GENERAL_PARAMS, [("ulWordsize", CK_ULONG), ("ulRounds", CK_ULONG), ("ulMacLength", CK_ULONG)], ) CK_RC5_MAC_GENERAL_PARAMS_PTR = POINTER(CK_RC5_MAC_GENERAL_PARAMS) CK_MAC_GENERAL_PARAMS = CK_ULONG CK_MAC_GENERAL_PARAMS_PTR = POINTER(CK_MAC_GENERAL_PARAMS) class 
CK_DES_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_DES_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 8), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_DES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_DES_CBC_ENCRYPT_DATA_PARAMS) class CK_AES_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_AES_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_AES_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_AES_CBC_ENCRYPT_DATA_PARAMS) class CK_SKIPJACK_PRIVATE_WRAP_PARAMS(Structure): pass struct_def( CK_SKIPJACK_PRIVATE_WRAP_PARAMS, [ ("usPasswordLen", CK_ULONG), ("pPassword", CK_BYTE_PTR), ("ulPublicDataLen", CK_ULONG), ("pPublicData", CK_BYTE_PTR), ("ulPAndGLen", CK_ULONG), ("ulQLen", CK_ULONG), ("ulRandomLen", CK_ULONG), ("pRandomA", CK_BYTE_PTR), ("pPrimeP", CK_BYTE_PTR), ("pBaseG", CK_BYTE_PTR), ("pSubprimeQ", CK_BYTE_PTR), ], ) CK_SKIPJACK_PRIVATE_WRAP_PTR = POINTER(CK_SKIPJACK_PRIVATE_WRAP_PARAMS) class CK_SKIPJACK_RELAYX_PARAMS(Structure): pass struct_def( CK_SKIPJACK_RELAYX_PARAMS, [ ("ulOldWrappedXLen", CK_ULONG), ("pOldWrappedX", CK_BYTE_PTR), ("ulOldPasswordLen", CK_ULONG), ("pOldPassword", CK_BYTE_PTR), ("ulOldPublicDataLen", CK_ULONG), ("pOldPublicData", CK_BYTE_PTR), ("ulOldRandomLen", CK_ULONG), ("pOldRandomA", CK_BYTE_PTR), ("ulNewPasswordLen", CK_ULONG), ("pNewPassword", CK_BYTE_PTR), ("ulNewPublicDataLen", CK_ULONG), ("pNewPublicData", CK_BYTE_PTR), ("ulNewRandomLen", CK_ULONG), ("pNewRandomA", CK_BYTE_PTR), ], ) CK_SKIPJACK_RELAYX_PARAMS_PTR = POINTER(CK_SKIPJACK_RELAYX_PARAMS) class CK_PBE_PARAMS(Structure): pass struct_def( CK_PBE_PARAMS, [ ("pInitVector", CK_BYTE_PTR), ("pPassword", CK_UTF8CHAR_PTR), ("usPasswordLen", CK_ULONG), ("pSalt", CK_BYTE_PTR), ("usSaltLen", CK_ULONG), ("usIteration", CK_ULONG), ], ) CK_PBE_PARAMS_PTR = POINTER(CK_PBE_PARAMS) class CK_KEY_WRAP_SET_OAEP_PARAMS(Structure): pass struct_def( CK_KEY_WRAP_SET_OAEP_PARAMS, [("bBC", CK_BYTE), ("pX", CK_BYTE_PTR), ("ulXLen", 
CK_ULONG)] ) CK_KEY_WRAP_SET_OAEP_PARAMS_PTR = POINTER(CK_KEY_WRAP_SET_OAEP_PARAMS) class CK_SSL3_RANDOM_DATA(Structure): pass struct_def( CK_SSL3_RANDOM_DATA, [ ("pClientRandom", CK_BYTE_PTR), ("ulClientRandomLen", CK_ULONG), ("pServerRandom", CK_BYTE_PTR), ("ulServerRandomLen", CK_ULONG), ], ) class CK_SSL3_MASTER_KEY_DERIVE_PARAMS(Structure): pass struct_def( CK_SSL3_MASTER_KEY_DERIVE_PARAMS, [("RandomInfo", CK_SSL3_RANDOM_DATA), ("pVersion", CK_VERSION_PTR)], ) CK_SSL3_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_SSL3_MASTER_KEY_DERIVE_PARAMS) class CK_SSL3_KEY_MAT_OUT(Structure): pass struct_def( CK_SSL3_KEY_MAT_OUT, [ ("hClientMacSecret", CK_OBJECT_HANDLE), ("hServerMacSecret", CK_OBJECT_HANDLE), ("hClientKey", CK_OBJECT_HANDLE), ("hServerKey", CK_OBJECT_HANDLE), ("pIVClient", CK_BYTE_PTR), ("pIVServer", CK_BYTE_PTR), ], ) CK_SSL3_KEY_MAT_OUT_PTR = POINTER(CK_SSL3_KEY_MAT_OUT) class CK_SSL3_KEY_MAT_PARAMS(Structure): pass struct_def( CK_SSL3_KEY_MAT_PARAMS, [ ("ulMacSizeInBits", CK_ULONG), ("ulKeySizeInBits", CK_ULONG), ("ulIVSizeInBits", CK_ULONG), ("bIsExport", CK_BBOOL), ("RandomInfo", CK_SSL3_RANDOM_DATA), ("pReturnedKeyMaterial", CK_SSL3_KEY_MAT_OUT_PTR), ], ) CK_SSL3_KEY_MAT_PARAMS_PTR = POINTER(CK_SSL3_KEY_MAT_PARAMS) class CK_TLS_PRF_PARAMS(Structure): pass struct_def( CK_TLS_PRF_PARAMS, [ ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pOutput", CK_BYTE_PTR), ("pulOutputLen", CK_ULONG_PTR), ], ) CK_TLS_PRF_PARAMS_PTR = POINTER(CK_TLS_PRF_PARAMS) class CK_WTLS_RANDOM_DATA(Structure): pass struct_def( CK_WTLS_RANDOM_DATA, [ ("pClientRandom", CK_BYTE_PTR), ("ulClientRandomLen", CK_ULONG), ("pServerRandom", CK_BYTE_PTR), ("ulServerRandomLen", CK_ULONG), ], ) CK_WTLS_RANDOM_DATA_PTR = POINTER(CK_WTLS_RANDOM_DATA) class CK_WTLS_MASTER_KEY_DERIVE_PARAMS(Structure): pass struct_def( CK_WTLS_MASTER_KEY_DERIVE_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("RandomInfo", CK_WTLS_RANDOM_DATA), ("pVersion", 
CK_BYTE_PTR), ], ) CK_WTLS_MASTER_KEY_DERIVE_PARAMS_PTR = POINTER(CK_WTLS_MASTER_KEY_DERIVE_PARAMS) class CK_WTLS_PRF_PARAMS(Structure): pass struct_def( CK_WTLS_PRF_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ("pLabel", CK_BYTE_PTR), ("ulLabelLen", CK_ULONG), ("pOutput", CK_BYTE_PTR), ("pulOutputLen", CK_ULONG_PTR), ], ) CK_WTLS_PRF_PARAMS_PTR = POINTER(CK_WTLS_PRF_PARAMS) class CK_WTLS_KEY_MAT_OUT(Structure): pass struct_def( CK_WTLS_KEY_MAT_OUT, [("hMacSecret", CK_OBJECT_HANDLE), ("hKey", CK_OBJECT_HANDLE), ("pIV", CK_BYTE_PTR)], ) CK_WTLS_KEY_MAT_OUT_PTR = POINTER(CK_WTLS_KEY_MAT_OUT) class CK_WTLS_KEY_MAT_PARAMS(Structure): pass struct_def( CK_WTLS_KEY_MAT_PARAMS, [ ("DigestMechanism", CK_MECHANISM_TYPE), ("ulMacSizeInBits", CK_ULONG), ("ulKeySizeInBits", CK_ULONG), ("ulIVSizeInBits", CK_ULONG), ("ulSequenceNumber", CK_ULONG), ("bIsExport", CK_BBOOL), ("RandomInfo", CK_WTLS_RANDOM_DATA), ("pReturnedKeyMaterial", CK_WTLS_KEY_MAT_OUT_PTR), ], ) CK_WTLS_KEY_MAT_PARAMS_PTR = POINTER(CK_WTLS_KEY_MAT_PARAMS) class CK_CMS_SIG_PARAMS(Structure): pass struct_def( CK_CMS_SIG_PARAMS, [ ("certificateHandle", CK_OBJECT_HANDLE), ("pSigningMechanism", CK_MECHANISM_PTR), ("pDigestMechanism", CK_MECHANISM_PTR), ("pContentType", CK_UTF8CHAR_PTR), ("pRequestedAttributes", CK_BYTE_PTR), ("ulRequestedAttributesLen", CK_ULONG), ("pRequiredAttributes", CK_BYTE_PTR), ("ulRequiredAttributesLen", CK_ULONG), ], ) CK_CMS_SIG_PARAMS_PTR = POINTER(CK_CMS_SIG_PARAMS) class CK_KEY_DERIVATION_STRING_DATA(Structure): pass struct_def(CK_KEY_DERIVATION_STRING_DATA, [("pData", CK_BYTE_PTR), ("ulLen", CK_ULONG)]) CK_KEY_DERIVATION_STRING_DATA_PTR = POINTER(CK_KEY_DERIVATION_STRING_DATA) CK_EXTRACT_PARAMS = CK_ULONG CK_EXTRACT_PARAMS_PTR = POINTER(CK_EXTRACT_PARAMS) CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE = CK_ULONG CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE_PTR = POINTER(CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE) 
CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE = CK_ULONG CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE_PTR = POINTER(CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE) class CK_PKCS5_PBKD2_PARAMS(Structure): pass struct_def( CK_PKCS5_PBKD2_PARAMS, [ ("saltSource", CK_PKCS5_PBKDF2_SALT_SOURCE_TYPE), ("pSaltSourceData", CK_VOID_PTR), ("ulSaltSourceDataLen", CK_ULONG), ("iterations", CK_ULONG), ("prf", CK_PKCS5_PBKD2_PSEUDO_RANDOM_FUNCTION_TYPE), ("pPrfData", CK_VOID_PTR), ("ulPrfDataLen", CK_ULONG), ("pPassword", CK_UTF8CHAR_PTR), ("usPasswordLen", CK_ULONG), ], ) CK_PKCS5_PBKD2_PARAMS_PTR = POINTER(CK_PKCS5_PBKD2_PARAMS) CK_OTP_PARAM_TYPE = CK_ULONG CK_PARAM_TYPE = CK_OTP_PARAM_TYPE class CK_OTP_PARAM(Structure): pass struct_def( CK_OTP_PARAM, [("type", CK_OTP_PARAM_TYPE), ("pValue", CK_VOID_PTR), ("usValueLen", CK_ULONG)] ) CK_OTP_PARAM_PTR = POINTER(CK_OTP_PARAM) class CK_OTP_PARAMS(Structure): pass struct_def(CK_OTP_PARAMS, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)]) CK_OTP_PARAMS_PTR = POINTER(CK_OTP_PARAMS) class CK_OTP_SIGNATURE_INFO(Structure): pass struct_def(CK_OTP_SIGNATURE_INFO, [("pParams", CK_OTP_PARAM_PTR), ("ulCount", CK_ULONG)]) CK_OTP_SIGNATURE_INFO_PTR = POINTER(CK_OTP_SIGNATURE_INFO) class CK_KIP_PARAMS(Structure): pass struct_def( CK_KIP_PARAMS, [ ("pMechanism", CK_MECHANISM_PTR), ("hKey", CK_OBJECT_HANDLE), ("pSeed", CK_BYTE_PTR), ("ulSeedLen", CK_ULONG), ], ) CK_KIP_PARAMS_PTR = POINTER(CK_KIP_PARAMS) struct_def(CK_AES_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)]) CK_AES_CTR_PARAMS_PTR = POINTER(CK_AES_CTR_PARAMS) class CK_CAMELLIA_CTR_PARAMS(Structure): pass struct_def(CK_CAMELLIA_CTR_PARAMS, [("ulCounterBits", CK_ULONG), ("cb", CK_BYTE * 16)]) CK_CAMELLIA_CTR_PARAMS_PTR = POINTER(CK_CAMELLIA_CTR_PARAMS) class CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS_PTR = 
POINTER(CK_CAMELLIA_CBC_ENCRYPT_DATA_PARAMS) class CK_ARIA_CBC_ENCRYPT_DATA_PARAMS(Structure): pass struct_def( CK_ARIA_CBC_ENCRYPT_DATA_PARAMS, [("iv", CK_BYTE * 16), ("pData", CK_BYTE_PTR), ("length", CK_ULONG)], ) CK_ARIA_CBC_ENCRYPT_DATA_PARAMS_PTR = POINTER(CK_ARIA_CBC_ENCRYPT_DATA_PARAMS) class CK_APPLICATION_ID(Structure): def __init__(self, aid=None): if aid is None: aid = [] self.id = (CK_BYTE * 16)(*aid) struct_def(CK_APPLICATION_ID, [("id", CK_BYTE * 16)]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475106"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">zx8/youtube-dl</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">youtube_dl/extractor/comedycentral.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">92</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">12008</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals import re from .mtv import MTVServicesInfoExtractor from ..compat import ( compat_str, compat_urllib_parse, ) from ..utils import ( ExtractorError, float_or_none, unified_strdate, ) class ComedyCentralIE(MTVServicesInfoExtractor): _VALID_URL = 
r'''(?x)https?://(?:www\.)?cc\.com/ (video-clips|episodes|cc-studios|video-collections|full-episodes) /(?P<title>.*)''' _FEED_URL = 'http://comedycentral.com/feeds/mrss/' _TEST = { 'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother', 'md5': 'c4f48e9eda1b16dd10add0744344b6d8', 'info_dict': { 'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354', 'ext': 'mp4', 'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother', 'description': 'After a certain point, breastfeeding becomes c**kblocking.', }, } class ComedyCentralShowsIE(MTVServicesInfoExtractor): IE_DESC = 'The Daily Show / The Colbert Report' # urls can be abbreviations like :thedailyshow # urls for episodes like: # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524 _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow) |https?://(:www\.)? (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/ ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)| (?P<clip> (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+)) |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?)) |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)) )| (?P<interview> extended-interviews/(?P<interID>[0-9a-z]+)/ (?:playlist_tds_extended_)?(?P<interview_title>[^/?#]*?) 
(?:/[^/?#]?|[?#]|$)))) ''' _TESTS = [{ 'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart', 'md5': '4e2f5cb088a83cd8cdb7756132f9739d', 'info_dict': { 'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55', 'ext': 'mp4', 'upload_date': '20121213', 'description': 'Kristen Stewart learns to let loose in "On the Road."', 'uploader': 'thedailyshow', 'title': 'thedailyshow kristen-stewart part 1', } }, { 'url': 'http://thedailyshow.cc.com/extended-interviews/b6364d/sarah-chayes-extended-interview', 'info_dict': { 'id': 'sarah-chayes-extended-interview', 'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."', 'title': 'thedailyshow Sarah Chayes Extended Interview', }, 'playlist': [ { 'info_dict': { 'id': '0baad492-cbec-4ec1-9e50-ad91c291127f', 'ext': 'mp4', 'upload_date': '20150129', 'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."', 'uploader': 'thedailyshow', 'title': 'thedailyshow sarah-chayes-extended-interview part 1', }, }, { 'info_dict': { 'id': '1e4fb91b-8ce7-4277-bd7c-98c9f1bbd283', 'ext': 'mp4', 'upload_date': '20150129', 'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."', 'uploader': 'thedailyshow', 'title': 'thedailyshow sarah-chayes-extended-interview part 2', }, }, ], 'params': { 'skip_download': True, }, }, { 'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview', 'only_matching': True, }, { 'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news', 'only_matching': True, }, { 'url': 
'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary', 'only_matching': True, }, { 'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food', 'only_matching': True, }, { 'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel', 'only_matching': True, }] _available_formats = ['3500', '2200', '1700', '1200', '750', '400'] _video_extensions = { '3500': 'mp4', '2200': 'mp4', '1700': 'mp4', '1200': 'mp4', '750': 'mp4', '400': 'mp4', } _video_dimensions = { '3500': (1280, 720), '2200': (960, 540), '1700': (768, 432), '1200': (640, 360), '750': (512, 288), '400': (384, 216), } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj.group('shortname'): if mobj.group('shortname') in ('tds', 'thedailyshow'): url = 'http://thedailyshow.cc.com/full-episodes/' else: url = 'http://thecolbertreport.cc.com/full-episodes/' mobj = re.match(self._VALID_URL, url, re.VERBOSE) assert mobj is not None if mobj.group('clip'): if mobj.group('videotitle'): epTitle = mobj.group('videotitle') elif mobj.group('showname') == 'thedailyshow': epTitle = mobj.group('tdstitle') else: epTitle = mobj.group('cntitle') dlNewest = False elif mobj.group('interview'): epTitle = mobj.group('interview_title') dlNewest = False else: dlNewest = not mobj.group('episode') if 
dlNewest: epTitle = mobj.group('showname') else: epTitle = mobj.group('episode') show_name = mobj.group('showname') webpage, htmlHandle = self._download_webpage_handle(url, epTitle) if dlNewest: url = htmlHandle.geturl() mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid redirected URL: ' + url) if mobj.group('episode') == '': raise ExtractorError('Redirected URL is still not specific: ' + url) epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1] mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage) if len(mMovieParams) == 0: # The Colbert Report embeds the information in a without # a URL prefix; so extract the alternate reference # and then add the URL prefix manually. altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage) if len(altMovieParams) == 0: raise ExtractorError('unable to find Flash URL in webpage ' + url) else: mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] uri = mMovieParams[0][1] # Correct cc.com in uri uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri) index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri})) idoc = self._download_xml( index_url, epTitle, 'Downloading show index', 'Unable to download episode index') title = idoc.find('./channel/title').text description = idoc.find('./channel/description').text entries = [] item_els = idoc.findall('.//item') for part_num, itemEl in enumerate(item_els): upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text) thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url') content = itemEl.find('.//{http://search.yahoo.com/mrss/}content') duration = float_or_none(content.attrib.get('duration')) mediagen_url = content.attrib['url'] guid = 
itemEl.find('./guid').text.rpartition(':')[-1] cdoc = self._download_xml( mediagen_url, epTitle, 'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els))) turls = [] for rendition in cdoc.findall('.//rendition'): finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text) turls.append(finfo) formats = [] for format, rtmp_video_url in turls: w, h = self._video_dimensions.get(format, (None, None)) formats.append({ 'format_id': 'vhttp-%s' % format, 'url': self._transform_rtmp_url(rtmp_video_url), 'ext': self._video_extensions.get(format, 'mp4'), 'height': h, 'width': w, }) formats.append({ 'format_id': 'rtmp-%s' % format, 'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'), 'ext': self._video_extensions.get(format, 'mp4'), 'height': h, 'width': w, }) self._sort_formats(formats) subtitles = self._extract_subtitles(cdoc, guid) virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1) entries.append({ 'id': guid, 'title': virtual_id, 'formats': formats, 'uploader': show_name, 'upload_date': upload_date, 'duration': duration, 'thumbnail': thumbnail, 'description': description, 'subtitles': subtitles, }) return { '_type': 'playlist', 'id': epTitle, 'entries': entries, 'title': show_name + ' ' + title, 'description': description, } </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">unlicense</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475107"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ArcherSys/ArcherSys</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> 
<span class="block ">Lib/test/test_strptime.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">79523</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "><<<<<<< HEAD <<<<<<< HEAD """PyUnit testing against strptime""" import unittest import time import locale import re import sys from test import support from datetime import date as datetime_date import _strptime class getlang_Tests(unittest.TestCase): """Test _getlang""" def test_basic(self): self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME)) class LocaleTime_Tests(unittest.TestCase): """Tests for _strptime.LocaleTime. All values are lower-cased when stored in LocaleTime, so make sure to compare values after running ``lower`` on them. """ def setUp(self): """Create time tuple based on current time.""" self.time_tuple = time.localtime() self.LT_ins = _strptime.LocaleTime() def compare_against_time(self, testing, directive, tuple_position, error_msg): """Helper method that tests testing against directive based on the tuple_position of time_tuple. Uses error_msg as error message. 
""" strftime_output = time.strftime(directive, self.time_tuple).lower() comparison = testing[self.time_tuple[tuple_position]] self.assertIn(strftime_output, testing, "%s: not found in tuple" % error_msg) self.assertEqual(comparison, strftime_output, "%s: position within tuple incorrect; %s != %s" % (error_msg, comparison, strftime_output)) def test_weekday(self): # Make sure that full and abbreviated weekday names are correct in # both string and position with tuple self.compare_against_time(self.LT_ins.f_weekday, '%A', 6, "Testing of full weekday name failed") self.compare_against_time(self.LT_ins.a_weekday, '%a', 6, "Testing of abbreviated weekday name failed") def test_month(self): # Test full and abbreviated month names; both string and position # within the tuple self.compare_against_time(self.LT_ins.f_month, '%B', 1, "Testing against full month name failed") self.compare_against_time(self.LT_ins.a_month, '%b', 1, "Testing against abbreviated month name failed") def test_am_pm(self): # Make sure AM/PM representation done properly strftime_output = time.strftime("%p", self.time_tuple).lower() self.assertIn(strftime_output, self.LT_ins.am_pm, "AM/PM representation not in tuple") if self.time_tuple[3] < 12: position = 0 else: position = 1 self.assertEqual(self.LT_ins.am_pm[position], strftime_output, "AM/PM representation in the wrong position within the tuple") def test_timezone(self): # Make sure timezone is correct timezone = time.strftime("%Z", self.time_tuple).lower() if timezone: self.assertTrue(timezone in self.LT_ins.timezone[0] or timezone in self.LT_ins.timezone[1], "timezone %s not found in %s" % (timezone, self.LT_ins.timezone)) def test_date_time(self): # Check that LC_date_time, LC_date, and LC_time are correct # the magic date is used so as to not have issues with %c when day of # the month is a single digit and has a leading space. This is not an # issue since strptime still parses it correctly. 
The problem is # testing these directives for correctness by comparing strftime # output. magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0) strftime_output = time.strftime("%c", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date), strftime_output, "LC_date_time incorrect") strftime_output = time.strftime("%x", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date), strftime_output, "LC_date incorrect") strftime_output = time.strftime("%X", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date), strftime_output, "LC_time incorrect") LT = _strptime.LocaleTime() LT.am_pm = ('', '') self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle " "empty strings") def test_lang(self): # Make sure lang is set to what _getlang() returns # Assuming locale has not changed between now and when self.LT_ins was created self.assertEqual(self.LT_ins.lang, _strptime._getlang()) class TimeRETests(unittest.TestCase): """Tests for TimeRE.""" def setUp(self): """Construct generic TimeRE object.""" self.time_re = _strptime.TimeRE() self.locale_time = _strptime.LocaleTime() def test_pattern(self): # Test TimeRE.pattern pattern_string = self.time_re.pattern(r"%a %A %d") self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1, "did not find abbreviated weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1, "did not find full weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.time_re['d']) != -1, "did not find 'd' directive pattern string '%s'" % pattern_string) def test_pattern_escaping(self): # Make sure any characters in the format string that might be taken as # regex syntax is escaped. 
pattern_string = self.time_re.pattern("\d+") self.assertIn(r"\\d\+", pattern_string, "%s does not have re characters escaped properly" % pattern_string) def test_compile(self): # Check that compiled regex is correct found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6]) self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6], "re object for '%A' failed") compiled = self.time_re.compile(r"%a %b") found = compiled.match("%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4])) self.assertTrue(found, "Match failed with '%s' regex and '%s' string" % (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4]))) self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and found.group('b') == self.locale_time.a_month[4], "re object couldn't find the abbreviated weekday month in " "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" % (found.string, found.re.pattern, found.group('a'), found.group('b'))) for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S', 'U','w','W','x','X','y','Y','Z','%'): compiled = self.time_re.compile("%" + directive) found = compiled.match(time.strftime("%" + directive)) self.assertTrue(found, "Matching failed on '%s' using '%s' regex" % (time.strftime("%" + directive), compiled.pattern)) def test_blankpattern(self): # Make sure when tuple or something has no values no regex is generated. 
# Fixes bug #661354 test_locale = _strptime.LocaleTime() test_locale.timezone = (frozenset(), frozenset()) self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', "with timezone == ('',''), TimeRE().pattern('%Z') != ''") def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works compiled_re = self.time_re.compile("\w+ %m") found = compiled_re.match("\w+ 10") self.assertTrue(found, "Escaping failed of format '\w+ 10'") def test_locale_data_w_regex_metacharacters(self): # Check that if locale data contains regex metacharacters they are # escaped properly. # Discovered by bug #1039270 . locale_time = _strptime.LocaleTime() locale_time.timezone = (frozenset(("utc", "gmt", "Tokyo (standard time)")), frozenset("Tokyo (daylight time)")) time_re = _strptime.TimeRE(locale_time) self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"), "locale data that contains regex metacharacters is not" " properly escaped") def test_whitespace_substitution(self): # When pattern contains whitespace, make sure it is taken into account # so as to not allow to subpatterns to end up next to each other and # "steal" characters from each other. 
pattern = self.time_re.pattern('%j %H') self.assertFalse(re.match(pattern, "180")) self.assertTrue(re.match(pattern, "18 0")) class StrptimeTests(unittest.TestCase): """Tests for _strptime.strptime.""" def setUp(self): """Create testing time tuple.""" self.time_tuple = time.gmtime() def test_ValueError(self): # Make sure ValueError is raised when match fails or format is bad self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d", format="%A") for bad_format in ("%", "% ", "%e"): try: _strptime._strptime_time("2005", bad_format) except ValueError: continue except Exception as err: self.fail("'%s' raised %s, not ValueError" % (bad_format, err.__class__.__name__)) else: self.fail("'%s' did not raise ValueError" % bad_format) def test_strptime_exception_context(self): # check that this doesn't chain exceptions needlessly (see #17572) with self.assertRaises(ValueError) as e: _strptime._strptime_time('', '%D') self.assertIs(e.exception.__suppress_context__, True) # additional check for IndexError branch (issue #19545) with self.assertRaises(ValueError) as e: _strptime._strptime_time('19', '%Y %') self.assertIs(e.exception.__suppress_context__, True) def test_unconverteddata(self): # Check ValueError is raised when there is unconverted data self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m") def helper(self, directive, position): """Helper fxn in testing.""" strf_output = time.strftime("%" + directive, self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%" + directive) self.assertTrue(strp_output[position] == self.time_tuple[position], "testing of '%s' directive failed; '%s' -> %s != %s" % (directive, strf_output, strp_output[position], self.time_tuple[position])) def test_year(self): # Test that the year is handled properly for directive in ('y', 'Y'): self.helper(directive, 0) # Must also make sure %y values are correct for bounds set by Open Group for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))): 
for bound in bounds: strp_output = _strptime._strptime_time(bound, '%y') expected_result = century + int(bound) self.assertTrue(strp_output[0] == expected_result, "'y' test failed; passed in '%s' " "and returned '%s'" % (bound, strp_output[0])) def test_month(self): # Test for month directives for directive in ('B', 'b', 'm'): self.helper(directive, 1) def test_day(self): # Test for day directives self.helper('d', 2) def test_hour(self): # Test hour directives self.helper('H', 3) strf_output = time.strftime("%I %p", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%I %p") self.assertTrue(strp_output[3] == self.time_tuple[3], "testing of '%%I %%p' directive failed; '%s' -> %s != %s" % (strf_output, strp_output[3], self.time_tuple[3])) def test_minute(self): # Test minute directives self.helper('M', 4) def test_second(self): # Test second directives self.helper('S', 5) def test_fraction(self): # Test microseconds import datetime d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987) tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f") self.assertEqual(frac, d.microsecond) def test_weekday(self): # Test weekday directives for directive in ('A', 'a', 'w'): self.helper(directive,6) def test_julian(self): # Test julian directives self.helper('j', 7) def test_timezone(self): # Test timezone directives. # When gmtime() is used with %Z, entire result of strftime() is empty. # Check for equal timezone names deals with bad locale info when this # occurs; first found in FreeBSD 4.4. 
strp_output = _strptime._strptime_time("UTC", "%Z") self.assertEqual(strp_output.tm_isdst, 0) strp_output = _strptime._strptime_time("GMT", "%Z") self.assertEqual(strp_output.tm_isdst, 0) time_tuple = time.localtime() strf_output = time.strftime("%Z") #UTC does not have a timezone strp_output = _strptime._strptime_time(strf_output, "%Z") locale_time = _strptime.LocaleTime() if time.tzname[0] != time.tzname[1] or not time.daylight: self.assertTrue(strp_output[8] == time_tuple[8], "timezone check failed; '%s' -> %s != %s" % (strf_output, strp_output[8], time_tuple[8])) else: self.assertTrue(strp_output[8] == -1, "LocaleTime().timezone has duplicate values and " "time.daylight but timezone value not set to -1") def test_bad_timezone(self): # Explicitly test possibility of bad timezone; # when time.tzname[0] == time.tzname[1] and time.daylight tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') try: original_tzname = time.tzname original_daylight = time.daylight time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] self.assertEqual(tz_value, -1, "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) finally: time.tzname = original_tzname time.daylight = original_daylight def test_date_time(self): # Test %c directive for position in range(6): self.helper('c', position) def test_date(self): # Test %x directive for position in range(0,3): self.helper('x', position) def test_time(self): # Test %X directive for position in range(3,6): self.helper('X', position) def test_percent(self): # Make sure % signs are handled properly strf_output = time.strftime("%m %% %Y", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%m %% %Y") self.assertTrue(strp_output[0] == self.time_tuple[0] and strp_output[1] == self.time_tuple[1], "handling of percent sign failed") def 
test_caseinsensitive(self): # Should handle names case-insensitively. strf_output = time.strftime("%B", self.time_tuple) self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"), "strptime does not handle ALL-CAPS names properly") self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"), "strptime does not handle lowercase names properly") self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"), "strptime does not handle capword names properly") def test_defaults(self): # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0) defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1) strp_output = _strptime._strptime_time('1', '%m') self.assertTrue(strp_output == defaults, "Default values for strptime() are incorrect;" " %s != %s" % (strp_output, defaults)) def test_escaping(self): # Make sure all characters that have regex significance are escaped. # Parentheses are in a purposeful order; will cause an error of # unbalanced parentheses when the regex is compiled if they are not # escaped. # Test instigated by bug #796149 . 
need_escaping = ".^$*+?{}\[]|)(" self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping)) def test_feb29_on_leap_year_without_year(self): time.strptime("Feb 29", "%b %d") def test_mar1_comes_after_feb29_even_when_omitting_the_year(self): self.assertLess( time.strptime("Feb 29", "%b %d"), time.strptime("Mar 1", "%b %d")) class Strptime12AMPMTests(unittest.TestCase): """Test a _strptime regression in '%I %p' at 12 noon (12 PM)""" def test_twelve_noon_midnight(self): eq = self.assertEqual eq(time.strptime('12 PM', '%I %p')[3], 12) eq(time.strptime('12 AM', '%I %p')[3], 0) eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12) eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0) class JulianTests(unittest.TestCase): """Test a _strptime regression that all julian (1-366) are accepted""" def test_all_julian_days(self): eq = self.assertEqual for i in range(1, 367): # use 2004, since it is a leap year, we have 366 days eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i) class CalculationTests(unittest.TestCase): """Test that strptime() fills in missing info correctly""" def setUp(self): self.time_tuple = time.gmtime() def test_julian_calculation(self): # Make sure that when Julian is missing that it is calculated format_string = "%Y %m %d %H %M %S %w %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_yday == self.time_tuple.tm_yday, "Calculation of tm_yday failed; %s != %s" % (result.tm_yday, self.time_tuple.tm_yday)) def test_gregorian_calculation(self): # Test that Gregorian date can be calculated from Julian day format_string = "%Y %H %M %S %w %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_year == self.time_tuple.tm_year and result.tm_mon == self.time_tuple.tm_mon and result.tm_mday == self.time_tuple.tm_mday, "Calculation of Gregorian date failed;" "%s-%s-%s != %s-%s-%s" % (result.tm_year, 
result.tm_mon, result.tm_mday, self.time_tuple.tm_year, self.time_tuple.tm_mon, self.time_tuple.tm_mday)) def test_day_of_week_calculation(self): # Test that the day of the week is calculated as needed format_string = "%Y %m %d %H %S %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_wday == self.time_tuple.tm_wday, "Calculation of day of the week failed;" "%s != %s" % (result.tm_wday, self.time_tuple.tm_wday)) def test_week_of_year_and_day_of_week_calculation(self): # Should be able to infer date if given year, week of year (%U or %W) # and day of the week def test_helper(ymd_tuple, test_reason): for directive in ('W', 'U'): format_string = "%%Y %%%s %%w" % directive dt_date = datetime_date(*ymd_tuple) strp_input = dt_date.strftime(format_string) strp_output = _strptime._strptime_time(strp_input, format_string) self.assertTrue(strp_output[:3] == ymd_tuple, "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" % (test_reason, directive, strp_input, strp_output[:3], ymd_tuple, strp_output[7], dt_date.timetuple()[7])) test_helper((1901, 1, 3), "week 0") test_helper((1901, 1, 8), "common case") test_helper((1901, 1, 13), "day on Sunday") test_helper((1901, 1, 14), "day on Monday") test_helper((1905, 1, 1), "Jan 1 on Sunday") test_helper((1906, 1, 1), "Jan 1 on Monday") test_helper((1906, 1, 7), "first Sunday in a year starting on Monday") test_helper((1905, 12, 31), "Dec 31 on Sunday") test_helper((1906, 12, 31), "Dec 31 on Monday") test_helper((2008, 12, 29), "Monday in the last week of the year") test_helper((2008, 12, 22), "Monday in the second-to-last week of the " "year") test_helper((1978, 10, 23), "randomly chosen date") test_helper((2004, 12, 18), "randomly chosen date") test_helper((1978, 10, 23), "year starting and ending on Monday while " "date not on Sunday or Monday") test_helper((1917, 12, 17), "year starting and ending on Monday with " "a Monday not at the beginning or end " "of 
the year") test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and " "ending on Monday") test_helper((2007, 1, 7), "First Sunday of 2007") test_helper((2007, 1, 14), "Second Sunday of 2007") test_helper((2006, 12, 31), "Last Sunday of 2006") test_helper((2006, 12, 24), "Second to last Sunday of 2006") class CacheTests(unittest.TestCase): """Test that caching works properly.""" def test_time_re_recreation(self): # Make sure cache is recreated when current locale does not match what # cached object was created with. _strptime._strptime_time("10", "%d") _strptime._strptime_time("2005", "%Y") _strptime._TimeRE_cache.locale_time.lang = "Ni" original_time_re = _strptime._TimeRE_cache _strptime._strptime_time("10", "%d") self.assertIsNot(original_time_re, _strptime._TimeRE_cache) self.assertEqual(len(_strptime._regex_cache), 1) def test_regex_cleanup(self): # Make sure cached regexes are discarded when cache becomes "full". try: del _strptime._regex_cache['%d'] except KeyError: pass bogus_key = 0 while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE: _strptime._regex_cache[bogus_key] = None bogus_key += 1 _strptime._strptime_time("10", "%d") self.assertEqual(len(_strptime._regex_cache), 1) def test_new_localetime(self): # A new LocaleTime instance should be created when a new TimeRE object # is created. locale_time_id = _strptime._TimeRE_cache.locale_time _strptime._TimeRE_cache.locale_time.lang = "Ni" _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) def test_TimeRE_recreation(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8')) except locale.Error: self.skipTest('test needs en_US.UTF8 locale') try: _strptime._strptime_time('10', '%d') # Get id of current cache object. first_time_re = _strptime._TimeRE_cache try: # Change the locale and force a recreation of the cache. 
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8')) _strptime._strptime_time('10', '%d') # Get the new cache object's id. second_time_re = _strptime._TimeRE_cache # They should not be equal. self.assertIsNot(first_time_re, second_time_re) # Possible test locale is not supported while initial locale is. # If this is the case just suppress the exception and fall-through # to the resetting to the original locale. except locale.Error: self.skipTest('test needs de_DE.UTF8 locale') # Make sure we don't trample on the locale setting once we leave the # test. finally: locale.setlocale(locale.LC_TIME, locale_info) def test_main(): support.run_unittest( getlang_Tests, LocaleTime_Tests, TimeRETests, StrptimeTests, Strptime12AMPMTests, JulianTests, CalculationTests, CacheTests ) if __name__ == '__main__': test_main() ======= """PyUnit testing against strptime""" import unittest import time import locale import re import sys from test import support from datetime import date as datetime_date import _strptime class getlang_Tests(unittest.TestCase): """Test _getlang""" def test_basic(self): self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME)) class LocaleTime_Tests(unittest.TestCase): """Tests for _strptime.LocaleTime. All values are lower-cased when stored in LocaleTime, so make sure to compare values after running ``lower`` on them. """ def setUp(self): """Create time tuple based on current time.""" self.time_tuple = time.localtime() self.LT_ins = _strptime.LocaleTime() def compare_against_time(self, testing, directive, tuple_position, error_msg): """Helper method that tests testing against directive based on the tuple_position of time_tuple. Uses error_msg as error message. 
""" strftime_output = time.strftime(directive, self.time_tuple).lower() comparison = testing[self.time_tuple[tuple_position]] self.assertIn(strftime_output, testing, "%s: not found in tuple" % error_msg) self.assertEqual(comparison, strftime_output, "%s: position within tuple incorrect; %s != %s" % (error_msg, comparison, strftime_output)) def test_weekday(self): # Make sure that full and abbreviated weekday names are correct in # both string and position with tuple self.compare_against_time(self.LT_ins.f_weekday, '%A', 6, "Testing of full weekday name failed") self.compare_against_time(self.LT_ins.a_weekday, '%a', 6, "Testing of abbreviated weekday name failed") def test_month(self): # Test full and abbreviated month names; both string and position # within the tuple self.compare_against_time(self.LT_ins.f_month, '%B', 1, "Testing against full month name failed") self.compare_against_time(self.LT_ins.a_month, '%b', 1, "Testing against abbreviated month name failed") def test_am_pm(self): # Make sure AM/PM representation done properly strftime_output = time.strftime("%p", self.time_tuple).lower() self.assertIn(strftime_output, self.LT_ins.am_pm, "AM/PM representation not in tuple") if self.time_tuple[3] < 12: position = 0 else: position = 1 self.assertEqual(self.LT_ins.am_pm[position], strftime_output, "AM/PM representation in the wrong position within the tuple") def test_timezone(self): # Make sure timezone is correct timezone = time.strftime("%Z", self.time_tuple).lower() if timezone: self.assertTrue(timezone in self.LT_ins.timezone[0] or timezone in self.LT_ins.timezone[1], "timezone %s not found in %s" % (timezone, self.LT_ins.timezone)) def test_date_time(self): # Check that LC_date_time, LC_date, and LC_time are correct # the magic date is used so as to not have issues with %c when day of # the month is a single digit and has a leading space. This is not an # issue since strptime still parses it correctly. 
The problem is # testing these directives for correctness by comparing strftime # output. magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0) strftime_output = time.strftime("%c", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date), strftime_output, "LC_date_time incorrect") strftime_output = time.strftime("%x", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date), strftime_output, "LC_date incorrect") strftime_output = time.strftime("%X", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date), strftime_output, "LC_time incorrect") LT = _strptime.LocaleTime() LT.am_pm = ('', '') self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle " "empty strings") def test_lang(self): # Make sure lang is set to what _getlang() returns # Assuming locale has not changed between now and when self.LT_ins was created self.assertEqual(self.LT_ins.lang, _strptime._getlang()) class TimeRETests(unittest.TestCase): """Tests for TimeRE.""" def setUp(self): """Construct generic TimeRE object.""" self.time_re = _strptime.TimeRE() self.locale_time = _strptime.LocaleTime() def test_pattern(self): # Test TimeRE.pattern pattern_string = self.time_re.pattern(r"%a %A %d") self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1, "did not find abbreviated weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1, "did not find full weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.time_re['d']) != -1, "did not find 'd' directive pattern string '%s'" % pattern_string) def test_pattern_escaping(self): # Make sure any characters in the format string that might be taken as # regex syntax is escaped. 
pattern_string = self.time_re.pattern("\d+") self.assertIn(r"\\d\+", pattern_string, "%s does not have re characters escaped properly" % pattern_string) def test_compile(self): # Check that compiled regex is correct found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6]) self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6], "re object for '%A' failed") compiled = self.time_re.compile(r"%a %b") found = compiled.match("%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4])) self.assertTrue(found, "Match failed with '%s' regex and '%s' string" % (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4]))) self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and found.group('b') == self.locale_time.a_month[4], "re object couldn't find the abbreviated weekday month in " "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" % (found.string, found.re.pattern, found.group('a'), found.group('b'))) for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S', 'U','w','W','x','X','y','Y','Z','%'): compiled = self.time_re.compile("%" + directive) found = compiled.match(time.strftime("%" + directive)) self.assertTrue(found, "Matching failed on '%s' using '%s' regex" % (time.strftime("%" + directive), compiled.pattern)) def test_blankpattern(self): # Make sure when tuple or something has no values no regex is generated. 
# Fixes bug #661354 test_locale = _strptime.LocaleTime() test_locale.timezone = (frozenset(), frozenset()) self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', "with timezone == ('',''), TimeRE().pattern('%Z') != ''") def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works compiled_re = self.time_re.compile("\w+ %m") found = compiled_re.match("\w+ 10") self.assertTrue(found, "Escaping failed of format '\w+ 10'") def test_locale_data_w_regex_metacharacters(self): # Check that if locale data contains regex metacharacters they are # escaped properly. # Discovered by bug #1039270 . locale_time = _strptime.LocaleTime() locale_time.timezone = (frozenset(("utc", "gmt", "Tokyo (standard time)")), frozenset("Tokyo (daylight time)")) time_re = _strptime.TimeRE(locale_time) self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"), "locale data that contains regex metacharacters is not" " properly escaped") def test_whitespace_substitution(self): # When pattern contains whitespace, make sure it is taken into account # so as to not allow to subpatterns to end up next to each other and # "steal" characters from each other. 
pattern = self.time_re.pattern('%j %H') self.assertFalse(re.match(pattern, "180")) self.assertTrue(re.match(pattern, "18 0")) class StrptimeTests(unittest.TestCase): """Tests for _strptime.strptime.""" def setUp(self): """Create testing time tuple.""" self.time_tuple = time.gmtime() def test_ValueError(self): # Make sure ValueError is raised when match fails or format is bad self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d", format="%A") for bad_format in ("%", "% ", "%e"): try: _strptime._strptime_time("2005", bad_format) except ValueError: continue except Exception as err: self.fail("'%s' raised %s, not ValueError" % (bad_format, err.__class__.__name__)) else: self.fail("'%s' did not raise ValueError" % bad_format) def test_strptime_exception_context(self): # check that this doesn't chain exceptions needlessly (see #17572) with self.assertRaises(ValueError) as e: _strptime._strptime_time('', '%D') self.assertIs(e.exception.__suppress_context__, True) # additional check for IndexError branch (issue #19545) with self.assertRaises(ValueError) as e: _strptime._strptime_time('19', '%Y %') self.assertIs(e.exception.__suppress_context__, True) def test_unconverteddata(self): # Check ValueError is raised when there is unconverted data self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m") def helper(self, directive, position): """Helper fxn in testing.""" strf_output = time.strftime("%" + directive, self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%" + directive) self.assertTrue(strp_output[position] == self.time_tuple[position], "testing of '%s' directive failed; '%s' -> %s != %s" % (directive, strf_output, strp_output[position], self.time_tuple[position])) def test_year(self): # Test that the year is handled properly for directive in ('y', 'Y'): self.helper(directive, 0) # Must also make sure %y values are correct for bounds set by Open Group for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))): 
for bound in bounds: strp_output = _strptime._strptime_time(bound, '%y') expected_result = century + int(bound) self.assertTrue(strp_output[0] == expected_result, "'y' test failed; passed in '%s' " "and returned '%s'" % (bound, strp_output[0])) def test_month(self): # Test for month directives for directive in ('B', 'b', 'm'): self.helper(directive, 1) def test_day(self): # Test for day directives self.helper('d', 2) def test_hour(self): # Test hour directives self.helper('H', 3) strf_output = time.strftime("%I %p", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%I %p") self.assertTrue(strp_output[3] == self.time_tuple[3], "testing of '%%I %%p' directive failed; '%s' -> %s != %s" % (strf_output, strp_output[3], self.time_tuple[3])) def test_minute(self): # Test minute directives self.helper('M', 4) def test_second(self): # Test second directives self.helper('S', 5) def test_fraction(self): # Test microseconds import datetime d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987) tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f") self.assertEqual(frac, d.microsecond) def test_weekday(self): # Test weekday directives for directive in ('A', 'a', 'w'): self.helper(directive,6) def test_julian(self): # Test julian directives self.helper('j', 7) def test_timezone(self): # Test timezone directives. # When gmtime() is used with %Z, entire result of strftime() is empty. # Check for equal timezone names deals with bad locale info when this # occurs; first found in FreeBSD 4.4. 
strp_output = _strptime._strptime_time("UTC", "%Z") self.assertEqual(strp_output.tm_isdst, 0) strp_output = _strptime._strptime_time("GMT", "%Z") self.assertEqual(strp_output.tm_isdst, 0) time_tuple = time.localtime() strf_output = time.strftime("%Z") #UTC does not have a timezone strp_output = _strptime._strptime_time(strf_output, "%Z") locale_time = _strptime.LocaleTime() if time.tzname[0] != time.tzname[1] or not time.daylight: self.assertTrue(strp_output[8] == time_tuple[8], "timezone check failed; '%s' -> %s != %s" % (strf_output, strp_output[8], time_tuple[8])) else: self.assertTrue(strp_output[8] == -1, "LocaleTime().timezone has duplicate values and " "time.daylight but timezone value not set to -1") def test_bad_timezone(self): # Explicitly test possibility of bad timezone; # when time.tzname[0] == time.tzname[1] and time.daylight tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') try: original_tzname = time.tzname original_daylight = time.daylight time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] self.assertEqual(tz_value, -1, "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) finally: time.tzname = original_tzname time.daylight = original_daylight def test_date_time(self): # Test %c directive for position in range(6): self.helper('c', position) def test_date(self): # Test %x directive for position in range(0,3): self.helper('x', position) def test_time(self): # Test %X directive for position in range(3,6): self.helper('X', position) def test_percent(self): # Make sure % signs are handled properly strf_output = time.strftime("%m %% %Y", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%m %% %Y") self.assertTrue(strp_output[0] == self.time_tuple[0] and strp_output[1] == self.time_tuple[1], "handling of percent sign failed") def 
test_caseinsensitive(self): # Should handle names case-insensitively. strf_output = time.strftime("%B", self.time_tuple) self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"), "strptime does not handle ALL-CAPS names properly") self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"), "strptime does not handle lowercase names properly") self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"), "strptime does not handle capword names properly") def test_defaults(self): # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0) defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1) strp_output = _strptime._strptime_time('1', '%m') self.assertTrue(strp_output == defaults, "Default values for strptime() are incorrect;" " %s != %s" % (strp_output, defaults)) def test_escaping(self): # Make sure all characters that have regex significance are escaped. # Parentheses are in a purposeful order; will cause an error of # unbalanced parentheses when the regex is compiled if they are not # escaped. # Test instigated by bug #796149 . 
need_escaping = ".^$*+?{}\[]|)(" self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping)) def test_feb29_on_leap_year_without_year(self): time.strptime("Feb 29", "%b %d") def test_mar1_comes_after_feb29_even_when_omitting_the_year(self): self.assertLess( time.strptime("Feb 29", "%b %d"), time.strptime("Mar 1", "%b %d")) class Strptime12AMPMTests(unittest.TestCase): """Test a _strptime regression in '%I %p' at 12 noon (12 PM)""" def test_twelve_noon_midnight(self): eq = self.assertEqual eq(time.strptime('12 PM', '%I %p')[3], 12) eq(time.strptime('12 AM', '%I %p')[3], 0) eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12) eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0) class JulianTests(unittest.TestCase): """Test a _strptime regression that all julian (1-366) are accepted""" def test_all_julian_days(self): eq = self.assertEqual for i in range(1, 367): # use 2004, since it is a leap year, we have 366 days eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i) class CalculationTests(unittest.TestCase): """Test that strptime() fills in missing info correctly""" def setUp(self): self.time_tuple = time.gmtime() def test_julian_calculation(self): # Make sure that when Julian is missing that it is calculated format_string = "%Y %m %d %H %M %S %w %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_yday == self.time_tuple.tm_yday, "Calculation of tm_yday failed; %s != %s" % (result.tm_yday, self.time_tuple.tm_yday)) def test_gregorian_calculation(self): # Test that Gregorian date can be calculated from Julian day format_string = "%Y %H %M %S %w %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_year == self.time_tuple.tm_year and result.tm_mon == self.time_tuple.tm_mon and result.tm_mday == self.time_tuple.tm_mday, "Calculation of Gregorian date failed;" "%s-%s-%s != %s-%s-%s" % (result.tm_year, 
result.tm_mon, result.tm_mday, self.time_tuple.tm_year, self.time_tuple.tm_mon, self.time_tuple.tm_mday)) def test_day_of_week_calculation(self): # Test that the day of the week is calculated as needed format_string = "%Y %m %d %H %S %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_wday == self.time_tuple.tm_wday, "Calculation of day of the week failed;" "%s != %s" % (result.tm_wday, self.time_tuple.tm_wday)) def test_week_of_year_and_day_of_week_calculation(self): # Should be able to infer date if given year, week of year (%U or %W) # and day of the week def test_helper(ymd_tuple, test_reason): for directive in ('W', 'U'): format_string = "%%Y %%%s %%w" % directive dt_date = datetime_date(*ymd_tuple) strp_input = dt_date.strftime(format_string) strp_output = _strptime._strptime_time(strp_input, format_string) self.assertTrue(strp_output[:3] == ymd_tuple, "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" % (test_reason, directive, strp_input, strp_output[:3], ymd_tuple, strp_output[7], dt_date.timetuple()[7])) test_helper((1901, 1, 3), "week 0") test_helper((1901, 1, 8), "common case") test_helper((1901, 1, 13), "day on Sunday") test_helper((1901, 1, 14), "day on Monday") test_helper((1905, 1, 1), "Jan 1 on Sunday") test_helper((1906, 1, 1), "Jan 1 on Monday") test_helper((1906, 1, 7), "first Sunday in a year starting on Monday") test_helper((1905, 12, 31), "Dec 31 on Sunday") test_helper((1906, 12, 31), "Dec 31 on Monday") test_helper((2008, 12, 29), "Monday in the last week of the year") test_helper((2008, 12, 22), "Monday in the second-to-last week of the " "year") test_helper((1978, 10, 23), "randomly chosen date") test_helper((2004, 12, 18), "randomly chosen date") test_helper((1978, 10, 23), "year starting and ending on Monday while " "date not on Sunday or Monday") test_helper((1917, 12, 17), "year starting and ending on Monday with " "a Monday not at the beginning or end " "of 
the year") test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and " "ending on Monday") test_helper((2007, 1, 7), "First Sunday of 2007") test_helper((2007, 1, 14), "Second Sunday of 2007") test_helper((2006, 12, 31), "Last Sunday of 2006") test_helper((2006, 12, 24), "Second to last Sunday of 2006") class CacheTests(unittest.TestCase): """Test that caching works properly.""" def test_time_re_recreation(self): # Make sure cache is recreated when current locale does not match what # cached object was created with. _strptime._strptime_time("10", "%d") _strptime._strptime_time("2005", "%Y") _strptime._TimeRE_cache.locale_time.lang = "Ni" original_time_re = _strptime._TimeRE_cache _strptime._strptime_time("10", "%d") self.assertIsNot(original_time_re, _strptime._TimeRE_cache) self.assertEqual(len(_strptime._regex_cache), 1) def test_regex_cleanup(self): # Make sure cached regexes are discarded when cache becomes "full". try: del _strptime._regex_cache['%d'] except KeyError: pass bogus_key = 0 while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE: _strptime._regex_cache[bogus_key] = None bogus_key += 1 _strptime._strptime_time("10", "%d") self.assertEqual(len(_strptime._regex_cache), 1) def test_new_localetime(self): # A new LocaleTime instance should be created when a new TimeRE object # is created. locale_time_id = _strptime._TimeRE_cache.locale_time _strptime._TimeRE_cache.locale_time.lang = "Ni" _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) def test_TimeRE_recreation(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8')) except locale.Error: self.skipTest('test needs en_US.UTF8 locale') try: _strptime._strptime_time('10', '%d') # Get id of current cache object. first_time_re = _strptime._TimeRE_cache try: # Change the locale and force a recreation of the cache. 
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8')) _strptime._strptime_time('10', '%d') # Get the new cache object's id. second_time_re = _strptime._TimeRE_cache # They should not be equal. self.assertIsNot(first_time_re, second_time_re) # Possible test locale is not supported while initial locale is. # If this is the case just suppress the exception and fall-through # to the resetting to the original locale. except locale.Error: self.skipTest('test needs de_DE.UTF8 locale') # Make sure we don't trample on the locale setting once we leave the # test. finally: locale.setlocale(locale.LC_TIME, locale_info) def test_main(): support.run_unittest( getlang_Tests, LocaleTime_Tests, TimeRETests, StrptimeTests, Strptime12AMPMTests, JulianTests, CalculationTests, CacheTests ) if __name__ == '__main__': test_main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453 ======= """PyUnit testing against strptime""" import unittest import time import locale import re import sys from test import support from datetime import date as datetime_date import _strptime class getlang_Tests(unittest.TestCase): """Test _getlang""" def test_basic(self): self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME)) class LocaleTime_Tests(unittest.TestCase): """Tests for _strptime.LocaleTime. All values are lower-cased when stored in LocaleTime, so make sure to compare values after running ``lower`` on them. """ def setUp(self): """Create time tuple based on current time.""" self.time_tuple = time.localtime() self.LT_ins = _strptime.LocaleTime() def compare_against_time(self, testing, directive, tuple_position, error_msg): """Helper method that tests testing against directive based on the tuple_position of time_tuple. Uses error_msg as error message. 
""" strftime_output = time.strftime(directive, self.time_tuple).lower() comparison = testing[self.time_tuple[tuple_position]] self.assertIn(strftime_output, testing, "%s: not found in tuple" % error_msg) self.assertEqual(comparison, strftime_output, "%s: position within tuple incorrect; %s != %s" % (error_msg, comparison, strftime_output)) def test_weekday(self): # Make sure that full and abbreviated weekday names are correct in # both string and position with tuple self.compare_against_time(self.LT_ins.f_weekday, '%A', 6, "Testing of full weekday name failed") self.compare_against_time(self.LT_ins.a_weekday, '%a', 6, "Testing of abbreviated weekday name failed") def test_month(self): # Test full and abbreviated month names; both string and position # within the tuple self.compare_against_time(self.LT_ins.f_month, '%B', 1, "Testing against full month name failed") self.compare_against_time(self.LT_ins.a_month, '%b', 1, "Testing against abbreviated month name failed") def test_am_pm(self): # Make sure AM/PM representation done properly strftime_output = time.strftime("%p", self.time_tuple).lower() self.assertIn(strftime_output, self.LT_ins.am_pm, "AM/PM representation not in tuple") if self.time_tuple[3] < 12: position = 0 else: position = 1 self.assertEqual(self.LT_ins.am_pm[position], strftime_output, "AM/PM representation in the wrong position within the tuple") def test_timezone(self): # Make sure timezone is correct timezone = time.strftime("%Z", self.time_tuple).lower() if timezone: self.assertTrue(timezone in self.LT_ins.timezone[0] or timezone in self.LT_ins.timezone[1], "timezone %s not found in %s" % (timezone, self.LT_ins.timezone)) def test_date_time(self): # Check that LC_date_time, LC_date, and LC_time are correct # the magic date is used so as to not have issues with %c when day of # the month is a single digit and has a leading space. This is not an # issue since strptime still parses it correctly. 
The problem is # testing these directives for correctness by comparing strftime # output. magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0) strftime_output = time.strftime("%c", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date), strftime_output, "LC_date_time incorrect") strftime_output = time.strftime("%x", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date), strftime_output, "LC_date incorrect") strftime_output = time.strftime("%X", magic_date) self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date), strftime_output, "LC_time incorrect") LT = _strptime.LocaleTime() LT.am_pm = ('', '') self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle " "empty strings") def test_lang(self): # Make sure lang is set to what _getlang() returns # Assuming locale has not changed between now and when self.LT_ins was created self.assertEqual(self.LT_ins.lang, _strptime._getlang()) class TimeRETests(unittest.TestCase): """Tests for TimeRE.""" def setUp(self): """Construct generic TimeRE object.""" self.time_re = _strptime.TimeRE() self.locale_time = _strptime.LocaleTime() def test_pattern(self): # Test TimeRE.pattern pattern_string = self.time_re.pattern(r"%a %A %d") self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1, "did not find abbreviated weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1, "did not find full weekday in pattern string '%s'" % pattern_string) self.assertTrue(pattern_string.find(self.time_re['d']) != -1, "did not find 'd' directive pattern string '%s'" % pattern_string) def test_pattern_escaping(self): # Make sure any characters in the format string that might be taken as # regex syntax is escaped. 
pattern_string = self.time_re.pattern("\d+") self.assertIn(r"\\d\+", pattern_string, "%s does not have re characters escaped properly" % pattern_string) def test_compile(self): # Check that compiled regex is correct found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6]) self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6], "re object for '%A' failed") compiled = self.time_re.compile(r"%a %b") found = compiled.match("%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4])) self.assertTrue(found, "Match failed with '%s' regex and '%s' string" % (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4], self.locale_time.a_month[4]))) self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and found.group('b') == self.locale_time.a_month[4], "re object couldn't find the abbreviated weekday month in " "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" % (found.string, found.re.pattern, found.group('a'), found.group('b'))) for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S', 'U','w','W','x','X','y','Y','Z','%'): compiled = self.time_re.compile("%" + directive) found = compiled.match(time.strftime("%" + directive)) self.assertTrue(found, "Matching failed on '%s' using '%s' regex" % (time.strftime("%" + directive), compiled.pattern)) def test_blankpattern(self): # Make sure when tuple or something has no values no regex is generated. 
# Fixes bug #661354 test_locale = _strptime.LocaleTime() test_locale.timezone = (frozenset(), frozenset()) self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '', "with timezone == ('',''), TimeRE().pattern('%Z') != ''") def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works compiled_re = self.time_re.compile("\w+ %m") found = compiled_re.match("\w+ 10") self.assertTrue(found, "Escaping failed of format '\w+ 10'") def test_locale_data_w_regex_metacharacters(self): # Check that if locale data contains regex metacharacters they are # escaped properly. # Discovered by bug #1039270 . locale_time = _strptime.LocaleTime() locale_time.timezone = (frozenset(("utc", "gmt", "Tokyo (standard time)")), frozenset("Tokyo (daylight time)")) time_re = _strptime.TimeRE(locale_time) self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"), "locale data that contains regex metacharacters is not" " properly escaped") def test_whitespace_substitution(self): # When pattern contains whitespace, make sure it is taken into account # so as to not allow to subpatterns to end up next to each other and # "steal" characters from each other. 
pattern = self.time_re.pattern('%j %H') self.assertFalse(re.match(pattern, "180")) self.assertTrue(re.match(pattern, "18 0")) class StrptimeTests(unittest.TestCase): """Tests for _strptime.strptime.""" def setUp(self): """Create testing time tuple.""" self.time_tuple = time.gmtime() def test_ValueError(self): # Make sure ValueError is raised when match fails or format is bad self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d", format="%A") for bad_format in ("%", "% ", "%e"): try: _strptime._strptime_time("2005", bad_format) except ValueError: continue except Exception as err: self.fail("'%s' raised %s, not ValueError" % (bad_format, err.__class__.__name__)) else: self.fail("'%s' did not raise ValueError" % bad_format) def test_strptime_exception_context(self): # check that this doesn't chain exceptions needlessly (see #17572) with self.assertRaises(ValueError) as e: _strptime._strptime_time('', '%D') self.assertIs(e.exception.__suppress_context__, True) # additional check for IndexError branch (issue #19545) with self.assertRaises(ValueError) as e: _strptime._strptime_time('19', '%Y %') self.assertIs(e.exception.__suppress_context__, True) def test_unconverteddata(self): # Check ValueError is raised when there is unconverted data self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m") def helper(self, directive, position): """Helper fxn in testing.""" strf_output = time.strftime("%" + directive, self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%" + directive) self.assertTrue(strp_output[position] == self.time_tuple[position], "testing of '%s' directive failed; '%s' -> %s != %s" % (directive, strf_output, strp_output[position], self.time_tuple[position])) def test_year(self): # Test that the year is handled properly for directive in ('y', 'Y'): self.helper(directive, 0) # Must also make sure %y values are correct for bounds set by Open Group for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))): 
for bound in bounds: strp_output = _strptime._strptime_time(bound, '%y') expected_result = century + int(bound) self.assertTrue(strp_output[0] == expected_result, "'y' test failed; passed in '%s' " "and returned '%s'" % (bound, strp_output[0])) def test_month(self): # Test for month directives for directive in ('B', 'b', 'm'): self.helper(directive, 1) def test_day(self): # Test for day directives self.helper('d', 2) def test_hour(self): # Test hour directives self.helper('H', 3) strf_output = time.strftime("%I %p", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%I %p") self.assertTrue(strp_output[3] == self.time_tuple[3], "testing of '%%I %%p' directive failed; '%s' -> %s != %s" % (strf_output, strp_output[3], self.time_tuple[3])) def test_minute(self): # Test minute directives self.helper('M', 4) def test_second(self): # Test second directives self.helper('S', 5) def test_fraction(self): # Test microseconds import datetime d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987) tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f") self.assertEqual(frac, d.microsecond) def test_weekday(self): # Test weekday directives for directive in ('A', 'a', 'w'): self.helper(directive,6) def test_julian(self): # Test julian directives self.helper('j', 7) def test_timezone(self): # Test timezone directives. # When gmtime() is used with %Z, entire result of strftime() is empty. # Check for equal timezone names deals with bad locale info when this # occurs; first found in FreeBSD 4.4. 
strp_output = _strptime._strptime_time("UTC", "%Z") self.assertEqual(strp_output.tm_isdst, 0) strp_output = _strptime._strptime_time("GMT", "%Z") self.assertEqual(strp_output.tm_isdst, 0) time_tuple = time.localtime() strf_output = time.strftime("%Z") #UTC does not have a timezone strp_output = _strptime._strptime_time(strf_output, "%Z") locale_time = _strptime.LocaleTime() if time.tzname[0] != time.tzname[1] or not time.daylight: self.assertTrue(strp_output[8] == time_tuple[8], "timezone check failed; '%s' -> %s != %s" % (strf_output, strp_output[8], time_tuple[8])) else: self.assertTrue(strp_output[8] == -1, "LocaleTime().timezone has duplicate values and " "time.daylight but timezone value not set to -1") def test_bad_timezone(self): # Explicitly test possibility of bad timezone; # when time.tzname[0] == time.tzname[1] and time.daylight tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') try: original_tzname = time.tzname original_daylight = time.daylight time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] self.assertEqual(tz_value, -1, "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) finally: time.tzname = original_tzname time.daylight = original_daylight def test_date_time(self): # Test %c directive for position in range(6): self.helper('c', position) def test_date(self): # Test %x directive for position in range(0,3): self.helper('x', position) def test_time(self): # Test %X directive for position in range(3,6): self.helper('X', position) def test_percent(self): # Make sure % signs are handled properly strf_output = time.strftime("%m %% %Y", self.time_tuple) strp_output = _strptime._strptime_time(strf_output, "%m %% %Y") self.assertTrue(strp_output[0] == self.time_tuple[0] and strp_output[1] == self.time_tuple[1], "handling of percent sign failed") def 
test_caseinsensitive(self): # Should handle names case-insensitively. strf_output = time.strftime("%B", self.time_tuple) self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"), "strptime does not handle ALL-CAPS names properly") self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"), "strptime does not handle lowercase names properly") self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"), "strptime does not handle capword names properly") def test_defaults(self): # Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0) defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1) strp_output = _strptime._strptime_time('1', '%m') self.assertTrue(strp_output == defaults, "Default values for strptime() are incorrect;" " %s != %s" % (strp_output, defaults)) def test_escaping(self): # Make sure all characters that have regex significance are escaped. # Parentheses are in a purposeful order; will cause an error of # unbalanced parentheses when the regex is compiled if they are not # escaped. # Test instigated by bug #796149 . 
need_escaping = ".^$*+?{}\[]|)(" self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping)) def test_feb29_on_leap_year_without_year(self): time.strptime("Feb 29", "%b %d") def test_mar1_comes_after_feb29_even_when_omitting_the_year(self): self.assertLess( time.strptime("Feb 29", "%b %d"), time.strptime("Mar 1", "%b %d")) class Strptime12AMPMTests(unittest.TestCase): """Test a _strptime regression in '%I %p' at 12 noon (12 PM)""" def test_twelve_noon_midnight(self): eq = self.assertEqual eq(time.strptime('12 PM', '%I %p')[3], 12) eq(time.strptime('12 AM', '%I %p')[3], 0) eq(_strptime._strptime_time('12 PM', '%I %p')[3], 12) eq(_strptime._strptime_time('12 AM', '%I %p')[3], 0) class JulianTests(unittest.TestCase): """Test a _strptime regression that all julian (1-366) are accepted""" def test_all_julian_days(self): eq = self.assertEqual for i in range(1, 367): # use 2004, since it is a leap year, we have 366 days eq(_strptime._strptime_time('%d 2004' % i, '%j %Y')[7], i) class CalculationTests(unittest.TestCase): """Test that strptime() fills in missing info correctly""" def setUp(self): self.time_tuple = time.gmtime() def test_julian_calculation(self): # Make sure that when Julian is missing that it is calculated format_string = "%Y %m %d %H %M %S %w %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_yday == self.time_tuple.tm_yday, "Calculation of tm_yday failed; %s != %s" % (result.tm_yday, self.time_tuple.tm_yday)) def test_gregorian_calculation(self): # Test that Gregorian date can be calculated from Julian day format_string = "%Y %H %M %S %w %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_year == self.time_tuple.tm_year and result.tm_mon == self.time_tuple.tm_mon and result.tm_mday == self.time_tuple.tm_mday, "Calculation of Gregorian date failed;" "%s-%s-%s != %s-%s-%s" % (result.tm_year, 
result.tm_mon, result.tm_mday, self.time_tuple.tm_year, self.time_tuple.tm_mon, self.time_tuple.tm_mday)) def test_day_of_week_calculation(self): # Test that the day of the week is calculated as needed format_string = "%Y %m %d %H %S %j %Z" result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple), format_string) self.assertTrue(result.tm_wday == self.time_tuple.tm_wday, "Calculation of day of the week failed;" "%s != %s" % (result.tm_wday, self.time_tuple.tm_wday)) def test_week_of_year_and_day_of_week_calculation(self): # Should be able to infer date if given year, week of year (%U or %W) # and day of the week def test_helper(ymd_tuple, test_reason): for directive in ('W', 'U'): format_string = "%%Y %%%s %%w" % directive dt_date = datetime_date(*ymd_tuple) strp_input = dt_date.strftime(format_string) strp_output = _strptime._strptime_time(strp_input, format_string) self.assertTrue(strp_output[:3] == ymd_tuple, "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" % (test_reason, directive, strp_input, strp_output[:3], ymd_tuple, strp_output[7], dt_date.timetuple()[7])) test_helper((1901, 1, 3), "week 0") test_helper((1901, 1, 8), "common case") test_helper((1901, 1, 13), "day on Sunday") test_helper((1901, 1, 14), "day on Monday") test_helper((1905, 1, 1), "Jan 1 on Sunday") test_helper((1906, 1, 1), "Jan 1 on Monday") test_helper((1906, 1, 7), "first Sunday in a year starting on Monday") test_helper((1905, 12, 31), "Dec 31 on Sunday") test_helper((1906, 12, 31), "Dec 31 on Monday") test_helper((2008, 12, 29), "Monday in the last week of the year") test_helper((2008, 12, 22), "Monday in the second-to-last week of the " "year") test_helper((1978, 10, 23), "randomly chosen date") test_helper((2004, 12, 18), "randomly chosen date") test_helper((1978, 10, 23), "year starting and ending on Monday while " "date not on Sunday or Monday") test_helper((1917, 12, 17), "year starting and ending on Monday with " "a Monday not at the beginning or end " "of 
the year") test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and " "ending on Monday") test_helper((2007, 1, 7), "First Sunday of 2007") test_helper((2007, 1, 14), "Second Sunday of 2007") test_helper((2006, 12, 31), "Last Sunday of 2006") test_helper((2006, 12, 24), "Second to last Sunday of 2006") class CacheTests(unittest.TestCase): """Test that caching works properly.""" def test_time_re_recreation(self): # Make sure cache is recreated when current locale does not match what # cached object was created with. _strptime._strptime_time("10", "%d") _strptime._strptime_time("2005", "%Y") _strptime._TimeRE_cache.locale_time.lang = "Ni" original_time_re = _strptime._TimeRE_cache _strptime._strptime_time("10", "%d") self.assertIsNot(original_time_re, _strptime._TimeRE_cache) self.assertEqual(len(_strptime._regex_cache), 1) def test_regex_cleanup(self): # Make sure cached regexes are discarded when cache becomes "full". try: del _strptime._regex_cache['%d'] except KeyError: pass bogus_key = 0 while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE: _strptime._regex_cache[bogus_key] = None bogus_key += 1 _strptime._strptime_time("10", "%d") self.assertEqual(len(_strptime._regex_cache), 1) def test_new_localetime(self): # A new LocaleTime instance should be created when a new TimeRE object # is created. locale_time_id = _strptime._TimeRE_cache.locale_time _strptime._TimeRE_cache.locale_time.lang = "Ni" _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) def test_TimeRE_recreation(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8')) except locale.Error: self.skipTest('test needs en_US.UTF8 locale') try: _strptime._strptime_time('10', '%d') # Get id of current cache object. first_time_re = _strptime._TimeRE_cache try: # Change the locale and force a recreation of the cache. 
locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8')) _strptime._strptime_time('10', '%d') # Get the new cache object's id. second_time_re = _strptime._TimeRE_cache # They should not be equal. self.assertIsNot(first_time_re, second_time_re) # Possible test locale is not supported while initial locale is. # If this is the case just suppress the exception and fall-through # to the resetting to the original locale. except locale.Error: self.skipTest('test needs de_DE.UTF8 locale') # Make sure we don't trample on the locale setting once we leave the # test. finally: locale.setlocale(locale.LC_TIME, locale_info) def test_main(): support.run_unittest( getlang_Tests, LocaleTime_Tests, TimeRETests, StrptimeTests, Strptime12AMPMTests, JulianTests, CalculationTests, CacheTests ) if __name__ == '__main__': test_main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453 </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475108"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cython-testbed/pandas</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">pandas/tests/test_downstream.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3443</span></div> </div></div> </td><td class="min-w-fit max-w-sm 
break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- """ Testing that we work in the downstream packages """ import subprocess import sys import pytest import numpy as np # noqa from pandas import DataFrame from pandas.compat import PY36 from pandas.util import testing as tm import importlib def import_module(name): # we *only* want to skip if the module is truly not available # and NOT just an actual import error because of pandas changes if PY36: try: return importlib.import_module(name) except ModuleNotFoundError: # noqa pytest.skip("skipping as {} not available".format(name)) else: try: return importlib.import_module(name) except ImportError as e: if "No module named" in str(e) and name in str(e): pytest.skip("skipping as {} not available".format(name)) raise @pytest.fixture def df(): return DataFrame({'A': [1, 2, 3]}) def test_dask(df): toolz = import_module('toolz') # noqa dask = import_module('dask') # noqa import dask.dataframe as dd ddf = dd.from_pandas(df, npartitions=3) assert ddf.A is not None assert ddf.compute() is not None def test_xarray(df): xarray = import_module('xarray') # noqa assert df.to_xarray() is not None def test_oo_optimizable(): # GH 21071 subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) @tm.network # Cython import warning @pytest.mark.filterwarnings("ignore:can't:ImportWarning") def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa import statsmodels.api as sm import statsmodels.formula.api as smf df = sm.datasets.get_rdataset("Guerry", "HistData").data smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit() # Cython import warning @pytest.mark.filterwarnings("ignore:can't:ImportWarning") def test_scikit_learn(df): sklearn = import_module('sklearn') # noqa from sklearn import svm, datasets digits = datasets.load_digits() clf = svm.SVC(gamma=0.001, C=100.) 
clf.fit(digits.data[:-1], digits.target[:-1]) clf.predict(digits.data[-1:]) # Cython import warning and traitlets @tm.network @pytest.mark.filterwarnings("ignore") def test_seaborn(): seaborn = import_module('seaborn') tips = seaborn.load_dataset("tips") seaborn.stripplot(x="day", y="total_bill", data=tips) def test_pandas_gbq(df): pandas_gbq = import_module('pandas_gbq') # noqa @pytest.mark.xfail(reason="0.7.0 pending", strict=True) @tm.network def test_pandas_datareader(): pandas_datareader = import_module('pandas_datareader') # noqa pandas_datareader.DataReader( 'F', 'quandl', '2017-01-01', '2017-02-01') # importing from pandas, Cython import warning @pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning") @pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning") @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") def test_geopandas(): geopandas = import_module('geopandas') # noqa fp = geopandas.datasets.get_path('naturalearth_lowres') assert geopandas.read_file(fp) is not None # Cython import warning @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") def test_pyarrow(df): pyarrow = import_module('pyarrow') # noqa table = pyarrow.Table.from_pandas(df) result = table.to_pandas() tm.assert_frame_equal(result, df) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475109"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">chatchavan/pcl</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">doc/tutorials/content/conf.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">66</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4604</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># All configuration values have a default; values that are commented out # serve to show the default. import sys, os # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.pngmath', 'sphinxcontrib.doxylink.doxylink'] pngmath_dvipng_args = ['-gamma 1.5', '-D 110', '-bg Transparent'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PCL' copyright = '' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0' # The full version, including alpha/beta/rc tags. release = '0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. 
#unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'rightsidebar' : 'true' } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'Home' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). html_file_suffix = '.html' html_sidebars = { '**': [], 'using/windows': [], } html_show_copyright = False html_show_sphinx = False html_add_permalinks = None needs_sphinx = 1.0 file_insertion_enabled = True raw_enabled = True # Set up doxylink doxylink = {'pcl' : ('../../../build/doc/doxygen/pcl.tag', 'http://docs.pointclouds.org/trunk/')} </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475110"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">goldshtn/windbg-extensions</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">heap_stat.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4876</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from pykd import * import re import sys import pickle # TODO list: # 1) better parameter parsing and validation # 2) cleaner printing code stats_only = False save_info = False short_output = False pointer_size = ptrSize() pointer_format = "%016x" if pointer_size == 8 else "%08x" if '-stat' in sys.argv: stats_only = True potential_save_file = "" if '-save' in sys.argv: save_info = True potential_save_file = sys.argv[sys.argv.index('-save')+1] if '-load' in sys.argv: if save_info: dprintln("Error: can't use -load together with -save") exit() potential_save_file = sys.argv[sys.argv.index('-load')+1] if '-short' in sys.argv: short_output = True min_count = 0 if '-min' in sys.argv: min_count = int(sys.argv[sys.argv.index('-min')+1]) type_filter = "" if '-type' in sys.argv: type_filter = sys.argv[sys.argv.index('-type')+1] if '-help' in sys.argv: dprintln("") dprintln("Usage:") dprintln(" !py %s [-stat] [-help] [-short] [-type <typename>] [-min <count>] [-<save|load> <cache_filename>]" % sys.argv[0]) dprintln("") dprintln(" -stat displays statistics only at the end of the run") dprintln(" -short displays only object addresses, for scripting with .foreach and similar commands") dprintln(" -help displays usage information") dprintln(" -type filters output to the specified type(s) only - accepts a regular expression") dprintln(" -min filters statistics output to types that have at least that many instances") dprintln(" -save at the end of the run, saves type information to a file to make subsequent runs faster") dprintln(" -load read type information from a file to make the run faster") dprintln("") exit() vftables_by_address = {} 
vftables_by_type = {} typesize_by_type = {} if not save_info and len(potential_save_file) > 0: if not short_output: dprint("Loading type information from save file %s..." % potential_save_file) file = open(potential_save_file, 'rb') (vftables_by_address, vftables_by_type, typesize_by_type) = pickle.load(file) file.close() if not short_output: dprintln("DONE") else: if not short_output: dprint("Running x /2 *!*`vftable' command...") vftables = dbgCommand("x /2 *!*`vftable'").split('\n') if not short_output: dprintln("DONE") for vftable in vftables: parts = [w.lstrip().rstrip() for w in vftable.replace("::`vftable'", "").split(' ', 1)] if len(parts) < 2: continue (address, type) = parts address = address.replace('`', '') address_ptr = long(address,16) vftables_by_address[address_ptr] = type vftables_by_type[type] = address_ptr if not short_output: dprint("Running !heap -h 0 command...") heap_output = dbgCommand('!heap -h 0').split('\n') if not short_output: dprintln("DONE") stats = {} if not short_output: dprintln("Enumerating %d heap blocks" % len(heap_output)) blocks_done = 0 for heap_block in heap_output: blocks_done += 1 if stats_only and blocks_done % 100 == 0 and not short_output: dprintln(" Enumerated %d heap blocks" % blocks_done) # example block output: 00e3f088: 00080 . 00078 [101] - busy (70) match = re.match(r'\s+([0-9a-f]+): [0-9a-f]+ \. 
[0-9a-f]+ \[[0-9a-f]+\] - busy \(([0-9a-f]+)\)', heap_block) if match: address = long(match.group(1),16) size = long(match.group(2),16) ptr = address - pointer_size while ptr < address+size: ptr += pointer_size try: vftable_candidate = ptrPtr(ptr) except: continue if vftable_candidate in vftables_by_address: type_name = vftables_by_address[vftable_candidate] if len(type_filter) > 0 and not re.match(type_filter, type_name): continue if not stats_only: if short_output: dprintln(pointer_format % ptr) else: dprintln((pointer_format + "\t%s") % (ptr, type_name)) if type_name in stats: stats[type_name] += 1 else: stats[type_name] = 1 if not short_output: dprintln("") dprintln("Statistics:") dprintln("%50s\t%10s\t%s" % ("Type name", "Count", "Size")) for type in sorted(stats, key=stats.get, reverse=True): if stats[type] < min_count or (len(type_filter) > 0 and not re.match(type_filter, type)): continue if not type in typesize_by_type: try: type_info = typeInfo(type) typesize_by_type[type] = type_info.size() except: # some types aren't included in public symbols, so we can't get their type typesize_by_type[type] = None size = "Unknown" if typesize_by_type[type] is not None: size = stats[type]*typesize_by_type[type] dprintln("%50s\t%10d\t%s" % (type, stats[type], size)) if not short_output: dprintln("") if save_info and len(potential_save_file) > 0: if not short_output: dprint("Saving type information and vtables to file %s..." 
% potential_save_file) file = open(potential_save_file, 'wb') pickle.dump((vftables_by_address, vftables_by_type, typesize_by_type), file) file.close() if not short_output: dprintln("DONE") </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475111"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">parinporecha/backend_gtgonline</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">GTG/tests/test_backend_tomboy.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">15680</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Getting Things GNOME! - a personal organizer for the GNOME desktop # Copyright (c) 2008-2012 - Lionel Dricot & Bertrand Rousseau # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. 
# # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------------------------------------------- """ Tests for the tomboy backend """ from datetime import datetime from dbus.mainloop.glib import DBusGMainLoop import dbus import dbus.glib import dbus.service import errno import gobject import math import os import random import signal import sys import tempfile import threading import time import unittest import uuid from GTG.backends import BackendFactory from GTG.backends.genericbackend import GenericBackend from GTG.core.datastore import DataStore PID_TOMBOY = False class TestBackendTomboy(unittest.TestCase): """ Tests for the tomboy backend """ def setUp(self): thread_tomboy = threading.Thread(target=self.spawn_fake_tomboy_server) thread_tomboy.start() thread_tomboy.join() # only the test process should go further, the dbus server one should # stop here if not PID_TOMBOY: return # we create a custom dictionary listening to the server, and register # it in GTG. 
additional_dic = {} additional_dic["use this fake connection instead"] = ( FakeTomboy.BUS_NAME, FakeTomboy.BUS_PATH, FakeTomboy.BUS_INTERFACE) additional_dic[GenericBackend.KEY_ATTACHED_TAGS] = \ [GenericBackend.ALLTASKS_TAG] additional_dic[GenericBackend.KEY_DEFAULT_BACKEND] = True dic = BackendFactory().get_new_backend_dict('backend_tomboy', additional_dic) self.datastore = DataStore() self.backend = self.datastore.register_backend(dic) # waiting for the "start_get_tasks" to settle time.sleep(1) # we create a dbus session to speak with the server self.bus = dbus.SessionBus() obj = self.bus.get_object(FakeTomboy.BUS_NAME, FakeTomboy.BUS_PATH) self.tomboy = dbus.Interface(obj, FakeTomboy.BUS_INTERFACE) def spawn_fake_tomboy_server(self): # the fake tomboy server has to be in a different process, # otherwise it will lock on the GIL. # For details, see # http://lists.freedesktop.org/archives/dbus/2007-January/006921.html # we use a lockfile to make sure the server is running before we start # the test global PID_TOMBOY lockfile_fd, lockfile_path = tempfile.mkstemp() PID_TOMBOY = os.fork() if PID_TOMBOY: # we wait in polling that the server has been started while True: try: fd = os.open(lockfile_path, os.O_CREAT | os.O_EXCL | os.O_RDWR) except OSError, e: if e.errno != errno.EEXIST: raise time.sleep(0.3) continue os.close(fd) break else: FakeTomboy() os.close(lockfile_fd) os.unlink(lockfile_path) def tearDown(self): if not PID_TOMBOY: return self.datastore.save(quit=True) time.sleep(0.5) self.tomboy.FakeQuit() # FIXME: self.bus.close() os.kill(PID_TOMBOY, signal.SIGKILL) os.waitpid(PID_TOMBOY, 0) def test_everything(self): # we cannot use separate test functions because we only want a single # FakeTomboy dbus server running if not PID_TOMBOY: return for function in dir(self): if function.startswith("TEST_"): getattr(self, function)() self.tomboy.Reset() for tid in self.datastore.get_all_tasks(): self.datastore.request_task_deletion(tid) time.sleep(0.1) def 
TEST_processing_tomboy_notes(self): self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG]) # adding a note note = self.tomboy.CreateNamedNote(str(uuid.uuid4())) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 1) tid = self.backend.sync_engine.sync_memes.get_local_id(note) task = self.datastore.get_task(tid) # re-adding that (should not change anything) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 1) self.assertEqual( self.backend.sync_engine.sync_memes.get_local_id(note), tid) # removing the note and updating gtg self.tomboy.DeleteNote(note) self.backend.set_task(task) self.assertEqual(len(self.datastore.get_all_tasks()), 0) def TEST_set_task(self): self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG]) # adding a task task = self.datastore.requester.new_task() task.set_title("title") self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 1) note = self.tomboy.ListAllNotes()[0] self.assertEqual(str(self.tomboy.GetNoteTitle(note)), task.get_title()) # re-adding that (should not change anything) self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 1) self.assertEqual(note, self.tomboy.ListAllNotes()[0]) # removing the task and updating tomboy self.datastore.request_task_deletion(task.get_id()) self.backend._process_tomboy_note(note) self.assertEqual(len(self.tomboy.ListAllNotes()), 0) def TEST_update_newest(self): self.backend.set_attached_tags([GenericBackend.ALLTASKS_TAG]) task = self.datastore.requester.new_task() task.set_title("title") self.backend.set_task(task) note = self.tomboy.ListAllNotes()[0] gtg_modified = task.get_modified() tomboy_modified = self._modified_string_to_datetime( self.tomboy.GetNoteChangeDate(note)) # no-one updated, nothing should happen self.backend.set_task(task) self.assertEqual(gtg_modified, task.get_modified()) self.assertEqual(tomboy_modified, self._modified_string_to_datetime( 
self.tomboy.GetNoteChangeDate(note))) # we update the GTG task UPDATED_GTG_TITLE = "UPDATED_GTG_TITLE" task.set_title(UPDATED_GTG_TITLE) self.backend.set_task(task) self.assertTrue(gtg_modified < task.get_modified()) self.assertTrue(tomboy_modified <= self._modified_string_to_datetime( self.tomboy.GetNoteChangeDate(note))) self.assertEqual(task.get_title(), UPDATED_GTG_TITLE) self.assertEqual(self.tomboy.GetNoteTitle(note), UPDATED_GTG_TITLE) gtg_modified = task.get_modified() tomboy_modified = self._modified_string_to_datetime( self.tomboy.GetNoteChangeDate(note)) # we update the TOMBOY task UPDATED_TOMBOY_TITLE = "UPDATED_TOMBOY_TITLE" # the resolution of tomboy notes changed time is 1 second, so we need # to wait. This *shouldn't* be needed in the actual code because # tomboy signals are always a few seconds late. time.sleep(1) self.tomboy.SetNoteContents(note, UPDATED_TOMBOY_TITLE) self.backend._process_tomboy_note(note) self.assertTrue(gtg_modified <= task.get_modified()) self.assertTrue(tomboy_modified <= self._modified_string_to_datetime( self.tomboy.GetNoteChangeDate(note))) self.assertEqual(task.get_title(), UPDATED_TOMBOY_TITLE) self.assertEqual(self.tomboy.GetNoteTitle(note), UPDATED_TOMBOY_TITLE) def TEST_processing_tomboy_notes_with_tags(self): self.backend.set_attached_tags(['@a']) # adding a not syncable note note = self.tomboy.CreateNamedNote("title" + str(uuid.uuid4())) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 0) # re-adding that (should not change anything) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 0) # adding a tag to that note self.tomboy.SetNoteContents(note, "something with @a") self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 1) # removing the tag and resyncing self.tomboy.SetNoteContents(note, "something with no tags") self.backend._process_tomboy_note(note) 
self.assertEqual(len(self.datastore.get_all_tasks()), 0) # adding a syncable note note = self.tomboy.CreateNamedNote("title @a" + str(uuid.uuid4())) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 1) tid = self.backend.sync_engine.sync_memes.get_local_id(note) task = self.datastore.get_task(tid) # re-adding that (should not change anything) self.backend._process_tomboy_note(note) self.assertEqual(len(self.datastore.get_all_tasks()), 1) self.assertEqual( self.backend.sync_engine.sync_memes.get_local_id(note), tid) # removing the note and updating gtg self.tomboy.DeleteNote(note) self.backend.set_task(task) self.assertEqual(len(self.datastore.get_all_tasks()), 0) def TEST_set_task_with_tags(self): self.backend.set_attached_tags(['@a']) # adding a not syncable task task = self.datastore.requester.new_task() task.set_title("title") self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 0) # making that task syncable task.set_title("something else") task.add_tag("@a") self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 1) note = self.tomboy.ListAllNotes()[0] self.assertEqual(str(self.tomboy.GetNoteTitle(note)), task.get_title()) # re-adding that (should not change anything) self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 1) self.assertEqual(note, self.tomboy.ListAllNotes()[0]) # removing the syncable property and updating tomboy task.remove_tag("@a") self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), 0) def TEST_multiple_task_same_title(self): self.backend.set_attached_tags(['@a']) how_many_tasks = int(math.ceil(20 * random.random())) for iteration in xrange(0, how_many_tasks): task = self.datastore.requester.new_task() task.set_title("title") task.add_tag('@a') self.backend.set_task(task) self.assertEqual(len(self.tomboy.ListAllNotes()), how_many_tasks) def _modified_string_to_datetime(self, modified_string): return 
datetime.fromtimestamp(modified_string) def test_suite(): return unittest.TestLoader().loadTestsFromTestCase(TestBackendTomboy) class FakeTomboy(dbus.service.Object): """ D-Bus service object that mimics TOMBOY """ # We don't directly use the tomboy dbus path to avoid conflicts # if tomboy is running during the test BUS_NAME = "Fake.Tomboy" BUS_PATH = "/Fake/Tomboy" BUS_INTERFACE = "Fake.Tomboy.RemoteControl" def __init__(self): # Attach the object to D-Bus DBusGMainLoop(set_as_default=True) self.bus = dbus.SessionBus() bus_name = dbus.service.BusName(self.BUS_NAME, bus=self.bus) dbus.service.Object.__init__(self, bus_name, self.BUS_PATH) self.notes = {} threading.Thread(target=self.fake_main_loop).start() @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s") def GetNoteContents(self, note): return self.notes[note]['content'] @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="b") def NoteExists(self, note): return note in self.notes @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="d") def GetNoteChangeDate(self, note): return self.notes[note]['changed'] @dbus.service.method(BUS_INTERFACE, in_signature="ss") def SetNoteContents(self, note, text): self.fake_update_note(note) self.notes[note]['content'] = text @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s") def GetNoteTitle(self, note): return self._GetNoteTitle(note) def _GetNoteTitle(self, note): content = self.notes[note]['content'] try: end_of_title = content.index('\n') except ValueError: return content return content[:end_of_title] @dbus.service.method(BUS_INTERFACE, in_signature="s") def DeleteNote(self, note): del self.notes[note] @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s") def CreateNamedNote(self, title): # this is to mimic the way tomboy handles title clashes if self._FindNote(title) != '': return '' note = str(uuid.uuid4()) self.notes[note] = {'content': title} self.fake_update_note(note) return 
note @dbus.service.method(BUS_INTERFACE, in_signature="s", out_signature="s") def FindNote(self, title): return self._FindNote(title) def _FindNote(self, title): for note in self.notes: if self._GetNoteTitle(note) == title: return note return '' @dbus.service.method(BUS_INTERFACE, out_signature="as") def ListAllNotes(self): return list(self.notes) @dbus.service.signal(BUS_INTERFACE, signature='s') def NoteSaved(self, note): pass @dbus.service.signal(BUS_INTERFACE, signature='s') def NoteDeleted(self, note): pass ############################################################################### ### Function with the fake_ prefix are here to assist in testing, they do not ### need to be present in the real class ############################################################################### def fake_update_note(self, note): self.notes[note]['changed'] = time.mktime(datetime.now().timetuple()) def fake_main_loop(self): gobject.threads_init() dbus.glib.init_threads() self.main_loop = gobject.MainLoop() self.main_loop.run() @dbus.service.method(BUS_INTERFACE) def Reset(self): self.notes = {} @dbus.service.method(BUS_INTERFACE) def FakeQuit(self): threading.Timer(0.2, self._fake_quit).start() def _fake_quit(self): self.main_loop.quit() sys.exit(0) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475112"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">avlach/univbris-ocf</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">optin_manager/src/python/scripts/setup_ch.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3214</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">''' Created on Jul 19, 2010 @author: jnaous ''' from django.core.urlresolvers import reverse from django.test import Client from expedient.common.tests.client import test_get_and_post_form from django.contrib.auth.models import User from pyquery import PyQuery as pq from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection from geni.planetlab.models import PlanetLabNode try: from setup_expedient_params import \ SUPERUSER_USERNAME, SUPERUSER_PASSWORD,\ USER_INFO,\ PL_AGGREGATE_INFO,\ OF_AGGREGATE_INFO,\ OF_PL_CONNECTIONS except ImportError: print """ Could not import setup_om_params module. 
Make sure this module exists and that it contains the following variables: SUPERUSER_USERNAME, SUPERUSER_PASSWORD, CH_PASSWORD, CH_USERNAME """ raise def run(): client = Client() client.login(username=SUPERUSER_USERNAME, password=SUPERUSER_PASSWORD) # Add all planetlab aggregates for pl_agg in PL_AGGREGATE_INFO: print "adding pl agg %s" % pl_agg["url"] response = test_get_and_post_form( client, reverse("planetlab_aggregate_create"), pl_agg, ) print "got response %s" % response assert response.status_code == 302 for of_agg in OF_AGGREGATE_INFO: print "adding of agg %s" % of_agg["url"] response = test_get_and_post_form( client, reverse("openflow_aggregate_create"), of_agg, del_params=["verify_certs"], ) assert response.status_code == 302 for cnxn_tuple in OF_PL_CONNECTIONS: print "adding cnxn %s" % (cnxn_tuple,) NonOpenFlowConnection.objects.get_or_create( of_iface=OpenFlowInterface.objects.get( switch__datapath_id=cnxn_tuple[0], port_num=cnxn_tuple[1], ), resource=PlanetLabNode.objects.get(name=cnxn_tuple[2]), ) client.logout() for username, info in USER_INFO.items(): # create user User.objects.create_user( username=username, email=info["email"], password=info["password"]) client.login(username=username, password=info["password"]) # create project and slice for project in info["projects"]: response = test_get_and_post_form( client, reverse("project_create"), params=dict( name=project["name"], description=project["description"], ), ) assert response.status_code == 302 # This code is missing the project id. Need to get somehow to use reverse. 
# for slice in project["slices"]: # response = test_get_and_post_form( # client, reverse("slice_create"), # params=dict( # name=slice["name"], # description=slice["description"], # ), # ) # assert response.status_code == 302 client.logout() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475113"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">julien78910/CouchPotatoServer</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">libs/apscheduler/jobstores/redis_store.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">98</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2815</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Stores jobs in a Redis database. 
""" from uuid import uuid4 from datetime import datetime import logging from apscheduler.jobstores.base import JobStore from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from redis import StrictRedis except ImportError: # pragma: nocover raise ImportError('RedisJobStore requires redis installed') try: long = long except NameError: long = int logger = logging.getLogger(__name__) class RedisJobStore(JobStore): def __init__(self, db=0, key_prefix='jobs.', pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): self.jobs = [] self.pickle_protocol = pickle_protocol self.key_prefix = key_prefix if db is None: raise ValueError('The "db" parameter must not be empty') if not key_prefix: raise ValueError('The "key_prefix" parameter must not be empty') self.redis = StrictRedis(db=db, **connect_args) def add_job(self, job): job.id = str(uuid4()) job_state = job.__getstate__() job_dict = { 'job_state': pickle.dumps(job_state, self.pickle_protocol), 'runs': '0', 'next_run_time': job_state.pop('next_run_time').isoformat()} self.redis.hmset(self.key_prefix + job.id, job_dict) self.jobs.append(job) def remove_job(self, job): self.redis.delete(self.key_prefix + job.id) self.jobs.remove(job) def load_jobs(self): jobs = [] keys = self.redis.keys(self.key_prefix + '*') pipeline = self.redis.pipeline() for key in keys: pipeline.hgetall(key) results = pipeline.execute() for job_dict in results: job_state = {} try: job = Job.__new__(Job) job_state = pickle.loads(job_dict['job_state'.encode()]) job_state['runs'] = long(job_dict['runs'.encode()]) dateval = job_dict['next_run_time'.encode()].decode() job_state['next_run_time'] = datetime.strptime( dateval, '%Y-%m-%dT%H:%M:%S') job.__setstate__(job_state) jobs.append(job) except Exception: job_name = job_state.get('name', '(unknown)') logger.exception('Unable to restore job "%s"', job_name) self.jobs = jobs def update_job(self, job): attrs = { 'next_run_time': 
job.next_run_time.isoformat(), 'runs': job.runs} self.redis.hmset(self.key_prefix + job.id, attrs) def close(self): self.redis.connection_pool.disconnect() def __repr__(self): return '<%s>' % self.__class__.__name__ </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475114"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">hello-base/web</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apps/events/models.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5284</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models from model_utils import Choices from apps.accounts.models import ContributorMixin from apps.people.models import ParticipationMixin class Event(ContributorMixin, ParticipationMixin): CATEGORIES = Choices( ('birthday', 'Birthday'), ('bustour', 'Bus Tour'), ('concert', 'Concert'), ('convention', 'Convention'), ('dinnershow', 'Dinner Show'), ('general', 'General'), ('hawaii', 'Hawaii'), ('live', 'Live'), ('release', 
'Release'), ('promotional', 'Promotional'), ('other', 'Other'), ) # Details. category = models.CharField(choices=CATEGORIES, default=CATEGORIES.general, max_length=16) romanized_name = models.CharField(max_length=200) name = models.CharField(max_length=200) nickname = models.CharField(max_length=200) slug = models.SlugField() start_date = models.DateField(blank=True, null=True) end_date = models.DateField(blank=True, null=True) info_link_name = models.CharField(max_length=200, blank=True, help_text='Separate multiple link names by comma (must have accompanying info link).') info_link = models.URLField(blank=True, max_length=500, help_text='Seperate multiple links with comma (must have accompanying link name).') # Imagery. logo = models.ImageField(blank=True, null=True, upload_to='events/events/') promotional_image = models.ImageField(blank=True, null=True, upload_to='events/events/') stage = models.ImageField(blank=True, null=True, upload_to='events/events/') # Booleans. has_handshake = models.BooleanField('has handshake?', default=False) is_fanclub = models.BooleanField('fanclub?', default=False) is_international = models.BooleanField('international?', default=False) class Meta: get_latest_by = 'start_date' def __unicode__(self): return u'%s' % (self.romanized_name) def get_absolute_url(self): return reverse('event-detail', kwargs={'slug': self.slug}) def clean(self, *args, **kwargs): # Make sure that we have an equal number of info links and info link # names, so that we can zip() them properly. if (len(self.info_link.split(',')) != len(self.info_link_name.split(','))): message = u'There need to be the same number of info links and info link names.' 
raise ValidationError(message) super(Event, self).clean(*args, **kwargs) def save(self, *args, **kwargs): self.full_clean() super(Event, self).save(*args, **kwargs) def get_info_links(self): info_links = self.info_link.split(',') info_link_names = self.info_link_name.split(',') return zip(info_links, info_link_names) @staticmethod def autocomplete_search_fields(): return ('id__iexact', 'name__icontains', 'romanized_name__icontains') class Activity(ContributorMixin): event = models.ForeignKey(Event, related_name='activities') # Details. day = models.DateField() romanized_name = models.CharField(max_length=200, blank=True) name = models.CharField(max_length=200, blank=True) start_time = models.TimeField(blank=True, null=True) description = models.TextField(blank=True, help_text='If multiple activities took place on the same day/event, it can be specified here.') is_performance = models.BooleanField('is a performance?', default=False) # Venue. venue = models.ForeignKey('Venue', blank=True, null=True, related_name='activities') venue_known_as = models.CharField(max_length=200, blank=True, help_text='Did the venue go by another name at the time of this activity?') # Add 'set list' field with convoluted ordering and everything... class Meta: get_latest_by = 'day' ordering = ('day', 'start_time') verbose_name_plural = 'activities' def __unicode__(self): if self.start_time: return u'%s %s at %s' % (self.day, self.event.nickname, self.start_time) return u'%s %s' % (self.day, self.event.nickname) class Venue(ContributorMixin): romanized_name = models.CharField(max_length=200) name = models.CharField(max_length=200) other_names = models.CharField(max_length=200, blank=True, null=True) capacity = models.IntegerField(blank=True, null=True) url = models.URLField('URL', blank=True) slug = models.SlugField() # Location. 
romanized_address = models.CharField(max_length=200, blank=True, null=True) address = models.CharField(max_length=200, blank=True, null=True) country = models.CharField(max_length=200, blank=True, null=True) # Only filled if outside of Japan (maybe unnecessary). # Imagery. photo = models.ImageField(blank=True, null=True, upload_to='events/venues/') def __unicode__(self): return u'%s' % (self.romanized_name) def get_absolute_url(self): return reverse('venue-detail', kwargs={'slug': self.slug}) @staticmethod def autocomplete_search_fields(): return ('id__iexact', 'name__icontains', 'romanized_name__icontains') </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475115"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ohsu-computational-biology/common-workflow-language</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">draft-3/salad/schema_salad/makedoc.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">10</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">15362</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import mistune import schema import json import yaml import os import copy import re import sys import StringIO 
import logging import urlparse from aslist import aslist import re import argparse _logger = logging.getLogger("salad") def has_types(items): r = [] if isinstance(items, dict): if items["type"] == "https://w3id.org/cwl/salad#record": return [items["name"]] for n in ("type", "items", "values"): if n in items: r.extend(has_types(items[n])) return r if isinstance(items, list): for i in items: r.extend(has_types(i)) return r if isinstance(items, basestring): return [items] return [] def linkto(item): _, frg = urlparse.urldefrag(item) return "[%s](#%s)" % (frg, to_id(frg)) class MyRenderer(mistune.Renderer): def header(self, text, level, raw=None): return """<h%i id="%s">%s</h1>""" % (level, to_id(text), text) def to_id(text): textid = text if text[0] in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"): try: textid = text[text.index(" ")+1:] except ValueError: pass textid = textid.replace(" ", "_") return textid class ToC(object): def __init__(self): self.first_toc_entry = True self.numbering = [0] self.toc = "" self.start_numbering = True def add_entry(self, thisdepth, title): depth = len(self.numbering) if thisdepth < depth: self.toc += "</ol>" for n in range(0, depth-thisdepth): self.numbering.pop() self.toc += "</li></ol>" self.numbering[-1] += 1 elif thisdepth == depth: if not self.first_toc_entry: self.toc += "</ol>" else: self.first_toc_entry = False self.numbering[-1] += 1 elif thisdepth > depth: self.numbering.append(1) if self.start_numbering: num = "%i.%s" % (self.numbering[0], ".".join([str(n) for n in self.numbering[1:]])) else: num = "" self.toc += """<li><a href="#%s">%s %s</a><ol>\n""" %(to_id(title), num, title) return num def contents(self, id): c = """<h1 id="%s">Table of contents</h1> <nav class="tocnav"><ol>%s""" % (id, self.toc) c += "</ol>" for i in range(0, len(self.numbering)): c += "</li></ol>" c += """</nav>""" return c basicTypes = ("https://w3id.org/cwl/salad#null", "http://www.w3.org/2001/XMLSchema#boolean", 
"http://www.w3.org/2001/XMLSchema#int", "http://www.w3.org/2001/XMLSchema#long", "http://www.w3.org/2001/XMLSchema#float", "http://www.w3.org/2001/XMLSchema#double", "http://www.w3.org/2001/XMLSchema#string", "https://w3id.org/cwl/salad#record", "https://w3id.org/cwl/salad#enum", "https://w3id.org/cwl/salad#array") def add_dictlist(di, key, val): if key not in di: di[key] = [] di[key].append(val) def number_headings(toc, maindoc): mdlines = [] skip = False for line in maindoc.splitlines(): if line.strip() == "# Introduction": toc.start_numbering = True toc.numbering = [0] if line == "```": skip = not skip if not skip: m = re.match(r'^(#+) (.*)', line) if m: num = toc.add_entry(len(m.group(1)), m.group(2)) line = "%s %s %s" % (m.group(1), num, m.group(2)) line = re.sub(r'^(https?://\S+)', r'[\1](\1)', line) mdlines.append(line) maindoc = '\n'.join(mdlines) return maindoc def fix_doc(doc): if isinstance(doc, list): doc = "".join(doc) return "\n".join([re.sub(r"<([^>@]+@[^>]+)>", r"[\1](mailto:\1)", d) for d in doc.splitlines()]) class RenderType(object): def __init__(self, toc, j, renderlist, redirects): self.typedoc = StringIO.StringIO() self.toc = toc self.subs = {} self.docParent = {} self.docAfter = {} self.rendered = set() self.redirects = redirects self.title = None for t in j: if "extends" in t: for e in aslist(t["extends"]): add_dictlist(self.subs, e, t["name"]) #if "docParent" not in t and "docAfter" not in t: # add_dictlist(self.docParent, e, t["name"]) if t.get("docParent"): add_dictlist(self.docParent, t["docParent"], t["name"]) if t.get("docChild"): for c in aslist(t["docChild"]): add_dictlist(self.docParent, t["name"], c) if t.get("docAfter"): add_dictlist(self.docAfter, t["docAfter"], t["name"]) _, _, metaschema_loader = schema.get_metaschema() alltypes = schema.extend_and_specialize(j, metaschema_loader) self.typemap = {} self.uses = {} self.record_refs = {} for t in alltypes: self.typemap[t["name"]] = t try: if t["type"] == "record": 
self.record_refs[t["name"]] = [] for f in t.get("fields", []): p = has_types(f) for tp in p: if tp not in self.uses: self.uses[tp] = [] if (t["name"], f["name"]) not in self.uses[tp]: _, frg1 = urlparse.urldefrag(t["name"]) _, frg2 = urlparse.urldefrag(f["name"]) self.uses[tp].append((frg1, frg2)) if tp not in basicTypes and tp not in self.record_refs[t["name"]]: self.record_refs[t["name"]].append(tp) except KeyError as e: _logger.error("Did not find 'type' in %s", t) raise for f in alltypes: if (f["name"] in renderlist or ((not renderlist) and ("extends" not in f) and ("docParent" not in f) and ("docAfter" not in f))): self.render_type(f, 1) def typefmt(self, tp, redirects, nbsp=False): global primitiveType if isinstance(tp, list): if nbsp and len(tp) <= 3: return "&nbsp;|&nbsp;".join([self.typefmt(n, redirects) for n in tp]) else: return " | ".join([self.typefmt(n, redirects) for n in tp]) if isinstance(tp, dict): if tp["type"] == "https://w3id.org/cwl/salad#array": return "array&lt;%s&gt;" % (self.typefmt(tp["items"], redirects, nbsp=True)) if tp["type"] in ("https://w3id.org/cwl/salad#record", "https://w3id.org/cwl/salad#enum"): frg = schema.avro_name(tp["name"]) if tp["name"] in redirects: return """<a href="%s">%s</a>""" % (redirects[tp["name"]], frg) elif tp["name"] in self.typemap: return """<a href="#%s">%s</a>""" % (to_id(frg), frg) else: return frg if isinstance(tp["type"], dict): return self.typefmt(tp["type"], redirects) else: if str(tp) in redirects: return """<a href="%s">%s</a>""" % (redirects[tp], redirects[tp]) elif str(tp) in basicTypes: return """<a href="%s">%s</a>""" % (primitiveType, schema.avro_name(str(tp))) else: _, frg = urlparse.urldefrag(tp) if frg: tp = frg return """<a href="#%s">%s</a>""" % (to_id(tp), tp) def render_type(self, f, depth): if f["name"] in self.rendered or f["name"] in self.redirects: return self.rendered.add(f["name"]) if "doc" not in f: f["doc"] = "" f["type"] = copy.deepcopy(f) f["doc"] = "" f = f["type"] if "doc" 
not in f: f["doc"] = "" def extendsfrom(item, ex): if "extends" in item: for e in aslist(item["extends"]): ex.insert(0, self.typemap[e]) extendsfrom(self.typemap[e], ex) ex = [f] extendsfrom(f, ex) enumDesc = {} if f["type"] == "enum" and isinstance(f["doc"], list): for e in ex: for i in e["doc"]: idx = i.find(":") if idx > -1: enumDesc[i[:idx]] = i[idx+1:] e["doc"] = [i for i in e["doc"] if i.find(":") == -1 or i.find(" ") < i.find(":")] f["doc"] = fix_doc(f["doc"]) if f["type"] == "record": for field in f.get("fields", []): if "doc" not in field: field["doc"] = "" if f["type"] != "documentation": lines = [] for l in f["doc"].splitlines(): if len(l) > 0 and l[0] == "#": l = ("#" * depth) + l lines.append(l) f["doc"] = "\n".join(lines) _, frg = urlparse.urldefrag(f["name"]) num = self.toc.add_entry(depth, frg) doc = "## %s %s\n" % (num, frg) else: doc = "" if self.title is None: self.title = f["doc"][0:f["doc"].index("\n")][2:] if f["type"] == "documentation": f["doc"] = number_headings(self.toc, f["doc"]) #if "extends" in f: # doc += "\n\nExtends " # doc += ", ".join([" %s" % linkto(ex) for ex in aslist(f["extends"])]) #if f["name"] in self.subs: # doc += "\n\nExtended by" # doc += ", ".join([" %s" % linkto(s) for s in self.subs[f["name"]]]) #if f["name"] in self.uses: # doc += "\n\nReferenced by" # doc += ", ".join([" [%s.%s](#%s)" % (s[0], s[1], to_id(s[0])) for s in self.uses[f["name"]]]) doc = doc + "\n\n" + f["doc"] doc = mistune.markdown(doc, renderer=MyRenderer()) if f["type"] == "record": doc += "<h3>Fields</h3>" doc += """<table class="table table-striped">""" doc += "<tr><th>field</th><th>type</th><th>required</th><th>description</th></tr>" required = [] optional = [] for i in f.get("fields", []): tp = i["type"] if isinstance(tp, list) and tp[0] == "https://w3id.org/cwl/salad#null": opt = False tp = tp[1:] else: opt = True desc = i["doc"] #if "inherited_from" in i: # desc = "%s _Inherited from %s_" % (desc, linkto(i["inherited_from"])) frg = 
schema.avro_name(i["name"]) tr = "<td><code>%s</code></td><td>%s</td><td>%s</td><td>%s</td>" % (frg, self.typefmt(tp, self.redirects), opt, mistune.markdown(desc)) if opt: required.append(tr) else: optional.append(tr) for i in required+optional: doc += "<tr>" + i + "</tr>" doc += """</table>""" elif f["type"] == "enum": doc += "<h3>Symbols</h3>" doc += """<table class="table table-striped">""" doc += "<tr><th>symbol</th><th>description</th></tr>" for e in ex: for i in e.get("symbols", []): doc += "<tr>" frg = schema.avro_name(i) doc += "<td><code>%s</code></td><td>%s</td>" % (frg, enumDesc.get(frg, "")) doc += "</tr>" doc += """</table>""" f["doc"] = doc self.typedoc.write(f["doc"]) subs = self.docParent.get(f["name"], []) + self.record_refs.get(f["name"], []) if len(subs) == 1: self.render_type(self.typemap[subs[0]], depth) else: for s in subs: self.render_type(self.typemap[s], depth+1) for s in self.docAfter.get(f["name"], []): self.render_type(self.typemap[s], depth) def avrold_doc(j, outdoc, renderlist, redirects, brand, brandlink): toc = ToC() toc.start_numbering = False rt = RenderType(toc, j, renderlist, redirects) content = rt.typedoc.getvalue() outdoc.write(""" <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css"> """) outdoc.write("<title>%s</title>" % (rt.title)) outdoc.write(""" <style> :target { padding-top: 61px; margin-top: -61px; } body { padding-top: 61px; } .tocnav ol { list-style: none } </style> </head> <body> """) outdoc.write(""" <nav class="navbar navbar-default navbar-fixed-top"> <div class="container"> <div class="navbar-header"> <a class="navbar-brand" href="%s">%s</a> """ % (brandlink, brand)) if u"<!--ToC-->" in content: content = content.replace(u"<!--ToC-->", toc.contents("toc")) outdoc.write(""" <ul class="nav navbar-nav"> <li><a href="#toc">Table of contents</a></li> </ul> """) outdoc.write(""" </div> </div> </nav> """) 
outdoc.write(""" <div class="container"> """) outdoc.write(""" <div class="row"> """) outdoc.write(""" <div class="col-md-12" role="main" id="main">""") outdoc.write(content.encode("utf-8")) outdoc.write("""</div>""") outdoc.write(""" </div> </div> </body> </html>""") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("schema") parser.add_argument('--only', action='append') parser.add_argument('--redirect', action='append') parser.add_argument('--brand') parser.add_argument('--brandlink') parser.add_argument('--primtype', default="#PrimitiveType") args = parser.parse_args() s = [] a = args.schema with open(a) as f: if a.endswith("md"): s.append({"name": os.path.splitext(os.path.basename(a))[0], "type": "documentation", "doc": f.read().decode("utf-8") }) else: uri = "file://" + os.path.abspath(a) _, _, metaschema_loader = schema.get_metaschema() j, schema_metadata = metaschema_loader.resolve_ref(uri, "") if isinstance(j, list): s.extend(j) else: s.append(j) primitiveType = args.primtype redirect = {r.split("=")[0]:r.split("=")[1] for r in args.redirect} if args.redirect else {} renderlist = args.only if args.only else [] avrold_doc(s, sys.stdout, renderlist, redirect, args.brand, args.brandlink) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475116"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">nwjs/chromium.src</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">mojo/public/tools/bindings/pylib/mojom/parse/lexer.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6258</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import imp import os.path import sys def _GetDirAbove(dirname): """Returns the directory "above" this file containing |dirname| (which must also be "above" this file).""" path = os.path.abspath(__file__) while True: path, tail = os.path.split(path) assert tail if tail == dirname: return path try: imp.find_module("ply") except ImportError: sys.path.append(os.path.join(_GetDirAbove("mojo"), "third_party")) from ply.lex import TOKEN from ..error import Error class LexError(Error): """Class for errors from the lexer.""" def __init__(self, filename, message, lineno): Error.__init__(self, filename, message, lineno=lineno) # We have methods which look like they could be functions: # pylint: disable=R0201 class Lexer(object): def __init__(self, filename): self.filename = filename ######################-- PRIVATE --###################### ## ## Internal auxiliary methods ## def _error(self, msg, token): raise LexError(self.filename, msg, token.lineno) ## ## Reserved keywords ## keywords = ( 'HANDLE', 'IMPORT', 'MODULE', 'STRUCT', 'UNION', 'INTERFACE', 'ENUM', 'CONST', 'TRUE', 'FALSE', 'DEFAULT', 'ARRAY', 'MAP', 'ASSOCIATED', 'PENDING_REMOTE', 'PENDING_RECEIVER', 'PENDING_ASSOCIATED_REMOTE', 'PENDING_ASSOCIATED_RECEIVER', ) keyword_map = {} for keyword in keywords: 
keyword_map[keyword.lower()] = keyword ## ## All the tokens recognized by the lexer ## tokens = keywords + ( # Identifiers 'NAME', # Constants 'ORDINAL', 'INT_CONST_DEC', 'INT_CONST_HEX', 'FLOAT_CONST', # String literals 'STRING_LITERAL', # Operators 'MINUS', 'PLUS', 'AMP', 'QSTN', # Assignment 'EQUALS', # Request / response 'RESPONSE', # Delimiters 'LPAREN', 'RPAREN', # ( ) 'LBRACKET', 'RBRACKET', # [ ] 'LBRACE', 'RBRACE', # { } 'LANGLE', 'RANGLE', # < > 'SEMI', # ; 'COMMA', 'DOT' # , . ) ## ## Regexes for use in tokens ## # valid C identifiers (K&R2: A.2.3) identifier = r'[a-zA-Z_][0-9a-zA-Z_]*' hex_prefix = '0[xX]' hex_digits = '[0-9a-fA-F]+' # integer constants (K&R2: A.2.5.1) decimal_constant = '0|([1-9][0-9]*)' hex_constant = hex_prefix+hex_digits # Don't allow octal constants (even invalid octal). octal_constant_disallowed = '0[0-9]+' # character constants (K&R2: A.2.5.2) # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line # directives with Windows paths as filenames (..\..\dir\file) # For the same reason, decimal_escape allows all digit sequences. We want to # parse all correct code, even if it means to sometimes parse incorrect # code. 
# simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])""" decimal_escape = r"""(\d+)""" hex_escape = r"""(x[0-9a-fA-F]+)""" bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" escape_sequence = \ r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' # string literals (K&R2: A.2.6) string_char = r"""([^"\\\n]|"""+escape_sequence+')' string_literal = '"'+string_char+'*"' bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' # floating constants (K&R2: A.2.5.3) exponent_part = r"""([eE][-+]?[0-9]+)""" fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)""" floating_constant = \ '(((('+fractional_constant+')'+ \ exponent_part+'?)|([0-9]+'+exponent_part+')))' # Ordinals ordinal = r'@[0-9]+' missing_ordinal_value = r'@' # Don't allow ordinal values in octal (even invalid octal, like 09) or # hexadecimal. octal_or_hex_ordinal_disallowed = r'@((0[0-9]+)|('+hex_prefix+hex_digits+'))' ## ## Rules for the normal state ## t_ignore = ' \t\r' # Newlines def t_NEWLINE(self, t): r'\n+' t.lexer.lineno += len(t.value) # Operators t_MINUS = r'-' t_PLUS = r'\+' t_AMP = r'&' t_QSTN = r'\?' # = t_EQUALS = r'=' # => t_RESPONSE = r'=>' # Delimiters t_LPAREN = r'\(' t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' t_LBRACE = r'\{' t_RBRACE = r'\}' t_LANGLE = r'<' t_RANGLE = r'>' t_COMMA = r',' t_DOT = r'\.' 
t_SEMI = r';' t_STRING_LITERAL = string_literal # The following floating and integer constants are defined as # functions to impose a strict order (otherwise, decimal # is placed before the others because its regex is longer, # and this is bad) # @TOKEN(floating_constant) def t_FLOAT_CONST(self, t): return t @TOKEN(hex_constant) def t_INT_CONST_HEX(self, t): return t @TOKEN(octal_constant_disallowed) def t_OCTAL_CONSTANT_DISALLOWED(self, t): msg = "Octal values not allowed" self._error(msg, t) @TOKEN(decimal_constant) def t_INT_CONST_DEC(self, t): return t # unmatched string literals are caught by the preprocessor @TOKEN(bad_string_literal) def t_BAD_STRING_LITERAL(self, t): msg = "String contains invalid escape code" self._error(msg, t) # Handle ordinal-related tokens in the right order: @TOKEN(octal_or_hex_ordinal_disallowed) def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t): msg = "Octal and hexadecimal ordinal values not allowed" self._error(msg, t) @TOKEN(ordinal) def t_ORDINAL(self, t): return t @TOKEN(missing_ordinal_value) def t_BAD_ORDINAL(self, t): msg = "Missing ordinal value" self._error(msg, t) @TOKEN(identifier) def t_NAME(self, t): t.type = self.keyword_map.get(t.value, "NAME") return t # Ignore C and C++ style comments def t_COMMENT(self, t): r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)' t.lexer.lineno += t.value.count("\n") def t_error(self, t): msg = "Illegal character %s" % repr(t.value[0]) self._error(msg, t) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475117"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">dlazz/ansible</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/ansible/modules/network/ios/ios_linkagg.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">57</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9433</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: ios_linkagg version_added: "2.5" author: "Trishna Guha (@trishnaguha)" short_description: Manage link aggregation groups on Cisco IOS network devices description: - This module provides declarative management of link aggregation groups on Cisco IOS network devices. notes: - Tested against IOS 15.2 options: group: description: - Channel-group number for the port-channel Link aggregation group. Range 1-255. mode: description: - Mode of the link aggregation group. choices: ['active', 'on', 'passive', 'auto', 'desirable'] members: description: - List of members of the link aggregation group. aggregate: description: List of link aggregation definitions. state: description: - State of the link aggregation group. default: present choices: ['present', 'absent'] purge: description: - Purge links not defined in the I(aggregate) parameter. 
default: no type: bool extends_documentation_fragment: ios """ EXAMPLES = """ - name: create link aggregation group ios_linkagg: group: 10 state: present - name: delete link aggregation group ios_linkagg: group: 10 state: absent - name: set link aggregation group to members ios_linkagg: group: 200 mode: active members: - GigabitEthernet0/0 - GigabitEthernet0/1 - name: remove link aggregation group from GigabitEthernet0/0 ios_linkagg: group: 200 mode: active members: - GigabitEthernet0/1 - name: Create aggregate of linkagg definitions ios_linkagg: aggregate: - { group: 3, mode: on, members: [GigabitEthernet0/1] } - { group: 100, mode: passive, members: [GigabitEthernet0/2] } """ RETURN = """ commands: description: The list of configuration mode commands to send to the device returned: always, except for the platforms that use Netconf transport to manage the device. type: list sample: - interface port-channel 30 - interface GigabitEthernet0/3 - channel-group 30 mode on - no interface port-channel 30 """ import re from copy import deepcopy from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.config import CustomNetworkConfig from ansible.module_utils.network.common.utils import remove_default_spec from ansible.module_utils.network.ios.ios import get_config, load_config from ansible.module_utils.network.ios.ios import ios_argument_spec def search_obj_in_list(group, lst): for o in lst: if o['group'] == group: return o def map_obj_to_commands(updates, module): commands = list() want, have = updates purge = module.params['purge'] for w in want: group = w['group'] mode = w['mode'] members = w.get('members') or [] state = w['state'] del w['state'] obj_in_have = search_obj_in_list(group, have) if state == 'absent': if obj_in_have: commands.append('no interface port-channel {0}'.format(group)) elif state == 'present': cmd = ['interface port-channel {0}'.format(group), 'end'] if not obj_in_have: if not group: module.fail_json(msg='group 
is a required option') commands.extend(cmd) if members: for m in members: commands.append('interface {0}'.format(m)) commands.append('channel-group {0} mode {1}'.format(group, mode)) else: if members: if 'members' not in obj_in_have.keys(): for m in members: commands.extend(cmd) commands.append('interface {0}'.format(m)) commands.append('channel-group {0} mode {1}'.format(group, mode)) elif set(members) != set(obj_in_have['members']): missing_members = list(set(members) - set(obj_in_have['members'])) for m in missing_members: commands.extend(cmd) commands.append('interface {0}'.format(m)) commands.append('channel-group {0} mode {1}'.format(group, mode)) superfluous_members = list(set(obj_in_have['members']) - set(members)) for m in superfluous_members: commands.extend(cmd) commands.append('interface {0}'.format(m)) commands.append('no channel-group {0} mode {1}'.format(group, mode)) if purge: for h in have: obj_in_want = search_obj_in_list(h['group'], want) if not obj_in_want: commands.append('no interface port-channel {0}'.format(h['group'])) return commands def map_params_to_obj(module): obj = [] aggregate = module.params.get('aggregate') if aggregate: for item in aggregate: for key in item: if item.get(key) is None: item[key] = module.params[key] d = item.copy() d['group'] = str(d['group']) obj.append(d) else: obj.append({ 'group': str(module.params['group']), 'mode': module.params['mode'], 'members': module.params['members'], 'state': module.params['state'] }) return obj def parse_mode(module, config, group, member): mode = None netcfg = CustomNetworkConfig(indent=1, contents=config) parents = ['interface {0}'.format(member)] body = netcfg.get_section(parents) match_int = re.findall(r'interface {0}\n'.format(member), body, re.M) if match_int: match = re.search(r'channel-group {0} mode (\S+)'.format(group), body, re.M) if match: mode = match.group(1) return mode def parse_members(module, config, group): members = [] for line in config.strip().split('!'): l = 
line.strip() if l.startswith('interface'): match_group = re.findall(r'channel-group {0} mode'.format(group), l, re.M) if match_group: match = re.search(r'interface (\S+)', l, re.M) if match: members.append(match.group(1)) return members def get_channel(module, config, group): match = re.findall(r'^interface (\S+)', config, re.M) if not match: return {} channel = {} for item in set(match): member = item channel['mode'] = parse_mode(module, config, group, member) channel['members'] = parse_members(module, config, group) return channel def map_config_to_obj(module): objs = list() config = get_config(module) for line in config.split('\n'): l = line.strip() match = re.search(r'interface Port-channel(\S+)', l, re.M) if match: obj = {} group = match.group(1) obj['group'] = group obj.update(get_channel(module, config, group)) objs.append(obj) return objs def main(): """ main entry point for module execution """ element_spec = dict( group=dict(type='int'), mode=dict(choices=['active', 'on', 'passive', 'auto', 'desirable']), members=dict(type='list'), state=dict(default='present', choices=['present', 'absent']) ) aggregate_spec = deepcopy(element_spec) aggregate_spec['group'] = dict(required=True) required_one_of = [['group', 'aggregate']] required_together = [['members', 'mode']] mutually_exclusive = [['group', 'aggregate']] # remove default in aggregate spec, to handle common arguments remove_default_spec(aggregate_spec) argument_spec = dict( aggregate=dict(type='list', elements='dict', options=aggregate_spec, required_together=required_together), purge=dict(default=False, type='bool') ) argument_spec.update(element_spec) argument_spec.update(ios_argument_spec) module = AnsibleModule(argument_spec=argument_spec, required_one_of=required_one_of, required_together=required_together, mutually_exclusive=mutually_exclusive, supports_check_mode=True) warnings = list() result = {'changed': False} if warnings: result['warnings'] = warnings want = map_params_to_obj(module) have = 
map_config_to_obj(module) commands = map_obj_to_commands((want, have), module) result['commands'] = commands if commands: if not module.check_mode: load_config(module, commands) result['changed'] = True module.exit_json(**result) if __name__ == '__main__': main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475118"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">adamklawonn/CityCircles</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">citycircles_iphone/build_back2/iphoneDistribution-iphoneos/CityCircles.app/pntsandrects.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">17</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6225</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""Point and Rectangle classes. This code is in the public domain. Point -- point with (x,y) coordinates Rect -- two points, forming a rectangle """ import math class Point: """A point identified by (x,y) coordinates. 
supports: +, -, *, /, str, repr length -- calculate length of vector to point from origin distance_to -- calculate distance between two points as_tuple -- construct tuple (x,y) clone -- construct a duplicate integerize -- convert x & y to integers floatize -- convert x & y to floats move_to -- reset x & y slide -- move (in place) +dx, +dy, as spec'd by point slide_xy -- move (in place) +dx, +dy rotate -- rotate around the origin rotate_about -- rotate around another point """ def __init__(self, x=0.0, y=0.0): self.x = x self.y = y def __add__(self, p): """Point(x1+x2, y1+y2)""" return Point(self.x+p.x, self.y+p.y) def __sub__(self, p): """Point(x1-x2, y1-y2)""" return Point(self.x-p.x, self.y-p.y) def __mul__( self, scalar ): """Point(x1*x2, y1*y2)""" return Point(self.x*scalar, self.y*scalar) def __div__(self, scalar): """Point(x1/x2, y1/y2)""" return Point(self.x/scalar, self.y/scalar) def __str__(self): return "(%s, %s)" % (self.x, self.y) def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.x, self.y) def length(self): return math.sqrt(self.x**2 + self.y**2) def distance_to(self, p): """Calculate the distance between two points.""" return (self - p).length() def as_tuple(self): """(x, y)""" return (self.x, self.y) def clone(self): """Return a full copy of this point.""" return Point(self.x, self.y) def integerize(self): """Convert co-ordinate values to integers.""" self.x = int(self.x) self.y = int(self.y) def floatize(self): """Convert co-ordinate values to floats.""" self.x = float(self.x) self.y = float(self.y) def move_to(self, x, y): """Reset x & y coordinates.""" self.x = x self.y = y def slide(self, p): '''Move to new (x+dx,y+dy). Can anyone think up a better name for this function? slide? shift? delta? move_by? ''' self.x = self.x + p.x self.y = self.y + p.y def slide_xy(self, dx, dy): '''Move to new (x+dx,y+dy). Can anyone think up a better name for this function? slide? shift? delta? move_by? 
''' self.x = self.x + dx self.y = self.y + dy def rotate(self, rad): """Rotate counter-clockwise by rad radians. Positive y goes *up,* as in traditional mathematics. Interestingly, you can use this in y-down computer graphics, if you just remember that it turns clockwise, rather than counter-clockwise. The new position is returned as a new Point. """ s, c = [f(rad) for f in (math.sin, math.cos)] x, y = (c*self.x - s*self.y, s*self.x + c*self.y) return Point(x,y) def rotate_about(self, p, theta): """Rotate counter-clockwise around a point, by theta degrees. Positive y goes *up,* as in traditional mathematics. The new position is returned as a new Point. """ result = self.clone() result.slide(-p.x, -p.y) result.rotate(theta) result.slide(p.x, p.y) return result class Rect: """A rectangle identified by two points. The rectangle stores left, top, right, and bottom values. Coordinates are based on screen coordinates. origin top +-----> x increases | | left -+- right v | y increases bottom set_points -- reset rectangle coordinates contains -- is a point inside? overlaps -- does a rectangle overlap? 
top_left -- get top-left corner bottom_right -- get bottom-right corner expanded_by -- grow (or shrink) """ def __init__(self, pt1, pt2): """Initialize a rectangle from two points.""" self.set_points(pt1, pt2) def set_points(self, pt1, pt2): """Reset the rectangle coordinates.""" (x1, y1) = pt1.as_tuple() (x2, y2) = pt2.as_tuple() self.left = min(x1, x2) self.top = min(y1, y2) self.right = max(x1, x2) self.bottom = max(y1, y2) def contains(self, pt): """Return true if a point is inside the rectangle.""" x,y = pt.as_tuple() return (self.left <= x <= self.right and self.top <= y <= self.bottom) def overlaps(self, other): """Return true if a rectangle overlaps this rectangle.""" return (self.right > other.left and self.left < other.right and self.top < other.bottom and self.bottom > other.top) def top_left(self): """Return the top-left corner as a Point.""" return Point(self.left, self.top) def bottom_right(self): """Return the bottom-right corner as a Point.""" return Point(self.right, self.bottom) def expanded_by(self, n): """Return a rectangle with extended borders. Create a new rectangle that is wider and taller than the immediate one. All sides are extended by "n" points. 
""" p1 = Point(self.left-n, self.top-n) p2 = Point(self.right+n, self.bottom+n) return Rect(p1, p2) def __str__( self ): return "<Rect (%s,%s)-(%s,%s)>" % (self.left,self.top, self.right,self.bottom) def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, Point(self.left, self.top), Point(self.right, self.bottom))</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475119"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">rosjat/python-scsi</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">pyscsi/pyscsi/scsi_enum_command.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">23775</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># coding: utf-8 # Copyright (C) 2014 by Ronnie Sahlberg<<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="1d6f72737374786e7c75717f786f7a5d7a707c7471337e7270">[email protected]</a>> # Copyright (C) 2015 by Markus Rosjat<<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="3954584b524c4a174b564a53584d795e54585055175a5654">[email protected]</a>> # SPDX-FileCopyrightText: 2014 The python-scsi Authors # 
# SPDX-License-Identifier: LGPL-2.1-or-later from pyscsi.pyscsi.scsi_opcode import OpCode from pyscsi.utils.enum import Enum # Dictionaries to define service actions and there values # # We use a helper to connect the service actions to the corresponding opcode.as # The OpCode object holds a Enum object with the service actions and has a value and # a name property to access the opcode name and value. """ ------------------------------------------------------------------------------ Maintenance in Service Actions ------------------------------------------------------------------------------ """ sa_maintenance_in = {'REPORT_ASSIGNED_UNASSIGNED_P_EXTENT': 0x00, 'REPORT_COMPONENT_DEVICE': 0x01, 'REPORT_COMPONENT_DEVICE_ATTACHMENTS': 0x02, 'REPORT_DEVICE_IDENTIFICATION': 0x07, 'REPORT_PERIPHERAL_DEVICE': 0x03, 'REPORT_PERIPHERAL_DEVICE_ASSOCIATIONS': 0x04, 'REPORT_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x05, 'REPORT_STATES': 0x06, 'REPORT_SUPPORTED_CONFIGURATION_METHOD': 0x09, 'REPORT_UNCONFIGURED_CAPACITY': 0x08, } """ ------------------------------------------------------------------------------ Maintenance out Service Actions Dictionaries ------------------------------------------------------------------------------ """ sa_maintenance_out = {'ADD_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x00, 'ATTACH_TO_COMPONENT_DEVICE': 0x01, 'BREAK_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x07, 'EXCHANGE_P_EXTENT': 0x02, 'EXCHANGE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x03, 'INSTRUCT_COMPONENT_DEVICE': 0x04, 'REMOVE_PERIPHERAL_DEVICE_COMPONENT_DEVICE': 0x05, 'SET_PERIPHERAL_DEVICE_COMPONENT_DEVICE_IDENTIFIER': 0x06, } """ ------------------------------------------------------------------------------ Service Actions Dictionaries for the A3 opcode ------------------------------------------------------------------------------ """ service_actions = {'REPORT_DEVICE_IDENTIFIER': 0x05, 'REPORT_ALIASES': 0x0b, 'REPORT_PRIORITY': 0x0e, 'REPORT_SUPPORTED_OPERATION_CODES': 0x0c, 
'REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS': 0x0d, 'REPORT_TARGET_PORT_GROUPS': 0x0a, 'REPORT_TIMESTAMP': 0x0f, 'REPORT_IDENTIFYING_INFORMATION': 0x05, 'REQUEST_DATA_TRANSFER_ELEMENT_INQUIRY': 0x06, 'CHANGE_ALIASES': 0x0b, 'SET_DEVICE_IDENTIFIER': 0x06, 'SET_PRIORITY': 0x0e, 'SET_TARGET_PORT_GROUPS': 0x0a, 'SET_TIMESTAMP': 0x0f, 'SET_IDENTIFYING_INFORMATION': 0x06, 'ORWRITE_32': 0x000e, 'READ_32': 0x0009, 'VERIFY_32': 0x000a, 'WRITE_32': 0x000b, 'WRITE_AND_VERIFY_32': 0x000c, 'WRITE_SAME_32': 0x000d, 'XDREAD_32': 0x0003, 'XDWRITE_32': 0x0004, 'XDWRITEREAD_32': 0x0007, 'XPWRITE_32': 0x0006, 'GET_LBA_STATUS': 0x12, 'READ_CAPACITY_16': 0x10, 'REPORT_REFERRALS': 0x13, 'OPEN_IMPORTEXPORT_ELEMENT': 0x00, 'CLOSE_IMPORTEXPORT_ELEMENT': 0x01, } """ ------------------------------------------------------------------------------ opcode Dictionaries ------------------------------------------------------------------------------ """ spc_opcodes = {'SPC_OPCODE_A4': OpCode('SPC_OPCODE_A4', 0xa4, service_actions), 'SPC_OPCODE_A3': OpCode('SPC_OPCODE_A3', 0xa3, service_actions), 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}), 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}), 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}), 'INQUIRY': OpCode('INQUIRY', 0x12, {}), 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}), 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}), 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}), 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}), 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}), 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}), 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}), 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}), 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}), 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}), 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}), 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}), 'READ_MEDIA_SERIAL_NUMBER': 
OpCode('READ_MEDIA_SERIAL_NUMBER', 0xab, {'READ_MEDIA_SERIAL_NUMBER': 0x01, }), 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}), 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}), 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}), 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}), 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}), 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}), 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}), 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), } sbc_opcodes = {'SBC_OPCODE_7F': OpCode('SBC_OPCODE_7F', 0x7f, service_actions), 'SBC_OPCODE_A4': OpCode('SBC_OPCODE_A4', 0xa4, service_actions), 'SBC_OPCODE_A3': OpCode('SBC_OPCODE_A3', 0xa3, service_actions), 'SBC_OPCODE_9E': OpCode('SBC_OPCODE_9E', 0x9e, service_actions), 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}), 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}), 'COMPARE_AND_WRITE': OpCode('COMPARE_AND_WRITE', 0x89, {}), 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}), 'FORMAT_UNIT': OpCode('FORMAT_UNIT', 0x04, {}), 'INQUIRY': OpCode('INQUIRY', 0x12, {}), 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}), 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}), 'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in), 'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out), 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}), 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}), 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}), 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}), 'ORWRITE_16': OpCode('ORWRITE_16', 0x8b, {}), 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}), 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}), 'PRE_FETCH_10': OpCode('PRE_FETCH_10', 0x34, {}), 'PRE_FETCH_16': OpCode('PRE_FETCH_16', 0x90, {}), 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}), 'READ_6': OpCode('READ_6', 0x08, {}), 'READ_10': 
OpCode('READ_10', 0x28, {}), 'READ_12': OpCode('READ_12', 0xa8, {}), 'READ_16': OpCode('READ_16', 0x88, {}), 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}), 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}), 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}), 'READ_CAPACITY_10': OpCode('READ_CAPACITY_10', 0x25, {}), 'READ_DEFECT_DATA_10': OpCode('READ_DEFECT_DATA_10', 0x37, {}), 'READ_DEFECT_DATA_12': OpCode('READ_DEFECT_DATA_12', 0xb7, {}), 'READ_LONG_10': OpCode('READ_LONG_10', 0x3e, {}), 'READ_LONG_16': OpCode('READ_LONG_16', 0x9e, {'READ_LONG_16': 0x11, }), 'REASSIGN_BLOCKS': OpCode('REASSIGN_BLOCKS', 0x07, {}), 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}), 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}), 'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}), 'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OT', 0xbb, {}), 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}), 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}), 'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}), 'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}), 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}), 'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}), 'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}), 'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}), 'SYNCHRONIZE_CACHE_10': OpCode('SYNCHRONIZE_CACHE_10', 0x35, {}), 'SYNCHRONIZE_CACHE_16': OpCode('SYNCHRONIZE_CACHE_16', 0x91, {}), 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}), 'UNMAP': OpCode('UNMAP', 0x42, {}), 'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}), 'VERIFY_12': OpCode('VERIFY_12', 0xaf, {}), 'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}), 'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}), 'VOLUME_SET_OUT': OpCode('VOLUME_SET_IN', 0xbf, {}), 'WRITE_6': OpCode('WRITE_6', 0xa0, {}), 'WRITE_10': OpCode('WRITE_10', 0x2a, {}), 'WRITE_12': OpCode('WRITE_12', 0xaa, {}), 'WRITE_16': OpCode('WRITE_16', 0x8a, {}), 'WRITE_AND_VERIFY_10': 
OpCode('WRITE_AND_VERIFY_10', 0x2e, {}), 'WRITE_AND_VERIFY_12': OpCode('WRITE_AND_VERIFY_12', 0xae, {}), 'WRITE_AND_VERIFY_16': OpCode('WRITE_AND_VERIFY_16', 0x8e, {}), 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}), 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), 'WRITE_LONG_10': OpCode('WRITE_LONG_10', 0x3f, {}), 'WRITE_LONG_16': OpCode('WRITE_LONG_16', 0x9f, {'WRITE_LONG_16': 0x11, }), 'WRITE_SAME_10': OpCode('WRITE_SAME_10', 0x41, {}), 'WRITE_SAME_16': OpCode('WRITE_SAME_16', 0x93, {}), 'XDREAD_10': OpCode('XDREAD_10', 0x52, {}), 'XDWRITE_10': OpCode('XDWRITE_10', 0x50, {}), 'XDWRITEREAD_10': OpCode('XDWRITEREAD_10', 0x53, {}), 'XPWRITE_10': OpCode('XPWRITE_10', 0x51, {}), } ssc_opcodes = {'SSC_OPCODE_A4': OpCode('SSC_OPCODE_A4', 0xa4, service_actions), 'SSC_OPCODE_A3': OpCode('SSC_OPCODE_A3', 0xa3, service_actions), 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}), 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}), 'ERASE_16': OpCode('ERASE_16', 0x93, {}), 'EXTENDED_COPY': OpCode('EXTENDED_COPY', 0x83, {}), 'FORMAT_MEDIUM': OpCode('FORMAT_MEDIUM', 0x04, {}), 'INQUIRY': OpCode('INQUIRY', 0x12, {}), 'LOAD_UNLOAD': OpCode('LOAD_UNLOAD', 0x1b, {}), 'LOCATE_16': OpCode('LOCATE_16', 0x92, {}), 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}), 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}), 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}), 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}), 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}), 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}), 'MOVE_MEDIUM_ATTACHED': OpCode('MOVE_MEDIUM_ATTACHED', 0xa7, {}), 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}), 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}), 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}), 'READ_6': OpCode('READ_6', 0x08, {}), 'READ_16': OpCode('READ_16', 0x88, {}), 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}), 'READ_BLOCK_LIMITS': 
OpCode('READ_BLOCK_LIMITS', 0x05, {}), 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}), 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}), 'READ_ELEMENT_STATUS_ATTACHED': OpCode('READ_ELEMENT_STATUS_ATTACHED', 0xb4, {}), 'READ_POSITION': OpCode('READ_POSITION', 0x34, {}), 'READ_REVERSE_6': OpCode('READ_REVERSE_6', 0x0f, {}), 'READ_REVERSE_16': OpCode('READ_REVERSE_16', 0x81, {}), 'RECEIVE_COPY_RESULTS': OpCode('RECEIVE_COPY_RESULTS', 0x84, {}), 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}), 'RECOVER_BUFFERED_DATA': OpCode('RECOVER_BUFFERED_DATA', 0x14, {}), 'REPORT_ALIAS': OpCode('REPORT_ALIAS', 0xa3, {'REPORT_ALIAS': 0x0b, }), 'REPORT_DENSITY_SUPPORT': OpCode('REPORT_DENSITY_SUPPORT', 0x44, {}), 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}), 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}), 'REWIND': OpCode('REWIND', 0x01, {}), 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}), 'SET_CAPACITY': OpCode('SET_CAPACITY', 0x0b, {}), 'SPACE_6': OpCode('SPACE_6', 0x11, {}), 'SPACE_16': OpCode('SPACE_16', 0x91, {}), 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}), 'VERIFY_6': OpCode('VERIFY_6', 0x13, {}), 'VERIFY_16': OpCode('VERIFY_16', 0x8f, {}), 'WRITE_6': OpCode('WRITE_6', 0x0a, {}), 'WRITE_16': OpCode('WRITE_16', 0x8a, {}), 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}), 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), 'WRITE_FILEMARKS_6': OpCode('WRITE_FILEMARKS_6', 0x10, {}), 'WRITE_FILEMARKS_16': OpCode('WRITE_FILEMARKS_16', 0x80, {}), } smc_opcodes = {'SMC_OPCODE_A4': OpCode('SMC_OPCODE_A4', 0xa4, service_actions), 'SMC_OPCODE_A3': OpCode('SMC_OPCODE_A3', 0xa3, service_actions), 'ACCESS_CONTROL_IN': OpCode('ACCESS_CONTROL_IN', 0x86, {}), 'ACCESS_CONTROL_OUT': OpCode('ACCESS_CONTROL_OUT', 0x87, {}), 'EXCHANGE_MEDIUM': OpCode('EXCHANGE_MEDIUM', 0xa6, {}), 'INITIALIZE_ELEMENT_STATUS': OpCode('INITIALIZE_ELEMENT_STATUS', 0x07, {}), 'INITIALIZE_ELEMENT_STATUS_WITH_RANGE': 
OpCode('INITIALIZE_ELEMENT_STATUS_WITH_RANGE', 0x37, {}), 'INQUIRY': OpCode('INQUIRY', 0x12, {}), 'LOG_SELECT': OpCode('LOG_SELECT', 0x4c, {}), 'LOG_SENSE': OpCode('LOG_SENSE', 0x4d, {}), 'MAINTENANCE_IN': OpCode('MAINTENANCE_IN', 0xa3, sa_maintenance_in), 'MAINTENANCE_OUT': OpCode('MAINTENANCE_OUT', 0xa4, sa_maintenance_out), 'MODE_SELECT_6': OpCode('MODE_SELECT_6', 0x15, {}), 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}), 'MODE_SENSE_6': OpCode('MODE_SENSE_6', 0x1a, {}), 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0x5a, {}), 'MOVE_MEDIUM': OpCode('MOVE_MEDIUM', 0xa5, {}), 'OPEN_CLOSE_IMPORT_EXPORT_ELEMENT': OpCode('SMC_OPCODE_1B', 0x1b, service_actions), 'PERSISTENT_RESERVE_IN': OpCode('PERSISTENT_RESERVE_IN', 0x5e, {}), 'PERSISTENT_RESERVE_OUT': OpCode('PERSISTENT_RESERVE_OUT', 0x5f, {}), 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}), 'POSITION_TO_ELEMENT': OpCode('POSITION_TO_ELEMENT', 0x2b, {}), 'READ_ATTRIBUTE': OpCode('READ_ATTRIBUTE', 0x8c, {}), 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}), 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}), 'READ_ELEMENT_STATUS': OpCode('READ_ELEMENT_STATUS', 0xb8, {}), 'RECEIVE_DIAGNOSTIC_RESULTS': OpCode('RECEIVE_DIAGNOSTIC_RESULTS', 0x1c, {}), 'REDUNDANCY_GROUP_IN': OpCode('REDUNDANCY_GROUP_IN', 0xba, {}), 'REDUNDANCY_GROUP_OUT': OpCode('REDUNDANCY_GROUP_OUT', 0xbb, {}), 'RELEASE_6': OpCode('RELEASE_6', 0x17, {}), 'RELEASE_10': OpCode('RELEASE_10', 0x57, {}), 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}), 'REPORT_VOLUME_TYPES_SUPPORTED': OpCode('REPORT_VOLUME_TYPES_SUPPORTED', 0x44, {}), 'REQUEST_VOLUME_ELEMENT_ADDRESS': OpCode('REQUEST_VOLUME_ELEMENT_ADDRESS', 0xb5, {}), 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}), 'RESERVE_6': OpCode('RESERVE_6', 0x16, {}), 'RESERVE_10': OpCode('RESERVE_10', 0x56, {}), 'SEND_DIAGNOSTIC': OpCode('SEND_DIAGNOSTIC', 0x1d, {}), 'SEND_VOLUME_TAG': OpCode('SEND_VOLUME_TAG', 0xb6, {}), 'SPARE_IN': OpCode('SPARE_IN', 0xbc, {}), 
'SPARE_OUT': OpCode('SPARE_OUT', 0xbd, {}), 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}), 'VOLUME_SET_IN': OpCode('VOLUME_SET_IN', 0xbe, {}), 'VOLUME_SET_OUT': OpCode('VOLUME_SET_OUT', 0xbf, {}), 'WRITE_ATTRIBUTE': OpCode('WRITE_ATTRIBUTE', 0x8d, {}), 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), } mmc_opcodes = {'BLANK': OpCode('BLANK', 0xa1, {}), 'CLOSE_TRACK_SESSION': OpCode('CLOSE_TRACK_SESSION', 0x5b, {}), 'FORMAT_UNIT': OpCode('FORMAT_UNIT', 0x04, {}), 'GET_CONFIGURATION': OpCode('GET_CONFIGURATION', 0x46, {}), 'GET_EVENT_STATUS_NOTIFICATION': OpCode('GET_EVENT_STATUS_NOTIFICATION', 0x4a, {}), 'GET_PERFORMANCE': OpCode('GET_PERFORMANCE', 0xac, {}), 'INQUIRY': OpCode('INQUIRY', 0x12, {}), 'LOAD_UNLOAD_MEDIUM': OpCode('LOAD_UNLOAD_MEDIUM', 0xa6, {}), 'MECHANISM_STATUS': OpCode('MECHANISM_STATUS', 0xbd, {}), 'MODE_SELECT_10': OpCode('MODE_SELECT_10', 0x55, {}), 'MODE_SENSE_10': OpCode('MODE_SENSE_10', 0xa5, {}), 'PREVENT_ALLOW_MEDIUM_REMOVAL': OpCode('PREVENT_ALLOW_MEDIUM_REMOVAL', 0x1e, {}), 'READ_10': OpCode('READ_10', 0x28, {}), 'READ_12': OpCode('READ_12', 0xa8, {}), 'READ_BUFFER_10': OpCode('READ_BUFFER_10', 0x3c, {}), 'READ_BUFFER_16': OpCode('READ_BUFFER_16', 0x9b, {}), 'READ_BUFFER_CAPACITY': OpCode('READ_BUFFER_CAPACITY', 0x5c, {}), 'READ_CAPACITY': OpCode('READ_CAPACITY', 0x25, {}), 'READ_CD': OpCode('READ_CD', 0xbe, {}), 'READ_CD_MSF': OpCode('READ_CD_MSF', 0xb9, {}), 'READ_DISC_INFORMATION': OpCode('READ_DISC_INFORMATION', 0x51, {}), 'READ_DISC_STRUCTURE': OpCode('READ_DISC_STRUCTURE', 0xad, {}), 'READ_FORMAT_CAPACITIES': OpCode('READ_FORMAT_CAPACITIES', 0x23, {}), 'READ_TOC_PMA_ATIP': OpCode('READ_TOC_PMA_ATIP', 0x43, {}), 'READ_TRACK_INFORMATION': OpCode('READ_TRACK_INFORMATION', 0x52, {}), 'REPAIR_TRACK': OpCode('REPAIR_TRACK', 0x58, {}), 'REPORT_KEY': OpCode('REPORT_KEY', 0xa4, {}), 'REPORT_LUNS': OpCode('REPORT_LUNS', 0xa0, {}), 'REQUEST_SENSE': OpCode('REQUEST_SENSE', 0x03, {}), 'RESERVE_TRACK': OpCode('RESERVE_TRACK', 0x53, {}), 
'SECURITY_PROTOCOL_IN': OpCode('SECURITY_PROTOCOL_IN', 0xa2, {}), 'SECURITY_PROTOCOL_OUT': OpCode('SECURITY_PROTOCOL_OUT', 0xb5, {}), 'SEEK_10': OpCode('SEEK_10', 0x2b, {}), 'SEND_CUE_SHEET': OpCode('SEND_CUE_SHEET', 0x5d, {}), 'SEND_DISC_STRUCTURE': OpCode('SEND_DISC_STRUCTURE', 0xbf, {}), 'SEND_KEY': OpCode('SEND_KEY', 0xa3, {}), 'SEND_OPC_INFORMATION': OpCode('SEND_OPC_INFORMATION', 0x54, {}), 'SET_CD_SPEED': OpCode('SET_CD_SPEED', 0xbb, {}), 'SET_READ_AHEAD': OpCode('SET_READ_AHEAD', 0xa7, {}), 'SET_STREAMING': OpCode('SET_STREAMING', 0xb6, {}), 'START_STOP_UNIT': OpCode('START_STOP_UNIT', 0x1b, {}), 'SYNCHRONIZE_CACHE': OpCode('SYNCHRONIZE_CACHE', 0x35, {}), 'TEST_UNIT_READY': OpCode('TEST_UNIT_READY', 0x00, {}), 'VERIFY_10': OpCode('VERIFY_10', 0x2f, {}), 'WRITE_10': OpCode('WRITE_10', 0x2a, {}), 'WRITE_12': OpCode('WRITE_12', 0xaa, {}), 'WRITE_AND_VERIFY_10': OpCode('WRITE_AND_VERIFY_10', 0x2e, {}), 'WRITE_BUFFER': OpCode('WRITE_BUFFER', 0x3b, {}), } """ ------------------------------------------------------------------------------ scsi status Dictionaries ------------------------------------------------------------------------------ """ scsi_status = {'GOOD': 0x00, 'CHECK_CONDITION': 0x02, 'CONDITIONS_MET': 0x04, 'BUSY': 0x08, 'RESERVATION_CONFLICT': 0x18, 'TASK_SET_FULL': 0x28, 'ACA_ACTIVE': 0x30, 'TASK_ABORTED': 0x40, 'SGIO_ERROR': 0xff, } """ ------------------------------------------------------------------------------ open/close ------------------------------------------------------------------------------ """ action_codes = {''} """ ------------------------------------------------------------------------------ Instantiate the Enum Objects ------------------------------------------------------------------------------ """ SCSI_STATUS = Enum(scsi_status) spc = Enum(spc_opcodes) sbc = Enum(sbc_opcodes) ssc = Enum(ssc_opcodes) smc = Enum(smc_opcodes) mmc = Enum(mmc_opcodes) """ ------------------------------------------------------------------------------ 
Obsolete Dictionaries and Enums ------------------------------------------------------------------------------ NOTE: the dicts and Enums in this section and will be removed in a future release """ opcodes = {'INQUIRY': 0x12, 'MODE_SENSE_6': 0x1a, 'MOVE_MEDIUM': 0xa5, 'READ_10': 0x28, 'READ_12': 0xa8, 'READ_16': 0x88, 'READ_CAPACITY_10': 0x25, 'READ_ELEMENT_STATUS': 0xb8, 'SERVICE_ACTION_IN': 0x9e, 'TEST_UNIT_READY': 0x00, 'WRITE_10': 0x2a, 'WRITE_12': 0xaa, 'WRITE_16': 0x8a, 'WRITE_SAME_10': 0x41, 'WRITE_SAME_16': 0x93, } OPCODE = Enum(opcodes) service_action_ins = {'READ_CAPACITY_16': 0x10, 'GET_LBA_STATUS': 0x12, } SERVICE_ACTION_IN = Enum(service_action_ins) """ ------------------------------------------------------------------------------ """ </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lgpl-2.1</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475120"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">forrestv/myhdl</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">myhdl/test/conversion/toVerilog/test_inc.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5163</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os 
path = os.path import unittest from unittest import TestCase import random from random import randrange random.seed(2) from myhdl import * from util import setupCosimulation ACTIVE_LOW, INACTIVE_HIGH = 0, 1 def incRef(count, enable, clock, reset, n): """ Incrementer with enable. count -- output enable -- control input, increment when 1 clock -- clock input reset -- asynchronous reset input n -- counter max value """ @instance def logic(): while 1: yield clock.posedge, reset.negedge if reset == ACTIVE_LOW: count.next = 0 else: if enable: count.next = (count + 1) % n return logic def inc(count, enable, clock, reset, n): """ Incrementer with enable. count -- output enable -- control input, increment when 1 clock -- clock input reset -- asynchronous reset input n -- counter max value """ @always(clock.posedge, reset.negedge) def incProcess(): if reset == ACTIVE_LOW: count.next = 0 else: if enable: count.next = (count + 1) % n return incProcess def inc2(count, enable, clock, reset, n): @always(clock.posedge, reset.negedge) def incProcess(): if reset == ACTIVE_LOW: count.next = 0 else: if enable: if count == n-1: count.next = 0 else: count.next = count + 1 return incProcess def incTask(count, enable, clock, reset, n): def incTaskFunc(cnt, enable, reset, n): if enable: cnt[:] = (cnt + 1) % n @instance def incTaskGen(): cnt = intbv(0)[8:] while 1: yield clock.posedge, reset.negedge if reset == ACTIVE_LOW: cnt[:] = 0 count.next = 0 else: # print count incTaskFunc(cnt, enable, reset, n) count.next = cnt return incTaskGen def incTaskFreeVar(count, enable, clock, reset, n): def incTaskFunc(): if enable: count.next = (count + 1) % n @always(clock.posedge, reset.negedge) def incTaskGen(): if reset == ACTIVE_LOW: count.next = 0 else: # print count incTaskFunc() return incTaskGen def inc_v(name, count, enable, clock, reset): return setupCosimulation(**locals()) class TestInc(TestCase): def clockGen(self, clock): while 1: yield delay(10) clock.next = not clock def stimulus(self, 
enable, clock, reset): reset.next = INACTIVE_HIGH yield clock.negedge reset.next = ACTIVE_LOW yield clock.negedge reset.next = INACTIVE_HIGH for i in range(1000): enable.next = 1 yield clock.negedge for i in range(1000): enable.next = min(1, randrange(5)) yield clock.negedge raise StopSimulation def check(self, count, count_v, enable, clock, reset, n): expect = 0 yield reset.posedge self.assertEqual(count, expect) self.assertEqual(count, count_v) while 1: yield clock.posedge if enable: expect = (expect + 1) % n yield delay(1) # print "%d count %s expect %s count_v %s" % (now(), count, expect, count_v) self.assertEqual(count, expect) self.assertEqual(count, count_v) def bench(self, inc): m = 8 n = 2 ** m count = Signal(intbv(0)[m:]) count_v = Signal(intbv(0)[m:]) enable = Signal(bool(0)) clock, reset = [Signal(bool()) for i in range(2)] inc_inst_ref = incRef(count, enable, clock, reset, n=n) inc_inst = toVerilog(inc, count, enable, clock, reset, n=n) # inc_inst = inc(count, enable, clock, reset, n=n) inc_inst_v = inc_v(inc.func_name, count_v, enable, clock, reset) clk_1 = self.clockGen(clock) st_1 = self.stimulus(enable, clock, reset) ch_1 = self.check(count, count_v, enable, clock, reset, n=n) sim = Simulation(inc_inst_ref, inc_inst_v, clk_1, st_1, ch_1) return sim def testIncRef(self): """ Check increment operation """ sim = self.bench(incRef) sim.run(quiet=1) def testInc(self): """ Check increment operation """ sim = self.bench(inc) sim.run(quiet=1) def testInc2(self): """ Check increment operation """ sim = self.bench(inc2) sim.run(quiet=1) def testIncTask(self): sim = self.bench(incTask) sim.run(quiet=1) def testIncTaskFreeVar(self): sim = self.bench(incTaskFreeVar) sim.run(quiet=1) if __name__ == '__main__': unittest.main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lgpl-2.1</span></div> </div></div> </td> </tr><tr class="group cursor-pointer 
space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475121"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">grap/OpenUpgrade</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">setup/package.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">180</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">22070</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python2 # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# ############################################################################## import optparse import os import pexpect import shutil import signal import subprocess import tempfile import time import xmlrpclib from contextlib import contextmanager from glob import glob from os.path import abspath, dirname, join from sys import stdout from tempfile import NamedTemporaryFile #---------------------------------------------------------- # Utils #---------------------------------------------------------- execfile(join(dirname(__file__), '..', 'openerp', 'release.py')) version = version.split('-')[0] timestamp = time.strftime("%Y%m%d", time.gmtime()) GPGPASSPHRASE = os.getenv('GPGPASSPHRASE') GPGID = os.getenv('GPGID') PUBLISH_DIRS = { 'debian': 'deb', 'redhat': 'rpm', 'tarball': 'src', 'windows': 'exe', } ADDONS_NOT_TO_PUBLISH = [ 'web_analytics' ] def mkdir(d): if not os.path.isdir(d): os.makedirs(d) def system(l, chdir=None): print l if chdir: cwd = os.getcwd() os.chdir(chdir) if isinstance(l, list): rc = os.spawnvp(os.P_WAIT, l[0], l) elif isinstance(l, str): tmp = ['sh', '-c', l] rc = os.spawnvp(os.P_WAIT, tmp[0], tmp) if chdir: os.chdir(cwd) return rc def _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'): time.sleep(5) modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute( dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')] ) if modules and len(modules) > 1: time.sleep(1) toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute( dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'to install')] ) if toinstallmodules: print("Package test: FAILED. Not able to install dependencies of base.") raise Exception("Installation of package failed") else: print("Package test: successfuly installed %s modules" % len(modules)) else: print("Package test: FAILED. 
Not able to install base.") raise Exception("Installation of package failed") def publish(o, type, extensions): def _publish(o, release): arch = '' filename = release.split(os.path.sep)[-1] release_dir = PUBLISH_DIRS[type] release_path = join(o.pub, release_dir, filename) system('mkdir -p %s' % join(o.pub, release_dir)) shutil.move(join(o.build_dir, release), release_path) # Latest/symlink handler release_abspath = abspath(release_path) latest_abspath = release_abspath.replace(timestamp, 'latest') if os.path.islink(latest_abspath): os.unlink(latest_abspath) os.symlink(release_abspath, latest_abspath) return release_path published = [] for extension in extensions: release = glob("%s/odoo_*.%s" % (o.build_dir, extension))[0] published.append(_publish(o, release)) return published class OdooDocker(object): def __init__(self): self.log_file = NamedTemporaryFile(mode='w+b', prefix="bash", suffix=".txt", delete=False) self.port = 8069 # TODO sle: reliable way to get a free port? self.prompt_re = '[root@nightly-tests] # ' self.timeout = 600 def system(self, command): self.docker.sendline(command) self.docker.expect_exact(self.prompt_re) def start(self, docker_image, build_dir, pub_dir): self.build_dir = build_dir self.pub_dir = pub_dir self.docker = pexpect.spawn( 'docker run -v %s:/opt/release -p 127.0.0.1:%s:8069' ' -t -i %s /bin/bash --noediting' % (self.build_dir, self.port, docker_image), timeout=self.timeout, searchwindowsize=len(self.prompt_re) + 1, ) time.sleep(2) # let the bash start self.docker.logfile_read = self.log_file self.id = subprocess.check_output('docker ps -l -q', shell=True) def end(self): try: _rpc_count_modules(port=str(self.port)) except Exception, e: print('Exception during docker execution: %s:' % str(e)) print('Error during docker execution: printing the bash output:') with open(self.log_file.name) as f: print '\n'.join(f.readlines()) raise finally: self.docker.close() system('docker rm -f %s' % self.id) self.log_file.close() 
os.remove(self.log_file.name) @contextmanager def docker(docker_image, build_dir, pub_dir): _docker = OdooDocker() try: _docker.start(docker_image, build_dir, pub_dir) try: yield _docker except Exception, e: raise finally: _docker.end() class KVM(object): def __init__(self, o, image, ssh_key='', login='openerp'): self.o = o self.image = image self.ssh_key = ssh_key self.login = login def timeout(self,signum,frame): print "vm timeout kill",self.pid os.kill(self.pid,15) def start(self): l="kvm -net nic,model=rtl8139 -net user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432 -drive".split(" ") #l.append('file=%s,if=virtio,index=0,boot=on,snapshot=on'%self.image) l.append('file=%s,snapshot=on'%self.image) #l.extend(['-vnc','127.0.0.1:1']) l.append('-nographic') print " ".join(l) self.pid=os.spawnvp(os.P_NOWAIT, l[0], l) time.sleep(10) signal.alarm(2400) signal.signal(signal.SIGALRM, self.timeout) try: self.run() finally: signal.signal(signal.SIGALRM, signal.SIG_DFL) os.kill(self.pid,15) time.sleep(10) def ssh(self,cmd): l=['ssh','-o','UserKnownHostsFile=/dev/null','-o','StrictHostKeyChecking=no','-p','10022','-i',self.ssh_key,'%<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="ee9daedfdcd9c0dec0dec0df">[email protected]</a>'%self.login,cmd] system(l) def rsync(self,args,options='--delete --exclude .bzrignore'): cmd ='rsync -rt -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s" %s %s' % (self.ssh_key, options, args) system(cmd) def run(self): pass class KVMWinBuildExe(KVM): def run(self): with open(join(self.o.build_dir, 'setup/win32/Makefile.version'), 'w') as f: f.write("VERSION=%s\n" % self.o.version_full) with open(join(self.o.build_dir, 'setup/win32/Makefile.python'), 'w') as f: f.write("PYTHON_VERSION=%s\n" % self.o.vm_winxp_python_version.replace('.', '')) self.ssh("mkdir -p build") self.rsync('%s/ %<a href="/cdn-cgi/l/email-protection" class="__cf_email__" 
data-cfemail="eb98abdad9dcc5dbc5dbc5da">[email protected]</a>:build/server/' % (self.o.build_dir, self.login)) self.ssh("cd build/server/setup/win32;time make allinone;") self.rsync('%<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="4d3e0d7c7f7a637d637d637c">[email protected]</a>:build/server/setup/win32/release/ %s/' % (self.login, self.o.build_dir), '') print "KVMWinBuildExe.run(): done" class KVMWinTestExe(KVM): def run(self): # Cannot use o.version_full when the version is not correctly parsed # (for instance, containing *rc* or *dev*) setuppath = glob("%s/openerp-server-setup-*.exe" % self.o.build_dir)[0] setupfile = setuppath.split('/')[-1] setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0] self.rsync('"%s" %<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="5023106162677e607e607e61">[email protected]</a>:' % (setuppath, self.login)) self.ssh("TEMP=/tmp ./%s /S" % setupfile) self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/"Program Files"/"Odoo %s"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion) self.ssh('/cygdrive/c/"Program Files"/"Odoo %s"/server/openerp-server.exe -d mycompany -i base --stop-after-init' % setupversion) self.ssh('net start odoo-server-8.0') _rpc_count_modules(port=18069) #---------------------------------------------------------- # Stage: building #---------------------------------------------------------- def _prepare_build_dir(o, win32=False): cmd = ['rsync', '-a', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo'] if not win32: cmd += ['--exclude', 'setup/win32'] system(cmd + ['%s/' % o.odoo_dir, o.build_dir]) try: for addon_path in glob(join(o.build_dir, 'addons/*')): if addon_path.split(os.path.sep)[-1] not in ADDONS_NOT_TO_PUBLISH: shutil.move(addon_path, join(o.build_dir, 'openerp/addons')) except shutil.Error: # Thrown when the add-on is already in openerp/addons (if _prepare_build_dir # has already been called once) pass def 
build_tgz(o): system(['python2', 'setup.py', 'sdist', '--quiet', '--formats=gztar,zip'], o.build_dir) system(['mv', glob('%s/dist/odoo-*.tar.gz' % o.build_dir)[0], '%s/odoo_%s.%s.tar.gz' % (o.build_dir, version, timestamp)]) system(['mv', glob('%s/dist/odoo-*.zip' % o.build_dir)[0], '%s/odoo_%s.%s.zip' % (o.build_dir, version, timestamp)]) def build_deb(o): # Append timestamp to version for the .dsc to refer the right .tar.gz cmd=['sed', '-i', '1s/^.*$/odoo (%s.%s) stable; urgency=low/'%(version,timestamp), 'debian/changelog'] subprocess.call(cmd, cwd=o.build_dir) deb = pexpect.spawn('dpkg-buildpackage -rfakeroot -k%s' % GPGID, cwd=o.build_dir) deb.logfile = stdout if GPGPASSPHRASE: deb.expect_exact('Enter passphrase: ', timeout=1200) deb.send(GPGPASSPHRASE + '\r\n') deb.expect_exact('Enter passphrase: ') deb.send(GPGPASSPHRASE + '\r\n') deb.expect(pexpect.EOF, timeout=1200) system(['mv', glob('%s/../odoo_*.deb' % o.build_dir)[0], '%s' % o.build_dir]) system(['mv', glob('%s/../odoo_*.dsc' % o.build_dir)[0], '%s' % o.build_dir]) system(['mv', glob('%s/../odoo_*_amd64.changes' % o.build_dir)[0], '%s' % o.build_dir]) system(['mv', glob('%s/../odoo_*.tar.gz' % o.build_dir)[0], '%s' % o.build_dir]) def build_rpm(o): system(['python2', 'setup.py', '--quiet', 'bdist_rpm'], o.build_dir) system(['mv', glob('%s/dist/odoo-*.noarch.rpm' % o.build_dir)[0], '%s/odoo_%s.%s.noarch.rpm' % (o.build_dir, version, timestamp)]) def build_exe(o): KVMWinBuildExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start() system(['cp', glob('%s/openerp*.exe' % o.build_dir)[0], '%s/odoo_%s.%s.exe' % (o.build_dir, version, timestamp)]) #---------------------------------------------------------- # Stage: testing #---------------------------------------------------------- def _prepare_testing(o): if not o.no_tarball: subprocess.call(["mkdir", "docker_src"], cwd=o.build_dir) subprocess.call(["cp", "package.dfsrc", os.path.join(o.build_dir, "docker_src", "Dockerfile")], 
cwd=os.path.join(o.odoo_dir, "setup")) # Use rsync to copy requirements.txt in order to keep original permissions subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_src")], cwd=os.path.join(o.odoo_dir)) subprocess.call(["docker", "build", "-t", "odoo-%s-src-nightly-tests" % version, "."], cwd=os.path.join(o.build_dir, "docker_src")) if not o.no_debian: subprocess.call(["mkdir", "docker_debian"], cwd=o.build_dir) subprocess.call(["cp", "package.dfdebian", os.path.join(o.build_dir, "docker_debian", "Dockerfile")], cwd=os.path.join(o.odoo_dir, "setup")) # Use rsync to copy requirements.txt in order to keep original permissions subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_debian")], cwd=os.path.join(o.odoo_dir)) subprocess.call(["docker", "build", "-t", "odoo-%s-debian-nightly-tests" % version, "."], cwd=os.path.join(o.build_dir, "docker_debian")) if not o.no_rpm: subprocess.call(["mkdir", "docker_centos"], cwd=o.build_dir) subprocess.call(["cp", "package.dfcentos", os.path.join(o.build_dir, "docker_centos", "Dockerfile")], cwd=os.path.join(o.odoo_dir, "setup")) subprocess.call(["docker", "build", "-t", "odoo-%s-centos-nightly-tests" % version, "."], cwd=os.path.join(o.build_dir, "docker_centos")) def test_tgz(o): with docker('odoo-%s-src-nightly-tests' % version, o.build_dir, o.pub) as wheezy: wheezy.release = '*.tar.gz' wheezy.system("service postgresql start") wheezy.system('pip install /opt/release/%s' % wheezy.release) wheezy.system("useradd --system --no-create-home odoo") wheezy.system('su postgres -s /bin/bash -c "createuser -s odoo"') wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"') wheezy.system('mkdir /var/lib/odoo') wheezy.system('chown odoo:odoo /var/lib/odoo') wheezy.system('su odoo -s /bin/bash -c "odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany -i base --stop-after-init"') wheezy.system('su odoo -s /bin/bash -c "odoo.py 
--addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany &"') def test_deb(o): with docker('odoo-%s-debian-nightly-tests' % version, o.build_dir, o.pub) as wheezy: wheezy.release = '*.deb' wheezy.system("service postgresql start") wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"') wheezy.system('/usr/bin/dpkg -i /opt/release/%s' % wheezy.release) wheezy.system('/usr/bin/apt-get install -f -y') wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"') wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany &"') def test_rpm(o): with docker('odoo-%s-centos-nightly-tests' % version, o.build_dir, o.pub) as centos7: centos7.release = '*.noarch.rpm' # Start postgresql centos7.system('su postgres -c "/usr/bin/pg_ctl -D /var/lib/postgres/data start"') centos7.system('sleep 5') centos7.system('su postgres -c "createdb mycompany"') # Odoo install centos7.system('yum install -d 0 -e 0 /opt/release/%s -y' % centos7.release) centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"') centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany &"') def test_exe(o): KVMWinTestExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start() #--------------------------------------------------------- # Generates Packages, Sources and Release files of debian package #--------------------------------------------------------- def gen_deb_package(o, published_files): # Executes command to produce file_name in path, and moves it to o.pub/deb def _gen_file(o, (command, file_name), path): cur_tmp_file_path = os.path.join(path, file_name) with open(cur_tmp_file_path, 'w') as out: subprocess.call(command, stdout=out, cwd=path) system(['cp', cur_tmp_file_path, os.path.join(o.pub, 'deb', file_name)]) # Copy files to a temp directory 
(required because the working directory must contain only the # files of the last release) temp_path = tempfile.mkdtemp(suffix='debPackages') for pub_file_path in published_files: system(['cp', pub_file_path, temp_path]) commands = [ (['dpkg-scanpackages', '.'], "Packages"), # Generate Packages file (['dpkg-scansources', '.'], "Sources"), # Generate Sources file (['apt-ftparchive', 'release', '.'], "Release") # Generate Release file ] # Generate files for command in commands: _gen_file(o, command, temp_path) # Remove temp directory shutil.rmtree(temp_path) # Generate Release.gpg (= signed Release) # Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature) subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(o.pub, 'deb')) #--------------------------------------------------------- # Generates an RPM repo #--------------------------------------------------------- def gen_rpm_repo(o, file_name): # Sign the RPM rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(o.pub, 'rpm')) rpmsign.expect_exact('Enter pass phrase: ') rpmsign.send(GPGPASSPHRASE + '\r\n') rpmsign.expect(pexpect.EOF) # Removes the old repodata subprocess.call(['rm', '-rf', os.path.join(o.pub, 'rpm', 'repodata')]) # Copy files to a temp directory (required because the working directory must contain only the # files of the last release) temp_path = tempfile.mkdtemp(suffix='rpmPackages') subprocess.call(['cp', file_name, temp_path]) subprocess.call(['createrepo', temp_path]) # creates a repodata folder in temp_path subprocess.call(['cp', '-r', os.path.join(temp_path, "repodata"), os.path.join(o.pub, 'rpm')]) # Remove temp directory shutil.rmtree(temp_path) #---------------------------------------------------------- # Options and Main #---------------------------------------------------------- def options(): op = 
optparse.OptionParser() root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) build_dir = "%s-%s" % (root, timestamp) op.add_option("-b", "--build-dir", default=build_dir, help="build directory (%default)", metavar="DIR") op.add_option("-p", "--pub", default=None, help="pub directory (%default)", metavar="DIR") op.add_option("", "--no-testing", action="store_true", help="don't test the builded packages") op.add_option("-v", "--version", default='8.0', help="version (%default)") op.add_option("", "--no-debian", action="store_true", help="don't build the debian package") op.add_option("", "--no-rpm", action="store_true", help="don't build the rpm package") op.add_option("", "--no-tarball", action="store_true", help="don't build the tarball") op.add_option("", "--no-windows", action="store_true", help="don't build the windows package") # Windows VM op.add_option("", "--vm-winxp-image", default='/home/odoo/vm/winxp27/winxp27.vdi', help="%default") op.add_option("", "--vm-winxp-ssh-key", default='/home/odoo/vm/winxp27/id_rsa', help="%default") op.add_option("", "--vm-winxp-login", default='Naresh', help="Windows login (%default)") op.add_option("", "--vm-winxp-python-version", default='2.7', help="Windows Python version installed in the VM (default: %default)") (o, args) = op.parse_args() # derive other options o.odoo_dir = root o.pkg = join(o.build_dir, 'pkg') o.version_full = '%s-%s' % (o.version, timestamp) o.work = join(o.build_dir, 'openerp-%s' % o.version_full) o.work_addons = join(o.work, 'openerp', 'addons') return o def main(): o = options() _prepare_build_dir(o) if not o.no_testing: _prepare_testing(o) try: if not o.no_tarball: build_tgz(o) try: if not o.no_testing: test_tgz(o) published_files = publish(o, 'tarball', ['tar.gz', 'zip']) except Exception, e: print("Won't publish the tgz release.\n Exception: %s" % str(e)) if not o.no_debian: build_deb(o) try: if not o.no_testing: test_deb(o) published_files = publish(o, 'debian', ['deb', 'dsc', 
'changes', 'tar.gz']) gen_deb_package(o, published_files) except Exception, e: print("Won't publish the deb release.\n Exception: %s" % str(e)) if not o.no_rpm: build_rpm(o) try: if not o.no_testing: test_rpm(o) published_files = publish(o, 'redhat', ['noarch.rpm']) gen_rpm_repo(o, published_files[0]) except Exception, e: print("Won't publish the rpm release.\n Exception: %s" % str(e)) if not o.no_windows: _prepare_build_dir(o, win32=True) build_exe(o) try: if not o.no_testing: test_exe(o) published_files = publish(o, 'windows', ['exe']) except Exception, e: print("Won't publish the exe release.\n Exception: %s" % str(e)) except: pass finally: shutil.rmtree(o.build_dir) print('Build dir %s removed' % o.build_dir) if not o.no_testing: system("docker rm -f `docker ps -a | awk '{print $1 }'` 2>>/dev/null") print('Remaining dockers removed') if __name__ == '__main__': main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475122"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">miptliot/edx-platform</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">openedx/core/djangoapps/ccxcon/tasks.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">19</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1672</span></div> </div></div> </td><td 
class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" This file contains celery tasks for ccxcon """ from celery.task import task # pylint: disable=no-name-in-module, import-error from celery.utils.log import get_task_logger # pylint: disable=no-name-in-module, import-error from opaque_keys.edx.keys import CourseKey from requests.exceptions import ConnectionError, HTTPError, RequestException, TooManyRedirects from openedx.core.djangoapps.ccxcon import api log = get_task_logger(__name__) @task() def update_ccxcon(course_id, cur_retry=0): """ Pass through function to update course information on CCXCon. Takes care of retries in case of some specific exceptions. Args: course_id (str): string representing a course key cur_retry (int): integer representing the current task retry """ course_key = CourseKey.from_string(course_id) try: api.course_info_to_ccxcon(course_key) log.info('Course update to CCXCon returned no errors. 
Course key: %s', course_id) except (ConnectionError, HTTPError, RequestException, TooManyRedirects, api.CCXConnServerError) as exp: log.error('Course update to CCXCon failed for course_id %s with error: %s', course_id, exp) # in case the maximum amount of retries has not been reached, # insert another task delayed exponentially up to 5 retries if cur_retry < 5: update_ccxcon.apply_async( kwargs={'course_id': course_id, 'cur_retry': cur_retry + 1}, countdown=10 ** cur_retry # number of seconds the task should be delayed ) log.info('Requeued celery task for course key %s ; retry # %s', course_id, cur_retry + 1) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475123"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">neharejanjeva/techstitution</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">venv/lib/python2.7/site-packages/flask/testsuite/reqctx.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">557</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5960</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- """ flask.testsuite.reqctx ~~~~~~~~~~~~~~~~~~~~~~ Tests the request context. 
:copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import flask import unittest try: from greenlet import greenlet except ImportError: greenlet = None from flask.testsuite import FlaskTestCase class RequestContextTestCase(FlaskTestCase): def test_teardown_on_pop(self): buffer = [] app = flask.Flask(__name__) @app.teardown_request def end_of_request(exception): buffer.append(exception) ctx = app.test_request_context() ctx.push() self.assert_equal(buffer, []) ctx.pop() self.assert_equal(buffer, [None]) def test_proper_test_request_context(self): app = flask.Flask(__name__) app.config.update( SERVER_NAME='localhost.localdomain:5000' ) @app.route('/') def index(): return None @app.route('/', subdomain='foo') def sub(): return None with app.test_request_context('/'): self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/') with app.test_request_context('/'): self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/') try: with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}): pass except Exception as e: self.assert_true(isinstance(e, ValueError)) self.assert_equal(str(e), "the server name provided " + "('localhost.localdomain:5000') does not match the " + \ "server name from the WSGI environment ('localhost')") try: app.config.update(SERVER_NAME='localhost') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) try: app.config.update(SERVER_NAME='localhost:80') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) def test_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' 
% flask.request.args['name'] @app.route('/meh') def meh(): return flask.request.url with app.test_request_context('/?name=World'): self.assert_equal(index(), 'Hello World!') with app.test_request_context('/meh'): self.assert_equal(meh(), 'http://localhost/meh') self.assert_true(flask._request_ctx_stack.top is None) def test_context_test(self): app = flask.Flask(__name__) self.assert_false(flask.request) self.assert_false(flask.has_request_context()) ctx = app.test_request_context() ctx.push() try: self.assert_true(flask.request) self.assert_true(flask.has_request_context()) finally: ctx.pop() def test_manual_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' % flask.request.args['name'] ctx = app.test_request_context('/?name=World') ctx.push() self.assert_equal(index(), 'Hello World!') ctx.pop() try: index() except RuntimeError: pass else: self.assert_true(0, 'expected runtime error') def test_greenlet_context_copying(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() def g(): self.assert_false(flask.request) self.assert_false(flask.current_app) with reqctx: self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') self.assert_false(flask.request) return 42 greenlets.append(greenlet(g)) return 'Hello World!' 
rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) def test_greenlet_context_copying_api(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() @flask.copy_current_request_context def g(): self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') return 42 greenlets.append(greenlet(g)) return 'Hello World!' rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) # Disable test if we don't have greenlets available if greenlet is None: test_greenlet_context_copying = None test_greenlet_context_copying_api = None def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(RequestContextTestCase)) return suite </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cc0-1.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475124"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lichia/luigi</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">luigi/contrib/hdfs/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">12</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" 
dir="auto"> <div> <span class="block ">3160</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Provides access to HDFS using the :py:class:`HdfsTarget`, a subclass of :py:class:`~luigi.target.Target`. You can configure what client by setting the "client" config under the "hdfs" section in the configuration, or using the ``--hdfs-client`` command line option. "hadoopcli" is the slowest, but should work out of the box. "snakebite" is the fastest, but requires Snakebite to be installed. Currently (4th May) the :py:mod:`luigi.contrib.hdfs` module is under reorganization. We recommend importing the reexports from :py:mod:`luigi.contrib.hdfs` instead of the sub-modules, as we're not yet sure how the final structure of the sub-modules will be. Eventually this module will be empty and you'll have to import directly from the sub modules like :py:mod:`luigi.contrib.hdfs.config`. 
""" # config.py from luigi.contrib.hdfs import config as hdfs_config hdfs = hdfs_config.hdfs load_hadoop_cmd = hdfs_config.load_hadoop_cmd get_configured_hadoop_version = hdfs_config.get_configured_hadoop_version get_configured_hdfs_client = hdfs_config.get_configured_hdfs_client tmppath = hdfs_config.tmppath # clients from luigi.contrib.hdfs import clients as hdfs_clients from luigi.contrib.hdfs import error as hdfs_error from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients HDFSCliError = hdfs_error.HDFSCliError call_check = hdfs_hadoopcli_clients.HdfsClient.call_check list_path = hdfs_snakebite_client.SnakebiteHdfsClient.list_path HdfsClient = hdfs_hadoopcli_clients.HdfsClient SnakebiteHdfsClient = hdfs_snakebite_client.SnakebiteHdfsClient HdfsClientCdh3 = hdfs_hadoopcli_clients.HdfsClientCdh3 HdfsClientApache1 = hdfs_hadoopcli_clients.HdfsClientApache1 create_hadoopcli_client = hdfs_hadoopcli_clients.create_hadoopcli_client get_autoconfig_client = hdfs_clients.get_autoconfig_client exists = hdfs_clients.exists rename = hdfs_clients.rename remove = hdfs_clients.remove mkdir = hdfs_clients.mkdir listdir = hdfs_clients.listdir # format.py from luigi.contrib.hdfs import format as hdfs_format HdfsReadPipe = hdfs_format.HdfsReadPipe HdfsAtomicWritePipe = hdfs_format.HdfsAtomicWritePipe HdfsAtomicWriteDirPipe = hdfs_format.HdfsAtomicWriteDirPipe PlainFormat = hdfs_format.PlainFormat PlainDirFormat = hdfs_format.PlainDirFormat Plain = hdfs_format.Plain PlainDir = hdfs_format.PlainDir CompatibleHdfsFormat = hdfs_format.CompatibleHdfsFormat # target.py from luigi.contrib.hdfs import target as hdfs_target HdfsTarget = hdfs_target.HdfsTarget </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer 
space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475125"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ubiar/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/note/tests/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">260</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1076</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . 
import test_note # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475126"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mathgl67/pymmr</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/file.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4005</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # vi:ai:et:ts=4 sw=4 # # -*- coding: utf8 -*- # # PyMmr My Music Renamer # Copyright (C) 2007-2010 <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="2f424e5b47484319186f48424e4643014c4042">[email protected]</a> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # import unittest import os import mmr.file from mmr.file import BaseFile, AudioFile class TestFileFactory(unittest.TestCase): def setUp(self): # create cross os compatible path self.file_name = "name.ext" self.file_path = os.path.join("tests", "data", "file") self.file_fullpath = os.path.join( self.file_path, self.file_name ) # create a base file object with previous data # this will be used for all test in this class. self.file = mmr.file.factory(self.file_fullpath) def testName(self): self.assertEquals( self.file.name, self.file_name, "Factory must set the name to '%s' and it was '%s' !" % ( self.file_name, self.file.name ) ) def testExtension(self): self.assertEquals( self.file.extension, ".ext", "Factory must set extension to '%s' and it was '%s' !" % ( ".ext", self.file.extension ) ) def testPath(self): self.assertEquals( self.file.path, self.file_path, "Factory must set path to '%s' and it was '%s' !" % ( self.file_path, self.file.path ) ) def testFullpath(self): self.assertEquals( self.file.get_fullpath(), self.file_fullpath, "Factory must retrieve path to '%s' (!= '%s')." 
% ( self.file_fullpath, self.file.get_fullpath() ) ) class TestFileUnknown(unittest.TestCase): def setUp(self): self.file = mmr.file.factory("tests/data/file/unknown") def testObjectType(self): self.assertTrue(isinstance(self.file, BaseFile), "file should be a BaseFile object") def testExtention(self): self.assertEquals(self.file.extension, None, "file extension on unknown file should be None != %s" % self.file.extension) def testBaseFileRepr(self): self.assertEquals(repr(self.file), "<File name='unknown' extension='None' path='tests/data/file' />") class TestFileAudio(unittest.TestCase): def setUp(self): self.file = { ".mp3": mmr.file.factory("tests/data/tags/silence.mp3"), ".ogg": mmr.file.factory("tests/data/tags/silence.ogg"), ".flac":mmr.file.factory("tests/data/tags/silence.flac"), } def testMp3FileIsFileAudio(self): self.assertTrue(isinstance(self.file[".mp3"], AudioFile), "File with '.mp3' extension should be 'AudioFile'") def testOggFileIsFileAudio(self): self.assertTrue(isinstance(self.file[".ogg"], AudioFile), "File with '.ogg' extension should be 'AudioFile'") def testFlacFileIsFileAudio(self): self.assertTrue(isinstance(self.file[".flac"], AudioFile), "File with '.flac' extension should be 'AudioFile'") def testHaveTag(self): self.assertNotEquals(self.file[".mp3"].tags, None) self.assertNotEquals(self.file[".ogg"].tags, None) self.assertNotEquals(self.file[".flac"].tags, None) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475127"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cloudfoundry/php-buildpack-legacy</span></div> 
</div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">builds/runtimes/python-2.7.6/lib/python2.7/test/test_int_literal.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">138</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9128</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""Test correct treatment of hex/oct constants. This is complex because of changes due to PEP 237. """ import unittest from test import test_support class TestHexOctBin(unittest.TestCase): def test_hex_baseline(self): # A few upper/lowercase tests self.assertEqual(0x0, 0X0) self.assertEqual(0x1, 0X1) self.assertEqual(0x123456789abcdef, 0X123456789abcdef) # Baseline tests self.assertEqual(0x0, 0) self.assertEqual(0x10, 16) self.assertEqual(0x7fffffff, 2147483647) self.assertEqual(0x7fffffffffffffff, 9223372036854775807) # Ditto with a minus sign and parentheses self.assertEqual(-(0x0), 0) self.assertEqual(-(0x10), -16) self.assertEqual(-(0x7fffffff), -2147483647) self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807) # Ditto with a minus sign and NO parentheses self.assertEqual(-0x0, 0) self.assertEqual(-0x10, -16) self.assertEqual(-0x7fffffff, -2147483647) self.assertEqual(-0x7fffffffffffffff, -9223372036854775807) def test_hex_unsigned(self): # Positive constants self.assertEqual(0x80000000, 2147483648L) self.assertEqual(0xffffffff, 4294967295L) # Ditto with a minus sign and parentheses self.assertEqual(-(0x80000000), -2147483648L) self.assertEqual(-(0xffffffff), -4294967295L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 
2.3a1 self.assertEqual(-0x80000000, -2147483648L) self.assertEqual(-0xffffffff, -4294967295L) # Positive constants self.assertEqual(0x8000000000000000, 9223372036854775808L) self.assertEqual(0xffffffffffffffff, 18446744073709551615L) # Ditto with a minus sign and parentheses self.assertEqual(-(0x8000000000000000), -9223372036854775808L) self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0x8000000000000000, -9223372036854775808L) self.assertEqual(-0xffffffffffffffff, -18446744073709551615L) def test_oct_baseline(self): # Baseline tests self.assertEqual(00, 0) self.assertEqual(020, 16) self.assertEqual(017777777777, 2147483647) self.assertEqual(0777777777777777777777, 9223372036854775807) # Ditto with a minus sign and parentheses self.assertEqual(-(00), 0) self.assertEqual(-(020), -16) self.assertEqual(-(017777777777), -2147483647) self.assertEqual(-(0777777777777777777777), -9223372036854775807) # Ditto with a minus sign and NO parentheses self.assertEqual(-00, 0) self.assertEqual(-020, -16) self.assertEqual(-017777777777, -2147483647) self.assertEqual(-0777777777777777777777, -9223372036854775807) def test_oct_baseline_new(self): # A few upper/lowercase tests self.assertEqual(0o0, 0O0) self.assertEqual(0o1, 0O1) self.assertEqual(0o1234567, 0O1234567) # Baseline tests self.assertEqual(0o0, 0) self.assertEqual(0o20, 16) self.assertEqual(0o17777777777, 2147483647) self.assertEqual(0o777777777777777777777, 9223372036854775807) # Ditto with a minus sign and parentheses self.assertEqual(-(0o0), 0) self.assertEqual(-(0o20), -16) self.assertEqual(-(0o17777777777), -2147483647) self.assertEqual(-(0o777777777777777777777), -9223372036854775807) # Ditto with a minus sign and NO parentheses self.assertEqual(-0o0, 0) self.assertEqual(-0o20, -16) self.assertEqual(-0o17777777777, -2147483647) self.assertEqual(-0o777777777777777777777, 
-9223372036854775807) def test_oct_unsigned(self): # Positive constants self.assertEqual(020000000000, 2147483648L) self.assertEqual(037777777777, 4294967295L) # Ditto with a minus sign and parentheses self.assertEqual(-(020000000000), -2147483648L) self.assertEqual(-(037777777777), -4294967295L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-020000000000, -2147483648L) self.assertEqual(-037777777777, -4294967295L) # Positive constants self.assertEqual(01000000000000000000000, 9223372036854775808L) self.assertEqual(01777777777777777777777, 18446744073709551615L) # Ditto with a minus sign and parentheses self.assertEqual(-(01000000000000000000000), -9223372036854775808L) self.assertEqual(-(01777777777777777777777), -18446744073709551615L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-01000000000000000000000, -9223372036854775808L) self.assertEqual(-01777777777777777777777, -18446744073709551615L) def test_oct_unsigned_new(self): # Positive constants self.assertEqual(0o20000000000, 2147483648L) self.assertEqual(0o37777777777, 4294967295L) # Ditto with a minus sign and parentheses self.assertEqual(-(0o20000000000), -2147483648L) self.assertEqual(-(0o37777777777), -4294967295L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0o20000000000, -2147483648L) self.assertEqual(-0o37777777777, -4294967295L) # Positive constants self.assertEqual(0o1000000000000000000000, 9223372036854775808L) self.assertEqual(0o1777777777777777777777, 18446744073709551615L) # Ditto with a minus sign and parentheses self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L) self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 
self.assertEqual(-0o1000000000000000000000, -9223372036854775808L) self.assertEqual(-0o1777777777777777777777, -18446744073709551615L) def test_bin_baseline(self): # A few upper/lowercase tests self.assertEqual(0b0, 0B0) self.assertEqual(0b1, 0B1) self.assertEqual(0b10101010101, 0B10101010101) # Baseline tests self.assertEqual(0b0, 0) self.assertEqual(0b10000, 16) self.assertEqual(0b1111111111111111111111111111111, 2147483647) self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807) # Ditto with a minus sign and parentheses self.assertEqual(-(0b0), 0) self.assertEqual(-(0b10000), -16) self.assertEqual(-(0b1111111111111111111111111111111), -2147483647) self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807) # Ditto with a minus sign and NO parentheses self.assertEqual(-0b0, 0) self.assertEqual(-0b10000, -16) self.assertEqual(-0b1111111111111111111111111111111, -2147483647) self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807) def test_bin_unsigned(self): # Positive constants self.assertEqual(0b10000000000000000000000000000000, 2147483648L) self.assertEqual(0b11111111111111111111111111111111, 4294967295L) # Ditto with a minus sign and parentheses self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L) self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0b10000000000000000000000000000000, -2147483648L) self.assertEqual(-0b11111111111111111111111111111111, -4294967295L) # Positive constants self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L) self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L) # Ditto with a minus sign and parentheses 
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L) self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L) # Ditto with a minus sign and NO parentheses # This failed in Python 2.2 through 2.2.2 and in 2.3a1 self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L) self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L) def test_main(): test_support.run_unittest(TestHexOctBin) if __name__ == "__main__": test_main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475128"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jkkummerfeld/1ec-graph-parser</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">properties/count_unique_dev_spines.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2799</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python3 from __future__ import print_function import argparse import string import sys def read(filename): 
sent = [] spines = [] for line in open(filename): line = line.strip() if line.startswith("# Sentence"): spines.append([]) sent = line.strip().split()[2:] elif len(line) > 0 and line[0] != '#': fields = line.split() num = int(fields[0]) word = fields[1] pos = fields[2] spine = fields[3] spines[-1].append((word, pos, spine)) return spines if __name__ == '__main__': parser = argparse.ArgumentParser(description='Calculate how many spines in the dev set are novel.') parser.add_argument('train', help='Training data in SHP format.') parser.add_argument('dev', help='Development data in SHP format.') args = parser.parse_args() train_spines = read(args.train) word_set = set() pos_set = set() spine_set = set() for spines in train_spines: for spine in spines: word_set.add(spine) pos_set.add((spine[1], spine[2])) spine_set.add(spine[2]) results = { 'Dev sentences with all seen: (word, POS, spine)': 0, 'Dev sentences with all seen: (POS, spine)': 0, 'Dev sentences with all seen: spine': 0, 'Train spines (word, POS, spine)': len(word_set), 'Train spines (POS, spine)': len(pos_set), 'Train spines spine': len(spine_set), 'Dev spines new (word, POS, spine)': 0, 'Dev spines new (POS, spine)': 0, 'Dev spines new spine': 0, 'Dev spines': 0 } sentences = 0 for spines in read(args.dev): sentences += 1 all_wpresent = True all_ppresent = True all_cpresent = True for spine in spines: results['Dev spines'] += 1 if spine not in word_set: results['Dev spines new (word, POS, spine)'] += 1 all_wpresent = False if (spine[1], spine[2]) not in pos_set: results['Dev spines new (POS, spine)'] += 1 all_ppresent = False if spine[2] not in spine_set: results['Dev spines new spine'] += 1 all_cpresent = False if all_wpresent: results['Dev sentences with all seen: (word, POS, spine)'] += 1 if all_ppresent: results['Dev sentences with all seen: (POS, spine)'] += 1 if all_cpresent: results['Dev sentences with all seen: spine'] += 1 for key in results: if key.startswith("Dev sentences"): print("{} {} 
{:.1f}%".format(key, results[key], results[key] * 100 / sentences)) else: print(key, results[key]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">isc</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475129"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ABaldwinHunter/django-clone-classic</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/template_tests/test_logging.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">117</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4628</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals import logging from django.template import Context, Engine, Variable, VariableDoesNotExist from django.test import SimpleTestCase class TestHandler(logging.Handler): def __init__(self): super(TestHandler, self).__init__() self.log_record = None def emit(self, record): self.log_record = record class BaseTemplateLoggingTestCase(SimpleTestCase): def setUp(self): self.test_handler = TestHandler() self.logger = logging.getLogger('django.template') self.original_level = self.logger.level self.logger.addHandler(self.test_handler) self.logger.setLevel(self.loglevel) def tearDown(self): 
self.logger.removeHandler(self.test_handler) self.logger.level = self.original_level class VariableResolveLoggingTests(BaseTemplateLoggingTestCase): loglevel = logging.DEBUG def test_log_on_variable_does_not_exist_silent(self): class TestObject(object): class SilentDoesNotExist(Exception): silent_variable_failure = True @property def template_name(self): return "template_name" @property def template(self): return Engine().from_string('') @property def article(self): raise TestObject.SilentDoesNotExist("Attribute does not exist.") def __iter__(self): return iter(attr for attr in dir(TestObject) if attr[:2] != "__") def __getitem__(self, item): return self.__dict__[item] Variable('article').resolve(TestObject()) self.assertEqual( self.test_handler.log_record.getMessage(), "Exception while resolving variable 'article' in template 'template_name'." ) self.assertIsNotNone(self.test_handler.log_record.exc_info) raised_exception = self.test_handler.log_record.exc_info[1] self.assertEqual(str(raised_exception), 'Attribute does not exist.') def test_log_on_variable_does_not_exist_not_silent(self): with self.assertRaises(VariableDoesNotExist): Variable('article.author').resolve({'article': {'section': 'News'}}) self.assertEqual( self.test_handler.log_record.getMessage(), "Exception while resolving variable 'author' in template 'unknown'." 
) self.assertIsNotNone(self.test_handler.log_record.exc_info) raised_exception = self.test_handler.log_record.exc_info[1] self.assertEqual( str(raised_exception), 'Failed lookup for key [author] in %r' % ("{%r: %r}" % ('section', 'News')) ) def test_no_log_when_variable_exists(self): Variable('article.section').resolve({'article': {'section': 'News'}}) self.assertIsNone(self.test_handler.log_record) class IncludeNodeLoggingTests(BaseTemplateLoggingTestCase): loglevel = logging.WARN @classmethod def setUpClass(cls): super(IncludeNodeLoggingTests, cls).setUpClass() cls.engine = Engine(loaders=[ ('django.template.loaders.locmem.Loader', { 'child': '{{ raises_exception }}', }), ], debug=False) def error_method(): raise IndexError("some generic exception") cls.ctx = Context({'raises_exception': error_method}) def test_logs_exceptions_during_rendering_with_debug_disabled(self): template = self.engine.from_string('{% include "child" %}') template.name = 'template_name' self.assertEqual(template.render(self.ctx), '') self.assertEqual( self.test_handler.log_record.getMessage(), "Exception raised while rendering {% include %} for template " "'template_name'. Empty string rendered instead." ) self.assertIsNotNone(self.test_handler.log_record.exc_info) self.assertEqual(self.test_handler.log_record.levelno, logging.WARN) def test_logs_exceptions_during_rendering_with_no_template_name(self): template = self.engine.from_string('{% include "child" %}') self.assertEqual(template.render(self.ctx), '') self.assertEqual( self.test_handler.log_record.getMessage(), "Exception raised while rendering {% include %} for template " "'unknown'. Empty string rendered instead." 
) self.assertIsNotNone(self.test_handler.log_record.exc_info) self.assertEqual(self.test_handler.log_record.levelno, logging.WARN) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475130"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">BeyondTheClouds/nova</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">nova/api/openstack/compute/cells.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">12036</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """The cells extension.""" import oslo_messaging as messaging from oslo_utils import strutils import six from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import cells from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api import validation from nova.cells import rpcapi as cells_rpcapi import nova.conf from nova import exception from nova.i18n import _ from nova import rpc CONF = nova.conf.CONF ALIAS = "os-cells" authorize = extensions.os_compute_authorizer(ALIAS) def _filter_keys(item, keys): """Filters all model attributes except for keys item is a dict """ return {k: v for k, v in six.iteritems(item) if k in keys} def _fixup_cell_info(cell_info, keys): """If the transport_url is present in the cell, derive username, rpc_host, and rpc_port from it. """ if 'transport_url' not in cell_info: return # Disassemble the transport URL transport_url = cell_info.pop('transport_url') try: transport_url = rpc.get_transport_url(transport_url) except messaging.InvalidTransportURL: # Just go with None's for key in keys: cell_info.setdefault(key, None) return if not transport_url.hosts: return transport_host = transport_url.hosts[0] transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'} for key in keys: if key in cell_info: continue transport_field = transport_field_map.get(key, key) cell_info[key] = getattr(transport_host, transport_field) def _scrub_cell(cell, detail=False): keys = ['name', 'username', 'rpc_host', 'rpc_port'] if detail: keys.append('capabilities') cell_info = _filter_keys(cell, keys + ['transport_url']) _fixup_cell_info(cell_info, keys) cell_info['type'] = 'parent' if cell['is_parent'] else 'child' return cell_info class CellsController(wsgi.Controller): """Controller for Cell resources.""" def __init__(self): self.cells_rpcapi = cells_rpcapi.CellsAPI() def 
_get_cells(self, ctxt, req, detail=False): """Return all cells.""" # Ask the CellsManager for the most recent data items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt) items = common.limited(items, req) items = [_scrub_cell(item, detail=detail) for item in items] return dict(cells=items) @extensions.expected_errors(501) @common.check_cells_enabled def index(self, req): """Return all cells in brief.""" ctxt = req.environ['nova.context'] authorize(ctxt) return self._get_cells(ctxt, req) @extensions.expected_errors(501) @common.check_cells_enabled def detail(self, req): """Return all cells in detail.""" ctxt = req.environ['nova.context'] authorize(ctxt) return self._get_cells(ctxt, req, detail=True) @extensions.expected_errors(501) @common.check_cells_enabled def info(self, req): """Return name and capabilities for this cell.""" context = req.environ['nova.context'] authorize(context) cell_capabs = {} my_caps = CONF.cells.capabilities for cap in my_caps: key, value = cap.split('=') cell_capabs[key] = value cell = {'name': CONF.cells.name, 'type': 'self', 'rpc_host': None, 'rpc_port': 0, 'username': None, 'capabilities': cell_capabs} return dict(cell=cell) @extensions.expected_errors((404, 501)) @common.check_cells_enabled def capacities(self, req, id=None): """Return capacities for a given cell or all cells.""" # TODO(kaushikc): return capacities as a part of cell info and # cells detail calls in v2.1, along with capabilities context = req.environ['nova.context'] authorize(context) try: capacities = self.cells_rpcapi.get_capacities(context, cell_name=id) except exception.CellNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return dict(cell={"capacities": capacities}) @extensions.expected_errors((404, 501)) @common.check_cells_enabled def show(self, req, id): """Return data about the given cell name. 
'id' is a cell name.""" context = req.environ['nova.context'] authorize(context) try: cell = self.cells_rpcapi.cell_get(context, id) except exception.CellNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) # NOTE(gmann): Returns 200 for backwards compatibility but should be 204 # as this operation complete the deletion of aggregate resource and return # no response body. @extensions.expected_errors((403, 404, 501)) @common.check_cells_enabled def delete(self, req, id): """Delete a child or parent cell entry. 'id' is a cell name.""" context = req.environ['nova.context'] authorize(context, action="delete") try: num_deleted = self.cells_rpcapi.cell_delete(context, id) except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) if num_deleted == 0: raise exc.HTTPNotFound( explanation=_("Cell %s doesn't exist.") % id) def _normalize_cell(self, cell, existing=None): """Normalize input cell data. Normalizations include: * Converting cell['type'] to is_parent boolean. * Merging existing transport URL with transport information. """ if 'name' in cell: cell['name'] = common.normalize_name(cell['name']) # Start with the cell type conversion if 'type' in cell: cell['is_parent'] = cell['type'] == 'parent' del cell['type'] # Avoid cell type being overwritten to 'child' elif existing: cell['is_parent'] = existing['is_parent'] else: cell['is_parent'] = False # Now we disassemble the existing transport URL... 
transport_url = existing.get('transport_url') if existing else None transport_url = rpc.get_transport_url(transport_url) if 'rpc_virtual_host' in cell: transport_url.virtual_host = cell.pop('rpc_virtual_host') if not transport_url.hosts: transport_url.hosts.append(messaging.TransportHost()) transport_host = transport_url.hosts[0] if 'rpc_port' in cell: cell['rpc_port'] = int(cell['rpc_port']) # Copy over the input fields transport_field_map = { 'username': 'username', 'password': 'password', 'hostname': 'rpc_host', 'port': 'rpc_port', } for key, input_field in transport_field_map.items(): # Only override the value if we're given an override if input_field in cell: setattr(transport_host, key, cell.pop(input_field)) # Now set the transport URL cell['transport_url'] = str(transport_url) # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 # as this operation complete the creation of aggregates resource when # returning a response. @extensions.expected_errors((400, 403, 501)) @common.check_cells_enabled @validation.schema(cells.create_v20, '2.0', '2.0') @validation.schema(cells.create, '2.1') def create(self, req, body): """Create a child cell entry.""" context = req.environ['nova.context'] authorize(context, action="create") cell = body['cell'] self._normalize_cell(cell) try: cell = self.cells_rpcapi.cell_create(context, cell) except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) @extensions.expected_errors((400, 403, 404, 501)) @common.check_cells_enabled @validation.schema(cells.update_v20, '2.0', '2.0') @validation.schema(cells.update, '2.1') def update(self, req, id, body): """Update a child cell entry. 
'id' is the cell name to update.""" context = req.environ['nova.context'] authorize(context, action="update") cell = body['cell'] cell.pop('id', None) try: # NOTE(Vek): There is a race condition here if multiple # callers are trying to update the cell # information simultaneously. Since this # operation is administrative in nature, and # will be going away in the future, I don't see # it as much of a problem... existing = self.cells_rpcapi.cell_get(context, id) except exception.CellNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) self._normalize_cell(cell, existing) try: cell = self.cells_rpcapi.cell_update(context, id, cell) except exception.CellNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.CellsUpdateUnsupported as e: raise exc.HTTPForbidden(explanation=e.format_message()) return dict(cell=_scrub_cell(cell)) # NOTE(gmann): Returns 200 for backwards compatibility but should be 204 # as this operation complete the sync instance info and return # no response body. @extensions.expected_errors((400, 501)) @common.check_cells_enabled @validation.schema(cells.sync_instances) def sync_instances(self, req, body): """Tell all cells to sync instance info.""" context = req.environ['nova.context'] authorize(context, action="sync_instances") project_id = body.pop('project_id', None) deleted = body.pop('deleted', False) updated_since = body.pop('updated_since', None) if isinstance(deleted, six.string_types): deleted = strutils.bool_from_string(deleted, strict=True) self.cells_rpcapi.sync_instances(context, project_id=project_id, updated_since=updated_since, deleted=deleted) class Cells(extensions.V21APIExtensionBase): """Enables cells-related functionality such as adding neighbor cells, listing neighbor cells, and getting the capabilities of the local cell. 
""" name = "Cells" alias = ALIAS version = 1 def get_resources(self): coll_actions = { 'detail': 'GET', 'info': 'GET', 'sync_instances': 'POST', 'capacities': 'GET', } memb_actions = { 'capacities': 'GET', } res = extensions.ResourceExtension(ALIAS, CellsController(), collection_actions=coll_actions, member_actions=memb_actions) return [res] def get_controller_extensions(self): return [] </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475131"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tkremenek/swift</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">utils/build_swift/tests/build_swift/test_driver_arguments.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">24206</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># This source file is part of the Swift.org open source project # # Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors # Licensed under Apache License v2.0 with Runtime Library Exception # # See https://swift.org/LICENSE.txt for license information # See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors from __future__ import absolute_import, unicode_literals import os import platform import sys import unittest from build_swift import argparse from build_swift import constants from build_swift import driver_arguments from build_swift import migration from build_swift.presets import PresetParser import six from .test_presets import PRESET_DEFAULTS from .. import expected_options as eo from .. import utils PRESETS_FILES = [ os.path.join(constants.UTILS_PATH, 'build-presets.ini'), ] class ParserError(Exception): pass def _load_all_presets(preset_files): parser = PresetParser() parser.read_files(preset_files) # Hack to filter out mixins which are not expected to be valid presets preset_names = [ name for name in parser.preset_names if not name.startswith('mixin') ] presets = dict() for name in preset_names: preset = parser.get_preset(name, vars=PRESET_DEFAULTS) args = migration.migrate_swift_sdks(preset.args) presets[name] = args return presets class TestDriverArgumentParserMeta(type): """Metaclass used to dynamically generate test methods for each of the individual options accepted by the parser and methods to validate all of the presets. 
""" def __new__(cls, name, bases, attrs): # Generate tests for each default value for dest, value in eo.EXPECTED_DEFAULTS.items(): test_name = 'test_default_value_{}'.format(dest) attrs[test_name] = cls.generate_default_value_test(dest, value) # Generate tests for each expected option for option in eo.EXPECTED_OPTIONS: test_name = 'test_option_{}'.format(option.sanitized_string()) attrs[test_name] = cls.generate_option_test(option) # Generate tests for each preset presets = _load_all_presets(PRESETS_FILES) for name, args in presets.items(): test_name = 'test_preset_{}'.format(name) attrs[test_name] = cls.generate_preset_test(name, args) if six.PY2: name = str(name) return super(TestDriverArgumentParserMeta, cls).__new__( cls, name, bases, attrs) @classmethod def generate_default_value_test(cls, dest, default_value): def test(self): parsed_values = self.parse_default_args([]) parsed_value = getattr(parsed_values, dest) if default_value.__class__ in six.string_types: parsed_value = six.text_type(parsed_value) self.assertEqual(default_value, parsed_value, 'Invalid default value for "{}": {} != {}' .format(dest, default_value, parsed_value)) return test @classmethod def _generate_help_option_test(cls, option): def test(self): with utils.redirect_stdout() as output: with self.assertRaises(ParserError): self.parse_args([option.option_string]) self.assertNotEmpty(output) return test @classmethod def _generate_set_option_test(cls, option): def test(self): namespace = self.parse_args([option.option_string]) self.assertEqual(getattr(namespace, option.dest), option.value) with self.assertRaises(ParserError): self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_set_true_option_test(cls, option): def test(self): # TODO: Move to unit-tests for the action class namespace = self.parse_args([]) self.assertFalse(getattr(namespace, option.dest)) namespace = self.parse_args([option.option_string]) self.assertTrue(getattr(namespace, option.dest)) return 
test @classmethod def _generate_set_false_option_test(cls, option): def test(self): # TODO: Move to unit-tests for the action class namespace = self.parse_args([]) self.assertTrue(getattr(namespace, option.dest)) namespace = self.parse_args([option.option_string]) self.assertFalse(getattr(namespace, option.dest)) return test @classmethod def _generate_enable_option_test(cls, option): def test(self): # TODO: Move to unit-tests for the action class # Test parsing True values self.parse_args([option.option_string, '1']) self.parse_args([option.option_string, 'true']) self.parse_args([option.option_string, 'True']) self.parse_args([option.option_string, 'TRUE']) # TODO: Move to unit-tests for the action class # Test parsing False values self.parse_args([option.option_string, '0']) self.parse_args([option.option_string, 'false']) self.parse_args([option.option_string, 'False']) self.parse_args([option.option_string, 'FALSE']) # TODO: Move to unit-tests for the action class # Test default value namespace = self.parse_args([option.option_string]) self.assertTrue(getattr(namespace, option.dest)) # Test setting value to True namespace = self.parse_args([option.option_string, 'True']) self.assertTrue(getattr(namespace, option.dest)) # Test setting value to False namespace = self.parse_args([option.option_string, 'False']) self.assertFalse(getattr(namespace, option.dest)) return test @classmethod def _generate_disable_option_test(cls, option): def test(self): # TODO: Move to unit-tests for the action class # Test parsing True values self.parse_args([option.option_string, '1']) self.parse_args([option.option_string, 'true']) self.parse_args([option.option_string, 'True']) self.parse_args([option.option_string, 'TRUE']) # TODO: Move to unit-tests for the action class # Test parsing False values self.parse_args([option.option_string, '0']) self.parse_args([option.option_string, 'false']) self.parse_args([option.option_string, 'False']) self.parse_args([option.option_string, 
'FALSE']) # TODO: Move to unit-tests for the action class # Test default value namespace = self.parse_args([option.option_string]) self.assertFalse(getattr(namespace, option.dest)) # Test setting value to True resulting in False namespace = self.parse_args([option.option_string, 'True']) self.assertFalse(getattr(namespace, option.dest)) # Test setting value to False resulting in True namespace = self.parse_args([option.option_string, 'False']) self.assertTrue(getattr(namespace, option.dest)) return test @classmethod def _generate_choices_option_test(cls, option): def test(self): for choice in option.choices: namespace = self.parse_args( [option.option_string, six.text_type(choice)]) self.assertEqual(getattr(namespace, option.dest), choice) with self.assertRaises(ParserError): self.parse_args([option.option_string, 'INVALID']) return test @classmethod def _generate_int_option_test(cls, option): def test(self): for i in [0, 1, 42]: namespace = self.parse_args( [option.option_string, six.text_type(i)]) self.assertEqual(int(getattr(namespace, option.dest)), i) # FIXME: int-type options should not accept non-int strings # self.parse_args([option.option_string, six.text_type(0.0)]) # self.parse_args([option.option_string, six.text_type(1.0)]) # self.parse_args([option.option_string, six.text_type(3.14)]) # self.parse_args([option.option_string, 'NaN']) return test @classmethod def _generate_str_option_test(cls, option): def test(self): self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_path_option_test(cls, option): def test(self): self.parse_args([option.option_string, sys.executable]) # FIXME: path-type options should not accept non-path inputs # self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_append_option_test(cls, option): def test(self): # Range size is arbitrary, just needs to be more than once for i in range(1, 4): namespace = self.parse_args([option.option_string, 'ARG'] * i) 
self.assertEqual(getattr(namespace, option.dest), ['ARG'] * i) return test @classmethod def _generate_unsupported_option_test(cls, option): def test(self): with self.assertRaises(ParserError): self.parse_args([option.option_string]) return test @classmethod def _generate_build_script_impl_option_test(cls, option): def test(self): namespace, unknown_args = self.parse_args_and_unknown_args([]) self.assertFalse(hasattr(namespace, option.dest)) self.assertEqual(unknown_args, []) namespace, unknown_args = self.parse_args_and_unknown_args( [option.option_string]) # The argument should never show up in the namespace self.assertFalse(hasattr(namespace, option.dest)) # It should instead be forwareded to unkown_args self.assertEqual(unknown_args, [option.option_string]) return test @classmethod def generate_option_test(cls, option): generate_test_funcs = { eo.HelpOption: cls._generate_help_option_test, eo.SetOption: cls._generate_set_option_test, eo.SetTrueOption: cls._generate_set_true_option_test, eo.SetFalseOption: cls._generate_set_false_option_test, eo.EnableOption: cls._generate_enable_option_test, eo.DisableOption: cls._generate_disable_option_test, eo.ChoicesOption: cls._generate_choices_option_test, eo.IntOption: cls._generate_int_option_test, eo.StrOption: cls._generate_str_option_test, eo.PathOption: cls._generate_path_option_test, eo.AppendOption: cls._generate_append_option_test, eo.UnsupportedOption: cls._generate_unsupported_option_test, eo.BuildScriptImplOption: cls._generate_build_script_impl_option_test, # IgnoreOptions should be manually tested eo.IgnoreOption: lambda self: None, } test_func = generate_test_funcs.get(option.__class__, None) if test_func is not None: return test_func(option) # Catch-all meaningless test return lambda self: \ self.fail('unexpected option "{}"'.format(option.option_string)) @classmethod def generate_preset_test(cls, preset_name, preset_args): def test(self): try: # Windows cannot run build-script-impl to check the impl args. 
is_windows = platform.system() == 'Windows' self.parse_default_args(preset_args, check_impl_args=not is_windows) except ParserError as e: self.fail('failed to parse preset "{}": {}'.format( preset_name, e)) return test @six.add_metaclass(TestDriverArgumentParserMeta) class TestDriverArgumentParser(unittest.TestCase): def _parse_args(self, args): try: return migration.parse_args(self.parser, args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse arguments: {}'.format( six.text_type(args), e)) def _check_impl_args(self, namespace): assert hasattr(namespace, 'build_script_impl_args') try: migration.check_impl_args( constants.BUILD_SCRIPT_IMPL_PATH, namespace.build_script_impl_args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse impl arguments: {}'.format( six.text_type(namespace.build_script_impl_args), e)) def parse_args_and_unknown_args(self, args, namespace=None): if namespace is None: namespace = argparse.Namespace() with utils.quiet_output(): try: namespace, unknown_args = ( super(self.parser.__class__, self.parser).parse_known_args( args, namespace)) namespace, unknown_args = ( migration._process_disambiguation_arguments( namespace, unknown_args)) except (SystemExit, argparse.ArgumentError) as e: raise ParserError('failed to parse arguments: {}'.format( six.text_type(args), e)) return namespace, unknown_args def parse_args(self, args, namespace=None): namespace, unknown_args = self.parse_args_and_unknown_args( args, namespace) if unknown_args: raise ParserError('unknown arguments: {}'.format( six.text_type(unknown_args))) return namespace def parse_default_args(self, args, check_impl_args=False): with utils.quiet_output(): namespace = self._parse_args(args) if check_impl_args: self._check_impl_args(namespace) return namespace def setUp(self): self.parser = driver_arguments.create_argument_parser() # ------------------------------------------------------------------------- def 
test_expected_options_exhaustive(self): """Test that we are exhaustively testing all options accepted by the parser. If this test if failing then the parser accepts more options than currently being tested, meaning the EXPECTED_OPTIONS list in build_swift/tests/expected_options.py should be updated to include the missing options. """ expected_options = {o.option_string for o in eo.EXPECTED_OPTIONS} # aggregate and flatten the options_strings accepted by the parser actual_options = [a.option_strings for a in self.parser._actions] actual_options = set(sum(actual_options, [])) diff = actual_options - expected_options if len(diff) > 0: self.fail('non-exhaustive expected options, missing: {}' .format(diff)) def test_expected_options_have_default_values(self): """Test that all the options in EXPECTED_OPTIONS have an associated default value. """ skip_option_classes = [ eo.HelpOption, eo.IgnoreOption, eo.UnsupportedOption, eo.BuildScriptImplOption, ] missing_defaults = set() for option in eo.EXPECTED_OPTIONS: if option.__class__ in skip_option_classes: continue if option.dest not in eo.EXPECTED_DEFAULTS: missing_defaults.add(option.dest) if len(missing_defaults) > 0: self.fail('non-exhaustive default values for options, missing: {}' .format(missing_defaults)) # ------------------------------------------------------------------------- # Manual option tests def test_option_clang_compiler_version(self): option_string = '--clang-compiler-version' self.parse_default_args([option_string, '5.0.0']) self.parse_default_args([option_string, '5.0.1']) self.parse_default_args([option_string, '5.0.0.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '1.2']) self.parse_default_args([option_string, '0.0.0.0.1']) def test_option_clang_user_visible_version(self): option_string = '--clang-user-visible-version' self.parse_default_args([option_string, '5.0.0']) self.parse_default_args([option_string, '5.0.1']) 
self.parse_default_args([option_string, '5.0.0.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '1.2']) self.parse_default_args([option_string, '0.0.0.0.1']) def test_option_swift_compiler_version(self): option_string = '--swift-compiler-version' self.parse_default_args([option_string, '4.1']) self.parse_default_args([option_string, '4.0.1']) self.parse_default_args([option_string, '200.99.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '0.0.0.1']) def test_option_swift_user_visible_version(self): option_string = '--swift-user-visible-version' self.parse_default_args([option_string, '4.1']) self.parse_default_args([option_string, '4.0.1']) self.parse_default_args([option_string, '200.99.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '0.0.0.1']) def test_option_I(self): with self.assertRaises(ValueError): self.parse_default_args(['-I']) def test_option_ios_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--ios-all']) def test_option_tvos_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--tvos-all']) def test_option_watchos_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--watchos-all']) # ------------------------------------------------------------------------- # Implied defaults tests def test_implied_defaults_assertions(self): namespace = self.parse_default_args(['--assertions']) self.assertTrue(namespace.cmark_assertions) self.assertTrue(namespace.llvm_assertions) self.assertTrue(namespace.swift_assertions) self.assertTrue(namespace.swift_stdlib_assertions) def test_implied_defaults_cmark_build_variant(self): namespace = self.parse_default_args(['--debug-cmark']) self.assertTrue(namespace.build_cmark) def test_implied_defaults_lldb_build_variant(self): 
namespace = self.parse_default_args(['--debug-lldb']) self.assertTrue(namespace.build_lldb) namespace = self.parse_default_args(['--lldb-assertions']) self.assertTrue(namespace.build_lldb) def test_implied_defaults_build_variant(self): namespace = self.parse_default_args(['--debug']) self.assertEqual(namespace.cmark_build_variant, 'Debug') self.assertEqual(namespace.foundation_build_variant, 'Debug') self.assertEqual(namespace.libdispatch_build_variant, 'Debug') self.assertEqual(namespace.libicu_build_variant, 'Debug') self.assertEqual(namespace.lldb_build_variant, 'Debug') self.assertEqual(namespace.llvm_build_variant, 'Debug') self.assertEqual(namespace.swift_build_variant, 'Debug') self.assertEqual(namespace.swift_stdlib_build_variant, 'Debug') def test_implied_defaults_skip_build_ios(self): namespace = self.parse_default_args(['--skip-build-ios']) self.assertFalse(namespace.build_ios_device) self.assertFalse(namespace.build_ios_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_ios_simulator) def test_implied_defaults_skip_build_tvos(self): namespace = self.parse_default_args(['--skip-build-tvos']) self.assertFalse(namespace.build_tvos_device) self.assertFalse(namespace.build_tvos_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_tvos_simulator) def test_implied_defaults_skip_build_watchos(self): namespace = self.parse_default_args(['--skip-build-watchos']) self.assertFalse(namespace.build_watchos_device) self.assertFalse(namespace.build_watchos_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_watchos_host) self.assertFalse(namespace.test_watchos_simulator) def test_implied_defaults_validation_test(self): namespace = self.parse_default_args(['--validation-test']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimized(self): namespace = 
self.parse_default_args(['--test-optimized']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimize_for_size(self): namespace = self.parse_default_args(['--test-optimize-for-size']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimize_none_with_implicit_dynamic(self): namespace = self.parse_default_args( ['--test-optimize-none-with-implicit-dynamic']) self.assertTrue(namespace.test) def test_implied_defaults_skip_all_tests(self): namespace = self.parse_default_args([ '--test', '0', '--validation-test', '0', '--long-test', '0', '--stress-test', '0', ]) self.assertFalse(namespace.test_linux) self.assertFalse(namespace.test_freebsd) self.assertFalse(namespace.test_cygwin) self.assertFalse(namespace.test_osx) self.assertFalse(namespace.test_ios) self.assertFalse(namespace.test_tvos) self.assertFalse(namespace.test_watchos) def test_implied_defaults_skip_test_ios(self): namespace = self.parse_default_args(['--skip-test-ios']) self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_ios_simulator) def test_implied_defaults_skip_test_tvos(self): namespace = self.parse_default_args(['--skip-test-tvos']) self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_tvos_simulator) def test_implied_defaults_skip_test_watchos(self): namespace = self.parse_default_args(['--skip-test-watchos']) self.assertFalse(namespace.test_watchos_host) self.assertFalse(namespace.test_watchos_simulator) def test_implied_defaults_skip_build_android(self): namespace = self.parse_default_args(['--android', '0']) self.assertFalse(namespace.test_android_host) namespace = self.parse_default_args(['--skip-build-android']) self.assertFalse(namespace.test_android_host) def test_implied_defaults_host_test(self): namespace = self.parse_default_args(['--host-test', '0']) self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_watchos_host) 
self.assertFalse(namespace.test_android_host) self.assertFalse(namespace.build_libparser_only) def test_build_lib_swiftsyntaxparser_only(self): namespace = self.parse_default_args(['--build-libparser-only']) self.assertTrue(namespace.build_libparser_only) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475132"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">googlecodelabs/nest-tensorflow</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">wwn/access_token.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1354</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/python # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import urllib import urllib2 import json import os nest_auth_url = 'https://home.nest.com/login/oauth2' nest_access_token_url = 'https://api.home.nest.com/oauth2/access_token' # Set your OAuth client ID and secret as environment variables. # See docker-compose.yml for an example of where they can be set # if not publishing that file. client_id = os.environ.get("CLIENT_ID", None) client_secret = os.environ.get("CLIENT_SECRET", None) def get_access_token(authorization_code): """Paste get_access_token(authorization_code) snippet below this line""" return def authorization_url(): query = urllib.urlencode({ 'client_id': client_id, 'state': 'STATE' }) return "{0}?{1}".format(nest_auth_url, query) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475133"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">RomanKharin/lrmq</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">test/async_agent_socket.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3459</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> 
<div> <span class="block "># -*- coding: utf8 -*- # Low-resource message queue framework # Access hub with tcp socket # Copyright (c) 2016 Roman Kharin <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="b6c4d9dbdfc798dddef6d1dbd7dfda98d5d9db">[email protected]</a>> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import os import sys import asyncio from asyncio.streams import StreamWriter, FlowControlMixin async def run(port): loop = asyncio.get_event_loop() # out command queue ans_queue = asyncio.Queue() stdreader = None stdwriter = None # stdio initiation # NOTE: os.fdopen(0, "wb") will not works in pipe # os.fdopen(sys.stdout, "wb") may crash print() writer_transport, writer_protocol = await loop.connect_write_pipe( FlowControlMixin, os.fdopen(sys.stdout.fileno(), "wb")) stdwriter = StreamWriter(writer_transport, writer_protocol, None, loop) stdreader = asyncio.StreamReader() reader_protocol = asyncio.StreamReaderProtocol(stdreader) await loop.connect_read_pipe(lambda: reader_protocol, sys.stdin.buffer) server_coro = None async def onclient(reader, writer): # read from socket async def coro_reader(): while True: data = await stdreader.readline() if not data: if server_coro: server_coro.cancel() break writer.write(data) await writer.drain() task = asyncio.ensure_future(coro_reader()) while True: data = await reader.readline() if not data: break stdwriter.write(data) await stdwriter.drain() task.cancel() server_coro = asyncio.start_server(onclient, port = port, backlog = 1) sock_server = await server_coro await sock_server.wait_closed() def main(): port = 5550 if len(sys.argv) > 1: if sys.argv[1] in ("-h", "--help"): print("Start with:") print("\tpython3 -m lrmq -a python3 async_agent_socket.py 5550") print("Then connect with") print("\ttelnet 127.0.0.1 5550") return port = int(sys.argv[1]) if sys.platform == "win32": loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(loop) else: loop = asyncio.get_event_loop() try: loop.run_until_complete(run(port)) finally: loop.close() if __name__ == "__main__": main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b 
outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475134"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ChromiumWebApps/chromium</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">base/android/jni_generator/jni_generator.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">48419</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Extracts native methods from a Java file and generates the JNI bindings. 
If you change this, please run and update the tests."""

import collections
import errno
import optparse
import os
import re
import string
from string import Template
import subprocess
import sys
import textwrap
import zipfile


class ParseError(Exception):
  """Exception thrown when we can't parse the input file."""

  def __init__(self, description, *context_lines):
    Exception.__init__(self)
    self.description = description
    # Lines of input surrounding the failure, echoed back in __str__.
    self.context_lines = context_lines

  def __str__(self):
    context = '\n'.join(self.context_lines)
    return '***\nERROR: %s\n\n%s\n***' % (self.description, context)


class Param(object):
  """Describes a param for a method, either java or native."""

  def __init__(self, **kwargs):
    self.datatype = kwargs['datatype']
    self.name = kwargs['name']


class NativeMethod(object):
  """Describes a C/C++ method that is called by Java code"""

  def __init__(self, **kwargs):
    self.static = kwargs['static']
    self.java_class_name = kwargs['java_class_name']
    self.return_type = kwargs['return_type']
    self.name = kwargs['name']
    self.params = kwargs['params']
    if self.params:
      assert type(self.params) is list
      assert type(self.params[0]) is Param
    # A native whose first param is a pointer-typed "nativeFoo" value is a
    # "method" call dispatched onto an existing C++ object; anything else is
    # a plain "function".
    if (self.params and
        self.params[0].datatype == kwargs.get('ptr_type', 'int') and
        self.params[0].name.startswith('native')):
      self.type = 'method'
      # The C++ type is taken from the param name ("nativeFoo" -> "Foo")
      # unless overridden by @NativeClassQualifiedName.
      self.p0_type = self.params[0].name[len('native'):]
      if kwargs.get('native_class_name'):
        self.p0_type = kwargs['native_class_name']
    else:
      self.type = 'function'
    self.method_id_var_name = kwargs.get('method_id_var_name', None)


class CalledByNative(object):
  """Describes a java method exported to c/c++"""

  def __init__(self, **kwargs):
    self.system_class = kwargs['system_class']
    self.unchecked = kwargs['unchecked']
    self.static = kwargs['static']
    self.java_class_name = kwargs['java_class_name']
    self.return_type = kwargs['return_type']
    self.name = kwargs['name']
    self.params = kwargs['params']
    self.method_id_var_name = kwargs.get('method_id_var_name', None)
    # Pre-computed signature (from javap output); None for java-source parses.
    self.signature = kwargs.get('signature')
    self.is_constructor = kwargs.get('is_constructor', False)
    # Derived helpers for code generation.
    self.env_call = GetEnvCall(self.is_constructor, self.static,
                               self.return_type)
    self.static_cast = GetStaticCastForReturnType(self.return_type)


def JavaDataTypeToC(java_type):
  """Returns a C datatype for the given java type."""
  java_pod_type_map = {
      'int': 'jint',
      'byte': 'jbyte',
      'char': 'jchar',
      'short': 'jshort',
      'boolean': 'jboolean',
      'long': 'jlong',
      'double': 'jdouble',
      'float': 'jfloat',
  }
  java_type_map = {
      'void': 'void',
      'String': 'jstring',
      'java/lang/String': 'jstring',
      'java/lang/Class': 'jclass',
  }
  if java_type in java_pod_type_map:
    return java_pod_type_map[java_type]
  elif java_type in java_type_map:
    return java_type_map[java_type]
  elif java_type.endswith('[]'):
    if java_type[:-2] in java_pod_type_map:
      return java_pod_type_map[java_type[:-2]] + 'Array'
    return 'jobjectArray'
  elif java_type.startswith('Class'):
    # Checking just the start of the name, rather than a direct comparison,
    # in order to handle generics.
    return 'jclass'
  else:
    return 'jobject'


def JavaReturnValueToC(java_type):
  """Returns a valid C return value for the given java type."""
  java_pod_type_map = {
      'int': '0',
      'byte': '0',
      'char': '0',
      'short': '0',
      'boolean': 'false',
      'long': '0',
      'double': '0',
      'float': '0',
      'void': ''
  }
  return java_pod_type_map.get(java_type, 'NULL')


class JniParams(object):
  """Module-level parsing state and helpers for JNI signature generation.

  All state lives in class attributes: callers first configure the parser
  with SetFullyQualifiedClass / ExtractImportsAndInnerClasses /
  SetJarJarMappings, then use JavaToJni / Signature / Parse.
  """
  # Imports seen in the current file, as 'Lpkg/Class' strings.
  _imports = []
  # Current class, as 'Lpkg/Class'.
  _fully_qualified_class = ''
  # Package of the current class, as 'pkg/subpkg'.
  _package = ''
  # Inner classes of the current class, as 'Lpkg/Class$Inner'.
  _inner_classes = []
  # jarjar (old, new) prefix pairs, applied by RemapClassName.
  _remappings = []

  @staticmethod
  def SetFullyQualifiedClass(fully_qualified_class):
    """Records the class being parsed ('pkg/Class' form)."""
    JniParams._fully_qualified_class = 'L' + fully_qualified_class
    JniParams._package = '/'.join(fully_qualified_class.split('/')[:-1])

  @staticmethod
  def ExtractImportsAndInnerClasses(contents):
    """Scans java source for import statements and inner class names."""
    contents = contents.replace('\n', '')
    re_import = re.compile(r'import.*?(?P<class>\S*?);')
    for match in re.finditer(re_import, contents):
      JniParams._imports += ['L' + match.group('class').replace('.', '/')]

    re_inner = re.compile(r'(class|interface)\s+?(?P<name>\w+?)\W')
    for match in re.finditer(re_inner, contents):
      inner = match.group('name')
      # Skip the outer class itself; everything else is an inner class.
      if not JniParams._fully_qualified_class.endswith(inner):
        JniParams._inner_classes += [JniParams._fully_qualified_class + '$' +
                                     inner]

  @staticmethod
  def ParseJavaPSignature(signature_line):
    """Extracts the quoted JNI signature from a javap 'Signature:' line."""
    prefix = 'Signature: '
    return '"%s"' % signature_line[signature_line.index(prefix) + len(prefix):]

  @staticmethod
  def JavaToJni(param):
    """Converts a java param into a JNI signature type."""
    pod_param_map = {
        'int': 'I',
        'boolean': 'Z',
        'char': 'C',
        'short': 'S',
        'long': 'J',
        'double': 'D',
        'float': 'F',
        'byte': 'B',
        'void': 'V',
    }
    object_param_list = [
        'Ljava/lang/Boolean',
        'Ljava/lang/Integer',
        'Ljava/lang/Long',
        'Ljava/lang/Object',
        'Ljava/lang/String',
        'Ljava/lang/Class',
    ]
    prefix = ''
    # Array?
    while param[-2:] == '[]':
      prefix += '['
      param = param[:-2]
    # Generic?
    if '<' in param:
      param = param[:param.index('<')]
    if param in pod_param_map:
      return prefix + pod_param_map[param]
    if '/' in param:
      # Coming from javap, use the fully qualified param directly.
      return prefix + 'L' + JniParams.RemapClassName(param) + ';'
    for qualified_name in (object_param_list +
                           [JniParams._fully_qualified_class] +
                           JniParams._inner_classes):
      if (qualified_name.endswith('/' + param) or
          qualified_name.endswith('$' + param.replace('.', '$')) or
          qualified_name == 'L' + param):
        return prefix + JniParams.RemapClassName(qualified_name) + ';'

    # Is it from an import? (e.g. referencing Class from import pkg.Class;
    # note that referencing an inner class Inner from import pkg.Class.Inner
    # is not supported).
    for qualified_name in JniParams._imports:
      if qualified_name.endswith('/' + param):
        # Ensure it's not an inner class.
        components = qualified_name.split('/')
        if len(components) > 2 and components[-2][0].isupper():
          raise SyntaxError('Inner class (%s) can not be imported '
                            'and used by JNI (%s). Please import the outer '
                            'class and use Outer.Inner instead.'
                            % (qualified_name, param))
        return prefix + JniParams.RemapClassName(qualified_name) + ';'

    # Is it an inner class from an outer class import? (e.g. referencing
    # Class.Inner from import pkg.Class).
    if '.' in param:
      components = param.split('.')
      outer = '/'.join(components[:-1])
      inner = components[-1]
      for qualified_name in JniParams._imports:
        if qualified_name.endswith('/' + outer):
          return (prefix + JniParams.RemapClassName(qualified_name) +
                  '$' + inner + ';')

    # Type not found, falling back to same package as this class.
    return (prefix + 'L' +
            JniParams.RemapClassName(JniParams._package + '/' + param) + ';')

  @staticmethod
  def Signature(params, returns, wrap):
    """Returns the JNI signature for the given datatypes."""
    items = ['(']
    items += [JniParams.JavaToJni(param.datatype) for param in params]
    items += [')']
    items += [JniParams.JavaToJni(returns)]
    if wrap:
      # One quoted fragment per line, relying on C string concatenation.
      return '\n' + '\n'.join(['"' + item + '"' for item in items])
    else:
      return '"' + ''.join(items) + '"'

  @staticmethod
  def Parse(params):
    """Parses the params into a list of Param objects."""
    if not params:
      return []
    ret = []
    for p in [p.strip() for p in params.split(',')]:
      items = p.split(' ')
      if 'final' in items:
        items.remove('final')
      param = Param(
          datatype=items[0],
          # Unnamed params get synthetic names p0, p1, ...
          name=(items[1] if len(items) > 1 else 'p%s' % len(ret)),
      )
      ret += [param]
    return ret

  @staticmethod
  def RemapClassName(class_name):
    """Remaps class names using the jarjar mapping table."""
    for old, new in JniParams._remappings:
      if old in class_name:
        return class_name.replace(old, new, 1)
    return class_name

  @staticmethod
  def SetJarJarMappings(mappings):
    """Parse jarjar mappings from a string."""
    JniParams._remappings = []
    for line in mappings.splitlines():
      keyword, src, dest = line.split()
      # Only 'rule' lines are relevant; jarjar also allows 'zap'/'keep'.
      if keyword != 'rule':
        continue
      assert src.endswith('.**')
      src = src[:-2].replace('.', '/')
      dest = dest.replace('.', '/')
      if dest.endswith('@0'):
        JniParams._remappings.append((src, dest[:-2] + src))
      else:
        assert dest.endswith('@1')
        JniParams._remappings.append((src, dest[:-2]))


def ExtractJNINamespace(contents):
  # Returns the value of the first @JNINamespace("...") annotation,
  # or '' when the file has none.
  re_jni_namespace = re.compile('.*?@JNINamespace\("(.*?)"\)')
  m = re.findall(re_jni_namespace, contents)
  if not m:
    return ''
  return m[0]


def ExtractFullyQualifiedJavaClassName(java_file_name, contents):
  # Combines the 'package' declaration with the file's base name to get
  # 'pkg/subpkg/ClassName'.
  re_package = re.compile('.*?package (.*?);')
  matches = re.findall(re_package, contents)
  if not matches:
    raise SyntaxError('Unable to find "package" line in %s' % java_file_name)
  return (matches[0].replace('.', '/') + '/' +
          os.path.splitext(os.path.basename(java_file_name))[0])


def ExtractNatives(contents, ptr_type):
  """Returns a list of dict containing information about a native method."""
  # Newlines are stripped so a declaration split across lines still matches.
  contents = contents.replace('\n', '')
  natives = []
  re_native = re.compile(r'(@NativeClassQualifiedName'
                         '\(\"(?P<native_class_name>.*?)\"\))?\s*'
                         '(@NativeCall(\(\"(?P<java_class_name>.*?)\"\)))?\s*'
                         '(?P<qualifiers>\w+\s\w+|\w+|\s+)\s*?native '
                         '(?P<return_type>\S*?) '
                         '(?P<name>native\w+?)\((?P<params>.*?)\);')
  for match in re.finditer(re_native, contents):
    native = NativeMethod(
        static='static' in match.group('qualifiers'),
        java_class_name=match.group('java_class_name'),
        native_class_name=match.group('native_class_name'),
        return_type=match.group('return_type'),
        name=match.group('name').replace('native', ''),
        params=JniParams.Parse(match.group('params')),
        ptr_type=ptr_type)
    natives += [native]
  return natives


def GetStaticCastForReturnType(return_type):
  # Returns the jni type to static_cast the env call result to, or None
  # when no cast is needed.
  type_map = { 'String' : 'jstring',
               'java/lang/String' : 'jstring',
               'boolean[]': 'jbooleanArray',
               'byte[]': 'jbyteArray',
               'char[]': 'jcharArray',
               'short[]': 'jshortArray',
               'int[]': 'jintArray',
               'long[]': 'jlongArray',
               'double[]': 'jdoubleArray' }
  ret = type_map.get(return_type, None)
  if ret:
    return ret
  if return_type.endswith('[]'):
    return 'jobjectArray'
  return None


def GetEnvCall(is_constructor, is_static, return_type):
  """Maps the types available via env->Call__Method."""
  if is_constructor:
    return 'NewObject'
  env_call_map = {'boolean': 'Boolean',
                  'byte': 'Byte',
                  'char': 'Char',
                  'short': 'Short',
                  'int': 'Int',
                  'long': 'Long',
                  'float': 'Float',
                  'void': 'Void',
                  'double': 'Double',
                  'Object': 'Object',
                 }
  call = env_call_map.get(return_type, 'Object')
  if is_static:
    call = 'Static' + call
  return 'Call' + call + 'Method'


def GetMangledParam(datatype):
  """Returns a mangled identifier for the datatype."""
  if len(datatype) <= 2:
    return datatype.replace('[', 'A')
  ret = ''
  # Index starts at 1 to skip the leading type character (e.g. 'L').
  for i in range(1, len(datatype)):
    c = datatype[i]
    if c == '[':
      ret += 'A'
    elif c.isupper() or datatype[i - 1] in ['/', 'L']:
      ret += c.upper()
  return ret


def GetMangledMethodName(name, params, return_type):
  """Returns a mangled method name for the given signature.

     The returned name can be used as a C identifier and will be unique for all
     valid overloads of the same method.

  Args:
     name: string.
     params: list of Param.
     return_type: string.

  Returns:
      A mangled name.
  """
  mangled_items = []
  for datatype in [return_type] + [x.datatype for x in params]:
    mangled_items += [GetMangledParam(JniParams.JavaToJni(datatype))]
  mangled_name = name + '_'.join(mangled_items)
  assert re.match(r'[0-9a-zA-Z_]+', mangled_name)
  return mangled_name


def MangleCalledByNatives(called_by_natives):
  """Mangles all the overloads from the call_by_natives list."""
  # First pass: count how many methods share each (class, name) pair.
  method_counts = collections.defaultdict(
      lambda: collections.defaultdict(lambda: 0))
  for called_by_native in called_by_natives:
    java_class_name = called_by_native.java_class_name
    name = called_by_native.name
    method_counts[java_class_name][name] += 1
  # Second pass: only overloaded names need the full mangled form.
  for called_by_native in called_by_natives:
    java_class_name = called_by_native.java_class_name
    method_name = called_by_native.name
    method_id_var_name = method_name
    if method_counts[java_class_name][method_name] > 1:
      method_id_var_name = GetMangledMethodName(method_name,
                                                called_by_native.params,
                                                called_by_native.return_type)
    called_by_native.method_id_var_name = method_id_var_name
  return called_by_natives


# Regex to match the JNI return types that should be included in a
# ScopedJavaLocalRef.
RE_SCOPED_JNI_RETURN_TYPES = re.compile('jobject|jclass|jstring|.*Array') # Regex to match a string like "@CalledByNative public void foo(int bar)". RE_CALLED_BY_NATIVE = re.compile( '@CalledByNative(?P<Unchecked>(Unchecked)*?)(?:\("(?P<annotation>.*)"\))?' '\s+(?P<prefix>[\w ]*?)' '\s*(?P<return_type>\S+?)' '\s+(?P<name>\w+)' '\s*\((?P<params>[^\)]*)\)') def ExtractCalledByNatives(contents): """Parses all methods annotated with @CalledByNative. Args: contents: the contents of the java file. Returns: A list of dict with information about the annotated methods. TODO(bulach): return a CalledByNative object. Raises: ParseError: if unable to parse. """ called_by_natives = [] for match in re.finditer(RE_CALLED_BY_NATIVE, contents): called_by_natives += [CalledByNative( system_class=False, unchecked='Unchecked' in match.group('Unchecked'), static='static' in match.group('prefix'), java_class_name=match.group('annotation') or '', return_type=match.group('return_type'), name=match.group('name'), params=JniParams.Parse(match.group('params')))] # Check for any @CalledByNative occurrences that weren't matched. unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n') for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]): if '@CalledByNative' in line1: raise ParseError('could not parse @CalledByNative method signature', line1, line2) return MangleCalledByNatives(called_by_natives) class JNIFromJavaP(object): """Uses 'javap' to parse a .class file and generate the JNI header file.""" def __init__(self, contents, options): self.contents = contents self.namespace = options.namespace self.fully_qualified_class = re.match( '.*?(class|interface) (?P<class_name>.*?)( |{)', contents[1]).group('class_name') self.fully_qualified_class = self.fully_qualified_class.replace('.', '/') # Java 7's javap includes type parameters in output, like HashSet<T>. Strip # away the <...> and use the raw class name that Java 6 would've given us. 
self.fully_qualified_class = self.fully_qualified_class.split('<', 1)[0] JniParams.SetFullyQualifiedClass(self.fully_qualified_class) self.java_class_name = self.fully_qualified_class.split('/')[-1] if not self.namespace: self.namespace = 'JNI_' + self.java_class_name re_method = re.compile('(?P<prefix>.*?)(?P<return_type>\S+?) (?P<name>\w+?)' '\((?P<params>.*?)\)') self.called_by_natives = [] for lineno, content in enumerate(contents[2:], 2): match = re.match(re_method, content) if not match: continue self.called_by_natives += [CalledByNative( system_class=True, unchecked=False, static='static' in match.group('prefix'), java_class_name='', return_type=match.group('return_type').replace('.', '/'), name=match.group('name'), params=JniParams.Parse(match.group('params').replace('.', '/')), signature=JniParams.ParseJavaPSignature(contents[lineno + 1]))] re_constructor = re.compile('(.*?)public ' + self.fully_qualified_class.replace('/', '.') + '\((?P<params>.*?)\)') for lineno, content in enumerate(contents[2:], 2): match = re.match(re_constructor, content) if not match: continue self.called_by_natives += [CalledByNative( system_class=True, unchecked=False, static=False, java_class_name='', return_type=self.fully_qualified_class, name='Constructor', params=JniParams.Parse(match.group('params').replace('.', '/')), signature=JniParams.ParseJavaPSignature(contents[lineno + 1]), is_constructor=True)] self.called_by_natives = MangleCalledByNatives(self.called_by_natives) self.inl_header_file_generator = InlHeaderFileGenerator( self.namespace, self.fully_qualified_class, [], self.called_by_natives, options) def GetContent(self): return self.inl_header_file_generator.GetContent() @staticmethod def CreateFromClass(class_file, options): class_name = os.path.splitext(os.path.basename(class_file))[0] p = subprocess.Popen(args=[options.javap, '-s', class_name], cwd=os.path.dirname(class_file), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = p.communicate() 
jni_from_javap = JNIFromJavaP(stdout.split('\n'), options) return jni_from_javap class JNIFromJavaSource(object): """Uses the given java source file to generate the JNI header file.""" def __init__(self, contents, fully_qualified_class, options): contents = self._RemoveComments(contents, options) JniParams.SetFullyQualifiedClass(fully_qualified_class) JniParams.ExtractImportsAndInnerClasses(contents) jni_namespace = ExtractJNINamespace(contents) or options.namespace natives = ExtractNatives(contents, options.ptr_type) called_by_natives = ExtractCalledByNatives(contents) if len(natives) == 0 and len(called_by_natives) == 0: raise SyntaxError('Unable to find any JNI methods for %s.' % fully_qualified_class) inl_header_file_generator = InlHeaderFileGenerator( jni_namespace, fully_qualified_class, natives, called_by_natives, options) self.content = inl_header_file_generator.GetContent() def _RemoveComments(self, contents, options): # We need to support both inline and block comments, and we need to handle # strings that contain '//' or '/*'. Rather than trying to do all that with # regexps, we just pipe the contents through the C preprocessor. We tell cpp # the file has already been preprocessed, so it just removes comments and # doesn't try to parse #include, #pragma etc. # # TODO(husky): This is a bit hacky. It would be cleaner to use a real Java # parser. Maybe we could ditch JNIFromJavaSource and just always use # JNIFromJavaP; or maybe we could rewrite this script in Java and use APT. 
# http://code.google.com/p/chromium/issues/detail?id=138941 p = subprocess.Popen(args=[options.cpp, '-fpreprocessed'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, _ = p.communicate(contents) return stdout def GetContent(self): return self.content @staticmethod def CreateFromFile(java_file_name, options): contents = file(java_file_name).read() fully_qualified_class = ExtractFullyQualifiedJavaClassName(java_file_name, contents) return JNIFromJavaSource(contents, fully_qualified_class, options) class InlHeaderFileGenerator(object): """Generates an inline header file for JNI integration.""" def __init__(self, namespace, fully_qualified_class, natives, called_by_natives, options): self.namespace = namespace self.fully_qualified_class = fully_qualified_class self.class_name = self.fully_qualified_class.split('/')[-1] self.natives = natives self.called_by_natives = called_by_natives self.header_guard = fully_qualified_class.replace('/', '_') + '_JNI' self.options = options self.init_native = self.ExtractInitNative(options) def ExtractInitNative(self, options): for native in self.natives: if options.jni_init_native_name == 'native' + native.name: self.natives.remove(native) return native return None def GetContent(self): """Returns the content of the JNI binding file.""" template = Template("""\ // Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is autogenerated by // ${SCRIPT_NAME} // For // ${FULLY_QUALIFIED_CLASS} #ifndef ${HEADER_GUARD} #define ${HEADER_GUARD} #include <jni.h> ${INCLUDES} // Step 1: forward declarations. namespace { $CLASS_PATH_DEFINITIONS $METHOD_ID_DEFINITIONS } // namespace $OPEN_NAMESPACE $FORWARD_DECLARATIONS // Step 2: method stubs. $METHOD_STUBS // Step 3: RegisterNatives. 
$JNI_NATIVE_METHODS $REGISTER_NATIVES $CLOSE_NAMESPACE $JNI_REGISTER_NATIVES #endif // ${HEADER_GUARD} """) values = { 'SCRIPT_NAME': self.options.script_name, 'FULLY_QUALIFIED_CLASS': self.fully_qualified_class, 'CLASS_PATH_DEFINITIONS': self.GetClassPathDefinitionsString(), 'METHOD_ID_DEFINITIONS': self.GetMethodIDDefinitionsString(), 'FORWARD_DECLARATIONS': self.GetForwardDeclarationsString(), 'METHOD_STUBS': self.GetMethodStubsString(), 'OPEN_NAMESPACE': self.GetOpenNamespaceString(), 'JNI_NATIVE_METHODS': self.GetJNINativeMethodsString(), 'REGISTER_NATIVES': self.GetRegisterNativesString(), 'CLOSE_NAMESPACE': self.GetCloseNamespaceString(), 'HEADER_GUARD': self.header_guard, 'INCLUDES': self.GetIncludesString(), 'JNI_REGISTER_NATIVES': self.GetJNIRegisterNativesString() } return WrapOutput(template.substitute(values)) def GetClassPathDefinitionsString(self): ret = [] ret += [self.GetClassPathDefinitions()] return '\n'.join(ret) def GetMethodIDDefinitionsString(self): """Returns the definition of method ids for the called by native methods.""" if not self.options.eager_called_by_natives: return '' template = Template("""\ jmethodID g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = NULL;""") ret = [] for called_by_native in self.called_by_natives: values = { 'JAVA_CLASS': called_by_native.java_class_name or self.class_name, 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name, } ret += [template.substitute(values)] return '\n'.join(ret) def GetForwardDeclarationsString(self): ret = [] for native in self.natives: if native.type != 'method': ret += [self.GetForwardDeclaration(native)] return '\n'.join(ret) def GetMethodStubsString(self): """Returns the code corresponding to method stubs.""" ret = [] for native in self.natives: if native.type == 'method': ret += [self.GetNativeMethodStubString(native)] if self.options.eager_called_by_natives: ret += self.GetEagerCalledByNativeMethodStubs() else: ret += self.GetLazyCalledByNativeMethodStubs() return '\n'.join(ret) def 
GetLazyCalledByNativeMethodStubs(self): return [self.GetLazyCalledByNativeMethodStub(called_by_native) for called_by_native in self.called_by_natives] def GetEagerCalledByNativeMethodStubs(self): ret = [] if self.called_by_natives: ret += ['namespace {'] for called_by_native in self.called_by_natives: ret += [self.GetEagerCalledByNativeMethodStub(called_by_native)] ret += ['} // namespace'] return ret def GetIncludesString(self): if not self.options.includes: return '' includes = self.options.includes.split(',') return '\n'.join('#include "%s"' % x for x in includes) def GetKMethodsString(self, clazz): ret = [] for native in self.natives: if (native.java_class_name == clazz or (not native.java_class_name and clazz == self.class_name)): ret += [self.GetKMethodArrayEntry(native)] return '\n'.join(ret) def SubstituteNativeMethods(self, template): """Substitutes JAVA_CLASS and KMETHODS in the provided template.""" ret = [] all_classes = self.GetUniqueClasses(self.natives) all_classes[self.class_name] = self.fully_qualified_class for clazz in all_classes: kmethods = self.GetKMethodsString(clazz) if kmethods: values = {'JAVA_CLASS': clazz, 'KMETHODS': kmethods} ret += [template.substitute(values)] if not ret: return '' return '\n' + '\n'.join(ret) def GetJNINativeMethodsString(self): """Returns the implementation of the array of native methods.""" template = Template("""\ static const JNINativeMethod kMethods${JAVA_CLASS}[] = { ${KMETHODS} }; """) return self.SubstituteNativeMethods(template) def GetRegisterCalledByNativesImplString(self): """Returns the code for registering the called by native methods.""" if not self.options.eager_called_by_natives: return '' template = Template("""\ g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = ${GET_METHOD_ID_IMPL} if (g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} == NULL) { return false; } """) ret = [] for called_by_native in self.called_by_natives: values = { 'JAVA_CLASS': called_by_native.java_class_name or self.class_name, 
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name, 'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native), } ret += [template.substitute(values)] return '\n'.join(ret) def GetRegisterNativesString(self): """Returns the code for RegisterNatives.""" template = Template("""\ ${REGISTER_NATIVES_SIGNATURE} { ${CLASSES} ${NATIVES} ${CALLED_BY_NATIVES} return true; } """) signature = 'static bool RegisterNativesImpl(JNIEnv* env' if self.init_native: signature += ', jclass clazz)' else: signature += ')' natives = self.GetRegisterNativesImplString() called_by_natives = self.GetRegisterCalledByNativesImplString() values = {'REGISTER_NATIVES_SIGNATURE': signature, 'CLASSES': self.GetFindClasses(), 'NATIVES': natives, 'CALLED_BY_NATIVES': called_by_natives, } return template.substitute(values) def GetRegisterNativesImplString(self): """Returns the shared implementation for RegisterNatives.""" template = Template("""\ const int kMethods${JAVA_CLASS}Size = arraysize(kMethods${JAVA_CLASS}); if (env->RegisterNatives(g_${JAVA_CLASS}_clazz, kMethods${JAVA_CLASS}, kMethods${JAVA_CLASS}Size) < 0) { jni_generator::HandleRegistrationError( env, g_${JAVA_CLASS}_clazz, __FILE__); return false; } """) return self.SubstituteNativeMethods(template) def GetJNIRegisterNativesString(self): """Returns the implementation for the JNI registration of native methods.""" if not self.init_native: return '' template = Template("""\ extern "C" JNIEXPORT bool JNICALL Java_${FULLY_QUALIFIED_CLASS}_${INIT_NATIVE_NAME}(JNIEnv* env, jclass clazz) { return ${NAMESPACE}RegisterNativesImpl(env, clazz); } """) fully_qualified_class = self.fully_qualified_class.replace('/', '_') namespace = '' if self.namespace: namespace = self.namespace + '::' values = {'FULLY_QUALIFIED_CLASS': fully_qualified_class, 'INIT_NATIVE_NAME': 'native' + self.init_native.name, 'NAMESPACE': namespace, 'REGISTER_NATIVES_IMPL': self.GetRegisterNativesImplString() } return template.substitute(values) def 
GetOpenNamespaceString(self): if self.namespace: all_namespaces = ['namespace %s {' % ns for ns in self.namespace.split('::')] return '\n'.join(all_namespaces) return '' def GetCloseNamespaceString(self): if self.namespace: all_namespaces = ['} // namespace %s' % ns for ns in self.namespace.split('::')] all_namespaces.reverse() return '\n'.join(all_namespaces) + '\n' return '' def GetJNIFirstParam(self, native): ret = [] if native.type == 'method': ret = ['jobject jcaller'] elif native.type == 'function': if native.static: ret = ['jclass jcaller'] else: ret = ['jobject jcaller'] return ret def GetParamsInDeclaration(self, native): """Returns the params for the stub declaration. Args: native: the native dictionary describing the method. Returns: A string containing the params. """ return ',\n '.join(self.GetJNIFirstParam(native) + [JavaDataTypeToC(param.datatype) + ' ' + param.name for param in native.params]) def GetCalledByNativeParamsInDeclaration(self, called_by_native): return ',\n '.join([JavaDataTypeToC(param.datatype) + ' ' + param.name for param in called_by_native.params]) def GetForwardDeclaration(self, native): template = Template(""" static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS}); """) values = {'RETURN': JavaDataTypeToC(native.return_type), 'NAME': native.name, 'PARAMS': self.GetParamsInDeclaration(native)} return template.substitute(values) def GetNativeMethodStubString(self, native): """Returns stubs for native methods.""" template = Template("""\ static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS_IN_DECLARATION}) { ${P0_TYPE}* native = reinterpret_cast<${P0_TYPE}*>(${PARAM0_NAME}); CHECK_NATIVE_PTR(env, jcaller, native, "${NAME}"${OPTIONAL_ERROR_RETURN}); return native->${NAME}(${PARAMS_IN_CALL})${POST_CALL}; } """) params = [] if not self.options.pure_native_methods: params = ['env', 'jcaller'] params_in_call = ', '.join(params + [p.name for p in native.params[1:]]) return_type = JavaDataTypeToC(native.return_type) optional_error_return = 
JavaReturnValueToC(native.return_type) if optional_error_return: optional_error_return = ', ' + optional_error_return post_call = '' if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type): post_call = '.Release()' values = { 'RETURN': return_type, 'OPTIONAL_ERROR_RETURN': optional_error_return, 'NAME': native.name, 'PARAMS_IN_DECLARATION': self.GetParamsInDeclaration(native), 'PARAM0_NAME': native.params[0].name, 'P0_TYPE': native.p0_type, 'PARAMS_IN_CALL': params_in_call, 'POST_CALL': post_call } return template.substitute(values) def GetCalledByNativeValues(self, called_by_native): """Fills in necessary values for the CalledByNative methods.""" if called_by_native.static or called_by_native.is_constructor: first_param_in_declaration = '' first_param_in_call = ('g_%s_clazz' % (called_by_native.java_class_name or self.class_name)) else: first_param_in_declaration = ', jobject obj' first_param_in_call = 'obj' params_in_declaration = self.GetCalledByNativeParamsInDeclaration( called_by_native) if params_in_declaration: params_in_declaration = ', ' + params_in_declaration params_in_call = ', '.join(param.name for param in called_by_native.params) if params_in_call: params_in_call = ', ' + params_in_call pre_call = '' post_call = '' if called_by_native.static_cast: pre_call = 'static_cast<%s>(' % called_by_native.static_cast post_call = ')' check_exception = '' if not called_by_native.unchecked: check_exception = 'jni_generator::CheckException(env);' return_type = JavaDataTypeToC(called_by_native.return_type) optional_error_return = JavaReturnValueToC(called_by_native.return_type) if optional_error_return: optional_error_return = ', ' + optional_error_return return_declaration = '' return_clause = '' if return_type != 'void': pre_call = ' ' + pre_call return_declaration = return_type + ' ret =' if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type): return_type = 'base::android::ScopedJavaLocalRef<' + return_type + '>' return_clause = 'return ' + return_type + '(env, 
ret);' else: return_clause = 'return ret;' return { 'JAVA_CLASS': called_by_native.java_class_name or self.class_name, 'RETURN_TYPE': return_type, 'OPTIONAL_ERROR_RETURN': optional_error_return, 'RETURN_DECLARATION': return_declaration, 'RETURN_CLAUSE': return_clause, 'FIRST_PARAM_IN_DECLARATION': first_param_in_declaration, 'PARAMS_IN_DECLARATION': params_in_declaration, 'PRE_CALL': pre_call, 'POST_CALL': post_call, 'ENV_CALL': called_by_native.env_call, 'FIRST_PARAM_IN_CALL': first_param_in_call, 'PARAMS_IN_CALL': params_in_call, 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name, 'CHECK_EXCEPTION': check_exception, 'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native) } def GetEagerCalledByNativeMethodStub(self, called_by_native): """Returns the implementation of the called by native method.""" template = Template(""" static ${RETURN_TYPE} ${METHOD_ID_VAR_NAME}(\ JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION}) { ${RETURN_DECLARATION}${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL}, g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}${PARAMS_IN_CALL})${POST_CALL}; ${RETURN_CLAUSE} }""") values = self.GetCalledByNativeValues(called_by_native) return template.substitute(values) def GetLazyCalledByNativeMethodStub(self, called_by_native): """Returns a string.""" function_signature_template = Template("""\ static ${RETURN_TYPE} Java_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}(\ JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION})""") function_header_template = Template("""\ ${FUNCTION_SIGNATURE} {""") function_header_with_unused_template = Template("""\ ${FUNCTION_SIGNATURE} __attribute__ ((unused)); ${FUNCTION_SIGNATURE} {""") template = Template(""" static base::subtle::AtomicWord g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = 0; ${FUNCTION_HEADER} /* Must call RegisterNativesImpl() */ CHECK_CLAZZ(env, ${FIRST_PARAM_IN_CALL}, g_${JAVA_CLASS}_clazz${OPTIONAL_ERROR_RETURN}); jmethodID method_id = ${GET_METHOD_ID_IMPL} ${RETURN_DECLARATION} 
${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL}, method_id${PARAMS_IN_CALL})${POST_CALL}; ${CHECK_EXCEPTION} ${RETURN_CLAUSE} }""") values = self.GetCalledByNativeValues(called_by_native) values['FUNCTION_SIGNATURE'] = ( function_signature_template.substitute(values)) if called_by_native.system_class: values['FUNCTION_HEADER'] = ( function_header_with_unused_template.substitute(values)) else: values['FUNCTION_HEADER'] = function_header_template.substitute(values) return template.substitute(values) def GetKMethodArrayEntry(self, native): template = Template("""\ { "native${NAME}", ${JNI_SIGNATURE}, reinterpret_cast<void*>(${NAME}) },""") values = {'NAME': native.name, 'JNI_SIGNATURE': JniParams.Signature(native.params, native.return_type, True)} return template.substitute(values) def GetUniqueClasses(self, origin): ret = {self.class_name: self.fully_qualified_class} for entry in origin: class_name = self.class_name jni_class_path = self.fully_qualified_class if entry.java_class_name: class_name = entry.java_class_name jni_class_path = self.fully_qualified_class + '$' + class_name ret[class_name] = jni_class_path return ret def GetClassPathDefinitions(self): """Returns the ClassPath constants.""" ret = [] template = Template("""\ const char k${JAVA_CLASS}ClassPath[] = "${JNI_CLASS_PATH}";""") native_classes = self.GetUniqueClasses(self.natives) called_by_native_classes = self.GetUniqueClasses(self.called_by_natives) all_classes = native_classes all_classes.update(called_by_native_classes) for clazz in all_classes: values = { 'JAVA_CLASS': clazz, 'JNI_CLASS_PATH': JniParams.RemapClassName(all_classes[clazz]), } ret += [template.substitute(values)] ret += '' for clazz in called_by_native_classes: template = Template("""\ // Leaking this jclass as we cannot use LazyInstance from some threads. 
jclass g_${JAVA_CLASS}_clazz = NULL;""") values = { 'JAVA_CLASS': clazz, } ret += [template.substitute(values)] return '\n'.join(ret) def GetFindClasses(self): """Returns the imlementation of FindClass for all known classes.""" if self.init_native: template = Template("""\ g_${JAVA_CLASS}_clazz = static_cast<jclass>(env->NewWeakGlobalRef(clazz));""") else: template = Template("""\ g_${JAVA_CLASS}_clazz = reinterpret_cast<jclass>(env->NewGlobalRef( base::android::GetClass(env, k${JAVA_CLASS}ClassPath).obj()));""") ret = [] for clazz in self.GetUniqueClasses(self.called_by_natives): values = {'JAVA_CLASS': clazz} ret += [template.substitute(values)] return '\n'.join(ret) def GetMethodIDImpl(self, called_by_native): """Returns the implementation of GetMethodID.""" if self.options.eager_called_by_natives: template = Template("""\ env->Get${STATIC_METHOD_PART}MethodID( g_${JAVA_CLASS}_clazz, "${JNI_NAME}", ${JNI_SIGNATURE});""") else: template = Template("""\ base::android::MethodID::LazyGet< base::android::MethodID::TYPE_${STATIC}>( env, g_${JAVA_CLASS}_clazz, "${JNI_NAME}", ${JNI_SIGNATURE}, &g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}); """) jni_name = called_by_native.name jni_return_type = called_by_native.return_type if called_by_native.is_constructor: jni_name = '<init>' jni_return_type = 'void' if called_by_native.signature: signature = called_by_native.signature else: signature = JniParams.Signature(called_by_native.params, jni_return_type, True) values = { 'JAVA_CLASS': called_by_native.java_class_name or self.class_name, 'JNI_NAME': jni_name, 'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name, 'STATIC': 'STATIC' if called_by_native.static else 'INSTANCE', 'STATIC_METHOD_PART': 'Static' if called_by_native.static else '', 'JNI_SIGNATURE': signature, } return template.substitute(values) def WrapOutput(output): ret = [] for line in output.splitlines(): # Do not wrap lines under 80 characters or preprocessor directives. 
if len(line) < 80 or line.lstrip()[:1] == '#': stripped = line.rstrip() if len(ret) == 0 or len(ret[-1]) or len(stripped): ret.append(stripped) else: first_line_indent = ' ' * (len(line) - len(line.lstrip())) subsequent_indent = first_line_indent + ' ' * 4 if line.startswith('//'): subsequent_indent = '//' + subsequent_indent wrapper = textwrap.TextWrapper(width=80, subsequent_indent=subsequent_indent, break_long_words=False) ret += [wrapped.rstrip() for wrapped in wrapper.wrap(line)] ret += [''] return '\n'.join(ret) def ExtractJarInputFile(jar_file, input_file, out_dir): """Extracts input file from jar and returns the filename. The input file is extracted to the same directory that the generated jni headers will be placed in. This is passed as an argument to script. Args: jar_file: the jar file containing the input files to extract. input_files: the list of files to extract from the jar file. out_dir: the name of the directories to extract to. Returns: the name of extracted input file. 
""" jar_file = zipfile.ZipFile(jar_file) out_dir = os.path.join(out_dir, os.path.dirname(input_file)) try: os.makedirs(out_dir) except OSError as e: if e.errno != errno.EEXIST: raise extracted_file_name = os.path.join(out_dir, os.path.basename(input_file)) with open(extracted_file_name, 'w') as outfile: outfile.write(jar_file.read(input_file)) return extracted_file_name def GenerateJNIHeader(input_file, output_file, options): try: if os.path.splitext(input_file)[1] == '.class': jni_from_javap = JNIFromJavaP.CreateFromClass(input_file, options) content = jni_from_javap.GetContent() else: jni_from_java_source = JNIFromJavaSource.CreateFromFile( input_file, options) content = jni_from_java_source.GetContent() except ParseError, e: print e sys.exit(1) if output_file: if not os.path.exists(os.path.dirname(os.path.abspath(output_file))): os.makedirs(os.path.dirname(os.path.abspath(output_file))) if options.optimize_generation and os.path.exists(output_file): with file(output_file, 'r') as f: existing_content = f.read() if existing_content == content: return with file(output_file, 'w') as f: f.write(content) else: print output def GetScriptName(): script_components = os.path.abspath(sys.argv[0]).split(os.path.sep) base_index = 0 for idx, value in enumerate(script_components): if value == 'base' or value == 'third_party': base_index = idx break return os.sep.join(script_components[base_index:]) def main(argv): usage = """usage: %prog [OPTIONS] This script will parse the given java source code extracting the native declarations and print the header file to stdout (or a file). See SampleForTests.java for more details. """ option_parser = optparse.OptionParser(usage=usage) option_parser.add_option('-j', dest='jar_file', help='Extract the list of input files from' ' a specified jar file.' ' Uses javap to extract the methods from a' ' pre-compiled class. 
--input should point' ' to pre-compiled Java .class files.') option_parser.add_option('-n', dest='namespace', help='Uses as a namespace in the generated header ' 'instead of the javap class name, or when there is ' 'no JNINamespace annotation in the java source.') option_parser.add_option('--input_file', help='Single input file name. The output file name ' 'will be derived from it. Must be used with ' '--output_dir.') option_parser.add_option('--output_dir', help='The output directory. Must be used with ' '--input') option_parser.add_option('--optimize_generation', type="int", default=0, help='Whether we should optimize JNI ' 'generation by not regenerating files if they have ' 'not changed.') option_parser.add_option('--jarjar', help='Path to optional jarjar rules file.') option_parser.add_option('--script_name', default=GetScriptName(), help='The name of this script in the generated ' 'header.') option_parser.add_option('--includes', help='The comma-separated list of header files to ' 'include in the generated header.') option_parser.add_option('--pure_native_methods', action='store_true', dest='pure_native_methods', help='When true, the native methods will be called ' 'without any JNI-specific arguments.') option_parser.add_option('--ptr_type', default='int', type='choice', choices=['int', 'long'], help='The type used to represent native pointers in ' 'Java code. For 32-bit, use int; ' 'for 64-bit, use long.') option_parser.add_option('--jni_init_native_name', default='', help='The name of the JNI registration method that ' 'is used to initialize all native methods. If a ' 'method with this name is not present in the Java ' 'source file, setting this option is a no-op. 
When ' 'a method with this name is found however, the ' 'naming convention Java_<packageName>_<className> ' 'will limit the initialization to only the ' 'top-level class.') option_parser.add_option('--eager_called_by_natives', action='store_true', dest='eager_called_by_natives', help='When true, the called-by-native methods will ' 'be initialized in a non-atomic way.') option_parser.add_option('--cpp', default='cpp', help='The path to cpp command.') option_parser.add_option('--javap', default='javap', help='The path to javap command.') options, args = option_parser.parse_args(argv) if options.jar_file: input_file = ExtractJarInputFile(options.jar_file, options.input_file, options.output_dir) elif options.input_file: input_file = options.input_file else: option_parser.print_help() print '\nError: Must specify --jar_file or --input_file.' return 1 output_file = None if options.output_dir: root_name = os.path.splitext(os.path.basename(input_file))[0] output_file = os.path.join(options.output_dir, root_name) + '_jni.h' if options.jarjar: with open(options.jarjar) as f: JniParams.SetJarJarMappings(f.read()) GenerateJNIHeader(input_file, output_file, options) if __name__ == '__main__': sys.exit(main(sys.argv)) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475135"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">yvaucher/purchase-workflow</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">__unported__/purchase_group_orders/purchase_group_orders.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9678</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # Author: Alexandre Fayolle # Copyright 2012 Camptocamp SA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# ############################################################################## import logging from openerp.osv.orm import Model, browse_record, browse_null from openerp.osv import fields from openerp import netsvc class procurement_order(Model): _inherit = 'procurement.order' _columns = {'sale_id': fields.many2one('sale.order', 'Sale Order', help='the sale order which generated the procurement'), 'origin': fields.char('Source Document', size=512, help="Reference of the document that created this Procurement.\n" "This is automatically completed by OpenERP."), } def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None): """Create the purchase order from the procurement, using the provided field values, after adding the given purchase order line in the purchase order. :params procurement: the procurement object generating the purchase order :params dict po_vals: field values for the new purchase order (the ``order_line`` field will be overwritten with one single line, as passed in ``line_vals``). :params dict line_vals: field values of the single purchase order line that the purchase order will contain. 
:return: id of the newly created purchase order :rtype: int """ po_vals.update({'order_line': [(0,0,line_vals)]}) if procurement.sale_id: sale = procurement.sale_id update = {'shop_id': sale.shop_id.id, 'carrier_id': sale.carrier_id.id} po_vals.update(update) return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context) class sale_order(Model): _inherit = 'sale.order' def _prepare_order_line_procurement(self, cr, uid, order, line, move_id, date_planned, context=None): proc_data = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, move_id, date_planned, context) proc_data['sale_id'] = order.id return proc_data class purchase_order(Model): _inherit = 'purchase.order' _columns = { 'shop_id': fields.many2one('sale.shop', 'Shop', help='the shop which generated the sale which triggered the PO'), 'carrier_id': fields.many2one('delivery.carrier', 'Carrier', help='the carrier in charge for delivering the related sale order'), 'carrier_partner_id': fields.related('carrier_id', 'partner_id', type='many2one', relation='res.partner', string='Carrier Name', readonly=True, help="Name of the carrier partner in charge of delivering the related sale order"), 'origin': fields.char('Source Document', size=512, help="Reference of the document that generated this purchase order request."), } def do_merge(self, cr, uid, ids, context=None): """ To merge similar type of purchase orders. 
Orders will only be merged if: * Purchase Orders are in draft * Purchase Orders belong to the same partner * Purchase Orders have same stock location, same pricelist * Purchase Orders have the same shop and the same carrier (NEW in this module) Lines will only be merged if: * Order lines are exactly the same except for the quantity and unit """ #TOFIX: merged order line should be unlink wf_service = netsvc.LocalService("workflow") def make_key(br, fields): list_key = [] for field in fields: field_val = getattr(br, field) if field in ('product_id', 'move_dest_id', 'account_analytic_id'): if not field_val: field_val = False if isinstance(field_val, browse_record): field_val = field_val.id elif isinstance(field_val, browse_null): field_val = False elif isinstance(field_val, list): field_val = ((6, 0, tuple([v.id for v in field_val])),) list_key.append((field, field_val)) list_key.sort() return tuple(list_key) # compute what the new orders should contain new_orders = {} for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']: order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'shop_id', 'carrier_id')) # added line new_order = new_orders.setdefault(order_key, ({}, [])) new_order[1].append(porder.id) order_infos = new_order[0] if not order_infos: order_infos.update({ 'origin': porder.origin, 'date_order': porder.date_order, 'partner_id': porder.partner_id.id, 'partner_address_id': porder.partner_address_id.id, 'dest_address_id': porder.dest_address_id.id, 'warehouse_id': porder.warehouse_id.id, 'location_id': porder.location_id.id, 'pricelist_id': porder.pricelist_id.id, 'state': 'draft', 'order_line': {}, 'notes': '%s' % (porder.notes or '',), 'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False, 'shop_id': porder.shop_id and porder.shop_id.id, # added line 'carrier_id': porder.carrier_id and porder.carrier_id.id, # added line }) else: if porder.date_order < 
order_infos['date_order']: order_infos['date_order'] = porder.date_order if porder.notes: order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,)) if porder.origin: order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin for order_line in porder.order_line: line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'notes', 'product_id', 'move_dest_id', 'account_analytic_id')) o_line = order_infos['order_line'].setdefault(line_key, {}) if o_line: # merge the line with an existing line o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor'] else: # append a new "standalone" line for field in ('product_qty', 'product_uom'): field_val = getattr(order_line, field) if isinstance(field_val, browse_record): field_val = field_val.id o_line[field] = field_val o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0 allorders = [] orders_info = {} for order_key, (order_data, old_ids) in new_orders.iteritems(): # skip merges with only one order if len(old_ids) < 2: allorders += (old_ids or []) continue # cleanup order line data for key, value in order_data['order_line'].iteritems(): del value['uom_factor'] value.update(dict(key)) order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()] # create the new order neworder_id = self.create(cr, uid, order_data) orders_info.update({neworder_id: old_ids}) allorders.append(neworder_id) # make triggers pointing to the old orders point to the new order for old_id in old_ids: wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr) wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr) return orders_info </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group 
cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475136"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">glwu/python-for-android</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">python3-alpha/python3-src/Lib/distutils/tests/test_install_data.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">147</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2603</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""Tests for distutils.command.install_data.""" import sys import os import unittest import getpass from distutils.command.install_data import install_data from distutils.tests import support from test.support import run_unittest class InstallDataTestCase(support.TempdirManager, support.LoggingSilencer, support.EnvironGuard, unittest.TestCase): def test_simple_run(self): pkg_dir, dist = self.create_dist() cmd = install_data(dist) cmd.install_dir = inst = os.path.join(pkg_dir, 'inst') # data_files can contain # - simple files # - a tuple with a path, and a list of file one = os.path.join(pkg_dir, 'one') self.write_file(one, 'xxx') inst2 = os.path.join(pkg_dir, 'inst2') two = os.path.join(pkg_dir, 'two') self.write_file(two, 'xxx') cmd.data_files = [one, (inst2, [two])] self.assertEqual(cmd.get_inputs(), [one, (inst2, [two])]) # let's run the command cmd.ensure_finalized() cmd.run() # let's check the result 
self.assertEqual(len(cmd.get_outputs()), 2) rtwo = os.path.split(two)[-1] self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) rone = os.path.split(one)[-1] self.assertTrue(os.path.exists(os.path.join(inst, rone))) cmd.outfiles = [] # let's try with warn_dir one cmd.warn_dir = 1 cmd.ensure_finalized() cmd.run() # let's check the result self.assertEqual(len(cmd.get_outputs()), 2) self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) self.assertTrue(os.path.exists(os.path.join(inst, rone))) cmd.outfiles = [] # now using root and empty dir cmd.root = os.path.join(pkg_dir, 'root') inst3 = os.path.join(cmd.install_dir, 'inst3') inst4 = os.path.join(pkg_dir, 'inst4') three = os.path.join(cmd.install_dir, 'three') self.write_file(three, 'xx') cmd.data_files = [one, (inst2, [two]), ('inst3', [three]), (inst4, [])] cmd.ensure_finalized() cmd.run() # let's check the result self.assertEqual(len(cmd.get_outputs()), 4) self.assertTrue(os.path.exists(os.path.join(inst2, rtwo))) self.assertTrue(os.path.exists(os.path.join(inst, rone))) def test_suite(): return unittest.makeSuite(InstallDataTestCase) if __name__ == "__main__": run_unittest(test_suite()) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475137"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">yugangw-msft/azure-cli</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">src/azure-cli-core/azure/cli/core/extension/tests/latest/test_extension_commands.py</span></div> </div></div> </td><td 
class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">24452</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import os import tempfile import unittest import shutil import hashlib import mock import sys from azure.cli.core.util import CLIError from azure.cli.core.extension import get_extension, build_extension_path from azure.cli.core.extension.operations import (add_extension_to_path, list_extensions, add_extension, show_extension, remove_extension, update_extension, list_available_extensions, OUT_KEY_NAME, OUT_KEY_VERSION, OUT_KEY_METADATA, OUT_KEY_PATH) from azure.cli.core.extension._resolve import NoExtensionCandidatesError from azure.cli.core.mock import DummyCli from . 
import IndexPatch, mock_ext def _get_test_data_file(filename): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', filename) def _compute_file_hash(filename): sha256 = hashlib.sha256() with open(filename, 'rb') as f: sha256.update(f.read()) return sha256.hexdigest() MY_EXT_NAME = 'myfirstcliextension' MY_EXT_SOURCE = _get_test_data_file('myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl') MY_BAD_EXT_SOURCE = _get_test_data_file('notanextension.txt') MY_SECOND_EXT_NAME_DASHES = 'my-second-cli-extension' MY_SECOND_EXT_SOURCE_DASHES = _get_test_data_file('my_second_cli_extension-0.0.1+dev-py2.py3-none-any.whl') class TestExtensionCommands(unittest.TestCase): def setUp(self): self.ext_dir = tempfile.mkdtemp() self.ext_sys_dir = tempfile.mkdtemp() self.patchers = [mock.patch('azure.cli.core.extension.EXTENSIONS_DIR', self.ext_dir), mock.patch('azure.cli.core.extension.EXTENSIONS_SYS_DIR', self.ext_sys_dir)] for patcher in self.patchers: patcher.start() self.cmd = self._setup_cmd() def tearDown(self): for patcher in self.patchers: patcher.stop() shutil.rmtree(self.ext_dir, ignore_errors=True) shutil.rmtree(self.ext_sys_dir, ignore_errors=True) def test_no_extensions_dir(self): shutil.rmtree(self.ext_dir) actual = list_extensions() self.assertEqual(len(actual), 0) def test_no_extensions_in_dir(self): actual = list_extensions() self.assertEqual(len(actual), 0) def test_add_list_show_remove_extension(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) actual = list_extensions() self.assertEqual(len(actual), 1) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) remove_extension(MY_EXT_NAME) num_exts = len(list_extensions()) self.assertEqual(num_exts, 0) def test_add_list_show_remove_system_extension(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True) actual = list_extensions() self.assertEqual(len(actual), 1) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) 
remove_extension(MY_EXT_NAME) num_exts = len(list_extensions()) self.assertEqual(num_exts, 0) def test_add_list_show_remove_user_system_extensions(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES, system=True) actual = list_extensions() self.assertEqual(len(actual), 2) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_PATH], build_extension_path(MY_EXT_NAME)) second_ext = show_extension(MY_SECOND_EXT_NAME_DASHES) self.assertEqual(second_ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES) self.assertEqual(second_ext[OUT_KEY_PATH], build_extension_path(MY_SECOND_EXT_NAME_DASHES, system=True)) remove_extension(MY_EXT_NAME) num_exts = len(list_extensions()) self.assertEqual(num_exts, 1) remove_extension(MY_SECOND_EXT_NAME_DASHES) num_exts = len(list_extensions()) self.assertEqual(num_exts, 0) def test_add_list_show_remove_extension_with_dashes(self): add_extension(cmd=self.cmd, source=MY_SECOND_EXT_SOURCE_DASHES) actual = list_extensions() self.assertEqual(len(actual), 1) ext = show_extension(MY_SECOND_EXT_NAME_DASHES) self.assertEqual(ext[OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES) self.assertIn(OUT_KEY_NAME, ext[OUT_KEY_METADATA], "Unable to get full metadata") self.assertEqual(ext[OUT_KEY_METADATA][OUT_KEY_NAME], MY_SECOND_EXT_NAME_DASHES) remove_extension(MY_SECOND_EXT_NAME_DASHES) num_exts = len(list_extensions()) self.assertEqual(num_exts, 0) def test_add_extension_twice(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) num_exts = len(list_extensions()) self.assertEqual(num_exts, 1) with self.assertRaises(CLIError): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) def test_add_same_extension_user_system(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) num_exts = len(list_extensions()) self.assertEqual(num_exts, 1) with self.assertRaises(CLIError): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, system=True) def 
test_add_extension_invalid(self): with self.assertRaises(ValueError): add_extension(cmd=self.cmd, source=MY_BAD_EXT_SOURCE) actual = list_extensions() self.assertEqual(len(actual), 0) def test_add_extension_invalid_whl_name(self): with self.assertRaises(CLIError): add_extension(cmd=self.cmd, source=os.path.join('invalid', 'ext', 'path', 'file.whl')) actual = list_extensions() self.assertEqual(len(actual), 0) def test_add_extension_valid_whl_name_filenotfound(self): with self.assertRaises(CLIError): add_extension(cmd=self.cmd, source=_get_test_data_file('mywheel-0.0.3+dev-py2.py3-none-any.whl')) actual = list_extensions() self.assertEqual(len(actual), 0) def test_add_extension_with_pip_proxy(self): extension_name = MY_EXT_NAME proxy_param = '--proxy' proxy_endpoint = "https://user:<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="55253426261525273a2d2c7b383c36273a263a33217b363a38">[email protected]</a>" computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.shutil'), \ mock.patch('azure.cli.core.extension.operations.check_output') as check_output: add_extension(cmd=self.cmd, extension_name=extension_name, pip_proxy=proxy_endpoint) args = check_output.call_args pip_cmd = args[0][0] proxy_index = pip_cmd.index(proxy_param) assert pip_cmd[proxy_index + 1] == proxy_endpoint def test_add_extension_verify_no_pip_proxy(self): extension_name = MY_EXT_NAME computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.shutil'), \ mock.patch('azure.cli.core.extension.operations.check_output') as check_output: add_extension(cmd=self.cmd, extension_name=extension_name) args = 
check_output.call_args pip_cmd = args[0][0] if '--proxy' in pip_cmd: raise AssertionError("proxy parameter in check_output args although no proxy specified") def test_add_extension_with_specific_version(self): extension_name = MY_EXT_NAME extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl' extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl' mocked_index_data = { extension_name: [ mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)), mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2)) ] } with IndexPatch(mocked_index_data): add_extension(self.cmd, extension_name=extension_name, version='0.0.3+dev') ext = show_extension(extension_name) self.assertEqual(ext['name'], extension_name) self.assertEqual(ext['version'], '0.0.3+dev') def test_add_extension_with_non_existing_version(self): extension_name = MY_EXT_NAME extension1 = 'myfirstcliextension-0.0.3+dev-py2.py3-none-any.whl' extension2 = 'myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl' mocked_index_data = { extension_name: [ mock_ext(extension1, version='0.0.3+dev', download_url=_get_test_data_file(extension1)), mock_ext(extension2, version='0.0.4+dev', download_url=_get_test_data_file(extension2)) ] } non_existing_version = '0.0.5' with IndexPatch(mocked_index_data): with self.assertRaisesRegex(CLIError, non_existing_version): add_extension(self.cmd, extension_name=extension_name, version=non_existing_version) def test_add_extension_with_name_valid_checksum(self): extension_name = MY_EXT_NAME computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)): add_extension(cmd=self.cmd, extension_name=extension_name) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) def test_add_extension_with_name_invalid_checksum(self): extension_name = MY_EXT_NAME 
bad_sha256 = 'thishashisclearlywrong' with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, bad_sha256)): with self.assertRaises(CLIError) as err: add_extension(cmd=self.cmd, extension_name=extension_name) self.assertTrue('The checksum of the extension does not match the expected value.' in str(err.exception)) def test_add_extension_with_name_source_not_whl(self): extension_name = 'myextension' with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=('{}.notwhl'.format(extension_name), None)): with self.assertRaises(ValueError) as err: add_extension(cmd=self.cmd, extension_name=extension_name) self.assertTrue('Unknown extension type. Only Python wheels are supported.' in str(err.exception)) def test_add_extension_with_name_but_it_already_exists(self): # Add extension without name first add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) # Now add using name computed_extension_sha256 = _compute_file_hash(MY_EXT_SOURCE) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)): with mock.patch('azure.cli.core.extension.operations.logger') as mock_logger: add_extension(cmd=self.cmd, extension_name=MY_EXT_NAME) call_args = mock_logger.warning.call_args self.assertEqual("Extension '%s' is already installed.", call_args[0][0]) self.assertEqual(MY_EXT_NAME, call_args[0][1]) self.assertEqual(mock_logger.warning.call_count, 1) def test_update_extension(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl') computed_extension_sha256 = _compute_file_hash(newer_extension) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', 
return_value=(newer_extension, computed_extension_sha256)): update_extension(self.cmd, MY_EXT_NAME) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev') def test_update_extension_with_pip_proxy(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl') computed_extension_sha256 = _compute_file_hash(newer_extension) proxy_param = '--proxy' proxy_endpoint = "https://user:<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="20504153536050524f58590e4d4943524f534f46540e434f4d">[email protected]</a>" with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.shutil'), \ mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \ mock.patch('azure.cli.core.extension.operations.check_output') as check_output: update_extension(self.cmd, MY_EXT_NAME, pip_proxy=proxy_endpoint) args = check_output.call_args pip_cmd = args[0][0] proxy_index = pip_cmd.index(proxy_param) assert pip_cmd[proxy_index + 1] == proxy_endpoint def test_update_extension_verify_no_pip_proxy(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl') computed_extension_sha256 = _compute_file_hash(newer_extension) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(MY_EXT_SOURCE, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.shutil'), \ 
mock.patch('azure.cli.core.extension.operations.is_valid_sha256sum', return_value=(True, computed_extension_sha256)), \ mock.patch('azure.cli.core.extension.operations.extension_exists', return_value=None), \ mock.patch('azure.cli.core.extension.operations.check_output') as check_output: update_extension(self.cmd, MY_EXT_NAME) args = check_output.call_args pip_cmd = args[0][0] if '--proxy' in pip_cmd: raise AssertionError("proxy parameter in check_output args although no proxy specified") def test_update_extension_not_found(self): with self.assertRaises(CLIError) as err: update_extension(self.cmd, MY_EXT_NAME) self.assertEqual(str(err.exception), 'The extension {} is not installed.'.format(MY_EXT_NAME)) def test_update_extension_no_updates(self): logger_msgs = [] def mock_log_warning(_, msg): logger_msgs.append(msg) add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') with mock.patch('azure.cli.core.extension.operations.resolve_from_index', side_effect=NoExtensionCandidatesError()), \ mock.patch('logging.Logger.warning', mock_log_warning): update_extension(self.cmd, MY_EXT_NAME) self.assertTrue("No updates available for '{}'.".format(MY_EXT_NAME) in logger_msgs[0]) def test_update_extension_exception_in_update_and_rolled_back(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl') bad_sha256 = 'thishashisclearlywrong' with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, bad_sha256)): with self.assertRaises(CLIError) as err: update_extension(self.cmd, MY_EXT_NAME) self.assertTrue('Failed to update. 
Rolled {} back to {}.'.format(ext['name'], ext[OUT_KEY_VERSION]) in str(err.exception)) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') def test_list_available_extensions_default(self): with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c: list_available_extensions(cli_ctx=self.cmd.cli_ctx) c.assert_called_once_with(None, self.cmd.cli_ctx) def test_list_available_extensions_operations_index_url(self): with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c: index_url = 'http://contoso.com' list_available_extensions(index_url=index_url, cli_ctx=self.cmd.cli_ctx) c.assert_called_once_with(index_url, self.cmd.cli_ctx) def test_list_available_extensions_show_details(self): with mock.patch('azure.cli.core.extension.operations.get_index_extensions', autospec=True) as c: list_available_extensions(show_details=True, cli_ctx=self.cmd.cli_ctx) c.assert_called_once_with(None, self.cmd.cli_ctx) def test_list_available_extensions_no_show_details(self): sample_index_extensions = { 'test_sample_extension1': [{ 'metadata': { 'name': 'test_sample_extension1', 'summary': 'my summary', 'version': '0.1.0' }}], 'test_sample_extension2': [{ 'metadata': { 'name': 'test_sample_extension2', 'summary': 'my summary', 'version': '0.1.0', 'azext.isPreview': True, 'azext.isExperimental': True }}] } with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions): res = list_available_extensions(cli_ctx=self.cmd.cli_ctx) self.assertIsInstance(res, list) self.assertEqual(len(res), len(sample_index_extensions)) self.assertEqual(res[0]['name'], 'test_sample_extension1') self.assertEqual(res[0]['summary'], 'my summary') self.assertEqual(res[0]['version'], '0.1.0') self.assertEqual(res[0]['preview'], False) self.assertEqual(res[0]['experimental'], False) with mock.patch('azure.cli.core.extension.operations.get_index_extensions', 
return_value=sample_index_extensions): res = list_available_extensions(cli_ctx=self.cmd.cli_ctx) self.assertIsInstance(res, list) self.assertEqual(len(res), len(sample_index_extensions)) self.assertEqual(res[1]['name'], 'test_sample_extension2') self.assertEqual(res[1]['summary'], 'my summary') self.assertEqual(res[1]['version'], '0.1.0') self.assertEqual(res[1]['preview'], True) self.assertEqual(res[1]['experimental'], True) def test_list_available_extensions_incompatible_cli_version(self): sample_index_extensions = { 'test_sample_extension1': [{ 'metadata': { "azext.maxCliCoreVersion": "0.0.0", 'name': 'test_sample_extension1', 'summary': 'my summary', 'version': '0.1.0' }}] } with mock.patch('azure.cli.core.extension.operations.get_index_extensions', return_value=sample_index_extensions): res = list_available_extensions(cli_ctx=self.cmd.cli_ctx) self.assertIsInstance(res, list) self.assertEqual(len(res), 0) def test_add_list_show_remove_extension_extra_index_url(self): """ Tests extension addition while specifying --extra-index-url parameter. :return: """ extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple'] add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls) actual = list_extensions() self.assertEqual(len(actual), 1) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_NAME], MY_EXT_NAME) remove_extension(MY_EXT_NAME) num_exts = len(list_extensions()) self.assertEqual(num_exts, 0) def test_update_extension_extra_index_url(self): """ Tests extension update while specifying --extra-index-url parameter. 
:return: """ extra_index_urls = ['https://testpypi.python.org/simple', 'https://pypi.python.org/simple'] add_extension(cmd=self.cmd, source=MY_EXT_SOURCE, pip_extra_index_urls=extra_index_urls) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.3+dev') newer_extension = _get_test_data_file('myfirstcliextension-0.0.4+dev-py2.py3-none-any.whl') computed_extension_sha256 = _compute_file_hash(newer_extension) with mock.patch('azure.cli.core.extension.operations.resolve_from_index', return_value=(newer_extension, computed_extension_sha256)): update_extension(self.cmd, MY_EXT_NAME, pip_extra_index_urls=extra_index_urls) ext = show_extension(MY_EXT_NAME) self.assertEqual(ext[OUT_KEY_VERSION], '0.0.4+dev') def test_add_extension_to_path(self): add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) num_exts = len(list_extensions()) self.assertEqual(num_exts, 1) ext = get_extension('myfirstcliextension') old_path = sys.path[:] try: add_extension_to_path(ext.name) self.assertSequenceEqual(old_path, sys.path[:-1]) self.assertEqual(ext.path, sys.path[-1]) finally: sys.path[:] = old_path def test_add_extension_azure_to_path(self): import azure import azure.mgmt old_path_0 = list(sys.path) old_path_1 = list(azure.__path__) old_path_2 = list(azure.mgmt.__path__) add_extension(cmd=self.cmd, source=MY_EXT_SOURCE) ext = get_extension('myfirstcliextension') azure_dir = os.path.join(ext.path, "azure") azure_mgmt_dir = os.path.join(azure_dir, "mgmt") os.mkdir(azure_dir) os.mkdir(azure_mgmt_dir) try: add_extension_to_path(ext.name) new_path_1 = list(azure.__path__) new_path_2 = list(azure.mgmt.__path__) finally: sys.path.remove(ext.path) remove_extension(ext.name) if isinstance(azure.__path__, list): azure.__path__[:] = old_path_1 else: list(azure.__path__) if isinstance(azure.mgmt.__path__, list): azure.mgmt.__path__[:] = old_path_2 else: list(azure.mgmt.__path__) self.assertSequenceEqual(old_path_1, new_path_1[:-1]) self.assertSequenceEqual(old_path_2, 
new_path_2[:-1]) self.assertEqual(azure_dir, new_path_1[-1]) self.assertEqual(azure_mgmt_dir, new_path_2[-1]) self.assertSequenceEqual(old_path_0, list(sys.path)) self.assertSequenceEqual(old_path_1, list(azure.__path__)) self.assertSequenceEqual(old_path_2, list(azure.mgmt.__path__)) def _setup_cmd(self): cmd = mock.MagicMock() cmd.cli_ctx = DummyCli() return cmd if __name__ == '__main__': unittest.main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475138"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">openstack/taskflow</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">taskflow/examples/resume_many_flows/run_flow.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">7</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1433</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- # Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import sys logging.basicConfig(level=logging.ERROR) self_dir = os.path.abspath(os.path.dirname(__file__)) top_dir = os.path.abspath( os.path.join(self_dir, os.pardir, os.pardir, os.pardir)) example_dir = os.path.abspath(os.path.join(self_dir, os.pardir)) sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) sys.path.insert(0, example_dir) import taskflow.engines import example_utils # noqa import my_flows # noqa with example_utils.get_backend() as backend: engine = taskflow.engines.load_from_factory(my_flows.flow_factory, backend=backend) print('Running flow %s %s' % (engine.storage.flow_name, engine.storage.flow_uuid)) engine.run() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475139"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jehine-MSFT/azure-storage-python</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/blob_performance.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> 
</div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5539</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#------------------------------------------------------------------------- # Copyright (c) Microsoft. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #-------------------------------------------------------------------------- import os import datetime import sys from azure.storage.blob import ( BlockBlobService, PageBlobService, AppendBlobService, ) import tests.settings_real as settings # Warning: # This script will take a while to run with everything enabled. # Edit the lists below to enable only the blob sizes and connection # counts that you are interested in. 
# NAME, SIZE (MB), +ADD SIZE (B) LOCAL_BLOCK_BLOB_FILES = [ ('BLOC-0080M+000B', 80, 0), ('BLOC-0080M+013B', 80, 13), ('BLOC-0500M+000B', 500, 0), ('BLOC-2500M+000B', 2500, 0), ] LOCAL_PAGE_BLOB_FILES = [ ('PAGE-0072M+000B', 72, 0), ('PAGE-0072M+512B', 72, 512), ('PAGE-0500M+000B', 500, 0), ('PAGE-2500M+000B', 2500, 0), ] LOCAL_APPEND_BLOB_FILES = [ ('APPD-0072M+000B', 80, 0), ('APPD-0072M+512B', 80, 13), ('APPD-0500M+000B', 500, 0), ('APPD-2500M+000B', 2500, 0), ] CONNECTION_COUNTS = [1, 2, 5, 10, 50] CONTAINER_NAME = 'performance' def input_file(name): return 'input-' + name def output_file(name): return 'output-' + name def create_random_content_file(name, size_in_megs, additional_byte_count=0): file_name = input_file(name) if not os.path.exists(file_name): print('generating {0}'.format(name)) with open(file_name, 'wb') as stream: for i in range(size_in_megs): stream.write(os.urandom(1048576)) if additional_byte_count > 0: stream.write(os.urandom(additional_byte_count)) def upload_blob(service, name, connections): blob_name = name file_name = input_file(name) sys.stdout.write('\tUp:') start_time = datetime.datetime.now() if isinstance(service, BlockBlobService): service.create_blob_from_path( CONTAINER_NAME, blob_name, file_name, max_connections=connections) elif isinstance(service, PageBlobService): service.create_blob_from_path( CONTAINER_NAME, blob_name, file_name, max_connections=connections) elif isinstance(service, AppendBlobService): service.append_blob_from_path( CONTAINER_NAME, blob_name, file_name, max_connections=connections) else: service.create_blob_from_path( CONTAINER_NAME, blob_name, file_name, max_connections=connections) elapsed_time = datetime.datetime.now() - start_time sys.stdout.write('{0}s'.format(elapsed_time.total_seconds())) def download_blob(service, name, connections): blob_name = name target_file_name = output_file(name) if os.path.exists(target_file_name): os.remove(target_file_name) sys.stdout.write('\tDn:') start_time = 
datetime.datetime.now() service.get_blob_to_path( CONTAINER_NAME, blob_name, target_file_name, max_connections=connections) elapsed_time = datetime.datetime.now() - start_time sys.stdout.write('{0}s'.format(elapsed_time.total_seconds())) def file_contents_equal(first_file_path, second_file_path): first_size = os.path.getsize(first_file_path); second_size = os.path.getsize(second_file_path) if first_size != second_size: return False with open(first_file_path, 'rb') as first_stream: with open(second_file_path, 'rb') as second_stream: while True: first_data = first_stream.read(1048576) second_data = second_stream.read(1048576) if first_data != second_data: return False if not first_data: return True def compare_files(name): first_file_path = input_file(name) second_file_path = output_file(name) sys.stdout.write('\tCmp:') if file_contents_equal(first_file_path, second_file_path): sys.stdout.write('ok') else: sys.stdout.write('ERR!') def process(service, blobs, counts): for name, size_in_megs, additional in blobs: create_random_content_file(name, size_in_megs, additional) for name, _, _ in blobs: for max_conn in counts: sys.stdout.write('{0}\tParallel:{1}'.format(name, max_conn)) upload_blob(service, name, max_conn) download_blob(service, name, max_conn) compare_files(name) print('') print('') def main(): bbs = BlockBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY) pbs = PageBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY) abs = AppendBlobService(settings.STORAGE_ACCOUNT_NAME, settings.STORAGE_ACCOUNT_KEY) service.create_container(CONTAINER_NAME) process(bbs, LOCAL_BLOCK_BLOB_FILES, CONNECTION_COUNTS) process(pbs, LOCAL_PAGE_BLOB_FILES, CONNECTION_COUNTS) process(abs, LOCAL_APPEND_BLOB_FILES, CONNECTION_COUNTS) if __name__ == '__main__': main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475140"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">MeirKriheli/Open-Knesset</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mks/urls.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">14</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2520</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from django.conf import settings from django.conf.urls import url, patterns from . 
import views as mkv from feeds import MemberActivityFeed mksurlpatterns = patterns('mks.views', url(r'^parties-members/$', mkv.PartiesMembersRedirctView.as_view(), name='parties-members-index'), url(r'^parties-members/(?P<pk>\d+)/$', mkv.PartiesMembersView.as_view(), name='parties-members-list'), url(r'^member/$', mkv.MemberRedirectView.as_view(), name='member-list'), url(r'^member/csv$', mkv.MemberCsvView.as_view()), url(r'^party/csv$', mkv.PartyCsvView.as_view()), url(r'^member/(?P<pk>\d+)/$', 'mk_detail', name='member-detail'), url(r'^member/(?P<pk>\d+)/embed/$', mkv.MemberEmbedView.as_view(), name='member-embed'), # "more" actions url(r'^member/(?P<pk>\d+)/more_actions/$', mkv.MemeberMoreActionsView.as_view(), name='member-more-actions'), url(r'^member/(?P<pk>\d+)/more_legislation/$', mkv.MemeberMoreLegislationView.as_view(), name='member-more-legislation'), url(r'^member/(?P<pk>\d+)/more_committee/$', mkv.MemeberMoreCommitteeView.as_view(), name='member-more-committees'), url(r'^member/(?P<pk>\d+)/more_plenum/$', mkv.MemeberMorePlenumView.as_view(), name='member-more-plenums'), url(r'^member/(?P<pk>\d+)/more_mmm/$', mkv.MemeberMoreMMMView.as_view(), name='member-more-mmm'), url(r'^member/(?P<object_id>\d+)/rss/$', MemberActivityFeed(), name='member-activity-feed'), url(r'^member/(?P<pk>\d+)/(?P<slug>[\w\-\"]+)/$', 'mk_detail', name='member-detail-with-slug'), # TODO:the next url is hardcoded in a js file url(r'^member/auto_complete/$', mkv.member_auto_complete, name='member-auto-complete'), url(r'^member/search/?$', mkv.member_by_name, name='member-by-name'), url(r'^member/by/(?P<stat_type>' + '|'.join(x[0] for x in mkv.MemberListView.pages) + ')/$', mkv.MemberListView.as_view(), name='member-stats'), # a JS view for adding mks tooltips on a page url(r'^member/tooltip.js', mkv.members_tooltips, name='member-tooltip'), url(r'^party/$', mkv.PartyRedirectView.as_view(), name='party-list'), url(r'^party/(?P<pk>\d+)/$', mkv.PartyDetailView.as_view(), 
name='party-detail'), url(r'^party/(?P<pk>\d+)/(?P<slug>[\w\-\"]+)/$', mkv.PartyDetailView.as_view(), name='party-detail-with-slug'), url(r'^party/by/(?P<stat_type>' + '|'.join(x[0] for x in mkv.PartyListView.pages) + ')/$', mkv.PartyListView.as_view(), name='party-stats'), url(r'^party/search/?$', mkv.party_by_name, name='party-by-name'), ) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475141"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Froggiewalker/geonode</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">geonode/base/enumerations.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">15</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">13719</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ######################################################################### # # Copyright (C) 2012 OpenPlans # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### from django.utils.translation import ugettext_lazy as _ LINK_TYPES = ['original', 'data', 'image', 'metadata', 'html', 'OGC:WMS', 'OGC:WFS', 'OGC:WCS'] HIERARCHY_LEVELS = ( ('series', _('series')), ('software', _('computer program or routine')), ('featureType', _('feature type')), ('model', _('copy or imitation of an existing or hypothetical object')), ('collectionHardware', _('collection hardware')), ('collectionSession', _('collection session')), ('nonGeographicDataset', _('non-geographic data')), ('propertyType', _('property type')), ('fieldSession', _('field session')), ('dataset', _('dataset')), ('service', _('service interfaces')), ('attribute', _('attribute class')), ('attributeType', _('characteristic of a feature')), ('tile', _('tile or spatial subset of geographic data')), ('feature', _('feature')), ('dimensionGroup', _('dimension group')), ) UPDATE_FREQUENCIES = ( ('unknown', _('frequency of maintenance for the data is not known')), ('continual', _('data is repeatedly and frequently updated')), ('notPlanned', _('there are no plans to update the data')), ('daily', _('data is updated each day')), ('annually', _('data is updated every year')), ('asNeeded', _('data is updated as deemed necessary')), ('monthly', _('data is updated each month')), ('fortnightly', _('data is updated every two weeks')), ('irregular', _('data is updated in intervals that are uneven in duration')), ('weekly', _('data is updated on a weekly basis')), ('biannually', _('data is updated twice each year')), ('quarterly', _('data is 
updated every three months')), ) CONTACT_FIELDS = [ 'name', 'organization', 'position', 'voice', 'facsimile', 'delivery_point', 'city', 'administrative_area', 'postal_code', 'country', 'email', 'role' ] DEFAULT_SUPPLEMENTAL_INFORMATION = _( _('No information provided') ) COUNTRIES = ( ('AFG', 'Afghanistan'), ('ALA', 'Aland Islands'), ('ALB', 'Albania'), ('DZA', 'Algeria'), ('ASM', 'American Samoa'), ('AND', 'Andorra'), ('AGO', 'Angola'), ('AIA', 'Anguilla'), ('ATG', 'Antigua and Barbuda'), ('ARG', 'Argentina'), ('ARM', 'Armenia'), ('ABW', 'Aruba'), ('AUS', 'Australia'), ('AUT', 'Austria'), ('AZE', 'Azerbaijan'), ('BHS', 'Bahamas'), ('BHR', 'Bahrain'), ('BGD', 'Bangladesh'), ('BRB', 'Barbados'), ('BLR', 'Belarus'), ('BEL', 'Belgium'), ('BLZ', 'Belize'), ('BEN', 'Benin'), ('BMU', 'Bermuda'), ('BTN', 'Bhutan'), ('BOL', 'Bolivia'), ('BIH', 'Bosnia and Herzegovina'), ('BWA', 'Botswana'), ('BRA', 'Brazil'), ('VGB', 'British Virgin Islands'), ('BRN', 'Brunei Darussalam'), ('BGR', 'Bulgaria'), ('BFA', 'Burkina Faso'), ('BDI', 'Burundi'), ('KHM', 'Cambodia'), ('CMR', 'Cameroon'), ('CAN', 'Canada'), ('CPV', 'Cape Verde'), ('CYM', 'Cayman Islands'), ('CAF', 'Central African Republic'), ('TCD', 'Chad'), ('CIL', 'Channel Islands'), ('CHL', 'Chile'), ('CHN', 'China'), ('HKG', 'China - Hong Kong'), ('MAC', 'China - Macao'), ('COL', 'Colombia'), ('COM', 'Comoros'), ('COG', 'Congo'), ('COK', 'Cook Islands'), ('CRI', 'Costa Rica'), ('CIV', 'Cote d\'Ivoire'), ('HRV', 'Croatia'), ('CUB', 'Cuba'), ('CYP', 'Cyprus'), ('CZE', 'Czech Republic'), ('PRK', 'Democratic People\'s Republic of Korea'), ('COD', 'Democratic Republic of the Congo'), ('DNK', 'Denmark'), ('DJI', 'Djibouti'), ('DMA', 'Dominica'), ('DOM', 'Dominican Republic'), ('ECU', 'Ecuador'), ('EGY', 'Egypt'), ('SLV', 'El Salvador'), ('GNQ', 'Equatorial Guinea'), ('ERI', 'Eritrea'), ('EST', 'Estonia'), ('ETH', 'Ethiopia'), ('FRO', 'Faeroe Islands'), ('FLK', 'Falkland Islands (Malvinas)'), ('FJI', 'Fiji'), ('FIN', 'Finland'), 
('FRA', 'France'), ('GUF', 'French Guiana'), ('PYF', 'French Polynesia'), ('GAB', 'Gabon'), ('GMB', 'Gambia'), ('GEO', 'Georgia'), ('DEU', 'Germany'), ('GHA', 'Ghana'), ('GIB', 'Gibraltar'), ('GRC', 'Greece'), ('GRL', 'Greenland'), ('GRD', 'Grenada'), ('GLP', 'Guadeloupe'), ('GUM', 'Guam'), ('GTM', 'Guatemala'), ('GGY', 'Guernsey'), ('GIN', 'Guinea'), ('GNB', 'Guinea-Bissau'), ('GUY', 'Guyana'), ('HTI', 'Haiti'), ('VAT', 'Holy See (Vatican City)'), ('HND', 'Honduras'), ('HUN', 'Hungary'), ('ISL', 'Iceland'), ('IND', 'India'), ('IDN', 'Indonesia'), ('IRN', 'Iran'), ('IRQ', 'Iraq'), ('IRL', 'Ireland'), ('IMN', 'Isle of Man'), ('ISR', 'Israel'), ('ITA', 'Italy'), ('JAM', 'Jamaica'), ('JPN', 'Japan'), ('JEY', 'Jersey'), ('JOR', 'Jordan'), ('KAZ', 'Kazakhstan'), ('KEN', 'Kenya'), ('KIR', 'Kiribati'), ('KWT', 'Kuwait'), ('KGZ', 'Kyrgyzstan'), ('LAO', 'Lao People\'s Democratic Republic'), ('LVA', 'Latvia'), ('LBN', 'Lebanon'), ('LSO', 'Lesotho'), ('LBR', 'Liberia'), ('LBY', 'Libyan Arab Jamahiriya'), ('LIE', 'Liechtenstein'), ('LTU', 'Lithuania'), ('LUX', 'Luxembourg'), ('MKD', 'Macedonia'), ('MDG', 'Madagascar'), ('MWI', 'Malawi'), ('MYS', 'Malaysia'), ('MDV', 'Maldives'), ('MLI', 'Mali'), ('MLT', 'Malta'), ('MHL', 'Marshall Islands'), ('MTQ', 'Martinique'), ('MRT', 'Mauritania'), ('MUS', 'Mauritius'), ('MYT', 'Mayotte'), ('MEX', 'Mexico'), ('FSM', 'Micronesia, Federated States of'), ('MCO', 'Monaco'), ('MNG', 'Mongolia'), ('MNE', 'Montenegro'), ('MSR', 'Montserrat'), ('MAR', 'Morocco'), ('MOZ', 'Mozambique'), ('MMR', 'Myanmar'), ('NAM', 'Namibia'), ('NRU', 'Nauru'), ('NPL', 'Nepal'), ('NLD', 'Netherlands'), ('ANT', 'Netherlands Antilles'), ('NCL', 'New Caledonia'), ('NZL', 'New Zealand'), ('NIC', 'Nicaragua'), ('NER', 'Niger'), ('NGA', 'Nigeria'), ('NIU', 'Niue'), ('NFK', 'Norfolk Island'), ('MNP', 'Northern Mariana Islands'), ('NOR', 'Norway'), ('PSE', 'Occupied Palestinian Territory'), ('OMN', 'Oman'), ('PAK', 'Pakistan'), ('PLW', 'Palau'), ('PAN', 'Panama'), ('PNG', 
'Papua New Guinea'), ('PRY', 'Paraguay'), ('PER', 'Peru'), ('PHL', 'Philippines'), ('PCN', 'Pitcairn'), ('POL', 'Poland'), ('PRT', 'Portugal'), ('PRI', 'Puerto Rico'), ('QAT', 'Qatar'), ('KOR', 'Republic of Korea'), ('MDA', 'Republic of Moldova'), ('REU', 'Reunion'), ('ROU', 'Romania'), ('RUS', 'Russian Federation'), ('RWA', 'Rwanda'), ('BLM', 'Saint-Barthelemy'), ('SHN', 'Saint Helena'), ('KNA', 'Saint Kitts and Nevis'), ('LCA', 'Saint Lucia'), ('MAF', 'Saint-Martin (French part)'), ('SPM', 'Saint Pierre and Miquelon'), ('VCT', 'Saint Vincent and the Grenadines'), ('WSM', 'Samoa'), ('SMR', 'San Marino'), ('STP', 'Sao Tome and Principe'), ('SAU', 'Saudi Arabia'), ('SEN', 'Senegal'), ('SRB', 'Serbia'), ('SYC', 'Seychelles'), ('SLE', 'Sierra Leone'), ('SGP', 'Singapore'), ('SVK', 'Slovakia'), ('SVN', 'Slovenia'), ('SLB', 'Solomon Islands'), ('SOM', 'Somalia'), ('ZAF', 'South Africa'), ('SSD', 'South Sudan'), ('ESP', 'Spain'), ('LKA', 'Sri Lanka'), ('SDN', 'Sudan'), ('SUR', 'Suriname'), ('SJM', 'Svalbard and Jan Mayen Islands'), ('SWZ', 'Swaziland'), ('SWE', 'Sweden'), ('CHE', 'Switzerland'), ('SYR', 'Syrian Arab Republic'), ('TJK', 'Tajikistan'), ('THA', 'Thailand'), ('TLS', 'Timor-Leste'), ('TGO', 'Togo'), ('TKL', 'Tokelau'), ('TON', 'Tonga'), ('TTO', 'Trinidad and Tobago'), ('TUN', 'Tunisia'), ('TUR', 'Turkey'), ('TKM', 'Turkmenistan'), ('TCA', 'Turks and Caicos Islands'), ('TUV', 'Tuvalu'), ('UGA', 'Uganda'), ('UKR', 'Ukraine'), ('ARE', 'United Arab Emirates'), ('GBR', 'United Kingdom'), ('TZA', 'United Republic of Tanzania'), ('USA', 'United States of America'), ('VIR', 'United States Virgin Islands'), ('URY', 'Uruguay'), ('UZB', 'Uzbekistan'), ('VUT', 'Vanuatu'), ('VEN', 'Venezuela (Bolivarian Republic of)'), ('VNM', 'Viet Nam'), ('WLF', 'Wallis and Futuna Islands'), ('ESH', 'Western Sahara'), ('YEM', 'Yemen'), ('ZMB', 'Zambia'), ('ZWE', 'Zimbabwe'), ) # Taken from http://www.w3.org/WAI/ER/IG/ert/iso639.htm ALL_LANGUAGES = ( ('abk', 'Abkhazian'), ('aar', 
'Afar'), ('afr', 'Afrikaans'), ('amh', 'Amharic'), ('ara', 'Arabic'), ('asm', 'Assamese'), ('aym', 'Aymara'), ('aze', 'Azerbaijani'), ('bak', 'Bashkir'), ('ben', 'Bengali'), ('bih', 'Bihari'), ('bis', 'Bislama'), ('bre', 'Breton'), ('bul', 'Bulgarian'), ('bel', 'Byelorussian'), ('cat', 'Catalan'), ('cos', 'Corsican'), ('dan', 'Danish'), ('dzo', 'Dzongkha'), ('eng', 'English'), ('fra', 'French'), ('epo', 'Esperanto'), ('est', 'Estonian'), ('fao', 'Faroese'), ('fij', 'Fijian'), ('fin', 'Finnish'), ('fry', 'Frisian'), ('glg', 'Gallegan'), ('kal', 'Greenlandic'), ('grn', 'Guarani'), ('guj', 'Gujarati'), ('hau', 'Hausa'), ('heb', 'Hebrew'), ('hin', 'Hindi'), ('hun', 'Hungarian'), ('ind', 'Indonesian'), ('ina', 'Interlingua (International Auxiliary language Association)'), ('iku', 'Inuktitut'), ('ipk', 'Inupiak'), ('ita', 'Italian'), ('jpn', 'Japanese'), ('kan', 'Kannada'), ('kas', 'Kashmiri'), ('kaz', 'Kazakh'), ('khm', 'Khmer'), ('kin', 'Kinyarwanda'), ('kir', 'Kirghiz'), ('kor', 'Korean'), ('kur', 'Kurdish'), ('oci', 'Langue d \'Oc (post 1500)'), ('lao', 'Lao'), ('lat', 'Latin'), ('lav', 'Latvian'), ('lin', 'Lingala'), ('lit', 'Lithuanian'), ('mlg', 'Malagasy'), ('mlt', 'Maltese'), ('mar', 'Marathi'), ('mol', 'Moldavian'), ('mon', 'Mongolian'), ('nau', 'Nauru'), ('nep', 'Nepali'), ('nor', 'Norwegian'), ('ori', 'Oriya'), ('orm', 'Oromo'), ('pan', 'Panjabi'), ('pol', 'Polish'), ('por', 'Portuguese'), ('pus', 'Pushto'), ('que', 'Quechua'), ('roh', 'Rhaeto-Romance'), ('run', 'Rundi'), ('rus', 'Russian'), ('smo', 'Samoan'), ('sag', 'Sango'), ('san', 'Sanskrit'), ('scr', 'Serbo-Croatian'), ('sna', 'Shona'), ('snd', 'Sindhi'), ('sin', 'Singhalese'), ('ssw', 'Siswant'), ('slv', 'Slovenian'), ('som', 'Somali'), ('sot', 'Sotho'), ('spa', 'Spanish'), ('sun', 'Sudanese'), ('swa', 'Swahili'), ('tgl', 'Tagalog'), ('tgk', 'Tajik'), ('tam', 'Tamil'), ('tat', 'Tatar'), ('tel', 'Telugu'), ('tha', 'Thai'), ('tir', 'Tigrinya'), ('tog', 'Tonga (Nyasa)'), ('tso', 'Tsonga'), ('tsn', 
'Tswana'), ('tur', 'Turkish'), ('tuk', 'Turkmen'), ('twi', 'Twi'), ('uig', 'Uighur'), ('ukr', 'Ukrainian'), ('urd', 'Urdu'), ('uzb', 'Uzbek'), ('vie', 'Vietnamese'), ('vol', 'Volapük'), ('wol', 'Wolof'), ('xho', 'Xhosa'), ('yid', 'Yiddish'), ('yor', 'Yoruba'), ('zha', 'Zhuang'), ('zul', 'Zulu'), ) CHARSETS = (('', 'None/Unknown'), ('UTF-8', 'UTF-8/Unicode'), ('ISO-8859-1', 'Latin1/ISO-8859-1'), ('ISO-8859-2', 'Latin2/ISO-8859-2'), ('ISO-8859-3', 'Latin3/ISO-8859-3'), ('ISO-8859-4', 'Latin4/ISO-8859-4'), ('ISO-8859-5', 'Latin5/ISO-8859-5'), ('ISO-8859-6', 'Latin6/ISO-8859-6'), ('ISO-8859-7', 'Latin7/ISO-8859-7'), ('ISO-8859-8', 'Latin8/ISO-8859-8'), ('ISO-8859-9', 'Latin9/ISO-8859-9'), ('ISO-8859-10', 'Latin10/ISO-8859-10'), ('ISO-8859-13', 'Latin13/ISO-8859-13'), ('ISO-8859-14', 'Latin14/ISO-8859-14'), ('ISO8859-15', 'Latin15/ISO-8859-15'), ('Big5', 'BIG5'), ('EUC-JP', 'EUC-JP'), ('EUC-KR', 'EUC-KR'), ('GBK', 'GBK'), ('GB18030', 'GB18030'), ('Shift_JIS', 'Shift_JIS'), ('KOI8-R', 'KOI8-R'), ('KOI8-U', 'KOI8-U'), ('windows-874', 'Windows CP874'), ('windows-1250', 'Windows CP1250'), ('windows-1251', 'Windows CP1251'), ('windows-1252', 'Windows CP1252'), ('windows-1253', 'Windows CP1253'), ('windows-1254', 'Windows CP1254'), ('windows-1255', 'Windows CP1255'), ('windows-1256', 'Windows CP1256'), ('windows-1257', 'Windows CP1257'), ('windows-1258', 'Windows CP1258')) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475142"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">moio/spacewalk</span></div> </div></div> </td><td class="min-w-fit max-w-sm 
break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">client/solaris/smartpm/smart/transaction.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">61738</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># # Copyright (c) 2004 Conectiva, Inc. # # Written by Gustavo Niemeyer <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="1c727579717965796e5c7f7372797f68756a7d327f7371">[email protected]</a>> # # This file is part of Smart Package Manager. # # Smart Package Manager is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # Smart Package Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Smart Package Manager; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # from smart.const import INSTALL, REMOVE, UPGRADE, FIX, REINSTALL, KEEP from smart.cache import PreRequires, Package from smart import * class ChangeSet(dict): def __init__(self, cache, state=None): self._cache = cache if state: self.update(state) def getCache(self): return self._cache def getState(self): return self.copy() def setState(self, state): if state is not self: self.clear() self.update(state) def getPersistentState(self): state = {} for pkg in self: state[(pkg.__class__, pkg.name, pkg.version)] = self[pkg] return state def setPersistentState(self, state): self.clear() for pkg in self._cache.getPackages(): op = state.get((pkg.__class__, pkg.name, pkg.version)) if op is not None: self[pkg] = op def copy(self): return ChangeSet(self._cache, self) def set(self, pkg, op, force=False): if self.get(pkg) is op: return if op is INSTALL: if force or not pkg.installed: self[pkg] = INSTALL else: if pkg in self: del self[pkg] else: if force or pkg.installed: self[pkg] = REMOVE else: if pkg in self: del self[pkg] def installed(self, pkg): op = self.get(pkg) return op is INSTALL or pkg.installed and not op is REMOVE def difference(self, other): diff = ChangeSet(self._cache) for pkg in self: sop = self[pkg] if sop is not other.get(pkg): diff[pkg] = sop return diff def intersect(self, other): isct = ChangeSet(self._cache) for pkg in self: sop = self[pkg] if sop is other.get(pkg): isct[pkg] = sop return isct def __str__(self): l = [] for pkg in self: l.append("%s %s\n" % (self[pkg] is INSTALL and "I" or "R", pkg)) return "".join(l) class Policy(object): def __init__(self, trans): self._trans = trans self._locked = {} self._sysconflocked = [] self._priorities = {} def runStarting(self): self._priorities.clear() cache = self._trans.getCache() for pkg in 
pkgconf.filterByFlag("lock", cache.getPackages()): if pkg not in self._locked: self._sysconflocked.append(pkg) self._locked[pkg] = True def runFinished(self): self._priorities.clear() for pkg in self._sysconflocked: del self._locked[pkg] del self._sysconflocked[:] def getLocked(self, pkg): return pkg in self._locked def setLocked(self, pkg, flag): if flag: self._locked[pkg] = True else: if pkg in self._locked: del self._locked[pkg] def getLockedSet(self): return self._locked def getWeight(self, changeset): return 0 def getPriority(self, pkg): priority = self._priorities.get(pkg) if priority is None: self._priorities[pkg] = priority = pkg.getPriority() return priority def getPriorityWeights(self, targetPkg, pkgs): set = {} lower = None for pkg in pkgs: priority = self.getPriority(pkg) if lower is None or priority < lower: lower = priority set[pkg] = priority for pkg in set: set[pkg] = -(set[pkg] - lower)*10 return set class PolicyInstall(Policy): """Give precedence for keeping functionality in the system.""" def runStarting(self): Policy.runStarting(self) self._upgrading = upgrading = {} self._upgraded = upgraded = {} self._downgraded = downgraded = {} for pkg in self._trans.getCache().getPackages(): # Precompute upgrade relations. for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if prvpkg.installed: if (self.getPriority(pkg) >= self.getPriority(prvpkg)): upgrading[pkg] = True if prvpkg in upgraded: upgraded[prvpkg].append(pkg) else: upgraded[prvpkg] = [pkg] else: if prvpkg in downgraded: downgraded[prvpkg].append(pkg) else: downgraded[prvpkg] = [pkg] # Downgrades are upgrades if they have a higher priority. 
for prv in pkg.provides: for upg in prv.upgradedby: for upgpkg in upg.packages: if upgpkg.installed: if (self.getPriority(pkg) > self.getPriority(upgpkg)): upgrading[pkg] = True if upgpkg in upgraded: upgraded[upgpkg].append(pkg) else: upgraded[upgpkg] = [pkg] else: if upgpkg in downgraded: downgraded[upgpkg].append(pkg) else: downgraded[upgpkg] = [pkg] def runFinished(self): Policy.runFinished(self) del self._upgrading del self._upgraded del self._downgraded def getWeight(self, changeset): weight = 0 upgrading = self._upgrading upgraded = self._upgraded downgraded = self._downgraded for pkg in changeset: if changeset[pkg] is REMOVE: # Upgrading a package that will be removed # is better than upgrading a package that will # stay in the system. for upgpkg in upgraded.get(pkg, ()): if changeset.get(upgpkg) is INSTALL: weight -= 1 break else: for dwnpkg in downgraded.get(pkg, ()): if changeset.get(dwnpkg) is INSTALL: weight += 15 break else: weight += 20 else: if pkg in upgrading: weight += 2 else: weight += 3 return weight class PolicyRemove(Policy): """Give precedence to the choice with less changes.""" def getWeight(self, changeset): weight = 0 for pkg in changeset: if changeset[pkg] is REMOVE: weight += 1 else: weight += 5 return weight class PolicyUpgrade(Policy): """Give precedence to the choice with more upgrades and smaller impact.""" def runStarting(self): Policy.runStarting(self) self._upgrading = upgrading = {} self._upgraded = upgraded = {} self._sortbonus = sortbonus = {} self._requiredbonus = requiredbonus = {} queue = self._trans.getQueue() for pkg in self._trans.getCache().getPackages(): # Precompute upgrade relations. 
for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if (prvpkg.installed and self.getPriority(pkg) >= self.getPriority(prvpkg)): dct = upgrading.get(pkg) if dct: dct[prvpkg] = True else: upgrading[pkg] = {prvpkg: True} lst = upgraded.get(prvpkg) if lst: lst.append(pkg) else: upgraded[prvpkg] = [pkg] # Downgrades are upgrades if they have a higher priority. for prv in pkg.provides: for upg in prv.upgradedby: for upgpkg in upg.packages: if (upgpkg.installed and self.getPriority(pkg) > self.getPriority(upgpkg)): dct = upgrading.get(pkg) if dct: dct[upgpkg] = True else: upgrading[pkg] = {upgpkg: True} lst = upgraded.get(upgpkg) if lst: lst.append(pkg) else: upgraded[upgpkg] = [pkg] pkgs = self._trans._queue.keys() sortUpgrades(pkgs, self) for i, pkg in enumerate(pkgs): self._sortbonus[pkg] = -1./(i+100) def runFinished(self): Policy.runFinished(self) del self._upgrading del self._upgraded def getWeight(self, changeset): weight = 0 upgrading = self._upgrading upgraded = self._upgraded sortbonus = self._sortbonus requiredbonus = self._requiredbonus installedcount = 0 upgradedmap = {} for pkg in changeset: if changeset[pkg] is REMOVE: # Upgrading a package that will be removed # is better than upgrading a package that will # stay in the system. 
lst = upgraded.get(pkg, ()) for lstpkg in lst: if changeset.get(lstpkg) is INSTALL: weight -= 1 break else: weight += 3 else: installedcount += 1 upgpkgs = upgrading.get(pkg) if upgpkgs: weight += sortbonus.get(pkg, 0) upgradedmap.update(upgpkgs) upgradedcount = len(upgradedmap) weight += -30*upgradedcount+(installedcount-upgradedcount) return weight class Failed(Error): pass PENDING_REMOVE = 1 PENDING_INSTALL = 2 PENDING_UPDOWN = 3 class Transaction(object): def __init__(self, cache, policy=None, changeset=None, queue=None): self._cache = cache self._policy = policy and policy(self) or Policy(self) self._changeset = changeset or ChangeSet(cache) self._queue = queue or {} def clear(self): self._changeset.clear() self._queue.clear() def getCache(self): return self._cache def getQueue(self): return self._queue def getPolicy(self): return self._policy def setPolicy(self, policy): self._policy = policy(self) def getWeight(self): return self._policy.getWeight(self._changeset) def getChangeSet(self): return self._changeset def setChangeSet(self, changeset): self._changeset = changeset def getState(self): return self._changeset.getState() def setState(self, state): self._changeset.setState(state) def __nonzero__(self): return bool(self._changeset) def __str__(self): return str(self._changeset) def _install(self, pkg, changeset, locked, pending, depth=0): #print "[%03d] _install(%s)" % (depth, pkg) #depth += 1 locked[pkg] = True changeset.set(pkg, INSTALL) isinst = changeset.installed # Remove packages conflicted by this one. for cnf in pkg.conflicts: for prv in cnf.providedby: for prvpkg in prv.packages: if prvpkg is pkg: continue if not isinst(prvpkg): locked[prvpkg] = True continue if prvpkg in locked: raise Failed, _("Can't install %s: conflicted package " "%s is locked") % (pkg, prvpkg) self._remove(prvpkg, changeset, locked, pending, depth) pending.append((PENDING_UPDOWN, prvpkg)) # Remove packages conflicting with this one. 
for prv in pkg.provides: for cnf in prv.conflictedby: for cnfpkg in cnf.packages: if cnfpkg is pkg: continue if not isinst(cnfpkg): locked[cnfpkg] = True continue if cnfpkg in locked: raise Failed, _("Can't install %s: it's conflicted by " "the locked package %s") \ % (pkg, cnfpkg) self._remove(cnfpkg, changeset, locked, pending, depth) pending.append((PENDING_UPDOWN, cnfpkg)) # Remove packages with the same name that can't # coexist with this one. namepkgs = self._cache.getPackages(pkg.name) for namepkg in namepkgs: if namepkg is not pkg and not pkg.coexists(namepkg): if not isinst(namepkg): locked[namepkg] = True continue if namepkg in locked: raise Failed, _("Can't install %s: it can't coexist " "with %s") % (pkg, namepkg) self._remove(namepkg, changeset, locked, pending, depth) # Install packages required by this one. for req in pkg.requires: # Check if someone is already providing it. prvpkgs = {} found = False for prv in req.providedby: for prvpkg in prv.packages: if isinst(prvpkg): found = True break if prvpkg not in locked: prvpkgs[prvpkg] = True else: continue break if found: # Someone is already providing it. Good. continue # No one is currently providing it. Do something. if not prvpkgs: # No packages provide it at all. Give up. raise Failed, _("Can't install %s: no package provides %s") % \ (pkg, req) if len(prvpkgs) == 1: # Don't check locked here. prvpkgs was # already filtered above. self._install(prvpkgs.popitem()[0], changeset, locked, pending, depth) else: # More than one package provide it. This package # must be post-processed. pending.append((PENDING_INSTALL, pkg, req, prvpkgs.keys())) def _remove(self, pkg, changeset, locked, pending, depth=0): #print "[%03d] _remove(%s)" % (depth, pkg) #depth += 1 if pkg.essential: raise Failed, _("Can't remove %s: it's an essential package") locked[pkg] = True changeset.set(pkg, REMOVE) isinst = changeset.installed # Check packages requiring this one. 
for prv in pkg.provides: for req in prv.requiredby: # Check if someone installed is requiring it. for reqpkg in req.packages: if isinst(reqpkg): break else: # No one requires it, so it doesn't matter. continue # Check if someone installed is still providing it. prvpkgs = {} found = False for prv in req.providedby: for prvpkg in prv.packages: if prvpkg is pkg: continue if isinst(prvpkg): found = True break if prvpkg not in locked: prvpkgs[prvpkg] = True else: continue break if found: # Someone is still providing it. Good. continue # No one is providing it anymore. We'll have to do # something about it. if prvpkgs: # There are other options, besides removing. pending.append((PENDING_REMOVE, pkg, prv, req.packages, prvpkgs.keys())) else: # Remove every requiring package, or # upgrade/downgrade them to something which # does not require this dependency. for reqpkg in req.packages: if not isinst(reqpkg): continue if reqpkg in locked: raise Failed, _("Can't remove %s: %s is locked") \ % (pkg, reqpkg) self._remove(reqpkg, changeset, locked, pending, depth) pending.append((PENDING_UPDOWN, reqpkg)) def _updown(self, pkg, changeset, locked, depth=0): #print "[%03d] _updown(%s)" % (depth, pkg) #depth += 1 isinst = changeset.installed getpriority = self._policy.getPriority pkgpriority = getpriority(pkg) # Check if any upgrading version of this package is installed. # If so, we won't try to install any other version. upgpkgs = {} for prv in pkg.provides: for upg in prv.upgradedby: for upgpkg in upg.packages: if isinst(upgpkg): return if getpriority(upgpkg) < pkgpriority: continue if upgpkg not in locked and upgpkg not in upgpkgs: upgpkgs[upgpkg] = True # Also check if any downgrading version with a higher # priority is installed. 
for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if getpriority(prvpkg) <= pkgpriority: continue if isinst(prvpkg): return if prvpkg not in locked and prvpkg not in upgpkgs: upgpkgs[prvpkg] = True # No, let's try to upgrade it. getweight = self._policy.getWeight alternatives = [(getweight(changeset), changeset)] # Check if upgrading is possible. for upgpkg in upgpkgs: try: cs = changeset.copy() lk = locked.copy() _pending = [] self._install(upgpkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed: pass else: alternatives.append((getweight(cs), cs)) # Is any downgrading version of this package installed? try: dwnpkgs = {} for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if getpriority(prvpkg) > pkgpriority: continue if isinst(prvpkg): raise StopIteration if prvpkg not in locked: dwnpkgs[prvpkg] = True # Also check if any upgrading version with a lower # priority is installed. for prv in pkg.provides: for upg in prv.upgradedby: for upgpkg in upg.packages: if getpriority(upgpkg) >= pkgpriority: continue if isinst(upgpkg): raise StopIteration if upgpkg not in locked: dwnpkgs[upgpkg] = True except StopIteration: pass else: # Check if downgrading is possible. for dwnpkg in dwnpkgs: try: cs = changeset.copy() lk = locked.copy() _pending = [] self._install(dwnpkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed: pass else: alternatives.append((getweight(cs), cs)) # If there's only one alternative, it's the one currenlty in use. 
if len(alternatives) > 1: alternatives.sort() changeset.setState(alternatives[0][1]) def _pending(self, changeset, locked, pending, depth=0): #print "[%03d] _pending()" % depth #depth += 1 isinst = changeset.installed getweight = self._policy.getWeight updown = [] while pending: item = pending.pop(0) kind = item[0] if kind == PENDING_UPDOWN: updown.append(item[1]) elif kind == PENDING_INSTALL: kind, pkg, req, prvpkgs = item # Check if any prvpkg was already selected for installation # due to some other change. found = False for i in range(len(prvpkgs)-1,-1,-1): prvpkg = prvpkgs[i] if isinst(prvpkg): found = True break if prvpkg in locked: del prvpkgs[i] if found: continue if not prvpkgs: # No packages provide it at all. Give up. raise Failed, _("Can't install %s: no package " "provides %s") % (pkg, req) if len(prvpkgs) > 1: # More than one package provide it. We use _pending here, # since any option must consider the whole change for # weighting. alternatives = [] failures = [] sortUpgrades(prvpkgs) keeporder = 0.000001 pw = self._policy.getPriorityWeights(pkg, prvpkgs) for prvpkg in prvpkgs: try: _pending = [] cs = changeset.copy() lk = locked.copy() self._install(prvpkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed, e: failures.append(unicode(e)) else: alternatives.append((getweight(cs)+pw[prvpkg]+ keeporder, cs, lk)) keeporder += 0.000001 if not alternatives: raise Failed, _("Can't install %s: all packages " "providing %s failed to install:\n%s")\ % (pkg, req, "\n".join(failures)) alternatives.sort() changeset.setState(alternatives[0][1]) if len(alternatives) == 1: locked.update(alternatives[0][2]) else: # This turned out to be the only way. self._install(prvpkgs[0], changeset, locked, pending, depth) elif kind == PENDING_REMOVE: kind, pkg, prv, reqpkgs, prvpkgs = item # Check if someone installed is still requiring it. 
reqpkgs = [x for x in reqpkgs if isinst(x)] if not reqpkgs: continue # Check if someone installed is providing it. found = False for prvpkg in prvpkgs: if isinst(prvpkg): found = True break if found: # Someone is still providing it. Good. continue prvpkgs = [x for x in prvpkgs if x not in locked] # No one is providing it anymore. We'll have to do # something about it. # Try to install other providing packages. if prvpkgs: alternatives = [] failures = [] pw = self._policy.getPriorityWeights(pkg, prvpkgs) for prvpkg in prvpkgs: try: _pending = [] cs = changeset.copy() lk = locked.copy() self._install(prvpkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed, e: failures.append(unicode(e)) else: alternatives.append((getweight(cs)+pw[prvpkg], cs, lk)) if not prvpkgs or not alternatives: # There's no alternatives. We must remove # every requiring package. for reqpkg in reqpkgs: if reqpkg in locked and isinst(reqpkg): raise Failed, _("Can't remove %s: requiring " "package %s is locked") % \ (pkg, reqpkg) for reqpkg in reqpkgs: # We check again, since other actions may have # changed their state. if not isinst(reqpkg): continue if reqpkg in locked: raise Failed, _("Can't remove %s: requiring " "package %s is locked") % \ (pkg, reqpkg) self._remove(reqpkg, changeset, locked, pending, depth) continue # Then, remove every requiring package, or # upgrade/downgrade them to something which # does not require this dependency. 
cs = changeset.copy() lk = locked.copy() try: for reqpkg in reqpkgs: if reqpkg in locked and isinst(reqpkg): raise Failed, _("%s is locked") % reqpkg for reqpkg in reqpkgs: if not cs.installed(reqpkg): continue if reqpkg in lk: raise Failed, _("%s is locked") % reqpkg _pending = [] self._remove(reqpkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed, e: failures.append(unicode(e)) else: alternatives.append((getweight(cs), cs, lk)) if not alternatives: raise Failed, _("Can't install %s: all packages providing " "%s failed to install:\n%s") \ % (pkg, prv, "\n".join(failures)) alternatives.sort() changeset.setState(alternatives[0][1]) if len(alternatives) == 1: locked.update(alternatives[0][2]) for pkg in updown: self._updown(pkg, changeset, locked, depth) del pending[:] def _upgrade(self, pkgs, changeset, locked, pending, depth=0): #print "[%03d] _upgrade()" % depth #depth += 1 isinst = changeset.installed getweight = self._policy.getWeight sortUpgrades(pkgs, self._policy) pkgs.reverse() lockedstate = {} origchangeset = changeset.copy() weight = getweight(changeset) for pkg in pkgs: if pkg in locked and not isinst(pkg): continue try: cs = changeset.copy() lk = locked.copy() _pending = [] self._install(pkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed, e: pass else: lockedstate[pkg] = lk csweight = getweight(cs) if csweight < weight: weight = csweight changeset.setState(cs) lockedstates = {} for pkg in pkgs: if changeset.get(pkg) is INSTALL: state = lockedstate.get(pkg) if state: lockedstates.update(state) for pkg in changeset.keys(): op = changeset.get(pkg) if (op and op != origchangeset.get(pkg) and pkg not in locked and pkg not in lockedstates): try: cs = changeset.copy() lk = locked.copy() _pending = [] if op is REMOVE: self._install(pkg, cs, lk, _pending, depth) elif op is INSTALL: self._remove(pkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) 
except Failed, e: pass else: csweight = getweight(cs) if csweight < weight: weight = csweight changeset.setState(cs) def _fix(self, pkgs, changeset, locked, pending, depth=0): #print "[%03d] _fix()" % depth #depth += 1 getweight = self._policy.getWeight isinst = changeset.installed for pkg in pkgs: if not isinst(pkg): continue # Is it broken at all? try: for req in pkg.requires: for prv in req.providedby: for prvpkg in prv.packages: if isinst(prvpkg): break else: continue break else: iface.debug(_("Unsatisfied dependency: " "%s requires %s") % (pkg, req)) raise StopIteration for cnf in pkg.conflicts: for prv in cnf.providedby: for prvpkg in prv.packages: if prvpkg is pkg: continue if isinst(prvpkg): iface.debug(_("Unsatisfied dependency: " "%s conflicts with %s") % (pkg, prvpkg)) raise StopIteration for prv in pkg.provides: for cnf in prv.conflictedby: for cnfpkg in cnf.packages: if cnfpkg is pkg: continue if isinst(cnfpkg): iface.debug(_("Unsatisfied dependency: " "%s conflicts with %s") % (cnfpkg, pkg)) raise StopIteration # Check packages with the same name that can't # coexist with this one. namepkgs = self._cache.getPackages(pkg.name) for namepkg in namepkgs: if (isinst(namepkg) and namepkg is not pkg and not pkg.coexists(namepkg)): iface.debug(_("Package %s can't coexist with %s") % (namepkg, pkg)) raise StopIteration except StopIteration: pass else: continue # We have a broken package. Fix it. alternatives = [] failures = [] # Try to fix by installing it. try: cs = changeset.copy() lk = locked.copy() _pending = [] self._install(pkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) except Failed, e: failures.append(unicode(e)) else: # If they weight the same, it's better to keep the package. alternatives.append((getweight(cs)-0.000001, cs)) # Try to fix by removing it. 
try: cs = changeset.copy() lk = locked.copy() _pending = [] self._remove(pkg, cs, lk, _pending, depth) if _pending: self._pending(cs, lk, _pending, depth) self._updown(pkg, cs, lk, depth) except Failed, e: failures.append(unicode(e)) else: alternatives.append((getweight(cs), cs)) if not alternatives: raise Failed, _("Can't fix %s:\n%s") % \ (pkg, "\n".join(failures)) alternatives.sort() changeset.setState(alternatives[0][1]) def enqueue(self, pkg, op): if op is UPGRADE: isinst = self._changeset.installed _upgpkgs = {} try: pkgpriority = pkg.getPriority() for prv in pkg.provides: for upg in prv.upgradedby: for upgpkg in upg.packages: if upgpkg.getPriority() < pkgpriority: continue if isinst(upgpkg): raise StopIteration _upgpkgs[upgpkg] = True for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if prvpkg.getPriority() <= pkgpriority: continue if isinst(prvpkg): raise StopIteration _upgpkgs[prvpkg] = True except StopIteration: pass else: for upgpkg in _upgpkgs: self._queue[upgpkg] = op else: self._queue[pkg] = op def run(self): self._policy.runStarting() try: changeset = self._changeset.copy() isinst = changeset.installed locked = self._policy.getLockedSet().copy() pending = [] for pkg in self._queue: op = self._queue[pkg] if op is KEEP: if pkg in changeset: del changeset[pkg] elif op is INSTALL: if not isinst(pkg) and pkg in locked: raise Failed, _("Can't install %s: it's locked") % pkg changeset.set(pkg, INSTALL) elif op is REMOVE: if isinst(pkg) and pkg in locked: raise Failed, _("Can't remove %s: it's locked") % pkg changeset.set(pkg, REMOVE) elif op is REINSTALL: if pkg in locked: raise Failed, _("Can't reinstall %s: it's locked")%pkg changeset.set(pkg, INSTALL, force=True) upgpkgs = [] fixpkgs = [] for pkg in self._queue: op = self._queue[pkg] if op is KEEP: if pkg.installed: op = INSTALL else: op = REMOVE if op is INSTALL or op is REINSTALL: self._install(pkg, changeset, locked, pending) elif op is REMOVE: self._remove(pkg, 
changeset, locked, pending) elif op is UPGRADE: upgpkgs.append(pkg) elif op is FIX: fixpkgs.append(pkg) if pending: self._pending(changeset, locked, pending) if upgpkgs: self._upgrade(upgpkgs, changeset, locked, pending) if fixpkgs: self._fix(fixpkgs, changeset, locked, pending) self._changeset.setState(changeset) finally: self._queue.clear() self._policy.runFinished() class ChangeSetSplitter(object): # This class operates on *sane* changesets. DEBUG = 0 def __init__(self, changeset, forcerequires=True): self._changeset = changeset self._forcerequires = forcerequires self._locked = {} def getForceRequires(self): return self._userequires def setForceRequires(self, flag): self._forcerequires = flag def getLocked(self, pkg): return pkg in self._locked def setLocked(self, pkg, flag): if flag: self._locked[pkg] = True else: if pkg in self._locked: del self._locked[pkg] def setLockedSet(self, set): self._locked.clear() self._locked.update(set) def resetLocked(self): self._locked.clear() def _remove(self, subset, pkg, locked): set = self._changeset # Include requiring packages being removed, or exclude # requiring packages being installed. for prv in pkg.provides: for req in prv.requiredby: reqpkgs = [reqpkg for reqpkg in req.packages if subset.get(reqpkg) is INSTALL or subset.get(reqpkg) is not REMOVE and reqpkg.installed] if not reqpkgs: continue # Check if some package that will stay # in the system or some package already # selected for installation provide the # needed dependency. found = False for prv in req.providedby: for prvpkg in prv.packages: if (subset.get(prvpkg) is INSTALL or (prvpkg.installed and not subset.get(prvpkg) is REMOVE)): found = True break else: continue break if found: continue # Try to include some providing package # that is selected for installation. 
found = False for prv in req.providedby: for prvpkg in prv.packages: if (set.get(prvpkg) is INSTALL and prvpkg not in locked): try: self.include(subset, prvpkg, locked) except Error: pass else: found = True break else: continue break if found: continue # Now, try to keep in the system some # providing package which is already installed. found = False wasbroken = True for prv in req.providedby: for prvpkg in prv.packages: if set.get(prvpkg) is not REMOVE: continue wasbroken = False # Package is necessarily in subset # otherwise we wouldn't get here. if prvpkg not in locked: try: self.exclude(subset, prvpkg, locked) except Error: pass else: found = True break else: continue break if found: continue needed = (not wasbroken and (self._forcerequires or isinstance(req, PreRequires))) for reqpkg in reqpkgs: # Finally, try to exclude the requiring # package if it is being installed, or # include it if it's being removed. reqpkgop = set.get(reqpkg) if reqpkgop and reqpkg not in locked: try: if reqpkgop is INSTALL: self.exclude(subset, reqpkg, locked) else: self.include(subset, reqpkg, locked) except Error: if needed: raise else: continue # Should we care about this? if needed: raise Error, _("No providers for '%s', " "required by '%s'") % (req, reqpkg) # Check upgrading/downgrading packages. relpkgs = [upgpkg for prv in pkg.provides for upg in prv.upgradedby for upgpkg in upg.packages] relpkgs.extend([prvpkg for upg in pkg.upgrades for prv in upg.providedby for prvpkg in prv.packages]) if set[pkg] is INSTALL: # Package is being installed, but excluded from the # subset. Exclude every related package which is # being removed. for relpkg in relpkgs: if subset.get(relpkg) is REMOVE: if relpkg in locked: raise Error, _("Package '%s' is locked") % relpkg self.exclude(subset, relpkg, locked) else: # Package is being removed, and included in the # subset. Include every related package which is # being installed. 
for relpkg in relpkgs: if set.get(relpkg) is INSTALL and relpkg not in subset: if relpkg in locked: raise Error, _("Package '%s' is locked") % relpkg self.include(subset, relpkg, locked) def _install(self, subset, pkg, locked): set = self._changeset # Check all dependencies needed by this package. for req in pkg.requires: # Check if any already installed or to be installed # package will solve the problem. found = False for prv in req.providedby: for prvpkg in prv.packages: if (subset.get(prvpkg) is INSTALL or (prvpkg.installed and subset.get(prvpkg) is not REMOVE)): found = True break else: continue break if found: continue # Check if any package that could be installed # may solve the problem. found = False for prv in req.providedby: for prvpkg in prv.packages: if (set.get(prvpkg) is INSTALL and prvpkg not in locked): try: self.include(subset, prvpkg, locked) except Error: pass else: found = True break else: continue break if found: continue # Nope. Let's try to keep in the system some # package providing the dependency. found = False wasbroken = True for prv in req.providedby: for prvpkg in prv.packages: if set.get(prvpkg) is not REMOVE: continue wasbroken = False # Package is necessarily in subset # otherwise we wouldn't get here. if prvpkg not in locked: try: self.exclude(subset, prvpkg, locked) except Error: pass else: found = True break else: continue break if found or wasbroken: continue # There are no solutions for the problem. # Should we really care about it? 
if (self._forcerequires or isinstance(req, PreRequires)): raise Error, _("No providers for '%s', " "required by '%s'") % (req, pkg) cnfpkgs = [prvpkg for cnf in pkg.conflicts for prv in cnf.providedby for prvpkg in prv.packages if prvpkg is not pkg] cnfpkgs.extend([cnfpkg for prv in pkg.provides for cnf in prv.conflictedby for cnfpkg in cnf.packages if cnfpkg is not pkg]) for cnfpkg in cnfpkgs: if (subset.get(cnfpkg) is INSTALL or cnfpkg.installed and subset.get(cnfpkg) is not REMOVE): if cnfpkg not in set: raise Error, _("Can't remove %s, which conflicts with %s")\ % (cnfpkg, pkg) if set[cnfpkg] is INSTALL: self.exclude(subset, cnfpkg, locked) else: self.include(subset, cnfpkg, locked) # Check upgrading/downgrading packages. relpkgs = [upgpkg for prv in pkg.provides for upg in prv.upgradedby for upgpkg in upg.packages] relpkgs.extend([prvpkg for upg in pkg.upgrades for prv in upg.providedby for prvpkg in prv.packages]) if set[pkg] is INSTALL: # Package is being installed, and included in the # subset. Include every related package which is # being removed. for relpkg in relpkgs: if set.get(relpkg) is REMOVE and relpkg not in subset: if relpkg in locked: raise Error, _("Package '%s' is locked") % relpkg self.include(subset, relpkg, locked) else: # Package is being removed, but excluded from the # subset. Exclude every related package which is # being installed. 
for relpkg in relpkgs: if subset.get(relpkg) is INSTALL: if relpkg in locked: raise Error, _("Package '%s' is locked") % relpkg self.exclude(subset, relpkg, locked) def include(self, subset, pkg, locked=None): set = self._changeset if locked is None: locked = self._locked if self.DEBUG: print "-"*79 else: locked = locked.copy() if self.DEBUG: strop = set.get(pkg) is INSTALL and "INSTALL" or "REMOVE" print "Including %s of %s" % (strop, pkg) if pkg not in set: raise Error, _("Package '%s' is not in changeset") % pkg if pkg in locked: raise Error, _("Package '%s' is locked") % pkg locked[pkg] = True op = subset[pkg] = set[pkg] try: if op is INSTALL: self._install(subset, pkg, locked) else: self._remove(subset, pkg, locked) except Error, e: if self.DEBUG: print "FAILED: Including %s of %s: %s" % (strop, pkg, e) del subset[pkg] raise def exclude(self, subset, pkg, locked=None): set = self._changeset if locked is None: locked = self._locked if self.DEBUG: print "-"*79 else: locked = locked.copy() if self.DEBUG: strop = set.get(pkg) is INSTALL and "INSTALL" or "REMOVE" print "Excluding %s of %s" % (strop, pkg) if pkg not in set: raise Error, _("Package '%s' is not in changeset") % pkg if pkg in locked: raise Error, _("Package '%s' is locked") % pkg locked[pkg] = True if pkg in subset: del subset[pkg] op = set[pkg] try: if op is INSTALL: self._remove(subset, pkg, locked) elif op is REMOVE: self._install(subset, pkg, locked) except Error, e: if self.DEBUG: print "FAILED: Excluding %s of %s: %s" % (strop, pkg, e) subset[pkg] = op raise def includeAll(self, subset): # Include everything that doesn't change locked packages set = self._changeset.get() for pkg in set.keys(): try: self.include(subset, pkg) except Error: pass def excludeAll(self, subset): # Exclude everything that doesn't change locked packages set = self._changeset.get() for pkg in set.keys(): try: self.exclude(subset, pkg) except Error: pass def sortUpgrades(pkgs, policy=None): upgpkgs = {} for pkg in pkgs: dct 
= {} rupg = recursiveUpgrades(pkg, dct) del dct[pkg] upgpkgs[pkg] = dct pkgs.sort() pkgs.reverse() newpkgs = [] priority = {} if policy: for pkg in pkgs: priority[pkg] = policy.getPriority(pkg) else: for pkg in pkgs: priority[pkg] = pkg.getPriority() for pkg in pkgs: pkgupgs = upgpkgs[pkg] for i in range(len(newpkgs)): newpkg = newpkgs[i] if newpkg in pkgupgs or priority[pkg] > priority[newpkg]: newpkgs.insert(i, pkg) break else: newpkgs.append(pkg) pkgs[:] = newpkgs def recursiveUpgrades(pkg, set): set[pkg] = True for upg in pkg.upgrades: for prv in upg.providedby: for prvpkg in prv.packages: if prvpkg not in set: recursiveUpgrades(prvpkg, set) def sortInternalRequires(pkgs): rellst = [] numrel = {} pkgmap = dict.fromkeys(pkgs, True) for pkg in pkgs: rellst.append((recursiveInternalRequires(pkgmap, pkg, numrel), pkg)) rellst.sort() rellst.reverse() pkgs[:] = [x[1] for x in rellst] def recursiveInternalRequires(pkgmap, pkg, numrel, done=None): if done is None: done = {} done[pkg] = True if pkg in numrel: return numrel[pkg] n = 0 for prv in pkg.provides: for req in prv.requiredby: for relpkg in req.packages: if relpkg in pkgmap and relpkg not in done: n += 1 if relpkg in numrel: n += numrel[relpkg] else: n += recursiveInternalRequires(pkgmap, relpkg, numrel, done) numrel[pkg] = n return n def forwardRequires(pkg, map): for req in pkg.requires: if req not in map: map[req] = True for prv in req.providedby: if prv not in map: map[prv] = True for prvpkg in prv.packages: if prvpkg not in map: map[prvpkg] = True forwardRequires(prvpkg, map) def backwardRequires(pkg, map): for prv in pkg.provides: if prv not in map: map[prv] = True for req in prv.requiredby: if req not in map: map[req] = True for reqpkg in req.packages: if reqpkg not in map: map[reqpkg] = True backwardRequires(reqpkg, map) def forwardPkgRequires(pkg, map=None): if map is None: map = {} forwardRequires(pkg, map) for item in map.keys(): if not isinstance(item, Package): del map[item] return map def 
backwardPkgRequires(pkg, map=None): if map is None: map = {} backwardRequires(pkg, map) for item in map.keys(): if not isinstance(item, Package): del map[item] return map def getAlternates(pkg, cache): """ For a given package, return every package that *might* get removed if the given package was installed. The alternate packages are every package that conflicts with any of the required packages, or require any package conflicting with any of the required packages. """ conflicts = {} # Direct conflicts. for namepkg in cache.getPackages(pkg.name): if namepkg is not pkg and not pkg.coexists(namepkg): conflicts[(pkg, namepkg)] = True for cnf in pkg.conflicts: for prv in cnf.providedby: for prvpkg in prv.packages: if prvpkg is not pkg: conflicts[(pkg, prvpkg)] = True for prv in pkg.provides: for cnf in prv.conflictedby: for cnfpkg in cnf.packages: if cnfpkg is not pkg: conflicts[(pkg, cnfpkg)] = True # Conflicts of requires. queue = [pkg] done = {} while queue: qpkg = queue.pop() done[qpkg] = True for req in qpkg.requires: prvpkgs = {} for prv in req.providedby: for prvpkg in prv.packages: if prvpkg is qpkg or prvpkg is pkg: break prvpkgs[prvpkg] = True else: continue break else: for prvpkg in prvpkgs: if prvpkg in done: continue done[prvpkg] = True queue.append(prvpkg) for namepkg in cache.getPackages(prvpkg.name): if (namepkg not in prvpkgs and namepkg is not pkg and not prvpkg.coexists(namepkg)): conflicts[(prvpkg, namepkg)] = True for cnf in prvpkg.conflicts: for prv in cnf.providedby: for _prvpkg in prv.packages: if (_prvpkg is not pkg and _prvpkg not in prvpkgs): conflicts[(prvpkg, _prvpkg)] = True for prv in prvpkg.provides: for cnf in prv.conflictedby: for cnfpkg in cnf.packages: if (cnfpkg is not pkg and cnfpkg not in prvpkgs): conflicts[(prvpkg, cnfpkg)] = True alternates = {} for reqpkg, cnfpkg in conflicts: print reqpkg, cnfpkg alternates[cnfpkg] = True for prv in cnfpkg.provides: for req in prv.requiredby: # Do not ascend if reqpkg also provides # what 
cnfpkg is offering. for _prv in req.providedby: if reqpkg in _prv.packages: break else: for _reqpkg in req.packages: alternates[_reqpkg] = True alternates.update(backwardPkgRequires(_reqpkg)) return alternates def checkPackages(cache, pkgs, report=False, all=False, uninstalled=False): pkgs.sort() problems = False coexistchecked = {} for pkg in pkgs: if not all: if uninstalled: for loader in pkg.loaders: if not loader.getInstalled(): break else: continue elif not pkg.installed: continue for req in pkg.requires: for prv in req.providedby: for prvpkg in prv.packages: if all: break elif uninstalled: for loader in prvpkg.loaders: if not loader.getInstalled(): break else: continue break elif prvpkg.installed: break else: continue break else: if report: iface.info(_("Unsatisfied dependency: %s requires %s") % (pkg, req)) problems = True if not pkg.installed: continue for cnf in pkg.conflicts: for prv in cnf.providedby: for prvpkg in prv.packages: if prvpkg is pkg: continue if prvpkg.installed: if report: iface.info(_("Unsatisfied dependency: " "%s conflicts with %s") % (pkg, prvpkg)) problems = True namepkgs = cache.getPackages(pkg.name) for namepkg in namepkgs: if (namepkg, pkg) in coexistchecked: continue coexistchecked[(pkg, namepkg)] = True if (namepkg.installed and namepkg is not pkg and not pkg.coexists(namepkg)): if report: iface.info(_("Package %s can't coexist with %s") % (namepkg, pkg)) problems = True return not problems # vim:ts=4:sw=4:et </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475143"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">yewang15215/django</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/m2m_through_regress/models.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">273</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2771</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals from django.contrib.auth.models import User from django.db import models from django.utils.encoding import python_2_unicode_compatible # Forward declared intermediate model @python_2_unicode_compatible class Membership(models.Model): person = models.ForeignKey('Person', models.CASCADE) group = models.ForeignKey('Group', models.CASCADE) price = models.IntegerField(default=100) def __str__(self): return "%s is a member of %s" % (self.person.name, self.group.name) # using custom id column to test ticket #11107 @python_2_unicode_compatible class UserMembership(models.Model): id = models.AutoField(db_column='usermembership_id', primary_key=True) user = models.ForeignKey(User, models.CASCADE) group = models.ForeignKey('Group', models.CASCADE) price = models.IntegerField(default=100) def __str__(self): return "%s is a user and member of %s" % (self.user.username, self.group.name) @python_2_unicode_compatible class Person(models.Model): name = models.CharField(max_length=128) def __str__(self): return self.name @python_2_unicode_compatible class Group(models.Model): name = models.CharField(max_length=128) # Membership object defined as a class members = models.ManyToManyField(Person, through=Membership) user_members = 
models.ManyToManyField(User, through='UserMembership') def __str__(self): return self.name # A set of models that use an non-abstract inherited model as the 'through' model. class A(models.Model): a_text = models.CharField(max_length=20) class ThroughBase(models.Model): a = models.ForeignKey(A, models.CASCADE) b = models.ForeignKey('B', models.CASCADE) class Through(ThroughBase): extra = models.CharField(max_length=20) class B(models.Model): b_text = models.CharField(max_length=20) a_list = models.ManyToManyField(A, through=Through) # Using to_field on the through model @python_2_unicode_compatible class Car(models.Model): make = models.CharField(max_length=20, unique=True, null=True) drivers = models.ManyToManyField('Driver', through='CarDriver') def __str__(self): return "%s" % self.make @python_2_unicode_compatible class Driver(models.Model): name = models.CharField(max_length=20, unique=True, null=True) def __str__(self): return "%s" % self.name class Meta: ordering = ('name',) @python_2_unicode_compatible class CarDriver(models.Model): car = models.ForeignKey('Car', models.CASCADE, to_field='make') driver = models.ForeignKey('Driver', models.CASCADE, to_field='name') def __str__(self): return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475144"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">danstoner/python_experiments</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">pgu/pgu/gui/menus.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">13</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3333</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" """ from .const import * from . import table from . import basic, button class _Menu_Options(table.Table): def __init__(self,menu,**params): table.Table.__init__(self,**params) self.menu = menu def event(self,e): handled = False arect = self.get_abs_rect() if e.type == MOUSEMOTION: abspos = e.pos[0]+arect.x,e.pos[1]+arect.y for w in self.menu.container.widgets: if not w is self.menu: mrect = w.get_abs_rect() if mrect.collidepoint(abspos): self.menu._close(None) w._open(None) handled = True if not handled: table.Table.event(self,e) class _Menu(button.Button): def __init__(self,parent,widget=None,**params): #TODO widget= could conflict with module widget params.setdefault('cls','menu') button.Button.__init__(self,widget,**params) self.parent = parent self._cls = self.cls self.options = _Menu_Options(self, cls=self.cls+".options") self.connect(CLICK,self._open,None) self.pos = 0 def _open(self,value): self.parent.value = self self.pcls = 'down' self.repaint() self.container.open(self.options,self.rect.x,self.rect.bottom) self.options.connect(BLUR,self._close,None) self.options.focus() self.repaint() def _pass(self,value): pass def _close(self,value): self.pcls = '' self.parent.value = None self.repaint() self.options.close() def _valuefunc(self,value): self._close(None) if value['fnc'] != None: value['fnc'](value['value']) def event(self,e): button.Button.event(self,e) if self.parent.value == self: self.pcls = 'down' def add(self,w,fnc=None,value=None): 
w.style.align = -1 b = button.Button(w,cls=self.cls+".option") b.connect(CLICK,self._valuefunc,{'fnc':fnc,'value':value}) self.options.tr() self.options.add(b) return b class Menus(table.Table): """A drop down menu bar. Example: data = [ ('File/Save', fnc_save, None), ('File/New', fnc_new, None), ('Edit/Copy', fnc_copy, None), ('Edit/Cut', fnc_cut, None), ('Help/About', fnc_help, help_about_content), ('Help/Reference', fnc_help, help_reference_content), ] w = Menus(data) """ def __init__(self,data,menu_cls='menu',**params): params.setdefault('cls','menus') table.Table.__init__(self,**params) self.value = None n,m,mt = 0,None,None for path,cmd,value in data: parts = path.split("/") if parts[0] != mt: mt = parts[0] m = _Menu(self,basic.Label(mt,cls=menu_cls+".label"),cls=menu_cls) self.add(m,n,0) n += 1 #print ("add", parts[1], cmd, value) m.add(basic.Label(parts[1],cls=m.cls+".option.label"),cmd,value) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475145"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jeffery9/mixprint_addons</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">account_check_writing/__openerp__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">58</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">1721</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Check Writing', 'version': '1.1', 'author': 'OpenERP SA, NovaPoint Group', 'category': 'Generic Modules/Accounting', 'description': """ Module for the Check Writing and Check Printing. 
================================================ """, 'website': 'http://www.openerp.com', 'depends' : ['account_voucher'], 'data': [ 'wizard/account_check_batch_printing_view.xml', 'account_check_writing_report.xml', 'account_view.xml', 'account_voucher_view.xml', 'account_check_writing_data.xml', ], 'demo': ['account_demo.xml'], 'test': [], 'installable': True, 'active': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475146"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jolyonb/edx-platform</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">openedx/core/djangoapps/credentials/signals.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6466</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" This file contains signal handlers for credentials-related functionality. 
""" from logging import getLogger from course_modes.models import CourseMode from django.contrib.sites.models import Site from lms.djangoapps.certificates.models import CertificateStatuses, GeneratedCertificate from lms.djangoapps.grades.api import CourseGradeFactory from openedx.core.djangoapps.catalog.utils import get_programs from openedx.core.djangoapps.credentials.models import CredentialsApiConfig from openedx.core.djangoapps.site_configuration import helpers from .tasks.v1.tasks import send_grade_to_credentials log = getLogger(__name__) # "interesting" here means "credentials will want to know about it" INTERESTING_MODES = CourseMode.CERTIFICATE_RELEVANT_MODES INTERESTING_STATUSES = [ CertificateStatuses.notpassing, CertificateStatuses.downloadable, ] # These handlers have Credentials business logic that has bled into the LMS. But we want to filter here in order to # not flood our task queue with a bunch of signals. So we put up with it. def is_course_run_in_a_program(course_run_key): """ Returns true if the given course key is in any program at all. """ # We don't have an easy way to go from course_run_key to a specific site that owns it. So just search each site. sites = Site.objects.all() str_key = str(course_run_key) for site in sites: for program in get_programs(site): for course in program['courses']: for course_run in course['course_runs']: if str_key == course_run['key']: return True return False def send_grade_if_interesting(user, course_run_key, mode, status, letter_grade, percent_grade, verbose=False): """ Checks if grade is interesting to Credentials and schedules a Celery task if so. 
""" if verbose: msg = u"Starting send_grade_if_interesting with params: "\ u"user [{username}], "\ u"course_run_key [{key}], "\ u"mode [{mode}], "\ u"status [{status}], "\ u"letter_grade [{letter_grade}], "\ u"percent_grade [{percent_grade}], "\ u"verbose [{verbose}]"\ .format( username=getattr(user, 'username', None), key=str(course_run_key), mode=mode, status=status, letter_grade=letter_grade, percent_grade=percent_grade, verbose=verbose ) log.info(msg) # Avoid scheduling new tasks if certification is disabled. (Grades are a part of the records/cert story) if not CredentialsApiConfig.current().is_learner_issuance_enabled: if verbose: log.info("Skipping send grade: is_learner_issuance_enabled False") return # Avoid scheduling new tasks if learner records are disabled for this site. if not helpers.get_value_for_org(course_run_key.org, 'ENABLE_LEARNER_RECORDS', True): if verbose: log.info( u"Skipping send grade: ENABLE_LEARNER_RECORDS False for org [{org}]".format( org=course_run_key.org ) ) return # Grab mode/status if we don't have them in hand if mode is None or status is None: try: cert = GeneratedCertificate.objects.get(user=user, course_id=course_run_key) # pylint: disable=no-member mode = cert.mode status = cert.status except GeneratedCertificate.DoesNotExist: # We only care about grades for which there is a certificate. if verbose: log.info( u"Skipping send grade: no cert for user [{username}] & course_id [{course_id}]".format( username=getattr(user, 'username', None), course_id=str(course_run_key) ) ) return # Don't worry about whether it's available as well as awarded. Just awarded is good enough to record a verified # attempt at a course. We want even the grades that didn't pass the class because Credentials wants to know about # those too. 
if mode not in INTERESTING_MODES or status not in INTERESTING_STATUSES: if verbose: log.info( u"Skipping send grade: mode/status uninteresting for mode [{mode}] & status [{status}]".format( mode=mode, status=status ) ) return # If the course isn't in any program, don't bother telling Credentials about it. When Credentials grows support # for course records as well as program records, we'll need to open this up. if not is_course_run_in_a_program(course_run_key): if verbose: log.info( u"Skipping send grade: course run not in a program. [{course_id}]".format(course_id=str(course_run_key)) ) return # Grab grades if we don't have them in hand if letter_grade is None or percent_grade is None: grade = CourseGradeFactory().read(user, course_key=course_run_key, create_if_needed=False) if grade is None: if verbose: log.info( u"Skipping send grade: No grade found for user [{username}] & course_id [{course_id}]".format( username=getattr(user, 'username', None), course_id=str(course_run_key) ) ) return letter_grade = grade.letter_grade percent_grade = grade.percent send_grade_to_credentials.delay(user.username, str(course_run_key), True, letter_grade, percent_grade) def handle_grade_change(user, course_grade, course_key, **kwargs): """ Notifies the Credentials IDA about certain grades it needs for its records, when a grade changes. """ send_grade_if_interesting( user, course_key, None, None, course_grade.letter_grade, course_grade.percent, verbose=kwargs.get('verbose', False) ) def handle_cert_change(user, course_key, mode, status, **kwargs): """ Notifies the Credentials IDA about certain grades it needs for its records, when a cert changes. 
""" send_grade_if_interesting(user, course_key, mode, status, None, None, verbose=kwargs.get('verbose', False)) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475147"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mcalhoun/ansible-modules-core</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cloud/openstack/quantum_router_interface.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">99</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">8558</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="3153545f5f5e71505f4258535d541f525e5c">[email protected]</a>> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient HAVE_DEPS = True except ImportError: HAVE_DEPS = False DOCUMENTATION = ''' --- module: quantum_router_interface version_added: "1.2" author: "Benno Joy (@bennojoy)" short_description: Attach/Dettach a subnet's interface to a router description: - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: 'yes' login_tenant_name: description: - The tenant name of the login user required: true default: 'yes' auth_url: description: - The keystone URL for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present router_name: description: - Name of the router to which the subnet's interface should be attached. required: true default: None subnet_name: description: - Name of the subnet to whose interface should be attached to the router. required: true default: None tenant_name: description: - Name of the tenant whose subnet has to be attached. 
required: false default: None requirements: - "python >= 2.6" - "python-neutronclient or python-quantumclient" - "python-keystoneclient" ''' EXAMPLES = ''' # Attach tenant1's subnet to the external router - quantum_router_interface: state=present login_username=admin login_password=admin login_tenant_name=admin tenant_name=tenant1 router_name=external_route subnet_name=t1subnet ''' _os_keystone = None _os_tenant_id = None def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: login_tenant_name = module.params['login_tenant_name'] else: login_tenant_name = module.params['tenant_name'] for tenant in _os_keystone.tenants.list(): if tenant.name == login_tenant_name: _os_tenant_id = tenant.id break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], } try: routers = neutron.list_routers(**kwargs) except Exception, e: 
module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None return routers['routers'][0]['id'] def _get_subnet_id(module, neutron): subnet_id = None kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['subnet_name'], } try: subnets = neutron.list_subnets(**kwargs) except Exception, e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return subnets['subnets'][0]['id'] def _get_port_id(neutron, module, router_id, subnet_id): kwargs = { 'tenant_id': _os_tenant_id, 'device_id': router_id, } try: ports = neutron.list_ports(**kwargs) except Exception, e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None for port in ports['ports']: for subnet in port['fixed_ips']: if subnet['subnet_id'] == subnet_id: return port['id'] return None def _add_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: neutron.add_interface_router(router_id, kwargs) except Exception, e: module.fail_json(msg = "Error in adding interface to router: %s" % e.message) return True def _remove_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: neutron.remove_interface_router(router_id, kwargs) except Exception, e: module.fail_json(msg="Error in removing interface from router: %s" % e.message) return True def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( router_name = dict(required=True), subnet_name = dict(required=True), tenant_name = dict(default=None), state = dict(default='present', choices=['absent', 'present']), )) module = AnsibleModule(argument_spec=argument_spec) if not HAVE_DEPS: module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required') neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) router_id = 
_get_router_id(module, neutron) if not router_id: module.fail_json(msg="failed to get the router id, please check the router name") subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.fail_json(msg="failed to get the subnet id, please check the subnet name") if module.params['state'] == 'present': port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: _add_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="created", id=port_id) module.exit_json(changed=False, result="success", id=port_id) if module.params['state'] == 'absent': port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: module.exit_json(changed = False, result = "Success") _remove_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="Deleted") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475148"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">kangkot/arangodb</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/editor/ModuleBrowser.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">17</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">7082</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># ModuleBrowser.py - A view that provides a module browser for an editor document. import pywin.mfc.docview import win32ui import win32con import commctrl import win32api from pywin.tools import hierlist, browser import pywin.framework.scriptutils import afxres import pyclbr class HierListCLBRModule(hierlist.HierListItem): def __init__(self, modName, clbrdata): self.modName = modName self.clbrdata = clbrdata def GetText(self): return self.modName def GetSubList(self): ret = [] for item in self.clbrdata.values(): if item.__class__ != pyclbr.Class: # ie, it is a pyclbr Function instance (only introduced post 1.5.2) ret.append(HierListCLBRFunction( item ) ) else: ret.append(HierListCLBRClass( item) ) ret.sort() return ret def IsExpandable(self): return 1 class HierListCLBRItem(hierlist.HierListItem): def __init__(self, name, file, lineno, suffix = ""): self.name = str(name) self.file = file self.lineno = lineno self.suffix = suffix def __cmp__(self, other): return cmp(self.name, other.name) def GetText(self): return self.name + self.suffix def TakeDefaultAction(self): if self.file: pywin.framework.scriptutils.JumpToDocument(self.file, self.lineno, bScrollToTop = 1) else: win32ui.SetStatusText("Can not locate the source code for this object.") def PerformItemSelected(self): if self.file is None: msg = "%s - source can not be located." 
% (self.name, ) else: msg = "%s defined at line %d of %s" % (self.name, self.lineno, self.file) win32ui.SetStatusText(msg) class HierListCLBRClass(HierListCLBRItem): def __init__(self, clbrclass, suffix = ""): try: name = clbrclass.name file = clbrclass.file lineno = clbrclass.lineno self.super = clbrclass.super self.methods = clbrclass.methods except AttributeError: name = clbrclass file = lineno = None self.super = []; self.methods = {} HierListCLBRItem.__init__(self, name, file, lineno, suffix) def __cmp__(self,other): ret = cmp(self.name,other.name) if ret==0 and (self is not other) and self.file==other.file: self.methods = other.methods self.super = other.super self.lineno = other.lineno return ret def GetSubList(self): r1 = [] for c in self.super: r1.append(HierListCLBRClass(c, " (Parent class)")) r1.sort() r2=[] for meth, lineno in self.methods.items(): r2.append(HierListCLBRMethod(meth, self.file, lineno)) r2.sort() return r1+r2 def IsExpandable(self): return len(self.methods) + len(self.super) def GetBitmapColumn(self): return 21 class HierListCLBRFunction(HierListCLBRItem): def __init__(self, clbrfunc, suffix = ""): name = clbrfunc.name file = clbrfunc.file lineno = clbrfunc.lineno HierListCLBRItem.__init__(self, name, file, lineno, suffix) def GetBitmapColumn(self): return 22 class HierListCLBRMethod(HierListCLBRItem): def GetBitmapColumn(self): return 22 class HierListCLBRErrorItem(hierlist.HierListItem): def __init__(self, text): self.text = text def GetText(self): return self.text def GetSubList(self): return [HierListCLBRErrorItem(self.text)] def IsExpandable(self): return 0 class HierListCLBRErrorRoot(HierListCLBRErrorItem): def IsExpandable(self): return 1 class BrowserView(pywin.mfc.docview.TreeView): def OnInitialUpdate(self): self.list = None rc = self._obj_.OnInitialUpdate() self.HookMessage(self.OnSize, win32con.WM_SIZE) self.bDirty = 0 self.destroying = 0 return rc def DestroyBrowser(self): self.DestroyList() def OnActivateView(self, 
activate, av, dv): # print "AV", self.bDirty, activate if activate: self.CheckRefreshList() return self._obj_.OnActivateView(activate, av, dv) def _MakeRoot(self): path = self.GetDocument().GetPathName() if not path: return HierListCLBRErrorRoot("Error: Can not browse a file until it is saved") else: mod, path = pywin.framework.scriptutils.GetPackageModuleName(path) if self.bDirty: what = "Refreshing" # Hack for pyclbr being too smart try: del pyclbr._modules[mod] except (KeyError, AttributeError): pass else: what = "Building" win32ui.SetStatusText("%s class list - please wait..." % (what,), 1) win32ui.DoWaitCursor(1) try: reader = pyclbr.readmodule_ex # new version post 1.5.2 except AttributeError: reader = pyclbr.readmodule try: data = reader(mod, [path]) if data: return HierListCLBRModule(mod, data) else: return HierListCLBRErrorRoot("No Python classes in module.") finally: win32ui.DoWaitCursor(0) win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE)) def DestroyList(self): self.destroying = 1 list = getattr(self, "list", None) # If the document was not successfully opened, we may not have a list. 
self.list = None if list is not None: list.HierTerm() self.destroying = 0 def CheckMadeList(self): if self.list is not None or self.destroying: return self.rootitem = root = self._MakeRoot() self.list = list = hierlist.HierListWithItems( root, win32ui.IDB_BROWSER_HIER) list.HierInit(self.GetParentFrame(), self) list.SetStyle(commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS) def CheckRefreshList(self): if self.bDirty: if self.list is None: self.CheckMadeList() else: new_root = self._MakeRoot() if self.rootitem.__class__==new_root.__class__==HierListCLBRModule: self.rootitem.modName = new_root.modName self.rootitem.clbrdata = new_root.clbrdata self.list.Refresh() else: self.list.AcceptRoot(self._MakeRoot()) self.bDirty = 0 def OnSize(self, params): lparam = params[3] w = win32api.LOWORD(lparam) h = win32api.HIWORD(lparam) if w != 0: self.CheckMadeList() elif w == 0: self.DestroyList() return 1 def _UpdateUIForState(self): self.bDirty = 1 </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475149"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">utamaro/youtube-dl</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">youtube_dl/extractor/tv2.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">113</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" 
dir="auto"> <div> <span class="block ">4640</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># encoding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, float_or_none, parse_iso8601, remove_end, ) class TV2IE(InfoExtractor): _VALID_URL = 'http://(?:www\.)?tv2\.no/v/(?P<id>\d+)' _TEST = { 'url': 'http://www.tv2.no/v/916509/', 'md5': '9cb9e3410b18b515d71892f27856e9b1', 'info_dict': { 'id': '916509', 'ext': 'flv', 'title': 'Se Gryttens hyllest av Steven Gerrard', 'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.', 'timestamp': 1431715610, 'upload_date': '20150515', 'duration': 156.967, 'view_count': int, 'categories': list, } } def _real_extract(self, url): video_id = self._match_id(url) formats = [] format_urls = [] for protocol in ('HDS', 'HLS'): data = self._download_json( 'http://sumo.tv2.no/api/web/asset/%s/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % (video_id, protocol), video_id, 'Downloading play JSON')['playback'] for item in data['items']['item']: video_url = item.get('url') if not video_url or video_url in format_urls: continue format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat')) if not self._is_valid_url(video_url, video_id, format_id): continue format_urls.append(video_url) ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id=format_id)) elif ext == 'ism' or video_url.endswith('.ism/Manifest'): pass else: formats.append({ 'url': video_url, 'format_id': format_id, 'tbr': int_or_none(item.get('bitrate')), 'filesize': int_or_none(item.get('fileSize')), }) self._sort_formats(formats) asset = self._download_json( 
'http://sumo.tv2.no/api/web/asset/%s.json' % video_id, video_id, 'Downloading metadata JSON')['asset'] title = asset['title'] description = asset.get('description') timestamp = parse_iso8601(asset.get('createTime')) duration = float_or_none(asset.get('accurateDuration') or asset.get('duration')) view_count = int_or_none(asset.get('views')) categories = asset.get('keywords', '').split(',') thumbnails = [{ 'id': thumbnail.get('@type'), 'url': thumbnail.get('url'), } for _, thumbnail in asset.get('imageVersions', {}).items()] return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnails': thumbnails, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'categories': categories, 'formats': formats, } class TV2ArticleIE(InfoExtractor): _VALID_URL = 'http://(?:www\.)?tv2\.no/(?:a|\d{4}/\d{2}/\d{2}(/[^/]+)+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tv2.no/2015/05/16/nyheter/alesund/krim/pingvin/6930542', 'info_dict': { 'id': '6930542', 'title': 'Russen hetses etter pingvintyveri – innrømmer å ha åpnet luken på buret', 'description': 'md5:339573779d3eea3542ffe12006190954', }, 'playlist_count': 2, }, { 'url': 'http://www.tv2.no/a/6930542', 'only_matching': True, }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) entries = [ self.url_result('http://www.tv2.no/v/%s' % video_id, 'TV2') for video_id in re.findall(r'data-assetid="(\d+)"', webpage)] title = remove_end(self._og_search_title(webpage), ' - TV2.no') description = remove_end(self._og_search_description(webpage), ' - TV2.no') return self.playlist_result(entries, playlist_id, title, description) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">unlicense</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 
hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475150"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">simsong/grr-insider</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/objectfilter.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">26902</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Copyright 2012 Google Inc. All Rights Reserved. """Classes to perform filtering of objects based on their data members. Given a list of objects and a textual filter expression, these classes allow you to determine which objects match the filter. The system has two main pieces: A parser for the supported grammar and a filter implementation. Given any complying user-supplied grammar, it is parsed with a custom lexer based on GRR's lexer and then compiled into an actual implementation by using the filter implementation. A filter implementation simply provides actual implementations for the primitives required to perform filtering. The compiled result is always a class supporting the Filter interface. 
If we define a class called Car such as: class Car(object): def __init__(self, code, color="white", doors=3): self.code = code self.color = color self.doors = 3 And we have two instances: ford_ka = Car("FORDKA1", color="grey") toyota_corolla = Car("COROLLA1", color="white", doors=5) fleet = [ford_ka, toyota_corolla] We want to find cars that are grey and have 3 or more doors. We could filter our fleet like this: criteria = "(color is grey) and (doors >= 3)" parser = ContextFilterParser(criteria).Parse() compiled_filter = parser.Compile(LowercaseAttributeFilterImp) for car in fleet: if compiled_filter.Matches(car): print "Car %s matches the supplied filter." % car.code The filter expression contains two subexpressions joined by an AND operator: "color is grey" and "doors >= 3" This means we want to search for objects matching these two subexpressions. Let's analyze the first one in depth "color is grey": "color": the left operand specifies a search path to look for the data. This tells our filtering system to look for the color property on passed objects. "is": the operator. Values retrieved for the "color" property will be checked against the right operand to see if they are equal. "grey": the right operand. It specifies an explicit value to check for. So each time an object is passed through the filter, it will expand the value of the color data member, and compare its value against "grey". Because data members of objects are often not simple datatypes but other objects, the system allows you to reference data members within other data members by separating each by a dot. 
Let's see an example: Let's add a more complex Car class with default tyre data: class CarWithTyres(Car): def __init__(self, code, tyres=None, color="white", doors=3): super(self, CarWithTyres).__init__(code, color, doors) tyres = tyres or Tyre("Pirelli", "PZERO") class Tyre(object): def __init__(self, brand, code): self.brand = brand self.code = code And two new instances: ford_ka = CarWithTyres("FORDKA", color="grey", tyres=Tyre("AVON", "ZT5")) toyota_corolla = Car("COROLLA1", color="white", doors=5) fleet = [ford_ka, toyota_corolla] To filter a car based on the tyre brand, we would use a search path of "tyres.brand". Because the filter implementation provides the actual classes that perform handling of the search paths, operators, etc. customizing the behaviour of the filter is easy. Three basic filter implementations are given: BaseFilterImplementation: search path expansion is done on attribute names as provided (case-sensitive). LowercaseAttributeFilterImp: search path expansion is done on the lowercased attribute name, so that it only accesses attributes, not methods. DictFilterImplementation: search path expansion is done on dictionary access to the given object. So "a.b" expands the object obj to obj["a"]["b"] """ import abc import binascii import logging import re from grr.lib import lexer from grr.lib import utils class Error(Exception): """Base module exception.""" class MalformedQueryError(Error): """The provided filter query is malformed.""" class ParseError(Error, lexer.ParseError): """The parser for textual queries returned invalid results.""" class InvalidNumberOfOperands(Error): """The number of operands provided to this operator is wrong.""" class Filter(object): """Base class for every filter.""" def __init__(self, arguments=None, value_expander=None): """Constructor. Args: arguments: Arguments to the filter. value_expander: A callable that will be used to expand values for the objects passed to this filter. 
Implementations expanders are provided by subclassing ValueExpander. Raises: Error: If the given value_expander is not a subclass of ValueExpander """ self.value_expander = None self.value_expander_cls = value_expander if self.value_expander_cls: if not issubclass(self.value_expander_cls, ValueExpander): raise Error("%s is not a valid value expander" % ( self.value_expander_cls)) self.value_expander = self.value_expander_cls() self.args = arguments or [] logging.debug("Adding %s", arguments) @abc.abstractmethod def Matches(self, obj): """Whether object obj matches this filter.""" def Filter(self, objects): """Returns a list of objects that pass the filter.""" return filter(self.Matches, objects) def __str__(self): return "%s(%s)" % (self.__class__.__name__, ", ".join([str(arg) for arg in self.args])) class AndFilter(Filter): """Performs a boolean AND of the given Filter instances as arguments. Note that if no conditions are passed, all objects will pass. """ def Matches(self, obj): for child_filter in self.args: if not child_filter.Matches(obj): return False return True class OrFilter(Filter): """Performs a boolean OR of the given Filter instances as arguments. Note that if no conditions are passed, all objects will pass. """ def Matches(self, obj): if not self.args: return True for child_filter in self.args: if child_filter.Matches(obj): return True return False class Operator(Filter): """Base class for all operators.""" class IdentityFilter(Operator): def Matches(self, _): return True class UnaryOperator(Operator): """Base class for unary operators.""" def __init__(self, operand, **kwargs): """Constructor.""" super(UnaryOperator, self).__init__(arguments=[operand], **kwargs) if len(self.args) != 1: raise InvalidNumberOfOperands("Only one operand is accepted by %s. " "Received %d." % (self.__class__.__name__, len(self.args))) class BinaryOperator(Operator): """Base class for binary operators. 
The left operand is always a path into the object which will be expanded for values. The right operand is a value defined at initialization and is stored at self.right_operand. """ def __init__(self, arguments=None, **kwargs): super(BinaryOperator, self).__init__(arguments=arguments, **kwargs) if len(self.args) != 2: raise InvalidNumberOfOperands("Only two operands are accepted by %s. " "Received %d." % (self.__class__.__name__, len(self.args))) self.left_operand = self.args[0] self.right_operand = self.args[1] class GenericBinaryOperator(BinaryOperator): """Allows easy implementations of operators.""" def Operation(self, x, y): """Performs the operation between two values.""" def Operate(self, values): """Takes a list of values and if at least one matches, returns True.""" for val in values: try: logging.debug("Operating %s with x=%s and y=%s", self.__class__.__name__, val, self.right_operand) if self.Operation(val, self.right_operand): return True else: continue except (ValueError, TypeError): continue return False def Matches(self, obj): key = self.left_operand values = self.value_expander.Expand(obj, key) if values and self.Operate(values): return True return False class Equals(GenericBinaryOperator): """Matches objects when the right operand equals the expanded value.""" def Operation(self, x, y): return x == y class NotEquals(GenericBinaryOperator): """Matches when the right operand isn't equal to the expanded value.""" def Operate(self, values): return not Equals(arguments=self.args, value_expander=self.value_expander_cls).Operate(values) class Less(GenericBinaryOperator): """Whether the expanded value >= right_operand.""" def Operation(self, x, y): return x < y class LessEqual(GenericBinaryOperator): """Whether the expanded value <= right_operand.""" def Operation(self, x, y): return x <= y class Greater(GenericBinaryOperator): """Whether the expanded value > right_operand.""" def Operation(self, x, y): return x > y class 
GreaterEqual(GenericBinaryOperator): """Whether the expanded value >= right_operand.""" def Operation(self, x, y): return x >= y class Contains(GenericBinaryOperator): """Whether the right operand is contained in the value.""" def Operation(self, x, y): return y in x class NotContains(GenericBinaryOperator): """Whether the right operand is not contained in the values.""" def Operate(self, values): return not Contains(arguments=self.args, value_expander=self.value_expander_cls).Operate(values) # TODO(user): Change to an N-ary Operator? class InSet(GenericBinaryOperator): """Whether all values are contained within the right operand.""" def Operation(self, x, y): """Whether x is fully contained in y.""" if x in y: return True # x might be an iterable # first we need to skip strings or we'll do silly things if (isinstance(x, basestring) or isinstance(x, bytes)): return False try: for value in x: if value not in y: return False return True except TypeError: return False class NotInSet(GenericBinaryOperator): """Whether at least a value is not present in the right operand.""" def Operate(self, values): return not InSet(arguments=self.args, value_expander=self.value_expander_cls).Operate(values) class Regexp(GenericBinaryOperator): """Whether the value matches the regexp in the right operand.""" def __init__(self, *children, **kwargs): super(Regexp, self).__init__(*children, **kwargs) logging.debug("Compiled: %s", self.right_operand) try: self.compiled_re = re.compile(utils.SmartUnicode(self.right_operand)) except re.error: raise ValueError("Regular expression \"%s\" is malformed." % self.right_operand) def Operation(self, x, y): try: if self.compiled_re.search(utils.SmartUnicode(x)): return True except TypeError: return False class Context(Operator): """Restricts the child operators to a specific context within the object. Solves the context problem. The context problem is the following: Suppose you store a list of loaded DLLs within a process. 
Suppose that for each of these DLLs you store the number of imported functions and each of the imported functions name. Imagine that a malicious DLL is injected into processes and its indicators are that it only imports one function and that it is RegQueryValueEx. You'd write your indicator like this: AndOperator( Equal("ImportedDLLs.ImpFunctions.Name", "RegQueryValueEx"), Equal("ImportedDLLs.NumImpFunctions", "1") ) Now imagine you have these two processes on a given system. Process1 +[0]__ImportedDlls +[0]__Name: "notevil.dll" |[0]__ImpFunctions | +[1]__Name: "CreateFileA" |[0]__NumImpFunctions: 1 | +[1]__Name: "alsonotevil.dll" |[1]__ImpFunctions | +[0]__Name: "RegQueryValueEx" | +[1]__Name: "CreateFileA" |[1]__NumImpFunctions: 2 Process2 +[0]__ImportedDlls +[0]__Name: "evil.dll" |[0]__ImpFunctions | +[0]__Name: "RegQueryValueEx" |[0]__NumImpFunctions: 1 Both Process1 and Process2 match your query, as each of the indicators are evaluated separatedly. While you wanted to express "find me processes that have a DLL that has both one imported function and ReqQueryValueEx is in the list of imported functions", your indicator actually means "find processes that have at least a DLL with 1 imported functions and at least one DLL that imports the ReqQueryValueEx function". To write such an indicator you need to specify a context of ImportedDLLs for these two clauses. Such that you convert your indicator to: Context("ImportedDLLs", AndOperator( Equal("ImpFunctions.Name", "RegQueryValueEx"), Equal("NumImpFunctions", "1") )) Context will execute the filter specified as the second parameter for each of the objects under "ImportedDLLs", thus applying the condition per DLL, not per object and returning the right result. 
""" def __init__(self, arguments=None, **kwargs): if len(arguments) != 2: raise InvalidNumberOfOperands("Context accepts only 2 operands.") super(Context, self).__init__(arguments=arguments, **kwargs) self.context, self.condition = self.args def Matches(self, obj): for object_list in self.value_expander.Expand(obj, self.context): for sub_object in object_list: if self.condition.Matches(sub_object): return True return False OP2FN = {"equals": Equals, "is": Equals, "==": Equals, "notequals": NotEquals, "isnot": NotEquals, "!=": NotEquals, "contains": Contains, "notcontains": NotContains, ">": Greater, ">=": GreaterEqual, "<": Less, "<=": LessEqual, "inset": InSet, "notinset": NotInSet, "regexp": Regexp, } class ValueExpander(object): """Encapsulates the logic to expand values available in an object. Once instantiated and called, this class returns all the values that follow a given field path. """ FIELD_SEPARATOR = "." def _GetAttributeName(self, path): """Returns the attribute name to fetch given a path.""" return path[0] def _GetValue(self, obj, attr_name): """Returns the value of tha attribute attr_name.""" raise NotImplementedError() def _AtLeaf(self, attr_value): """Called when at a leaf value. Should yield a value.""" yield attr_value def _AtNonLeaf(self, attr_value, path): """Called when at a non-leaf value. Should recurse and yield values.""" try: # Check first for iterables # If it's a dictionary, we yield it if isinstance(attr_value, dict): yield attr_value else: # If it's an iterable, we recurse on each value. for sub_obj in attr_value: for value in self.Expand(sub_obj, path[1:]): yield value except TypeError: # This is then not iterable, we recurse with the value for value in self.Expand(attr_value, path[1:]): yield value def Expand(self, obj, path): """Returns a list of all the values for the given path in the object obj. Given a path such as ["sub1", "sub2"] it returns all the values available in obj.sub1.sub2 as a list. 
sub1 and sub2 must be data attributes or properties. If sub1 returns a list of objects, or a generator, Expand aggregates the values for the remaining path for each of the objects, thus returning a list of all the values under the given path for the input object. Args: obj: An object that will be traversed for the given path path: A list of strings Yields: The values once the object is traversed. """ if isinstance(path, basestring): path = path.split(self.FIELD_SEPARATOR) attr_name = self._GetAttributeName(path) attr_value = self._GetValue(obj, attr_name) if attr_value is None: return if len(path) == 1: for value in self._AtLeaf(attr_value): yield value else: for value in self._AtNonLeaf(attr_value, path): yield value class AttributeValueExpander(ValueExpander): """An expander that gives values based on object attribute names.""" def _GetValue(self, obj, attr_name): return getattr(obj, attr_name, None) class LowercaseAttributeValueExpander(AttributeValueExpander): """An expander that lowercases all attribute names before access.""" def _GetAttributeName(self, path): return path[0].lower() class DictValueExpander(ValueExpander): """An expander that gets values from dictionary access to the object.""" def _GetValue(self, obj, attr_name): return obj.get(attr_name, None) ### PARSER DEFINITION class BasicExpression(lexer.Expression): def Compile(self, filter_implementation): arguments = [self.attribute] op_str = self.operator.lower() operator = filter_implementation.OPS.get(op_str, None) if not operator: raise ParseError("Unknown operator %s provided." 
% self.operator) arguments.extend(self.args) expander = filter_implementation.FILTERS["ValueExpander"] return operator(arguments=arguments, value_expander=expander) class ContextExpression(lexer.Expression): """Represents the context operator.""" def __init__(self, attribute="", part=None): self.attribute = attribute self.args = [] if part: self.args.append(part) super(ContextExpression, self).__init__() def __str__(self): return "Context(%s %s)" % ( self.attribute, [str(x) for x in self.args]) def SetExpression(self, expression): if isinstance(expression, lexer.Expression): self.args = [expression] else: raise ParseError("Expected expression, got %s" % expression) def Compile(self, filter_implementation): arguments = [self.attribute] for arg in self.args: arguments.append(arg.Compile(filter_implementation)) expander = filter_implementation.FILTERS["ValueExpander"] context_cls = filter_implementation.FILTERS["Context"] return context_cls(arguments=arguments, value_expander=expander) class BinaryExpression(lexer.BinaryExpression): def Compile(self, filter_implemention): """Compile the binary expression into a filter object.""" operator = self.operator.lower() if operator == "and" or operator == "&&": method = "AndFilter" elif operator == "or" or operator == "||": method = "OrFilter" else: raise ParseError("Invalid binary operator %s" % operator) args = [x.Compile(filter_implemention) for x in self.args] return filter_implemention.FILTERS[method](arguments=args) class IdentityExpression(lexer.Expression): def Compile(self, filter_implementation): return filter_implementation.FILTERS["IdentityFilter"]() class Parser(lexer.SearchParser): """Parses and generates an AST for a query written in the described language. 
Examples of valid syntax: size is 40 (name contains "Program Files" AND hash.md5 is "123abc") @imported_modules (num_symbols = 14 AND symbol.name is "FindWindow") """ expression_cls = BasicExpression binary_expression_cls = BinaryExpression context_cls = ContextExpression identity_expression_cls = IdentityExpression tokens = [ # Operators and related tokens lexer.Token("INITIAL", r"\@[\w._0-9]+", "ContextOperator,PushState", "CONTEXTOPEN"), lexer.Token("INITIAL", r"[^\s\(\)]", "PushState,PushBack", "ATTRIBUTE"), lexer.Token("INITIAL", r"\(", "PushState,BracketOpen", None), lexer.Token("INITIAL", r"\)", "BracketClose", "BINARY"), # Context lexer.Token("CONTEXTOPEN", r"\(", "BracketOpen", "INITIAL"), # Double quoted string lexer.Token("STRING", "\"", "PopState,StringFinish", None), lexer.Token("STRING", r"\\x(..)", "HexEscape", None), lexer.Token("STRING", r"\\(.)", "StringEscape", None), lexer.Token("STRING", r"[^\\\"]+", "StringInsert", None), # Single quoted string lexer.Token("SQ_STRING", "'", "PopState,StringFinish", None), lexer.Token("SQ_STRING", r"\\x(..)", "HexEscape", None), lexer.Token("SQ_STRING", r"\\(.)", "StringEscape", None), lexer.Token("SQ_STRING", r"[^\\']+", "StringInsert", None), # Basic expression lexer.Token("ATTRIBUTE", r"[\w._0-9]+", "StoreAttribute", "OPERATOR"), lexer.Token("OPERATOR", r"(\w+|[<>!=]=?)", "StoreOperator", "ARG"), lexer.Token("ARG", r"(\d+\.\d+)", "InsertFloatArg", "ARG"), lexer.Token("ARG", r"(0x\d+)", "InsertInt16Arg", "ARG"), lexer.Token("ARG", r"(\d+)", "InsertIntArg", "ARG"), lexer.Token("ARG", "\"", "PushState,StringStart", "STRING"), lexer.Token("ARG", "'", "PushState,StringStart", "SQ_STRING"), # When the last parameter from arg_list has been pushed # State where binary operators are supported (AND, OR) lexer.Token("BINARY", r"(?i)(and|or|\&\&|\|\|)", "BinaryOperator", "INITIAL"), # - We can also skip spaces lexer.Token("BINARY", r"\s+", None, None), # - But if it's not "and" or just spaces we have to go back 
lexer.Token("BINARY", ".", "PushBack,PopState", None), # Skip whitespace. lexer.Token(".", r"\s+", None, None), ] def InsertArg(self, string="", **_): """Insert an arg to the current expression.""" logging.debug("Storing Argument %s", string) # This expression is complete if self.current_expression.AddArg(string): self.stack.append(self.current_expression) self.current_expression = self.expression_cls() # We go to the BINARY state, to find if there's an AND or OR operator return "BINARY" def InsertFloatArg(self, string="", **_): """Inserts a Float argument.""" try: float_value = float(string) return self.InsertArg(float_value) except (TypeError, ValueError): raise ParseError("%s is not a valid float." % string) def InsertIntArg(self, string="", **_): """Inserts an Integer argument.""" try: int_value = int(string) return self.InsertArg(int_value) except (TypeError, ValueError): raise ParseError("%s is not a valid integer." % string) def InsertInt16Arg(self, string="", **_): """Inserts an Integer in base16 argument.""" try: int_value = int(string, 16) return self.InsertArg(int_value) except (TypeError, ValueError): raise ParseError("%s is not a valid base16 integer." % string) def StringFinish(self, **_): if self.state == "ATTRIBUTE": return self.StoreAttribute(string=self.string) elif self.state == "ARG": return self.InsertArg(string=self.string) def StringEscape(self, string, match, **_): """Escape backslashes found inside a string quote. Backslashes followed by anything other than [\'"rnbt] will raise an Error. Args: string: The string that matched. match: The match object (m.group(1) is the escaped code) Raises: ParseError: When the escaped string is not one of [\'"rnbt] """ if match.group(1) in "\\'\"rnbt": self.string += string.decode("string_escape") else: raise ParseError("Invalid escape character %s." 
% string) def HexEscape(self, string, match, **_): """Converts a hex escaped string.""" logging.debug("HexEscape matched %s", string) hex_string = match.group(1) try: self.string += binascii.unhexlify(hex_string) except TypeError: raise ParseError("Invalid hex escape %s" % string) def ContextOperator(self, string="", **_): self.stack.append(self.context_cls(string[1:])) def Reduce(self): """Reduce the token stack into an AST.""" # Check for sanity if self.state != "INITIAL" and self.state != "BINARY": self.Error("Premature end of expression") length = len(self.stack) while length > 1: # Precendence order self._CombineParenthesis() self._CombineBinaryExpressions("and") self._CombineBinaryExpressions("or") self._CombineContext() # No change if len(self.stack) == length: break length = len(self.stack) if length != 1: self.Error("Illegal query expression") return self.stack[0] def Error(self, message=None, _=None): raise ParseError("%s in position %s: %s <----> %s )" % ( message, len(self.processed_buffer), self.processed_buffer, self.buffer)) def _CombineBinaryExpressions(self, operator): for i in range(1, len(self.stack)-1): item = self.stack[i] if (isinstance(item, lexer.BinaryExpression) and item.operator.lower() == operator.lower() and isinstance(self.stack[i-1], lexer.Expression) and isinstance(self.stack[i+1], lexer.Expression)): lhs = self.stack[i-1] rhs = self.stack[i+1] self.stack[i].AddOperands(lhs, rhs) self.stack[i-1] = None self.stack[i+1] = None self.stack = filter(None, self.stack) def _CombineContext(self): # Context can merge from item 0 for i in range(len(self.stack)-1, 0, -1): item = self.stack[i-1] if (isinstance(item, ContextExpression) and isinstance(self.stack[i], lexer.Expression)): expression = self.stack[i] self.stack[i-1].SetExpression(expression) self.stack[i] = None self.stack = filter(None, self.stack) ### FILTER IMPLEMENTATIONS class BaseFilterImplementation(object): """Defines the base implementation of an object filter by its 
attributes. Inherit from this class, switch any of the needed operators and pass it to the Compile method of a parsed string to obtain an executable filter. """ OPS = OP2FN FILTERS = {"ValueExpander": AttributeValueExpander, "AndFilter": AndFilter, "OrFilter": OrFilter, "IdentityFilter": IdentityFilter, "Context": Context} class LowercaseAttributeFilterImplementation(BaseFilterImplementation): """Does field name access on the lowercase version of names. Useful to only access attributes and properties with Google's python naming style. """ FILTERS = {} FILTERS.update(BaseFilterImplementation.FILTERS) FILTERS.update({"ValueExpander": LowercaseAttributeValueExpander}) class DictFilterImplementation(BaseFilterImplementation): """Does value fetching by dictionary access on the object.""" FILTERS = {} FILTERS.update(BaseFilterImplementation.FILTERS) FILTERS.update({"ValueExpander": DictValueExpander}) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475151"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">synologix/enigma2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/python/Tools/Notifications.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">66</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1963</span></div> 
</div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">notifications = [ ] notificationAdded = [ ] # notifications which are currently on screen (and might be closed by similiar notifications) current_notifications = [ ] def __AddNotification(fnc, screen, id, *args, **kwargs): if ".MessageBox'>" in `screen`: kwargs["simple"] = True notifications.append((fnc, screen, args, kwargs, id)) for x in notificationAdded: x() def AddNotification(screen, *args, **kwargs): AddNotificationWithCallback(None, screen, *args, **kwargs) def AddNotificationWithCallback(fnc, screen, *args, **kwargs): __AddNotification(fnc, screen, None, *args, **kwargs) def AddNotificationParentalControl(fnc, screen, *args, **kwargs): RemovePopup("Parental control") __AddNotification(fnc, screen, "Parental control", *args, **kwargs) def AddNotificationWithID(id, screen, *args, **kwargs): __AddNotification(None, screen, id, *args, **kwargs) def AddNotificationWithIDCallback(fnc, id, screen, *args, **kwargs): __AddNotification(fnc, screen, id, *args, **kwargs) # we don't support notifications with callback and ID as this # would require manually calling the callback on cancelled popups. 
def RemovePopup(id): # remove similiar notifications print "RemovePopup, id =", id for x in notifications: if x[4] and x[4] == id: print "(found in notifications)" notifications.remove(x) for x in current_notifications: if x[0] == id: print "(found in current notifications)" x[1].close() from Screens.MessageBox import MessageBox def AddPopup(text, type, timeout, id = None): if id is not None: RemovePopup(id) print "AddPopup, id =", id AddNotificationWithID(id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = True) def AddPopupWithCallback(fnc, text, type, timeout, id = None): if id is not None: RemovePopup(id) print "AddPopup, id =", id AddNotificationWithIDCallback(fnc, id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = False) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475152"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">BMan-L/shadowsocks</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/nose_plugin.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1072</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1164</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nose from nose.plugins.base import Plugin class ExtensionPlugin(Plugin): name = "ExtensionPlugin" def options(self, parser, env): Plugin.options(self, parser, env) def configure(self, options, config): Plugin.configure(self, options, config) self.enabled = True def wantFile(self, file): return file.endswith('.py') def wantDirectory(self, directory): return True def wantModule(self, file): return True if __name__ == '__main__': nose.main(addplugins=[ExtensionPlugin()]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475153"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">slint/zenodo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">zenodo/modules/exporter/__init__.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1189</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2018 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. 
"""Exporter programmatic API.""" from __future__ import absolute_import, print_function from .api import Exporter from .streams import BZip2ResultStream, ResultStream from .writers import BucketWriter, filename_factory </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475154"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">flos-club/eekk</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">libmat2/abstract.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1460</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import abc import os import re from typing import Set, Dict, Union assert Set # make pyflakes happy class AbstractParser(abc.ABC): """ This is the base class of every parser. It might yield `ValueError` on instantiation on invalid files, and `RuntimeError` when something went wrong in `remove_all`. 
""" meta_list = set() # type: Set[str] mimetypes = set() # type: Set[str] def __init__(self, filename: str) -> None: """ :raises ValueError: Raised upon an invalid file """ if re.search('^[a-z0-9./]', filename) is None: # Some parsers are calling external binaries, # this prevents shell command injections filename = os.path.join('.', filename) self.filename = filename fname, extension = os.path.splitext(filename) # Special case for tar.gz, tar.bz2, … files if fname.endswith('.tar') and len(fname) > 4: fname, extension = fname[:-4], '.tar' + extension self.output_filename = fname + '.cleaned' + extension self.lightweight_cleaning = False @abc.abstractmethod def get_meta(self) -> Dict[str, Union[str, dict]]: """Return all the metadata of the current file""" @abc.abstractmethod def remove_all(self) -> bool: """ Remove all the metadata of the current file :raises RuntimeError: Raised if the cleaning process went wrong. """ </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475155"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">aragos/tichu-tournament</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">api/src/welcome_handler.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">3909</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import webapp2 import json from generic_handler import GenericHandler from google.appengine.api import mail from google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api.app_identity import get_application_id from handler_utils import CheckUserOwnsTournamentAndMaybeReturnStatus from handler_utils import GetTourneyWithIdAndMaybeReturnStatus from handler_utils import SetErrorStatus from models import Tournament from models import PlayerPair class WelcomeHandler(GenericHandler): ''' Handles reuqests to /api/tournament/:id/welcome. Responsible for emailing players with their player codes. ''' @ndb.toplevel def post(self, id): ''' Sends an email for all email addresses in the request. Checks that emails belong to players in the tournament and sends the email only to valid addresses. Args: id: tournament ID to look up. Tournament must already have been created. ''' user = users.get_current_user() tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id) if not tourney: return if not CheckUserOwnsTournamentAndMaybeReturnStatus(self.response, user, tourney): return request_dict = self._ParseRequestAndMaybeSetStatus() if not request_dict: return self._SendEmails(request_dict, user, tourney) self.response.headers['Content-Type'] = 'application/json' self.response.set_status(201) def _SendEmails(self, request_dict, user, tourney): '''Sends a welcome email for all email addresses in the request_dict. Args: request_dict: Parsed JSON dict. user: The ndb.User owning this tournament. tourney: The tournament model object. 
''' player_pairs = PlayerPair._query(ancestor=tourney.key).fetch() requested_emails = request_dict["emails"] for player_pair in player_pairs: for player in player_pair.player_list(): if player.get("email") not in requested_emails: continue player_name = player.get("name") player_greeting = "Dear {},".format(player_name) if player_name else "Greetings!" email_text = """{} \nWelcome to Tichu tournament \"{}\". Your pair's ID is {}. You can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}. \nGood Luck! Your friendly neighborhood tournament director""".format( player_greeting, tourney.name, player_pair.id, player_pair.id) email_html = """{} <br/> <br/>Welcome to Tichu tournament \"{}\". Your pair's ID is <b>{}</b>. You can use it to view and enter your results on https://tichu-tournament.appspot.com/home/{}. <br/> <br/>Good Luck! <br/>Your friendly neighborhood tournament director """.format(player_greeting, tourney.name, player_pair.id, player_pair.id) mail.send_mail( sender="{} <welcome@{}.appspotmail.com>".format(tourney.name, get_application_id()), to=player["email"], subject="Your Tichu Tournament Pair Code", body=email_text, html=email_html, reply_to=user.email()) def _ParseRequestAndMaybeSetStatus(self): ''' Parses the client request for email sents an error status if the request is unreadable or the email list is empty. 
Returns: dict corresponding to the parsed request.s ''' try: request_dict = json.loads(self.request.body) except ValueError: SetErrorStatus(self.response, 500, "Invalid Input", "Unable to parse request body as JSON object") return None request_dict["emails"] = [e for e in request_dict["emails"] if e and e != ""] if len(request_dict["emails"]) == 0: SetErrorStatus(self.response, 400, "Invalid Input", "No emails specified.") return None return request_dict</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475156"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">PHOTOX/fuase</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ase/ase/tasks/io.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1499</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np from ase.parallel import world try: import json except ImportError: json = None if json is None: def dumps(obj): if isinstance(obj, str): return '"' + obj + '"' if isinstance(obj, (int, float)): return repr(obj) if isinstance(obj, dict): return '{' + ', '.join(dumps(key) + ': ' + dumps(value) for key, value in 
obj.items()) + '}' return '[' + ','.join(dumps(value) for value in obj) + ']' loads = eval else: class NDArrayEncoder(json.JSONEncoder): def __init__(self): json.JSONEncoder.__init__(self, sort_keys=True, indent=4) def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) dumps = NDArrayEncoder().encode loads = json.loads def numpyfy(obj): if isinstance(obj, dict): return dict((key, numpyfy(value)) for key, value in obj.items()) if isinstance(obj, list): try: obj = np.array(obj) except ValueError: obj = [numpyfy(value) for value in obj] return obj def write_json(name, results): if world.rank == 0: fd = open(name, 'w') fd.write(dumps(results)) fd.close() def read_json(name): fd = open(name, 'r') results = loads(fd.read()) fd.close() world.barrier() return numpyfy(results) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475157"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">luser/socorro</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">socorro/unittest/external/fs/test_fslegacydatedradixtreestorage.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">17117</span></div> </div></div> </td><td class="min-w-fit max-w-sm 
break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os import shutil from mock import Mock from configman import ConfigurationManager from nose.tools import eq_, ok_, assert_raises from socorro.external.fs.crashstorage import ( FSLegacyDatedRadixTreeStorage, FSTemporaryStorage ) from socorro.external.crashstorage_base import ( CrashIDNotFound, MemoryDumpsMapping, ) from socorro.unittest.testbase import TestCase class TestFSLegacyDatedRadixTreeStorage(TestCase): CRASH_ID_1 = "0bba929f-8721-460c-dead-a43c20071025" CRASH_ID_2 = "0bba929f-8721-460c-dead-a43c20071026" CRASH_ID_3 = "0bba929f-8721-460c-dddd-a43c20071025" def setUp(self): with self._common_config_setup().context() as config: self.fsrts = FSLegacyDatedRadixTreeStorage(config) def tearDown(self): shutil.rmtree(self.fsrts.config.fs_root) def _common_config_setup(self): mock_logging = Mock() required_config = FSLegacyDatedRadixTreeStorage.get_required_config() required_config.add_option('logger', default=mock_logging) config_manager = ConfigurationManager( [required_config], app_name='testapp', app_version='1.0', app_description='app description', values_source_list=[{ 'logger': mock_logging, 'minute_slice_interval': 1 }], argv_source=[] ) return config_manager def _make_test_crash(self): self.fsrts.save_raw_crash({ "test": "TEST" }, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_1) def _make_test_crash_3(self): self.fsrts.save_raw_crash({ "test": "TEST" }, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_3) def test_save_raw_crash(self): self._make_test_crash() ok_(os.path.islink( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1)))) ok_(os.path.exists( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1), self.CRASH_ID_1))) def 
test_get_raw_crash(self): self._make_test_crash() eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'], "TEST") assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash, self.CRASH_ID_2) def test_get_raw_dump(self): self._make_test_crash() eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'), "bar") eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, self.fsrts.config.dump_field), "baz") assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump, self.CRASH_ID_2, "foo") assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1, "foor") def test_get_raw_dumps(self): self._make_test_crash() eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' })) assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps, self.CRASH_ID_2) def test_remove(self): self._make_test_crash() self.fsrts.remove(self.CRASH_ID_1) parent = os.path.realpath( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1))) p = os.path.join(parent, self.CRASH_ID_1) ok_(not os.path.exists(p)) assert_raises(CrashIDNotFound, self.fsrts.remove, self.CRASH_ID_2) def test_new_crashes(self): self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() self.fsrts._current_slot = lambda: ['00', '00_01'] eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1]) eq_(list(self.fsrts.new_crashes()), []) self.fsrts.remove(self.CRASH_ID_1) del self.fsrts._current_slot self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1, ['00', '00_00']) new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1, ['00', '00_01']) webhead_path = os.sep.join([new_date_path, 'webhead_0']) os.mkdir(new_date_path) os.rename(date_path, webhead_path) os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1])) os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix( self.CRASH_ID_1)), 
os.sep.join([webhead_path, self.CRASH_ID_1])) self.fsrts._current_slot = lambda: ['00', '00_02'] eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1]) def test_orphaned_symlink_clean_up(self): # Bug 971496 identified a problem where a second crash coming in with # the same crash id would derail saving the second crash and leave # an extra undeleted symbolic link in the file system. This link # would be sited as undeleted on every run of 'new_crashes'. # this test shows that we can clean these extra symlinks if we # encounter them. self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() self.fsrts._current_slot = lambda: ['00', '00_01'] # make sure we can't create the duplicate in a different slot assert_raises(OSError, self._make_test_crash) # make sure the second slot exists so we can make the bogus symlink self._make_test_crash_3() # create bogus orphan link self.fsrts._create_name_to_date_symlink( self.CRASH_ID_1, self.fsrts._current_slot() ) ok_(os.path.islink( './crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-' 'a43c20071025' )) # run through the new_crashes iterator which will yield each of the # crashes that has been submitted since the last run of new_crashes. # this should cause all the symlinks to be removed. # we don't bother saving the crashes, as we don't need them. 
for x in self.fsrts.new_crashes(): pass ok_(not os.path.exists( './crashes/20071025/date/00/00_01/0bba929f-8721-460c-dead-' 'a43c20071025' )) class MyFSTemporaryStorage(FSTemporaryStorage): def _get_current_date(self): return "25" class TestFSTemporaryStorage(TestCase): CRASH_ID_1 = "0bba929f-8721-460c-dead-a43c20071025" CRASH_ID_2 = "0bba929f-8721-460c-dead-a43c20071026" CRASH_ID_3 = "0bba929f-8721-460c-dddd-a43c20071025" CRASH_ID_4 = "0bba929f-8721-460c-dddd-a43c20071125" def setUp(self): with self._common_config_setup().context() as config: self.fsrts = MyFSTemporaryStorage(config) def tearDown(self): shutil.rmtree(self.fsrts.config.fs_root) def _common_config_setup(self): mock_logging = Mock() required_config = MyFSTemporaryStorage.get_required_config() required_config.add_option('logger', default=mock_logging) config_manager = ConfigurationManager( [required_config], app_name='testapp', app_version='1.0', app_description='app description', values_source_list=[{ 'logger': mock_logging, 'minute_slice_interval': 1 }], argv_source=[] ) return config_manager def _make_test_crash(self): self.fsrts.save_raw_crash( {"test": "TEST"}, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_1 ) def _make_test_crash_3(self): self.fsrts.save_raw_crash( {"test": "TEST"}, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_3 ) def _make_test_crash_4(self): self.fsrts.save_raw_crash( {"test": "TEST"}, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_4 ) def test_save_raw_crash(self): self._make_test_crash() ok_(os.path.islink( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1)))) ok_(os.path.exists( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1), self.CRASH_ID_1))) def test_get_raw_crash(self): self._make_test_crash() 
eq_(self.fsrts.get_raw_crash(self.CRASH_ID_1)['test'], "TEST") assert_raises(CrashIDNotFound, self.fsrts.get_raw_crash, self.CRASH_ID_2) def test_get_raw_dump(self): self._make_test_crash() eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, 'foo'), "bar") eq_(self.fsrts.get_raw_dump(self.CRASH_ID_1, self.fsrts.config.dump_field), "baz") assert_raises(CrashIDNotFound, self.fsrts.get_raw_dump, self.CRASH_ID_2, "foo") assert_raises(IOError, self.fsrts.get_raw_dump, self.CRASH_ID_1, "foor") def test_get_raw_dumps(self): self._make_test_crash() eq_(self.fsrts.get_raw_dumps(self.CRASH_ID_1), MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' })) assert_raises(CrashIDNotFound, self.fsrts.get_raw_dumps, self.CRASH_ID_2) def test_remove(self): self._make_test_crash() self.fsrts.remove(self.CRASH_ID_1) parent = os.path.realpath( os.path.join( self.fsrts._get_radixed_parent_directory(self.CRASH_ID_1), self.fsrts._get_date_root_name(self.CRASH_ID_1))) p = os.path.join(parent, self.CRASH_ID_1) ok_(not os.path.exists(p)) assert_raises(CrashIDNotFound, self.fsrts.remove, self.CRASH_ID_2) def test_new_crashes(self): self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() self.fsrts._current_slot = lambda: ['00', '00_01'] eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1]) eq_(list(self.fsrts.new_crashes()), []) self.fsrts.remove(self.CRASH_ID_1) del self.fsrts._current_slot self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1, ['00', '00_00']) new_date_path = self.fsrts._get_dated_parent_directory(self.CRASH_ID_1, ['00', '00_01']) webhead_path = os.sep.join([new_date_path, 'webhead_0']) os.mkdir(new_date_path) os.rename(date_path, webhead_path) os.unlink(os.sep.join([webhead_path, self.CRASH_ID_1])) os.symlink('../../../../name/' + os.sep.join(self.fsrts._get_radix( self.CRASH_ID_1)), os.sep.join([webhead_path, self.CRASH_ID_1])) self.fsrts._current_slot = 
lambda: ['00', '00_02'] eq_(list(self.fsrts.new_crashes()), [self.CRASH_ID_1]) def test_orphaned_symlink_clean_up(self): # Bug 971496 identified a problem where a second crash coming in with # the same crash id would derail saving the second crash and leave # an extra undeleted symbolic link in the file system. This link # would be sited as undeleted on every run of 'new_crashes'. # this test shows that we can clean these extra symlinks if we # encounter them. self.fsrts._current_slot = lambda: ['00', '00_00'] self._make_test_crash() self.fsrts._current_slot = lambda: ['00', '00_01'] # make sure we can't create the duplicate in a different slot assert_raises(OSError, self._make_test_crash) # make sure the second slot exists so we can make the bogus symlink self._make_test_crash_3() # create bogus orphan link self.fsrts._create_name_to_date_symlink( self.CRASH_ID_1, self.fsrts._current_slot() ) ok_(os.path.islink( './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-' 'a43c20071025' )) ok_(os.path.islink( './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-' 'a43c20071025' )) # make sure all slots in use are traversable self.fsrts._current_slot = lambda: ['00', '00_02'] # run through the new_crashes iterator which will yield each of the # crashes that has been submitted since the last run of new_crashes. # this should cause all the symlinks to be removed. # we don't bother saving the crashes, as we don't need them. 
for x in self.fsrts.new_crashes(): pass ok_(not os.path.exists( './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025' )) def test_make_sure_days_recycle(self): self.fsrts._current_slot = lambda: ['00', '00_01'] self._make_test_crash() self._make_test_crash_3() self._make_test_crash_4() ok_(os.path.exists( './crashes/25/date/00/00_01/0bba929f-8721-460c-dead-a43c20071025' )) ok_(os.path.exists( './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071025' )) ok_(os.path.exists( './crashes/25/date/00/00_01/0bba929f-8721-460c-dddd-a43c20071125' )) for x in self.fsrts.new_crashes(): pass def _secondary_config_setup(self): mock_logging = Mock() required_config = FSLegacyDatedRadixTreeStorage.get_required_config() required_config.add_option('logger', default=mock_logging) config_manager = ConfigurationManager( [required_config], app_name='testapp', app_version='1.0', app_description='app description', values_source_list=[{ 'logger': mock_logging, 'minute_slice_interval': 1 }], argv_source=[] ) return config_manager def test_make_sure_old_style_date_directories_are_traversed(self): with self._secondary_config_setup().context() as config: self.fsrts_old = FSLegacyDatedRadixTreeStorage(config) self.fsrts_old._current_slot = lambda: ['00', '00_00'] # save crash 1 in old system self.fsrts_old.save_raw_crash({ "test": "TEST" }, MemoryDumpsMapping({ 'foo': 'bar', self.fsrts.config.dump_field: 'baz' }), self.CRASH_ID_1) ok_(os.path.exists( './crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-' 'a43c20071025' )) self.fsrts._current_slot = lambda: ['00', '00_00'] #save crash 3 in new system self._make_test_crash_3() ok_(os.path.exists( './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025' )) # consume crashes for x in self.fsrts.new_crashes(): pass # should be consumed because it isn't in our working tree or slot ok_(not os.path.exists( './crashes/20071025/date/00/00_00/0bba929f-8721-460c-dead-' 'a43c20071025' )) # should not be consumed, while in 
working tree, it is in active slot ok_(os.path.exists( './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025' )) # switch to next active slot self.fsrts._current_slot = lambda: ['00', '00_01'] # consume crashes for x in self.fsrts.new_crashes(): pass # should be consumed because it is in working tree and inactive slot ok_( not os.path.exists( './crashes/25/date/00/00_00/0bba929f-8721-460c-dddd-a43c20071025' )) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475158"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ianan/demreg</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">python/dem_reg_map.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1067</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np def dem_reg_map(sigmaa,sigmab,U,W,data,err,reg_tweak,nmu=500): """ dem_reg_map computes the regularisation parameter Inputs sigmaa: gsv vector sigmab: gsv vector U: gsvd matrix V: gsvd matrix data: dn data err: dn error reg_tweak: how much to adjust the chisq each iteration Outputs opt: regularization paramater """ nf=data.shape[0] 
nreg=sigmaa.shape[0] arg=np.zeros([nreg,nmu]) discr=np.zeros([nmu]) sigs=sigmaa[:nf]/sigmab[:nf] maxx=max(sigs) minx=min(sigs)**2.0*1E-2 step=(np.log(maxx)-np.log(minx))/(nmu-1.) mu=np.exp(np.arange(nmu)*step)*minx for kk in np.arange(nf): coef=data@U[kk,:]-sigmaa[kk] for ii in np.arange(nmu): arg[kk,ii]=(mu[ii]*sigmab[kk]**2*coef/(sigmaa[kk]**2+mu[ii]*sigmab[kk]**2))**2 discr=np.sum(arg,axis=0)-np.sum(err**2)*reg_tweak opt=mu[np.argmin(np.abs(discr))] return opt</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475159"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">guorendong/iridium-browser-ubuntu</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">third_party/webpagereplay/third_party/dns/resolver.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">215</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">28920</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. 
# # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS stub resolver. @var default_resolver: The default resolver object @type default_resolver: dns.resolver.Resolver object""" import socket import sys import time import dns.exception import dns.message import dns.name import dns.query import dns.rcode import dns.rdataclass import dns.rdatatype if sys.platform == 'win32': import _winreg class NXDOMAIN(dns.exception.DNSException): """The query name does not exist.""" pass # The definition of the Timeout exception has moved from here to the # dns.exception module. We keep dns.resolver.Timeout defined for # backwards compatibility. Timeout = dns.exception.Timeout class NoAnswer(dns.exception.DNSException): """The response did not contain an answer to the question.""" pass class NoNameservers(dns.exception.DNSException): """No non-broken nameservers are available to answer the query.""" pass class NotAbsolute(dns.exception.DNSException): """Raised if an absolute domain name is required but a relative name was provided.""" pass class NoRootSOA(dns.exception.DNSException): """Raised if for some reason there is no SOA at the root name. This should never happen!""" pass class Answer(object): """DNS stub resolver answer Instances of this class bundle up the result of a successful DNS resolution. 
For convenience, the answer object implements much of the sequence protocol, forwarding to its rrset. E.g. "for a in answer" is equivalent to "for a in answer.rrset", "answer[i]" is equivalent to "answer.rrset[i]", and "answer[i:j]" is equivalent to "answer.rrset[i:j]". Note that CNAMEs or DNAMEs in the response may mean that answer node's name might not be the query name. @ivar qname: The query name @type qname: dns.name.Name object @ivar rdtype: The query type @type rdtype: int @ivar rdclass: The query class @type rdclass: int @ivar response: The response message @type response: dns.message.Message object @ivar rrset: The answer @type rrset: dns.rrset.RRset object @ivar expiration: The time when the answer expires @type expiration: float (seconds since the epoch) """ def __init__(self, qname, rdtype, rdclass, response): self.qname = qname self.rdtype = rdtype self.rdclass = rdclass self.response = response min_ttl = -1 rrset = None for count in xrange(0, 15): try: rrset = response.find_rrset(response.answer, qname, rdclass, rdtype) if min_ttl == -1 or rrset.ttl < min_ttl: min_ttl = rrset.ttl break except KeyError: if rdtype != dns.rdatatype.CNAME: try: crrset = response.find_rrset(response.answer, qname, rdclass, dns.rdatatype.CNAME) if min_ttl == -1 or crrset.ttl < min_ttl: min_ttl = crrset.ttl for rd in crrset: qname = rd.target break continue except KeyError: raise NoAnswer raise NoAnswer if rrset is None: raise NoAnswer self.rrset = rrset self.expiration = time.time() + min_ttl def __getattr__(self, attr): if attr == 'name': return self.rrset.name elif attr == 'ttl': return self.rrset.ttl elif attr == 'covers': return self.rrset.covers elif attr == 'rdclass': return self.rrset.rdclass elif attr == 'rdtype': return self.rrset.rdtype else: raise AttributeError(attr) def __len__(self): return len(self.rrset) def __iter__(self): return iter(self.rrset) def __getitem__(self, i): return self.rrset[i] def __delitem__(self, i): del self.rrset[i] def 
__getslice__(self, i, j): return self.rrset[i:j] def __delslice__(self, i, j): del self.rrset[i:j] class Cache(object): """Simple DNS answer cache. @ivar data: A dictionary of cached data @type data: dict @ivar cleaning_interval: The number of seconds between cleanings. The default is 300 (5 minutes). @type cleaning_interval: float @ivar next_cleaning: The time the cache should next be cleaned (in seconds since the epoch.) @type next_cleaning: float """ def __init__(self, cleaning_interval=300.0): """Initialize a DNS cache. @param cleaning_interval: the number of seconds between periodic cleanings. The default is 300.0 @type cleaning_interval: float. """ self.data = {} self.cleaning_interval = cleaning_interval self.next_cleaning = time.time() + self.cleaning_interval def maybe_clean(self): """Clean the cache if it's time to do so.""" now = time.time() if self.next_cleaning <= now: keys_to_delete = [] for (k, v) in self.data.iteritems(): if v.expiration <= now: keys_to_delete.append(k) for k in keys_to_delete: del self.data[k] now = time.time() self.next_cleaning = now + self.cleaning_interval def get(self, key): """Get the answer associated with I{key}. Returns None if no answer is cached for the key. @param key: the key @type key: (dns.name.Name, int, int) tuple whose values are the query name, rdtype, and rdclass. @rtype: dns.resolver.Answer object or None """ self.maybe_clean() v = self.data.get(key) if v is None or v.expiration <= time.time(): return None return v def put(self, key, value): """Associate key and value in the cache. @param key: the key @type key: (dns.name.Name, int, int) tuple whose values are the query name, rdtype, and rdclass. @param value: The answer being cached @type value: dns.resolver.Answer object """ self.maybe_clean() self.data[key] = value def flush(self, key=None): """Flush the cache. If I{key} is specified, only that item is flushed. Otherwise the entire cache is flushed. 
@param key: the key to flush @type key: (dns.name.Name, int, int) tuple or None """ if not key is None: if self.data.has_key(key): del self.data[key] else: self.data = {} self.next_cleaning = time.time() + self.cleaning_interval class Resolver(object): """DNS stub resolver @ivar domain: The domain of this host @type domain: dns.name.Name object @ivar nameservers: A list of nameservers to query. Each nameserver is a string which contains the IP address of a nameserver. @type nameservers: list of strings @ivar search: The search list. If the query name is a relative name, the resolver will construct an absolute query name by appending the search names one by one to the query name. @type search: list of dns.name.Name objects @ivar port: The port to which to send queries. The default is 53. @type port: int @ivar timeout: The number of seconds to wait for a response from a server, before timing out. @type timeout: float @ivar lifetime: The total number of seconds to spend trying to get an answer to the question. If the lifetime expires, a Timeout exception will occur. @type lifetime: float @ivar keyring: The TSIG keyring to use. The default is None. @type keyring: dict @ivar keyname: The TSIG keyname to use. The default is None. @type keyname: dns.name.Name object @ivar keyalgorithm: The TSIG key algorithm to use. The default is dns.tsig.default_algorithm. @type keyalgorithm: string @ivar edns: The EDNS level to use. The default is -1, no Edns. @type edns: int @ivar ednsflags: The EDNS flags @type ednsflags: int @ivar payload: The EDNS payload size. The default is 0. @type payload: int @ivar cache: The cache to use. The default is None. @type cache: dns.resolver.Cache object """ def __init__(self, filename='/etc/resolv.conf', configure=True): """Initialize a resolver instance. @param filename: The filename of a configuration file in standard /etc/resolv.conf format. This parameter is meaningful only when I{configure} is true and the platform is POSIX. 
@type filename: string or file object @param configure: If True (the default), the resolver instance is configured in the normal fashion for the operating system the resolver is running on. (I.e. a /etc/resolv.conf file on POSIX systems and from the registry on Windows systems.) @type configure: bool""" self.reset() if configure: if sys.platform == 'win32': self.read_registry() elif filename: self.read_resolv_conf(filename) def reset(self): """Reset all resolver configuration to the defaults.""" self.domain = \ dns.name.Name(dns.name.from_text(socket.gethostname())[1:]) if len(self.domain) == 0: self.domain = dns.name.root self.nameservers = [] self.search = [] self.port = 53 self.timeout = 2.0 self.lifetime = 30.0 self.keyring = None self.keyname = None self.keyalgorithm = dns.tsig.default_algorithm self.edns = -1 self.ednsflags = 0 self.payload = 0 self.cache = None def read_resolv_conf(self, f): """Process f as a file in the /etc/resolv.conf format. If f is a string, it is used as the name of the file to open; otherwise it is treated as the file itself.""" if isinstance(f, str) or isinstance(f, unicode): try: f = open(f, 'r') except IOError: # /etc/resolv.conf doesn't exist, can't be read, etc. # We'll just use the default resolver configuration. 
self.nameservers = ['127.0.0.1'] return want_close = True else: want_close = False try: for l in f: if len(l) == 0 or l[0] == '#' or l[0] == ';': continue tokens = l.split() if len(tokens) == 0: continue if tokens[0] == 'nameserver': self.nameservers.append(tokens[1]) elif tokens[0] == 'domain': self.domain = dns.name.from_text(tokens[1]) elif tokens[0] == 'search': for suffix in tokens[1:]: self.search.append(dns.name.from_text(suffix)) finally: if want_close: f.close() if len(self.nameservers) == 0: self.nameservers.append('127.0.0.1') def _determine_split_char(self, entry): # # The windows registry irritatingly changes the list element # delimiter in between ' ' and ',' (and vice-versa) in various # versions of windows. # if entry.find(' ') >= 0: split_char = ' ' elif entry.find(',') >= 0: split_char = ',' else: # probably a singleton; treat as a space-separated list. split_char = ' ' return split_char def _config_win32_nameservers(self, nameservers): """Configure a NameServer registry entry.""" # we call str() on nameservers to convert it from unicode to ascii nameservers = str(nameservers) split_char = self._determine_split_char(nameservers) ns_list = nameservers.split(split_char) for ns in ns_list: if not ns in self.nameservers: self.nameservers.append(ns) def _config_win32_domain(self, domain): """Configure a Domain registry entry.""" # we call str() on domain to convert it from unicode to ascii self.domain = dns.name.from_text(str(domain)) def _config_win32_search(self, search): """Configure a Search registry entry.""" # we call str() on search to convert it from unicode to ascii search = str(search) split_char = self._determine_split_char(search) search_list = search.split(split_char) for s in search_list: if not s in self.search: self.search.append(dns.name.from_text(s)) def _config_win32_fromkey(self, key): """Extract DNS info from a registry key.""" try: servers, rtype = _winreg.QueryValueEx(key, 'NameServer') except WindowsError: servers = None if 
servers: self._config_win32_nameservers(servers) try: dom, rtype = _winreg.QueryValueEx(key, 'Domain') if dom: self._config_win32_domain(dom) except WindowsError: pass else: try: servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer') except WindowsError: servers = None if servers: self._config_win32_nameservers(servers) try: dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain') if dom: self._config_win32_domain(dom) except WindowsError: pass try: search, rtype = _winreg.QueryValueEx(key, 'SearchList') except WindowsError: search = None if search: self._config_win32_search(search) def read_registry(self): """Extract resolver configuration from the Windows registry.""" lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) want_scan = False try: try: # XP, 2000 tcp_params = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\Tcpip\Parameters') want_scan = True except EnvironmentError: # ME tcp_params = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\VxD\MSTCP') try: self._config_win32_fromkey(tcp_params) finally: tcp_params.Close() if want_scan: interfaces = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\Tcpip\Parameters' r'\Interfaces') try: i = 0 while True: try: guid = _winreg.EnumKey(interfaces, i) i += 1 key = _winreg.OpenKey(interfaces, guid) if not self._win32_is_nic_enabled(lm, guid, key): continue try: self._config_win32_fromkey(key) finally: key.Close() except EnvironmentError: break finally: interfaces.Close() finally: lm.Close() def _win32_is_nic_enabled(self, lm, guid, interface_key): # Look in the Windows Registry to determine whether the network # interface corresponding to the given guid is enabled. # # (Code contributed by Paul Marks, thanks!) # try: # This hard-coded location seems to be consistent, at least # from Windows 2000 through Vista. 
connection_key = _winreg.OpenKey( lm, r'SYSTEM\CurrentControlSet\Control\Network' r'\{4D36E972-E325-11CE-BFC1-08002BE10318}' r'\%s\Connection' % guid) try: # The PnpInstanceID points to a key inside Enum (pnp_id, ttype) = _winreg.QueryValueEx( connection_key, 'PnpInstanceID') if ttype != _winreg.REG_SZ: raise ValueError device_key = _winreg.OpenKey( lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id) try: # Get ConfigFlags for this device (flags, ttype) = _winreg.QueryValueEx( device_key, 'ConfigFlags') if ttype != _winreg.REG_DWORD: raise ValueError # Based on experimentation, bit 0x1 indicates that the # device is disabled. return not (flags & 0x1) finally: device_key.Close() finally: connection_key.Close() except (EnvironmentError, ValueError): # Pre-vista, enabled interfaces seem to have a non-empty # NTEContextList; this was how dnspython detected enabled # nics before the code above was contributed. We've retained # the old method since we don't know if the code above works # on Windows 95/98/ME. try: (nte, ttype) = _winreg.QueryValueEx(interface_key, 'NTEContextList') return nte is not None except WindowsError: return False def _compute_timeout(self, start): now = time.time() if now < start: if start - now > 1: # Time going backwards is bad. Just give up. raise Timeout else: # Time went backwards, but only a little. This can # happen, e.g. under vmware with older linux kernels. # Pretend it didn't happen. now = start duration = now - start if duration >= self.lifetime: raise Timeout return min(self.lifetime - duration, self.timeout) def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None): """Query nameservers to find the answer to the question. The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects of the appropriate type, or strings that can be converted into objects of the appropriate type. E.g. For I{rdtype} the integer 2 and the the string 'NS' both mean to query for records with DNS rdata type NS. 
@param qname: the query name @type qname: dns.name.Name object or string @param rdtype: the query type @type rdtype: int or string @param rdclass: the query class @type rdclass: int or string @param tcp: use TCP to make the query (default is False). @type tcp: bool @param source: bind to this IP address (defaults to machine default IP). @type source: IP address in dotted quad notation @rtype: dns.resolver.Answer instance @raises Timeout: no answers could be found in the specified lifetime @raises NXDOMAIN: the query name does not exist @raises NoAnswer: the response did not contain an answer @raises NoNameservers: no non-broken nameservers are available to answer the question.""" if isinstance(qname, (str, unicode)): qname = dns.name.from_text(qname, None) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) if isinstance(rdclass, str): rdclass = dns.rdataclass.from_text(rdclass) qnames_to_try = [] if qname.is_absolute(): qnames_to_try.append(qname) else: if len(qname) > 1: qnames_to_try.append(qname.concatenate(dns.name.root)) if self.search: for suffix in self.search: qnames_to_try.append(qname.concatenate(suffix)) else: qnames_to_try.append(qname.concatenate(self.domain)) all_nxdomain = True start = time.time() for qname in qnames_to_try: if self.cache: answer = self.cache.get((qname, rdtype, rdclass)) if answer: return answer request = dns.message.make_query(qname, rdtype, rdclass) if not self.keyname is None: request.use_tsig(self.keyring, self.keyname, self.keyalgorithm) request.use_edns(self.edns, self.ednsflags, self.payload) response = None # # make a copy of the servers list so we can alter it later. 
# nameservers = self.nameservers[:] backoff = 0.10 while response is None: if len(nameservers) == 0: raise NoNameservers for nameserver in nameservers[:]: timeout = self._compute_timeout(start) try: if tcp: response = dns.query.tcp(request, nameserver, timeout, self.port, source=source) else: response = dns.query.udp(request, nameserver, timeout, self.port, source=source) except (socket.error, dns.exception.Timeout): # # Communication failure or timeout. Go to the # next server # response = None continue except dns.query.UnexpectedSource: # # Who knows? Keep going. # response = None continue except dns.exception.FormError: # # We don't understand what this server is # saying. Take it out of the mix and # continue. # nameservers.remove(nameserver) response = None continue rcode = response.rcode() if rcode == dns.rcode.NOERROR or \ rcode == dns.rcode.NXDOMAIN: break # # We got a response, but we're not happy with the # rcode in it. Remove the server from the mix if # the rcode isn't SERVFAIL. # if rcode != dns.rcode.SERVFAIL: nameservers.remove(nameserver) response = None if not response is None: break # # All nameservers failed! # if len(nameservers) > 0: # # But we still have servers to try. Sleep a bit # so we don't pound them! # timeout = self._compute_timeout(start) sleep_time = min(timeout, backoff) backoff *= 2 time.sleep(sleep_time) if response.rcode() == dns.rcode.NXDOMAIN: continue all_nxdomain = False break if all_nxdomain: raise NXDOMAIN answer = Answer(qname, rdtype, rdclass, response) if self.cache: self.cache.put((qname, rdtype, rdclass), answer) return answer def use_tsig(self, keyring, keyname=None, algorithm=dns.tsig.default_algorithm): """Add a TSIG signature to the query. @param keyring: The TSIG keyring to use; defaults to None. @type keyring: dict @param keyname: The name of the TSIG key to use; defaults to None. The key must be defined in the keyring. 
If a keyring is specified but a keyname is not, then the key used will be the first key in the keyring. Note that the order of keys in a dictionary is not defined, so applications should supply a keyname when a keyring is used, unless they know the keyring contains only one key. @param algorithm: The TSIG key algorithm to use. The default is dns.tsig.default_algorithm. @type algorithm: string""" self.keyring = keyring if keyname is None: self.keyname = self.keyring.keys()[0] else: self.keyname = keyname self.keyalgorithm = algorithm def use_edns(self, edns, ednsflags, payload): """Configure Edns. @param edns: The EDNS level to use. The default is -1, no Edns. @type edns: int @param ednsflags: The EDNS flags @type ednsflags: int @param payload: The EDNS payload size. The default is 0. @type payload: int""" if edns is None: edns = -1 self.edns = edns self.ednsflags = ednsflags self.payload = payload default_resolver = None def get_default_resolver(): """Get the default resolver, initializing it if necessary.""" global default_resolver if default_resolver is None: default_resolver = Resolver() return default_resolver def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, tcp=False, source=None): """Query nameservers to find the answer to the question. This is a convenience function that uses the default resolver object to make the query. @see: L{dns.resolver.Resolver.query} for more information on the parameters.""" return get_default_resolver().query(qname, rdtype, rdclass, tcp, source) def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None): """Find the name of the zone which contains the specified name. @param name: the query name @type name: absolute dns.name.Name object or string @param rdclass: The query class @type rdclass: int @param tcp: use TCP to make the query (default is False). 
@type tcp: bool @param resolver: the resolver to use @type resolver: dns.resolver.Resolver object or None @rtype: dns.name.Name""" if isinstance(name, (str, unicode)): name = dns.name.from_text(name, dns.name.root) if resolver is None: resolver = get_default_resolver() if not name.is_absolute(): raise NotAbsolute(name) while 1: try: answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp) return name except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): try: name = name.parent() except dns.name.NoParent: raise NoRootSOA </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475160"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ezequielpereira/Time-Line</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">autopilot/autopilotlib/instructions/selectmenu.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3493</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg # # This file is part of Timeline. 
# # Timeline is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Timeline is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Timeline. If not, see <http://www.gnu.org/licenses/>. import wx from autopilotlib.instructions.instruction import Instruction from autopilotlib.app.logger import Logger from autopilotlib.app.exceptions import NotFoundException from autopilotlib.app.decorators import Overrides class SelectMenuInstruction(Instruction): """ 0 1 2 3 4 5 6 7 command object ( arg1 , arg2 [ , arg]* ) command ::= Select object ::= Menu | Mnu arg ::= STRING | TEXT Select a menu in the the menu hierarchy, given by the args. At least 2 targets must be present. 
Example 1: Select menu (Show, Sidebar) Example 2: Select menu (Show, "Balloons on hover") Example 3: Select Menu(File, New, "File Timeline...") """ @Overrides(Instruction) def execute(self, manuscript, win): manuscript.execute_next_instruction() self._select_menu(win) def _select_menu(self, win): try: item_id = self._find_menu_item_id(win) win.click_menu_item(item_id) except NotFoundException: Logger.add_error("Menu not found") def _find_menu_item_id(self, win): labels = self.get_all_args() menu_bar = self._get_menu_bar(win) inx = menu_bar.FindMenu(labels[0]) menu = menu_bar.GetMenu(inx) labels = labels [1:] while len(labels) > 0: item_id = self._get_menu_item_id(menu, labels[0]) if len(labels) > 1: menu_item = menu_bar.FindItemById(item_id) menu = menu_item.GetSubMenu() labels = labels [1:] return item_id def _get_menu_bar(self, win): menu_bar = win.GetMenuBar() if menu_bar is None: raise NotFoundException() return menu_bar def _get_menu_item_id(self, menu, label): valid_labels = self._get_valid_labels(label) for label in valid_labels: item_id = menu.FindItem(label) if item_id != wx.NOT_FOUND: return item_id return wx.NOT_FOUND def _get_valid_labels(self, label): valid_labels = [label] self._get_elipsis_label(label, valid_labels) self._get_accelerator_labels(label, valid_labels) return valid_labels def _get_elipsis_label(self, label, alternative_labels): alternative_labels.append(label + "...") def _get_accelerator_labels(self, label, alternative_labels): for i in range(len(label)): alternative_label = label[0:i] + "&" + label[i:] alternative_labels.append(alternative_label) return alternative_labels </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " 
tabindex="0" data-row-idx="475161"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">xbmc/atv2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">xbmc/lib/libPython/Python/Tools/scripts/combinerefs.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">102</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4381</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#! /usr/bin/env python """ combinerefs path A helper for analyzing PYTHONDUMPREFS output. When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown time Py_Finalize() prints the list of all live objects twice: first it prints the repr() of each object while the interpreter is still fully intact. After cleaning up everything it can, it prints all remaining live objects again, but the second time just prints their addresses, refcounts, and type names (because the interpreter has been torn down, calling repr methods at this point can get into infinite loops or blow up). Save all this output into a file, then run this script passing the path to that file. The script finds both output chunks, combines them, then prints a line of output for each object still alive at the end: address refcnt typename repr address is the address of the object, in whatever format the platform C produces for a %p format code. refcnt is of the form "[" ref "]" when the object's refcount is the same in both PYTHONDUMPREFS output blocks, or "[" ref_before "->" ref_after "]" if the refcount changed. 
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS output block. repr is repr(object), extracted from the first PYTHONDUMPREFS output block. CAUTION: If object is a container type, it may not actually contain all the objects shown in the repr: the repr was captured from the first output block, and some of the containees may have been released since then. For example, it's common for the line showing the dict of interned strings to display strings that no longer exist at the end of Py_Finalize; this can be recognized (albeit painfully) because such containees don't have a line of their own. The objects are listed in allocation order, with most-recently allocated printed first, and the first object allocated printed last. Simple examples: 00857060 [14] str '__len__' The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS output blocks said there were 14 references to it. This is probably due to C modules that intern the string "__len__" and keep a reference to it in a file static. 00857038 [46->5] tuple () 46-5 = 41 references to the empty tuple were removed by the cleanup actions between the times PYTHONDUMPREFS produced output. 00858028 [1025->1456] str '<dummy key>' The string '<dummy key>', which is used in dictobject.c to overwrite a real key that gets deleted, grew several hundred references during cleanup. It suggests that stuff did get removed from dicts by cleanup, but that the dicts themselves are staying alive for some reason. """ import re import sys # Generate lines from fileiter. If whilematch is true, continue reading # while the regexp object pat matches line. If whilematch is false, lines # are read so long as pat doesn't match them. In any case, the first line # that doesn't match pat (when whilematch is true), or that does match pat # (when whilematch is false), is lost, and fileiter will resume at the line # following it. 
def read(fileiter, pat, whilematch): for line in fileiter: if bool(pat.match(line)) == whilematch: yield line else: break def combine(fname): f = file(fname) fi = iter(f) for line in read(fi, re.compile(r'^Remaining objects:$'), False): pass crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)') addr2rc = {} addr2guts = {} before = 0 for line in read(fi, re.compile(r'^Remaining object addresses:$'), False): m = crack.match(line) if m: addr, addr2rc[addr], addr2guts[addr] = m.groups() before += 1 else: print '??? skipped:', line after = 0 for line in read(fi, crack, True): after += 1 m = crack.match(line) assert m addr, rc, guts = m.groups() # guts is type name here if addr not in addr2rc: print '??? new object created while tearing down:', line.rstrip() continue print addr, if rc == addr2rc[addr]: print '[%s]' % rc, else: print '[%s->%s]' % (addr2rc[addr], rc), print guts, addr2guts[addr] f.close() print "%d objects before, %d after" % (before, after) if __name__ == '__main__': combine(sys.argv[1]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475162"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">matthew-tucker/mne-python</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">examples/inverse/plot_read_inverse.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">42</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words 
p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1384</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" =========================== Reading an inverse operator =========================== The inverse operator's source space is shown in 3D. """ # Author: Alexandre Gramfort <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="50313c3528313e3422357e3722313d363f22241024353c35333f3d7d2031223923243533387e3622">[email protected]</a>> # # License: BSD (3-clause) from mne.datasets import sample from mne.minimum_norm import read_inverse_operator print(__doc__) data_path = sample.data_path() fname = data_path fname += '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' inv = read_inverse_operator(fname) print("Method: %s" % inv['methods']) print("fMRI prior: %s" % inv['fmri_prior']) print("Number of sources: %s" % inv['nsource']) print("Number of channels: %s" % inv['nchan']) ############################################################################### # Show result on 3D source space lh_points = inv['src'][0]['rr'] lh_faces = inv['src'][0]['use_tris'] rh_points = inv['src'][1]['rr'] rh_faces = inv['src'][1]['use_tris'] from mayavi import mlab # noqa mlab.figure(size=(600, 600), bgcolor=(0, 0, 0)) mesh = mlab.triangular_mesh(lh_points[:, 0], lh_points[:, 1], lh_points[:, 2], lh_faces, colormap='RdBu') mesh.module_manager.scalar_lut_manager.reverse_lut = True mesh = mlab.triangular_mesh(rh_points[:, 0], rh_points[:, 1], rh_points[:, 2], rh_faces, colormap='RdBu') mesh.module_manager.scalar_lut_manager.reverse_lut = True </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] 
odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475163"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">swapnakrishnan2k/tp-qemu</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">qemu/tests/nx.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2652</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os import logging from autotest.client.shared import error from virttest import data_dir @error.context_aware def run(test, params, env): """ try to exploit the guest to test whether nx(cpu) bit takes effect. 1) boot the guest 2) cp the exploit prog into the guest 3) run the exploit :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) exploit_cmd = params.get("exploit_cmd", "") if not exploit_cmd or session.cmd_status("test -x %s" % exploit_cmd): exploit_file = os.path.join(data_dir.get_deps_dir(), 'nx', 'x64_sc_rdo.c') dst_dir = '/tmp' error.context("Copy the Exploit file to guest.", logging.info) vm.copy_files_to(exploit_file, dst_dir) error.context("Build exploit program in guest.", logging.info) build_exploit = "gcc -o /tmp/nx_exploit /tmp/x64_sc_rdo.c" if session.cmd_status(build_exploit): raise error.TestError("Failed to build the exploit program") exploit_cmd = "/tmp/nx_exploit" error.context("Run exploit program in guest.", logging.info) # if nx is enabled (by default), the program failed. # segmentation error. return value of shell is not zero. exec_res = session.cmd_status(exploit_cmd) nx_on = params.get('nx_on', 'yes') if nx_on == 'yes': if exec_res: logging.info('NX works good.') error.context("Using execstack to remove the protection.", logging.info) enable_exec = 'execstack -s %s' % exploit_cmd if session.cmd_status(enable_exec): if session.cmd_status("execstack --help"): msg = "Please make sure guest have execstack command." raise error.TestError(msg) raise error.TestError('Failed to enable the execstack') if session.cmd_status(exploit_cmd): raise error.TestFail('NX is still protecting. Error.') else: logging.info('NX is disabled as desired. good') else: raise error.TestFail('Fatal Error: NX does not protect anything!') else: if exec_res: msg = "qemu fail to disable 'nx' flag or the exploit is corrupted." 
raise error.TestError(msg) else: logging.info('NX is disabled, and this Test Case passed.') if session: session.close() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475164"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">juanmont/one</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">.vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/transforms/frontmatter.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">9</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">19456</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># $Id: frontmatter.py 7897 2015-05-29 11:48:20Z milde $ # Author: David Goodger, Ueli Schlaepfer <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="573038383330322517272e233f383979382530">[email protected]</a>> # Copyright: This module has been placed in the public domain. 
""" Transforms related to the front matter of a document or a section (information found before the main text): - `DocTitle`: Used to transform a lone top level section's title to the document title, promote a remaining lone top-level section's title to the document subtitle, and determine the document's title metadata (document['title']) based on the document title and/or the "title" setting. - `SectionSubTitle`: Used to transform a lone subsection into a subtitle. - `DocInfo`: Used to transform a bibliographic field list into docinfo elements. """ __docformat__ = 'reStructuredText' import re from docutils import nodes, utils from docutils.transforms import TransformError, Transform class TitlePromoter(Transform): """ Abstract base class for DocTitle and SectionSubTitle transforms. """ def promote_title(self, node): """ Transform the following tree:: <node> <section> <title> ... into :: <node> <title> ... `node` is normally a document. """ # Type check if not isinstance(node, nodes.Element): raise TypeError, 'node must be of Element-derived type.' # `node` must not have a title yet. assert not (len(node) and isinstance(node[0], nodes.title)) section, index = self.candidate_index(node) if index is None: return None # Transfer the section's attributes to the node: # NOTE: Change second parameter to False to NOT replace # attributes that already exist in node with those in # section # NOTE: Remove third parameter to NOT copy the 'source' # attribute from section node.update_all_atts_concatenating(section, True, True) # setup_child is called automatically for all nodes. node[:] = (section[:1] # section title + node[:index] # everything that was in the # node before the section + section[1:]) # everything that was in the section assert isinstance(node[0], nodes.title) return 1 def promote_subtitle(self, node): """ Transform the following node tree:: <node> <title> <section> <title> ... into :: <node> <title> <subtitle> ... 
""" # Type check if not isinstance(node, nodes.Element): raise TypeError, 'node must be of Element-derived type.' subsection, index = self.candidate_index(node) if index is None: return None subtitle = nodes.subtitle() # Transfer the subsection's attributes to the new subtitle # NOTE: Change second parameter to False to NOT replace # attributes that already exist in node with those in # section # NOTE: Remove third parameter to NOT copy the 'source' # attribute from section subtitle.update_all_atts_concatenating(subsection, True, True) # Transfer the contents of the subsection's title to the # subtitle: subtitle[:] = subsection[0][:] node[:] = (node[:1] # title + [subtitle] # everything that was before the section: + node[1:index] # everything that was in the subsection: + subsection[1:]) return 1 def candidate_index(self, node): """ Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found. """ index = node.first_child_not_matching_class( nodes.PreBibliographic) if index is None or len(node) > (index + 1) or \ not isinstance(node[index], nodes.section): return None, None else: return node[index], index class DocTitle(TitlePromoter): """ In reStructuredText_, there is no way to specify a document title and subtitle explicitly. Instead, we can supply the document title (and possibly the subtitle as well) implicitly, and use this two-step transform to "raise" or "promote" the title(s) (and their corresponding section contents) to the document level. 1. If the document contains a single top-level section as its first non-comment element, the top-level section's title becomes the document's title, and the top-level section's contents become the document's immediate contents. The lone top-level section header must be the first non-comment element in the document. For example, take this input text:: ================= Top-Level Title ================= A paragraph. 
Once parsed, it looks like this:: <document> <section names="top-level title"> <title> Top-Level Title <paragraph> A paragraph. After running the DocTitle transform, we have:: <document names="top-level title"> <title> Top-Level Title <paragraph> A paragraph. 2. If step 1 successfully determines the document title, we continue by checking for a subtitle. If the lone top-level section itself contains a single second-level section as its first non-comment element, that section's title is promoted to the document's subtitle, and that section's contents become the document's immediate contents. Given this input text:: ================= Top-Level Title ================= Second-Level Title ~~~~~~~~~~~~~~~~~~ A paragraph. After parsing and running the Section Promotion transform, the result is:: <document names="top-level title"> <title> Top-Level Title <subtitle names="second-level title"> Second-Level Title <paragraph> A paragraph. (Note that the implicit hyperlink target generated by the "Second-Level Title" is preserved on the "subtitle" element itself.) Any comment elements occurring before the document title or subtitle are accumulated and inserted as the first body elements after the title(s). This transform also sets the document's metadata title (document['title']). .. _reStructuredText: http://docutils.sf.net/rst.html """ default_priority = 320 def set_metadata(self): """ Set document['title'] metadata title from the following sources, listed in order of priority: * Existing document['title'] attribute. * "title" setting. * Document title node (as promoted by promote_title). 
""" if not self.document.hasattr('title'): if self.document.settings.title is not None: self.document['title'] = self.document.settings.title elif len(self.document) and isinstance(self.document[0], nodes.title): self.document['title'] = self.document[0].astext() def apply(self): if getattr(self.document.settings, 'doctitle_xform', 1): # promote_(sub)title defined in TitlePromoter base class. if self.promote_title(self.document): # If a title has been promoted, also try to promote a # subtitle. self.promote_subtitle(self.document) # Set document['title']. self.set_metadata() class SectionSubTitle(TitlePromoter): """ This works like document subtitles, but for sections. For example, :: <section> <title> Title <section> <title> Subtitle ... is transformed into :: <section> <title> Title <subtitle> Subtitle ... For details refer to the docstring of DocTitle. """ default_priority = 350 def apply(self): if not getattr(self.document.settings, 'sectsubtitle_xform', 1): return for section in self.document.traverse(nodes.section): # On our way through the node tree, we are deleting # sections, but we call self.promote_subtitle for those # sections nonetheless. To do: Write a test case which # shows the problem and discuss on Docutils-develop. self.promote_subtitle(section) class DocInfo(Transform): """ This transform is specific to the reStructuredText_ markup syntax; see "Bibliographic Fields" in the `reStructuredText Markup Specification`_ for a high-level description. This transform should be run *after* the `DocTitle` transform. Given a field list as the first non-comment element after the document title and subtitle (if present), registered bibliographic field names are transformed to the corresponding DTD elements, becoming child elements of the "docinfo" element (except for a dedication and/or an abstract, which become "topic" elements after "docinfo"). 
For example, given this document fragment after parsing:: <document> <title> Document Title <field_list> <field> <field_name> Author <field_body> <paragraph> A. Name <field> <field_name> Status <field_body> <paragraph> $RCSfile$ ... After running the bibliographic field list transform, the resulting document tree would look like this:: <document> <title> Document Title <docinfo> <author> A. Name <status> frontmatter.py ... The "Status" field contained an expanded RCS keyword, which is normally (but optionally) cleaned up by the transform. The sole contents of the field body must be a paragraph containing an expanded RCS keyword of the form "$keyword: expansion text $". Any RCS keyword can be processed in any bibliographic field. The dollar signs and leading RCS keyword name are removed. Extra processing is done for the following RCS keywords: - "RCSfile" expands to the name of the file in the RCS or CVS repository, which is the name of the source file with a ",v" suffix appended. The transform will remove the ",v" suffix. - "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC time zone). The RCS Keywords transform will extract just the date itself and transform it to an ISO 8601 format date, as in "2000-12-31". (Since the source file for this text is itself stored under CVS, we can't show an example of the "Date" RCS keyword because we can't prevent any RCS keywords used in this explanation from being expanded. Only the "RCSfile" keyword is stable; its expansion text changes only if the file name changes.) .. _reStructuredText: http://docutils.sf.net/rst.html .. 
_reStructuredText Markup Specification: http://docutils.sf.net/docs/ref/rst/restructuredtext.html """ default_priority = 340 biblio_nodes = { 'author': nodes.author, 'authors': nodes.authors, 'organization': nodes.organization, 'address': nodes.address, 'contact': nodes.contact, 'version': nodes.version, 'revision': nodes.revision, 'status': nodes.status, 'date': nodes.date, 'copyright': nodes.copyright, 'dedication': nodes.topic, 'abstract': nodes.topic} """Canonical field name (lowcased) to node class name mapping for bibliographic fields (field_list).""" def apply(self): if not getattr(self.document.settings, 'docinfo_xform', 1): return document = self.document index = document.first_child_not_matching_class( nodes.PreBibliographic) if index is None: return candidate = document[index] if isinstance(candidate, nodes.field_list): biblioindex = document.first_child_not_matching_class( (nodes.Titular, nodes.Decorative)) nodelist = self.extract_bibliographic(candidate) del document[index] # untransformed field list (candidate) document[biblioindex:biblioindex] = nodelist def extract_bibliographic(self, field_list): docinfo = nodes.docinfo() bibliofields = self.language.bibliographic_fields labels = self.language.labels topics = {'dedication': None, 'abstract': None} for field in field_list: try: name = field[0][0].astext() normedname = nodes.make_id(name) if not (len(field) == 2 and normedname in bibliofields and self.check_empty_biblio_field(field, name)): raise TransformError canonical = bibliofields[normedname] biblioclass = self.biblio_nodes[canonical] if issubclass(biblioclass, nodes.TextElement): if not self.check_compound_biblio_field(field, name): raise TransformError utils.clean_rcs_keywords( field[1][0], self.rcs_keyword_substitutions) docinfo.append(biblioclass('', '', *field[1][0])) elif issubclass(biblioclass, nodes.authors): self.extract_authors(field, name, docinfo) elif issubclass(biblioclass, nodes.topic): if topics[canonical]: field[-1] += 
self.document.reporter.warning( 'There can only be one "%s" field.' % name, base_node=field) raise TransformError title = nodes.title(name, labels[canonical]) topics[canonical] = biblioclass( '', title, classes=[canonical], *field[1].children) else: docinfo.append(biblioclass('', *field[1].children)) except TransformError: if len(field[-1]) == 1 \ and isinstance(field[-1][0], nodes.paragraph): utils.clean_rcs_keywords( field[-1][0], self.rcs_keyword_substitutions) if normedname and normedname not in bibliofields: field['classes'].append(normedname) docinfo.append(field) nodelist = [] if len(docinfo) != 0: nodelist.append(docinfo) for name in ('dedication', 'abstract'): if topics[name]: nodelist.append(topics[name]) return nodelist def check_empty_biblio_field(self, field, name): if len(field[-1]) < 1: field[-1] += self.document.reporter.warning( 'Cannot extract empty bibliographic field "%s".' % name, base_node=field) return None return 1 def check_compound_biblio_field(self, field, name): if len(field[-1]) > 1: field[-1] += self.document.reporter.warning( 'Cannot extract compound bibliographic field "%s".' % name, base_node=field) return None if not isinstance(field[-1][0], nodes.paragraph): field[-1] += self.document.reporter.warning( 'Cannot extract bibliographic field "%s" containing ' 'anything other than a single paragraph.' 
% name, base_node=field) return None return 1 rcs_keyword_substitutions = [ (re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+' r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'), (re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'), (re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),] def extract_authors(self, field, name, docinfo): try: if len(field[1]) == 1: if isinstance(field[1][0], nodes.paragraph): authors = self.authors_from_one_paragraph(field) elif isinstance(field[1][0], nodes.bullet_list): authors = self.authors_from_bullet_list(field) else: raise TransformError else: authors = self.authors_from_paragraphs(field) authornodes = [nodes.author('', '', *author) for author in authors if author] if len(authornodes) >= 1: docinfo.append(nodes.authors('', *authornodes)) else: raise TransformError except TransformError: field[-1] += self.document.reporter.warning( 'Bibliographic field "%s" incompatible with extraction: ' 'it must contain either a single paragraph (with authors ' 'separated by one of "%s"), multiple paragraphs (one per ' 'author), or a bullet list with one paragraph (one author) ' 'per item.' 
% (name, ''.join(self.language.author_separators)), base_node=field) raise def authors_from_one_paragraph(self, field): text = field[1][0].astext().strip() if not text: raise TransformError for authorsep in self.language.author_separators: authornames = text.split(authorsep) if len(authornames) > 1: break authornames = [author.strip() for author in authornames] authors = [[nodes.Text(author)] for author in authornames if author] return authors def authors_from_bullet_list(self, field): authors = [] for item in field[1][0]: if len(item) != 1 or not isinstance(item[0], nodes.paragraph): raise TransformError authors.append(item[0].children) if not authors: raise TransformError return authors def authors_from_paragraphs(self, field): for item in field[1]: if not isinstance(item, nodes.paragraph): raise TransformError authors = [item.children for item in field[1]] return authors </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475165"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">fnp/pylucene</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">test/test_BooleanPrefixQuery.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2671</span></div> </div></div> </td><td class="min-w-fit 
max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># ==================================================================== # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ==================================================================== from unittest import TestCase, main from lucene import * class BooleanPrefixQueryTestCase(TestCase): """ Unit tests ported from Java Lucene """ def getCount(self, r, q): if BooleanQuery.instance_(q): return len(BooleanQuery.cast_(q).getClauses()) elif ConstantScoreQuery.instance_(q): iter = ConstantScoreQuery.cast_(q).getFilter().getDocIdSet(r).iterator() count = 0 while iter.nextDoc() != DocIdSetIterator.NO_MORE_DOCS: count += 1 return count else: self.fail("unexpected query " + q) def testMethod(self): directory = RAMDirectory() categories = ["food", "foodanddrink", "foodanddrinkandgoodtimes", "food and drink"] try: writer = IndexWriter(directory, WhitespaceAnalyzer(), True, IndexWriter.MaxFieldLength.LIMITED) for category in categories: doc = Document() doc.add(Field("category", category, Field.Store.YES, Field.Index.NOT_ANALYZED)) writer.addDocument(doc) writer.close() reader = IndexReader.open(directory, True) query = PrefixQuery(Term("category", "foo")) rw1 = query.rewrite(reader) bq = BooleanQuery() bq.add(query, BooleanClause.Occur.MUST) rw2 = bq.rewrite(reader) except Exception, e: self.fail(e) self.assertEqual(self.getCount(reader, rw1), self.getCount(reader, rw2), "Number of 
Clauses Mismatch") if __name__ == "__main__": import sys, lucene lucene.initVM() if '-loop' in sys.argv: sys.argv.remove('-loop') while True: try: main() except: pass else: main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475166"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cython-testbed/pandas</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">pandas/tests/extension/base/dtype.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2874</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import warnings import numpy as np import pandas as pd from .base import BaseExtensionTests class BaseDtypeTests(BaseExtensionTests): """Base class for ExtensionDtype classes""" def test_name(self, dtype): assert isinstance(dtype.name, str) def test_kind(self, dtype): valid = set('biufcmMOSUV') if dtype.kind is not None: assert dtype.kind in valid def test_construct_from_string_own_name(self, dtype): result = dtype.construct_from_string(dtype.name) assert type(result) is type(dtype) # check OK as classmethod result = type(dtype).construct_from_string(dtype.name) assert 
type(result) is type(dtype) def test_is_dtype_from_name(self, dtype): result = type(dtype).is_dtype(dtype.name) assert result is True def test_is_dtype_unboxes_dtype(self, data, dtype): assert dtype.is_dtype(data) is True def test_is_dtype_from_self(self, dtype): result = type(dtype).is_dtype(dtype) assert result is True def test_is_not_string_type(self, dtype): return not pd.api.types.is_string_dtype(dtype) def test_is_not_object_type(self, dtype): return not pd.api.types.is_object_dtype(dtype) def test_eq_with_str(self, dtype): assert dtype == dtype.name assert dtype != dtype.name + '-suffix' def test_eq_with_numpy_object(self, dtype): assert dtype != np.dtype('object') def test_eq_with_self(self, dtype): assert dtype == dtype assert dtype != object() def test_array_type(self, data, dtype): assert dtype.construct_array_type() is type(data) def test_check_dtype(self, data): dtype = data.dtype # check equivalency for using .dtypes df = pd.DataFrame({'A': pd.Series(data, dtype=dtype), 'B': data, 'C': 'foo', 'D': 1}) # np.dtype('int64') == 'Int64' == 'int64' # so can't distinguish if dtype.name == 'Int64': expected = pd.Series([True, True, False, True], index=list('ABCD')) else: expected = pd.Series([True, True, False, False], index=list('ABCD')) # XXX: This should probably be *fixed* not ignored. 
# See libops.scalar_compare with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) result = df.dtypes == str(dtype) self.assert_series_equal(result, expected) expected = pd.Series([True, True, False, False], index=list('ABCD')) result = df.dtypes.apply(str) == str(dtype) self.assert_series_equal(result, expected) def test_hashable(self, dtype): hash(dtype) # no error </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475167"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">andreparames/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/website_membership/models/product.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">338</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1264</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). 
# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class product_template(osv.Model): _inherit = 'product.template' _columns = { 'website_published': fields.boolean('Available in the website', copy=False), } _defaults = { 'website_published': False, } </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475168"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jylaxp/django</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">django/db/migrations/operations/models.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">290</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div 
class="" dir="auto"> <div> <span class="block ">21735</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import unicode_literals from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.models.options import normalize_together from django.utils import six from django.utils.functional import cached_property class CreateModel(Operation): """ Create a model's table. """ serialization_expand_args = ['fields', 'options', 'managers'] def __init__(self, name, fields, options=None, bases=None, managers=None): self.name = name self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'fields': self.fields, } if self.options: kwargs['options'] = self.options if self.bases and self.bases != (models.Model,): kwargs['bases'] = self.bases if self.managers and self.managers != [('objects', models.Manager())]: kwargs['managers'] = self.managers return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.add_model(ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), )) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ("proxy " if 
self.options.get("proxy", False) else "", self.name) def references_model(self, name, app_label=None): strings_to_check = [self.name] # Check we didn't inherit from the model for base in self.bases: if isinstance(base, six.string_types): strings_to_check.append(base.split(".")[-1]) # Check we have no FKs/M2Ms with it for fname, field in self.fields: if field.remote_field: if isinstance(field.remote_field.model, six.string_types): strings_to_check.append(field.remote_field.model.split(".")[-1]) # Now go over all the strings and compare them for string in strings_to_check: if string.lower() == name.lower(): return True return False class DeleteModel(Operation): """ Drops a model's table. """ def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label=None): return name.lower() == self.name_lower def describe(self): return "Delete model %s" % (self.name, ) class RenameModel(Operation): """ Renames a model. 
""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): apps = state.apps model = apps.get_model(app_label, self.old_name) model._meta.apps = apps # Get all of the related objects we need to repoint all_related_objects = ( f for f in model._meta.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many) ) # Rename the model state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower] state.models[app_label, self.new_name_lower].name = self.new_name state.remove_model(app_label, self.old_name_lower) # Repoint the FKs and M2Ms pointing to us for related_object in all_related_objects: if related_object.model is not model: # The model being renamed does not participate in this relation # directly. Rather, a superclass does. continue # Use the new related key for self referential related objects. 
if related_object.related_model == model: related_key = (app_label, self.new_name_lower) else: related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) new_fields = [] for name, field in state.models[related_key].fields: if name == related_object.field.name: field = field.clone() field.remote_field.model = "%s.%s" % (app_label, self.new_name) new_fields.append((name, field)) state.models[related_key].fields = new_fields state.reload_model(*related_key) state.reload_model(app_label, self.new_name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model( *related_key )._meta.get_field(related_object.field.name) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created: continue # Rename the M2M table that's based on this model's name. 
old_m2m_model = old_field.remote_field.through new_m2m_model = new_field.remote_field.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label=None): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) class AlterModelTable(Operation): """ Renames a model's table """ def __init__(self, name, table): self.name = name self.table = table @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'table': self.table, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.models[app_label, self.name_lower].options["db_table"] = self.table state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's 
db_table for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name_lower def describe(self): return "Rename table for %s to %s" % (self.name, self.table) class AlterUniqueTogether(Operation): """ Changes the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = "unique_together" def __init__(self, name, unique_together): self.name = name unique_together = normalize_together(unique_together) self.unique_together = set(tuple(cons) for cons in unique_together) @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'unique_together': self.unique_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.unique_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_unique_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, 
from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name_lower def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( not self.unique_together or any((name in together) for together in self.unique_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or '')) class AlterIndexTogether(Operation): """ Changes the value of index_together to the target one. Input value of index_together must be a set of tuples. """ option_name = "index_together" def __init__(self, name, index_together): self.name = name index_together = normalize_together(index_together) self.index_together = set(tuple(cons) for cons in index_together) @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'index_together': self.index_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.index_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_index_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name_lower def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, 
app_label) and ( not self.index_together or any((name in together) for together in self.index_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or '')) class AlterOrderWithRespectTo(Operation): """ Represents a change with the order_with_respect_to option. """ def __init__(self, name, order_with_respect_to): self.name = name self.order_with_respect_to = order_with_respect_to @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'order_with_respect_to': self.order_with_respect_to, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options['order_with_respect_to'] = self.order_with_respect_to state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: schema_editor.remove_field(from_model, from_model._meta.get_field("_order")) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_model(self, name, app_label=None): return name.lower() == self.name_lower def references_field(self, model_name, name, app_label=None): return ( 
self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) ) def describe(self): return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) class AlterModelOptions(Operation): """ Sets new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. """ # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.name = name self.options = options @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): kwargs = { 'name': self.name, 'options': self.options, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options = dict(model_state.options) model_state.options.update(self.options) for key in self.ALTER_OPTION_KEYS: if key not in self.options and key in model_state.options: del model_state.options[key] state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def references_model(self, name, app_label=None): return name.lower() == self.name_lower def describe(self): return "Change Meta options on %s" % (self.name, ) class AlterModelManagers(Operation): """ Alters the model's managers """ serialization_expand_args = ['managers'] def __init__(self, name, managers): self.name = name self.managers = managers @cached_property def name_lower(self): return self.name.lower() def deconstruct(self): return ( self.__class__.__name__, [self.name, self.managers], {} ) def state_forwards(self, 
app_label, state): model_state = state.models[app_label, self.name_lower] model_state.managers = list(self.managers) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def references_model(self, name, app_label=None): return name.lower() == self.name_lower def describe(self): return "Change managers on %s" % (self.name, ) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475169"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">zanderle/django</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/template_tests/syntax_tests/test_filter_tag.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">521</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1795</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from django.template import TemplateSyntaxError from django.test import SimpleTestCase from ..utils import setup class FilterTagTests(SimpleTestCase): @setup({'filter01': '{% filter upper %}{% endfilter %}'}) def test_filter01(self): output = self.engine.render_to_string('filter01') self.assertEqual(output, '') 
@setup({'filter02': '{% filter upper %}django{% endfilter %}'}) def test_filter02(self): output = self.engine.render_to_string('filter02') self.assertEqual(output, 'DJANGO') @setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'}) def test_filter03(self): output = self.engine.render_to_string('filter03') self.assertEqual(output, 'django') @setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'}) def test_filter04(self): output = self.engine.render_to_string('filter04', {'remove': 'spam'}) self.assertEqual(output, 'django') @setup({'filter05': '{% filter safe %}fail{% endfilter %}'}) def test_filter05(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('filter05') @setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'}) def test_filter05bis(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('filter05bis') @setup({'filter06': '{% filter escape %}fail{% endfilter %}'}) def test_filter06(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('filter06') @setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'}) def test_filter06bis(self): with self.assertRaises(TemplateSyntaxError): self.engine.get_template('filter06bis') </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475170"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jamesmarva/docker-py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block 
">docker/errors.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">39</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2469</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright 2014 dotCloud inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import requests class APIError(requests.exceptions.HTTPError): def __init__(self, message, response, explanation=None): # requests 1.2 supports response as a keyword argument, but # requests 1.1 doesn't super(APIError, self).__init__(message) self.response = response self.explanation = explanation if self.explanation is None and response.content: self.explanation = response.content.strip() def __str__(self): message = super(APIError, self).__str__() if self.is_client_error(): message = '{0} Client Error: {1}'.format( self.response.status_code, self.response.reason) elif self.is_server_error(): message = '{0} Server Error: {1}'.format( self.response.status_code, self.response.reason) if self.explanation: message = '{0} ("{1}")'.format(message, self.explanation) return message def is_client_error(self): return 400 <= self.response.status_code < 500 def is_server_error(self): return 500 <= self.response.status_code < 600 class DockerException(Exception): pass class NotFound(APIError): pass class InvalidVersion(DockerException): pass class InvalidRepository(DockerException): pass class InvalidConfigFile(DockerException): pass class DeprecatedMethod(DockerException): pass class TLSParameterError(DockerException): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg + (". TLS configurations should map the Docker CLI " "client configurations. 
See " "http://docs.docker.com/examples/https/ for " "API details.") class NullResource(DockerException, ValueError): pass </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475171"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Zanzibar82/script.module.urlresolver</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lib/urlresolver/plugins/vidstream.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2431</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" urlresolver XBMC Addon Copyright (C) 2011 t0mm0 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. 
If not, see <http://www.gnu.org/licenses/>. """ import re from t0mm0.common.net import Net from urlresolver.plugnplay.interfaces import UrlResolver from urlresolver.plugnplay.interfaces import PluginSettings from urlresolver.plugnplay import Plugin from urlresolver import common class VidstreamResolver(Plugin, UrlResolver, PluginSettings): implements = [UrlResolver, PluginSettings] name = "vidstream" domains = ["vidstream.in"] def __init__(self): p = self.get_setting('priority') or 100 self.priority = int(p) self.net = Net() #e.g. http://vidstream.in/xdfaay6ccwqj self.pattern = 'http://((?:www.)?vidstream.in)/(.*)' def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) resp = self.net.http_GET(web_url) html = resp.content post_url = resp.get_url() form_values = {} for i in re.finditer('<input.*?name="(.*?)".*?value="(.*?)">', html): form_values[i.group(1)] = i.group(2) html = self.net.http_POST(post_url, form_data=form_values).content # get stream url pattern = 'file:\s*"([^"]+)",' r = re.search(pattern, html) if r: return r.group(1) else: raise UrlResolver.ResolverError('File Not Found or removed') def get_url(self, host, media_id): return 'http://vidstream.in/%s' % (media_id) def get_host_and_id(self, url): r = re.search(self.pattern, url) if r: return r.groups() else: return False def valid_url(self, url, host): if self.get_setting('enabled') == 'false': return False return re.match(self.pattern, url) or self.name in host </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475172"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">varunnaganathan/django</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">django/utils/dateformat.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">110</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">11592</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" PHP date() style date formatting See http://www.php.net/date for format strings Usage: >>> import datetime >>> d = datetime.datetime.now() >>> df = DateFormat(d) >>> print(df.format('jS F Y H:i')) 7th October 2003 11:39 >>> """ from __future__ import unicode_literals import calendar import datetime import re import time from django.utils import six from django.utils.dates import ( MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR, ) from django.utils.encoding import force_text from django.utils.timezone import get_default_timezone, is_aware, is_naive from django.utils.translation import ugettext as _ re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])') re_escaped = re.compile(r'\\(.)') class Formatter(object): def format(self, formatstr): pieces = [] for i, piece in enumerate(re_formatchars.split(force_text(formatstr))): if i % 2: pieces.append(force_text(getattr(self, piece)())) elif piece: pieces.append(re_escaped.sub(r'\1', piece)) return ''.join(pieces) class TimeFormat(Formatter): def __init__(self, obj): self.data = obj self.timezone = None # We only support timezone when formatting datetime objects, # not date objects (timezone information not appropriate), # or time objects (against established django policy). 
if isinstance(obj, datetime.datetime): if is_naive(obj): self.timezone = get_default_timezone() else: self.timezone = obj.tzinfo def a(self): "'a.m.' or 'p.m.'" if self.data.hour > 11: return _('p.m.') return _('a.m.') def A(self): "'AM' or 'PM'" if self.data.hour > 11: return _('PM') return _('AM') def B(self): "Swatch Internet time" raise NotImplementedError('may be implemented in a future release') def e(self): """ Timezone name. If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" try: if hasattr(self.data, 'tzinfo') and self.data.tzinfo: # Have to use tzinfo.tzname and not datetime.tzname # because datatime.tzname does not expect Unicode return self.data.tzinfo.tzname(self.data) or "" except NotImplementedError: pass return "" def f(self): """ Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ if self.data.minute == 0: return self.g() return '%s:%s' % (self.g(), self.i()) def g(self): "Hour, 12-hour format without leading zeros; i.e. '1' to '12'" if self.data.hour == 0: return 12 if self.data.hour > 12: return self.data.hour - 12 return self.data.hour def G(self): "Hour, 24-hour format without leading zeros; i.e. '0' to '23'" return self.data.hour def h(self): "Hour, 12-hour format; i.e. '01' to '12'" return '%02d' % self.g() def H(self): "Hour, 24-hour format; i.e. '00' to '23'" return '%02d' % self.G() def i(self): "Minutes; i.e. '00' to '59'" return '%02d' % self.data.minute def O(self): """ Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, this method returns an empty string. 
""" if not self.timezone: return "" seconds = self.Z() if seconds == "": return "" sign = '-' if seconds < 0 else '+' seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60) def P(self): """ Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension. """ if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return '%s %s' % (self.f(), self.a()) def s(self): "Seconds; i.e. '00' to '59'" return '%02d' % self.data.second def T(self): """ Time zone of this machine; e.g. 'EST' or 'MDT'. If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" name = None try: name = self.timezone.tzname(self.data) except Exception: # pytz raises AmbiguousTimeError during the autumn DST change. # This happens mainly when __init__ receives a naive datetime # and sets self.timezone = get_default_timezone(). pass if name is None: name = self.format('O') return six.text_type(name) def u(self): "Microseconds; i.e. '000000' to '999999'" return '%06d' % self.data.microsecond def Z(self): """ Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. If timezone information is not available, this method returns an empty string. """ if not self.timezone: return "" try: offset = self.timezone.utcoffset(self.data) except Exception: # pytz raises AmbiguousTimeError during the autumn DST change. # This happens mainly when __init__ receives a naive datetime # and sets self.timezone = get_default_timezone(). return "" # `offset` is a datetime.timedelta. 
For negative values (to the west of # UTC) only days can be negative (days=-1) and seconds are always # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0) # Positive offsets have days=0 return offset.days * 86400 + offset.seconds class DateFormat(TimeFormat): year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] def b(self): "Month, textual, 3 letters, lowercase; e.g. 'jan'" return MONTHS_3[self.data.month] def c(self): """ ISO 8601 Format Example : '2008-01-02T10:30:00.000123' """ return self.data.isoformat() def d(self): "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'" return '%02d' % self.data.day def D(self): "Day of the week, textual, 3 letters; e.g. 'Fri'" return WEEKDAYS_ABBR[self.data.weekday()] def E(self): "Alternative month names as required by some locales. Proprietary extension." return MONTHS_ALT[self.data.month] def F(self): "Month, textual, long; e.g. 'January'" return MONTHS[self.data.month] def I(self): "'1' if Daylight Savings Time, '0' otherwise." try: if self.timezone and self.timezone.dst(self.data): return '1' else: return '0' except Exception: # pytz raises AmbiguousTimeError during the autumn DST change. # This happens mainly when __init__ receives a naive datetime # and sets self.timezone = get_default_timezone(). return '' def j(self): "Day of the month without leading zeros; i.e. '1' to '31'" return self.data.day def l(self): "Day of the week, textual, long; e.g. 'Friday'" return WEEKDAYS[self.data.weekday()] def L(self): "Boolean for whether it is a leap year; i.e. True or False" return calendar.isleap(self.data.year) def m(self): "Month; i.e. '01' to '12'" return '%02d' % self.data.month def M(self): "Month, textual, 3 letters; e.g. 'Jan'" return MONTHS_3[self.data.month].title() def n(self): "Month without leading zeros; i.e. '1' to '12'" return self.data.month def N(self): "Month abbreviation in Associated Press style. Proprietary extension." 
return MONTHS_AP[self.data.month] def o(self): "ISO 8601 year number matching the ISO week number (W)" return self.data.isocalendar()[0] def r(self): "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'" return self.format('D, j M Y H:i:s O') def S(self): "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'" if self.data.day in (11, 12, 13): # Special case return 'th' last = self.data.day % 10 if last == 1: return 'st' if last == 2: return 'nd' if last == 3: return 'rd' return 'th' def t(self): "Number of days in the given month; i.e. '28' to '31'" return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1] def U(self): "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)" if isinstance(self.data, datetime.datetime) and is_aware(self.data): return int(calendar.timegm(self.data.utctimetuple())) else: return int(time.mktime(self.data.timetuple())) def w(self): "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)" return (self.data.weekday() + 1) % 7 def W(self): "ISO-8601 week number of year, weeks starting on Monday" # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt week_number = None jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1 weekday = self.data.weekday() + 1 day_of_year = self.z() if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4: if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)): week_number = 53 else: week_number = 52 else: if calendar.isleap(self.data.year): i = 366 else: i = 365 if (i - day_of_year) < (4 - weekday): week_number = 1 else: j = day_of_year + (7 - weekday) + (jan1_weekday - 1) week_number = j // 7 if jan1_weekday > 4: week_number -= 1 return week_number def y(self): "Year, 2 digits; e.g. '99'" return six.text_type(self.data.year)[2:] def Y(self): "Year, 4 digits; e.g. '1999'" return self.data.year def z(self): "Day of the year; i.e. 
'0' to '365'" doy = self.year_days[self.data.month] + self.data.day if self.L() and self.data.month > 2: doy += 1 return doy def format(value, format_string): "Convenience function" df = DateFormat(value) return df.format(format_string) def time_format(value, format_string): "Convenience function" tf = TimeFormat(value) return tf.format(format_string) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475173"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">teamfx/openjfx-9-dev-rt</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/find_files_unittest.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2675</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. 
# * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import unittest from webkitpy.common.system.filesystem import FileSystem import find_files class MockWinFileSystem(object): def join(self, *paths): return '\\'.join(paths) def normpath(self, path): return path.replace('/', '\\') class TestWinNormalize(unittest.TestCase): def assert_filesystem_normalizes(self, filesystem): self.assertEqual(find_files._normalize(filesystem, "c:\\foo", ['fast/html', 'fast/canvas/*', 'compositing/foo.html']), ['c:\\foo\\fast\html', 'c:\\foo\\fast\canvas\*', 'c:\\foo\compositing\\foo.html']) def test_mocked_win(self): # This tests test_files.normalize, using portable behavior emulating # what we think Windows is supposed to do. This test will run on all # platforms. 
self.assert_filesystem_normalizes(MockWinFileSystem()) def test_win(self): # This tests the actual windows platform, to ensure we get the same # results that we get in test_mocked_win(). if not sys.platform.startswith('win'): return self.assert_filesystem_normalizes(FileSystem()) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475174"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">coecms/CMIP5-utils</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">fetch_step2.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">17182</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Paola Petrelli - <a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="7b0b1a14171a0b3b0e0f1a08551e1f0e551a0e">[email protected]</a> 4th March 2014 # Last changed on 26th of March 2014 # Updates list: # 26/03/2014 - output files and table csv file are created after # collecting data; calling process_file with multiprocessing # module to speed up hash checksum (md5/sha256) # 01/04/2014 - exclude the ACCESS and CSIRO models from check # 03/09/2014 trying to substitute the google file 
with a csv table # 01/12/2014 script has been divided into two steps, this is first step fetch_step1.py # that runs search on ESGF node and can be run interactively, the second step fetch_step2.py should be run in the queue # 21/05/2015 comments updated, introduce argparse to manage inputs, added extra argument # "node" to choose automatically between different nodes: only pcmdi and dkrz (default) are available at the moment # 09/02/2016 pmcdi9.llnl.gov changed to pcmdi.llnl.gov, in step2 added extra file path checks to take into account that servers pcmdi3/7/9 are now aims3 # # Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5 # published files responding to the constraints passed as arguments. # The search is run on one of the ESGF node but it searches through all the available # nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. At least one variable and experiment # should be specified but models are optionals. The search is limited to the first 10000 matches, # to change this you have to change the pcmdi_url variable in the code. # The second step returns 3 files listing: the published files available on raijin (variables_replica.csv), # the published files that need downloading and/or updating (variables_to_download.csv), # the variable/model/experiment combination not yet published (variables_not_published). # Uses md5/sha256 checksum to determine if a file already existing on raijin is exactly the same as the latest published version # If you have to parse a big number of files, you can speed up the process by using multithread module "Pool" # if you're doing this you should run the second step in the queue, which is the reason why the script is split into 2 steps. 
# To do that you can change the threads number from 1 (to run interactively) to the number of cpus you're requesting, in line 340 # async_results = Pool(16).map_async(process_file, result) # The maximum number of threads depends on the number of cpus you're using, in above example 16 cpus. # # If the "table" option is selected it returns also a table csv file summarising the search results. # # The CMIP5 replica data is stored on raijin.nci.org.au under # /g/data1/ua6/unofficial-ESG-replica/tmp/tree # # Example of how to run on raijin.nci.org.au # # module load python/2.7.3 (default on raijin) # python fetch_step2.py -v ua_Amon tos_Omon -m CCSM4 -e rcp45 -o out -t # NB needs python version 2.7 or more recent # # - the variable argument is passed as variable-name_cmip-table, this avoids confusion if looking for variables from different cmip tables # - multiple arguments can be passed to "-v", "-m", "-e"; # - to pass multiple arguments, declare the option once followed by all the desired values (as above); # - default output files root is "variables" # - you need to pass at least one experiment and one variable, models are optional. # - output file is optional, default is "variables" # - table is optional, default is False import sys, argparse import subprocess, re, itertools from multiprocessing import Pool import os.path as opath # to manage files and dirs # help functions def VarCmipTable(v): if "_" not in v: raise TypeError("String '%s' does not match required format: var_cmip-table, ie tas_Amon"%(v,)) else: return v def parse_input(): ''' Parse input arguments ''' parser = argparse.ArgumentParser(description='''Retrieves a wget script (wget_<experiment>.out) listing all the CMIP5 published files responding to the constraints passed as arguments. The search is run on one of the ESGF node but it searches through all the available nodes for the latest version. Multiple arguments can be passed to -e, -v, -m. 
At least one variable and experiment should be specified but models are optionals. The search is limited to the first 1000 matches, to change this you have to change the pcmdi_url variable in the code.''') parser.add_argument('-e','--experiment', type=str, nargs="*", help='CMIP5 experiment', required=True) parser.add_argument('-m','--model', type=str, nargs="*", help='', required=False) parser.add_argument('-v','--variable', type=VarCmipTable, nargs="*", help='combination of CMIP5 variable & cmip_table Ex. tas_Amon', required=True) parser.add_argument('-t','--table', action='store_true', default='store_false', help="csv table option, default is False", required=False) parser.add_argument('-o','--output', type=str, nargs="?", default="variables", help='''output files root, default is variables''', required=False) return vars(parser.parse_args()) sys.exit() def assign_constraint(): ''' Assign default values and input to constraints ''' global var0, exp0, mod0, table, outfile var0 = [] exp0 = [] mod0 = [] outfile = 'variables' # assign constraints from arguments list args = parse_input() var0=args["variable"] if args["model"]: mod0=args["model"] exp0=args["experiment"] table=args["table"] outfile=args["output"] return def correct_model(model): ''' Correct name of models that have two, to make search work ''' # list model as dict{dir name : search name} models={"ACCESS1-0" : "ACCESS1.0", "ACCESS1-3" : "ACCESS1.3", "CESM1-BGC" : "CESM1(BGC)", "CESM1-CAM5" : "CESM1(CAM5)", "CESM1-CAM5-1-FV2" : "CESM1(CAM5.1,FV2)", "CESM1-WACCM" : "CESM1(WACCM)", "CESM1-FASTCHEM" : "CESM1(FASTCHEM)", "bcc-csm1-1" : "BCC-CSM1.1", "bcc-csm1-1-m" : "BCC-CSM1.1(m)", "inmcm4" : "INM-CM4"} # if the current model is one of the dict keys, change name if model in models.keys(): return models[model] return model def tree_exist(furl): ''' Return True if file exists in tmp/tree ''' replica_dir = "/g/data1/ua6/unofficial-ESG-replica/tmp/tree/" tree_path = replica_dir + furl return 
[opath.exists(tree_path),tree_path] def write_file(): ''' Write info on file to download or replica output ''' global info files = {"R" : orep, "D" : odown} for item in info.values(): outfile = files[item[-1]] outfile.write(",".join(item[0:-1])+"\n") def file_details(fname): ''' Split the filename in variable, MIP code, model, experiment, ensemble (period is excluded) ''' namebits = fname.replace("'","").split('_') if len(namebits) >= 5: details = namebits[0:5] else: details = [] return details def find_string(bits,string): ''' Returns matching string if found in directory structure ''' dummy = filter(lambda el: re.findall( string, el), bits) if len(dummy) == 0: return 'no_version' else: return dummy[0] def get_info(fname,path): ''' Collect the info on a file form its path return it in a list ''' version = '[a-z]*201[0-9][0-1][0-9][0-3][0-9]' bits = path.split('/') finfo = file_details(fname) finfo.append(find_string(bits[:-1],version)) finfo.append(path) return finfo def parse_file(wgetfile,varlist,modlist,exp): ''' extract file list from wget file ''' # open wget file, read content saving to a list of lines and close again infile = open(wgetfile,'r') lines = infile.readlines() infile.close # if wget didn't return files print a warning and exit function if lines[0] == "No files were found that matched the query": print lines[0] + " for ", varlist, modlist, exp return False else: # select only the files lines starting as var_cmortable_model_exp ... 
result=[] # if modlist empty add to it a regex string indicating model name if len(modlist) > 0: comb_constr = itertools.product(*[varlist,modlist]) filestrs = ["_".join(x) for x in comb_constr] else: filestrs = [var + '_[A-Za-z0-9-.()]*_' for var in varlist] for line in lines: match = [re.search(pat,line) for pat in filestrs] if match.count(None) != len(match) and line.find(exp): [fname,furl,hash_type,fhash] = line.replace("'","").split() if hash_type in ["SHA256","sha256","md5","MD5"]: result.append([fname, furl.replace("http://",""), fhash, hash_type]) else: print "Error in parse_file() is selecting the wrong lines!" print line sys.exit() return result def check_hash(tree_path,fhash,hash_type): ''' Execute md5sum/sha256sum on file on tree and return True,f same as in wget file ''' hash_cmd="md5sum" if hash_type in ["SHA256","sha256"]: hash_cmd="sha256sum" tree_hash = subprocess.check_output([hash_cmd, tree_path]).split()[0] return tree_hash == fhash def process_file(result): ''' Check if file exist on tree and if True check md5/sha265 hash ''' info = {} [fname,furl,fhash,hash_type]=result [bool,tree_path]=tree_exist(furl) # some servers have updated name: for ex pcmdi9.llnl.gov is now aims3.llnl.gov so we need to substitute and check that too print furl, bool if not bool and furl[0:14]=='aims3.llnl.gov': for num in [3,7,9]: other_furl=furl.replace('aims3','pcmdi'+str(num)) print "other_furl ", other_furl [bool,tree_path]=tree_exist(other_furl) if bool: print "bool after extra check for num ", bool, num break info[furl] = get_info(fname,tree_path) # if file exists in tree compare md5/sha256 with values in wgetfile, else add to update if "ACCESS" in fname or "CSIRO" in fname or (bool and check_hash(tree_path,fhash,hash_type)): info[furl].append("R") else: info[furl][-1] = "http://" + furl info[furl].append("D") return info def retrieve_info(query_item): ''' retrieve items of info related to input query combination ''' global info # info order is: 0-var, 1-mip, 
2-mod, 3-exp, 4-ens, 5-ver, 6-fname, 7-status var, mip = query_item[0].split("_") rows={} # add the items in info with matching var,mip,exp to rows as dictionaries for item in info.values(): if var == item[0] and mip == item[1] and query_item[-1] == item[3]: key = (item[2], item[4], item[5]) try: rows[key].append(item[7]) except: rows[key] = [item[7]] # loop through mod_ens_vers combination counting files to download/update newrows=[] for key in rows.keys(): ndown = rows[key].count("D") status = key[2] + " " + str(len(rows[key])) + " files, " + str(ndown) + " to update" newrows.append([tuple(key[0:2]), status]) return newrows def result_matrix(querypub,exp0): ''' Build a matrix of the results to output to csv table ''' global gmatrix # querypub contains only published combinations # initialize dictionary of exp/matrices gmatrix = {} for exp in exp0: # for each var_mip retrieve_info create a dict{var_mip:[[(mod1,ens1), details list][(mod1,ens2), details list],[..]]} # they are added to exp_dict and each key will be column header, (mod1,ens1) will indicate row and details will be cell value exp_dict={} infoexp = [x for x in querypub if x[-1] == exp] for item in infoexp: exp_dict[item[0]]=retrieve_info(item) gmatrix[exp]= exp_dict return def compare_query(var0,mod0,exp0): ''' compare the var_mod_exp combinations found with the requested ones ''' global info, opub # for each el. 
of info: join var_mip, transform to tuple, finally convert modified info to set info_set = set(map(tuple,[["_".join(x[0:2])] + x[2:-4] for x in info.values()])) # create set with all possible combinations of var_mip,model,exp based on constraints # if models not specified create a model list based on wget result if len(mod0) < 1: mod0 = [x[2] for x in info.values()] comb_query = set(itertools.product(*[var0,mod0,exp0])) # the difference between two sets gives combinations not published yet nopub_set = comb_query.difference(info_set) for item in nopub_set: opub.write(",".join(item) + "\n") # write a matrix to pass results to csv table in suitable format if table: result_matrix(comb_query.difference(nopub_set),exp0) return nopub_set def write_table(nopub): ''' write a csv table to summarise search ''' global gmatrix for exp in exp0: # length of dictionary gmatrix[exp] is number of var_mip columns # maximum length of list in each dict inside gmatrix[exp] is number of mod/ens rows emat = gmatrix[exp] klist = emat.keys() # check if there are extra variables never published evar = list(set( [np[0] for np in nopub if np[0] not in klist if np[-1]==exp ] )) # calculate ncol,nrow keeping into account var never published ncol = len(klist) +2 + len(evar) nrow = max([len(emat[x]) for x in klist]) +1 # open/create a csv file for each experiment try: csv = open(exp+".csv","w") except: print "Can not open file " + exp + ".csv" csv.write(" model_ensemble/variable," + ",".join(klist+evar) + "\n") # pre-fill all values with "NP", leave 1 column and 1 row for headers # write first two columns with all (mod,ens) pairs col1= [emat[var][i][0] for var in klist for i in range(len(emat[var])) ] col1 = list(set(col1)) col1_sort=sorted(col1) # write first column with mod_ens combinations & save row indexes in dict where keys are (mod,ens) combination # print col1_sort for modens in col1_sort: csv.write(modens[0] + "_" + modens[1]) for var in klist: line = [item[1].replace(", " , " (") for 
item in emat[var] if item[0] == modens] if len(line) > 0: csv.write(", " + " ".join(line) + ")") else: csv.write(",NP") if len(evar) > 0: for var in evar: csv.write(",NP") csv.write("\n") csv.close() print "Data written in table " return def main(): ''' Main program starts here ''' global opub, odown, orep, info # somefile is false starting turns to true if at elast one file found somefile=False # read inputs and assign constraints assign_constraint() fdown = outfile + '_to_download.csv' frep = outfile + '_replica.csv' fpub = outfile + '_not_published.csv' # test reading inputs print var0 print exp0 print mod0 print fdown print frep print fpub # if one of the output files exists issue a warning an exit if opath.isfile(fdown) or opath.isfile(frep) or opath.isfile(fpub): print "Warning: one of the output files exists, exit to not overwrite!" sys.exit() info={} # loop through experiments, 1st create a wget request for exp, then parse_file for exp in exp0: wgetfile = "wget_" + exp + ".out" result=parse_file(wgetfile,var0,mod0,exp) # if found any files matching constraints, process them one by one # using multiprocessing Pool to parallelise process_file if result: async_results = Pool(1).map_async(process_file, result) for dinfo in async_results.get(): info.update(dinfo) somefile=True print "Finished checksum for existing files" # if it couldn't find any file for any experiment then exit if not somefile: sys.exit("No files found for any of the experiments, exiting!") # open not published file opub=open(fpub, "w") opub.write("var_mip-table, model, experiment\n") # build all requested combinations and compare to files found nopub_set = compare_query(var0,mod0,exp0) # write replica and download output files # open output files and write header odown=open(fdown, "w") odown.write("var, mip_table, model, experiment, ensemble, version, file url\n") orep=open(frep, "w") orep.write("var, mip_table, model, experiment, ensemble, version, filepath\n") write_file() # close all the 
output files odown.close() orep.close() opub.close() print "Finished to write output files" # if table option create/open spreadsheet # if table option write summary table in csv file if table: write_table(nopub_set) # check python version and then call main() if sys.version_info < ( 2, 7): # python too old, kill the script sys.exit("This script requires Python 2.7 or newer!") main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475175"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">Dexhub/MTX</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">src/mem/slicc/ast/OutPortDeclAST.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">92</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2802</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (c) 1999-2008 Mark D. Hill and David A. Wood # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from slicc.ast.DeclAST import DeclAST from slicc.ast.TypeAST import TypeAST from slicc.symbols import Var from slicc.symbols import Type class OutPortDeclAST(DeclAST): def __init__(self, slicc, ident, msg_type, var_expr, pairs): super(OutPortDeclAST, self).__init__(slicc, pairs) self.ident = ident self.msg_type = msg_type self.var_expr = var_expr self.queue_type = TypeAST(slicc, "OutPort") def __repr__(self): return "[OutPortDecl: %r]" % self.ident def generate(self): code = self.slicc.codeFormatter(newlines=False) queue_type = self.var_expr.generate(code) if not queue_type.isOutPort: self.error("The outport queue's type must have the 'outport' " + "attribute. Type '%s' does not have this attribute.", (queue_type)) if not self.symtab.find(self.msg_type.ident, Type): self.error("The message type '%s' does not exist.", self.msg_type.ident) var = Var(self.symtab, self.ident, self.location, self.queue_type.type, str(code), self.pairs) self.symtab.newSymbol(var) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475176"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">geomapdev/idea-box</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">src/idea/tests/editidea_tests.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" 
dir="auto"> <div> <span class="block ">6582</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from django.contrib.auth import get_user_model from django.core.urlresolvers import reverse from django.test import TestCase from idea import models, views from idea.forms import IdeaForm, PrivateIdeaForm from idea.tests.utils import mock_req, random_user, login, create_superuser from datetime import date from mock import patch def create_idea(user=None): if not user: user = random_user() state = models.State.objects.get(name='Active') idea = models.Idea(creator=user, title='Transit subsidy to Mars', text='Aliens need assistance.', state=state) banner = models.Banner(id=1, title="AAAA", text="text1", start_date=date.today()) banner.save() idea.banner = banner idea.save() idea.tags.add("test tag") return idea class EditIdeaTest(TestCase): fixtures = ['state'] def setUp(self): create_superuser() def test_edit_good_idea(self): """ Test an normal POST submission to edit an idea. 
""" user = login(self) idea = create_idea(user=user) self.assertEquals(models.Idea.objects.all().count(), 1) new_title = "new title" new_summary = "new summary" new_text = "new text" new_banner = models.Banner(id=2, title="BBB", text="text2", start_date=date.today()) new_banner.save() resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':new_title, 'summary':new_summary, 'text':new_text, 'banner': new_banner.id}) self.assertEqual(resp.status_code, 302) self.assertIn('detail', resp['Location']) self.assertEquals(models.Idea.objects.all().count(), 1) # ensure editing an idea does not up the vote count # vote count is 0 because votes are added in views.add_idea, which is not used in this test num_voters = get_user_model().objects.filter(vote__idea__pk=idea.id, vote__vote=1).count() self.assertEqual(num_voters, 0) refresh_idea = models.Idea.objects.get(id=idea.id) self.assertEqual(refresh_idea.title, new_title) self.assertEqual(refresh_idea.summary, new_summary) self.assertEqual(refresh_idea.text, new_text) self.assertEqual(refresh_idea.banner, new_banner) # verify the expected fields remain the same self.assertEqual(refresh_idea.tags.count(), 1) self.assertEqual(refresh_idea.tags.all()[0].name, "test tag") self.assertEqual(refresh_idea.creator, idea.creator) def test_bad_edit_idea(self): """ Test an incomplete POST submission to edit an idea. """ user = login(self) idea = create_idea(user=user) resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'text':'new title'}) self.assertEqual(resp.status_code, 200) self.assertIn('This field is required.', resp.content) self.assertEquals(models.Idea.objects.all().count(), 1) refresh_idea = models.Idea.objects.get(id=idea.id) self.assertEqual(refresh_idea.title, idea.title) self.assertEqual(refresh_idea.banner, idea.banner) def test_must_be_logged_in(self): """ A user must be logged in to edit an idea. 
""" user = login(self) idea = create_idea(user=user) self.client.logout() resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text'}) self.assertEqual(resp.status_code, 302) self.assertIn('login', resp['Location']) def test_edit_ignores_tags(self): """ A user must be logged in to edit an idea. """ user = login(self) idea = create_idea(user=user) resp = self.client.post(reverse('idea:edit_idea', args=(idea.id,)), {'title':'test title', 'summary':'test summary', 'text':'test text', 'tags':'sample, newtag'}) self.assertEqual(resp.status_code, 302) self.assertIn('detail', resp['Location']) refresh_idea = models.Idea.objects.get(id=idea.id) self.assertEqual(refresh_idea.tags.count(), 1) self.assertEqual(refresh_idea.tags.all()[0].name, "test tag") @patch('idea.views.render') def test_edit_idea_with_private_banner(self, render): """ Verify that the private banner field auto-populates properly """ user = login(self) state = models.State.objects.get(name='Active') idea1 = models.Idea(creator=user, title='Transit subsidy to Venus', text='Aliens need assistance.', state=state) banner1 = models.Banner(id=1, title="AAAA", text="text1", start_date=date.today(), is_private=True) banner1.save() idea1.banner = banner1 idea1.save() idea2 = models.Idea(creator=user, title='Transit subsidy to Venus', text='Aliens need assistance.', state=state) banner2 = models.Banner(id=2, title="BBBB", text="text2", start_date=date.today()) banner2.save() idea2.banner = banner2 idea2.save() views.edit_idea(mock_req(user=user), idea1.id) context = render.call_args[0][2] self.assertTrue('form' in context) self.assertTrue(isinstance(context['form'], PrivateIdeaForm)) banner_field = context['form'].fields['banner'] selected = context['form'].initial['banner'] self.assertEqual(banner1.id, selected) self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, None) self.assertIn(banner1, 
banner_field._queryset) self.assertNotIn(banner2, banner_field._queryset) views.edit_idea(mock_req(user=user), idea2.id) context = render.call_args[0][2] self.assertTrue('form' in context) self.assertTrue(isinstance(context['form'], IdeaForm)) self.assertFalse(isinstance(context['form'], PrivateIdeaForm)) banner_field = context['form'].fields['banner'] selected = context['form'].initial['banner'] self.assertEqual(banner2.id, selected) self.assertEqual(context['form'].fields['banner'].widget.choices.field.empty_label, 'Select') self.assertNotIn(banner1, banner_field._queryset) self.assertIn(banner2, banner_field._queryset) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cc0-1.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475177"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">boyuegame/kbengine</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">kbe/res/scripts/common/Lib/test/test_bz2.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">72</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">32972</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from test import support from test.support import bigmemtest, _4G import unittest from io import BytesIO import os import pickle 
import random import subprocess import sys from test.support import unlink try: import threading except ImportError: threading = None # Skip tests if the bz2 module doesn't exist. bz2 = support.import_module('bz2') from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor class BaseTest(unittest.TestCase): "Base for other testcases." TEXT_LINES = [ b'root:x:0:0:root:/root:/bin/bash\n', b'bin:x:1:1:bin:/bin:\n', b'daemon:x:2:2:daemon:/sbin:\n', b'adm:x:3:4:adm:/var/adm:\n', b'lp:x:4:7:lp:/var/spool/lpd:\n', b'sync:x:5:0:sync:/sbin:/bin/sync\n', b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n', b'halt:x:7:0:halt:/sbin:/sbin/halt\n', b'mail:x:8:12:mail:/var/spool/mail:\n', b'news:x:9:13:news:/var/spool/news:\n', b'uucp:x:10:14:uucp:/var/spool/uucp:\n', b'operator:x:11:0:operator:/root:\n', b'games:x:12:100:games:/usr/games:\n', b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n', b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n', b'nobody:x:65534:65534:Nobody:/home:\n', b'postfix:x:100:101:postfix:/var/spool/postfix:\n', b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n', b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n', b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n', b'www:x:103:104::/var/www:/bin/false\n', ] TEXT = b''.join(TEXT_LINES) DATA = 
b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`' EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00' BAD_DATA = b'this is not a valid bzip2 file' def setUp(self): self.filename = support.TESTFN def tearDown(self): if os.path.isfile(self.filename): os.unlink(self.filename) if sys.platform == "win32": # bunzip2 isn't available to run on Windows. def decompress(self, data): return bz2.decompress(data) else: def decompress(self, data): pop = subprocess.Popen("bunzip2", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pop.stdin.write(data) pop.stdin.close() ret = pop.stdout.read() pop.stdout.close() if pop.wait() != 0: ret = bz2.decompress(data) return ret class BZ2FileTest(BaseTest): "Test the BZ2File class." 
def createTempFile(self, streams=1, suffix=b""): with open(self.filename, "wb") as f: f.write(self.DATA * streams) f.write(suffix) def testBadArgs(self): self.assertRaises(TypeError, BZ2File, 123.456) self.assertRaises(ValueError, BZ2File, "/dev/null", "z") self.assertRaises(ValueError, BZ2File, "/dev/null", "rx") self.assertRaises(ValueError, BZ2File, "/dev/null", "rbt") self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=0) self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=10) def testRead(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(), self.TEXT) def testReadBadFile(self): self.createTempFile(streams=0, suffix=self.BAD_DATA) with BZ2File(self.filename) as bz2f: self.assertRaises(OSError, bz2f.read) def testReadMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(), self.TEXT * 5) def testReadMonkeyMultiStream(self): # Test BZ2File.read() on a multi-stream archive where a stream # boundary coincides with the end of the raw read buffer. 
buffer_size = bz2._BUFFER_SIZE bz2._BUFFER_SIZE = len(self.DATA) try: self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(), self.TEXT * 5) finally: bz2._BUFFER_SIZE = buffer_size def testReadTrailingJunk(self): self.createTempFile(suffix=self.BAD_DATA) with BZ2File(self.filename) as bz2f: self.assertEqual(bz2f.read(), self.TEXT) def testReadMultiStreamTrailingJunk(self): self.createTempFile(streams=5, suffix=self.BAD_DATA) with BZ2File(self.filename) as bz2f: self.assertEqual(bz2f.read(), self.TEXT * 5) def testRead0(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(0), b"") def testReadChunk10(self): self.createTempFile() with BZ2File(self.filename) as bz2f: text = b'' while True: str = bz2f.read(10) if not str: break text += str self.assertEqual(text, self.TEXT) def testReadChunk10MultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: text = b'' while True: str = bz2f.read(10) if not str: break text += str self.assertEqual(text, self.TEXT * 5) def testRead100(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertEqual(bz2f.read(100), self.TEXT[:100]) def testPeek(self): self.createTempFile() with BZ2File(self.filename) as bz2f: pdata = bz2f.peek() self.assertNotEqual(len(pdata), 0) self.assertTrue(self.TEXT.startswith(pdata)) self.assertEqual(bz2f.read(), self.TEXT) def testReadInto(self): self.createTempFile() with BZ2File(self.filename) as bz2f: n = 128 b = bytearray(n) self.assertEqual(bz2f.readinto(b), n) self.assertEqual(b, self.TEXT[:n]) n = len(self.TEXT) - n b = bytearray(len(self.TEXT)) self.assertEqual(bz2f.readinto(b), n) self.assertEqual(b[:n], self.TEXT[-n:]) def testReadLine(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readline, None) for line in self.TEXT_LINES: 
self.assertEqual(bz2f.readline(), line) def testReadLineMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readline, None) for line in self.TEXT_LINES * 5: self.assertEqual(bz2f.readline(), line) def testReadLines(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readlines, None) self.assertEqual(bz2f.readlines(), self.TEXT_LINES) def testReadLinesMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.readlines, None) self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5) def testIterator(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertEqual(list(iter(bz2f)), self.TEXT_LINES) def testIteratorMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5) def testClosedIteratorDeadlock(self): # Issue #3309: Iteration on a closed BZ2File should release the lock. self.createTempFile() bz2f = BZ2File(self.filename) bz2f.close() self.assertRaises(ValueError, next, bz2f) # This call will deadlock if the above call failed to release the lock. 
self.assertRaises(ValueError, bz2f.readlines) def testWrite(self): with BZ2File(self.filename, "w") as bz2f: self.assertRaises(TypeError, bz2f.write) bz2f.write(self.TEXT) with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteChunks10(self): with BZ2File(self.filename, "w") as bz2f: n = 0 while True: str = self.TEXT[n*10:(n+1)*10] if not str: break bz2f.write(str) n += 1 with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteNonDefaultCompressLevel(self): expected = bz2.compress(self.TEXT, compresslevel=5) with BZ2File(self.filename, "w", compresslevel=5) as bz2f: bz2f.write(self.TEXT) with open(self.filename, "rb") as f: self.assertEqual(f.read(), expected) def testWriteLines(self): with BZ2File(self.filename, "w") as bz2f: self.assertRaises(TypeError, bz2f.writelines) bz2f.writelines(self.TEXT_LINES) # Issue #1535500: Calling writelines() on a closed BZ2File # should raise an exception. 
self.assertRaises(ValueError, bz2f.writelines, ["a"]) with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT) def testWriteMethodsOnReadOnlyFile(self): with BZ2File(self.filename, "w") as bz2f: bz2f.write(b"abc") with BZ2File(self.filename, "r") as bz2f: self.assertRaises(OSError, bz2f.write, b"a") self.assertRaises(OSError, bz2f.writelines, [b"a"]) def testAppend(self): with BZ2File(self.filename, "w") as bz2f: self.assertRaises(TypeError, bz2f.write) bz2f.write(self.TEXT) with BZ2File(self.filename, "a") as bz2f: self.assertRaises(TypeError, bz2f.write) bz2f.write(self.TEXT) with open(self.filename, 'rb') as f: self.assertEqual(self.decompress(f.read()), self.TEXT * 2) def testSeekForward(self): self.createTempFile() with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.seek) bz2f.seek(150) self.assertEqual(bz2f.read(), self.TEXT[150:]) def testSeekForwardAcrossStreams(self): self.createTempFile(streams=2) with BZ2File(self.filename) as bz2f: self.assertRaises(TypeError, bz2f.seek) bz2f.seek(len(self.TEXT) + 150) self.assertEqual(bz2f.read(), self.TEXT[150:]) def testSeekBackwards(self): self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.read(500) bz2f.seek(-150, 1) self.assertEqual(bz2f.read(), self.TEXT[500-150:]) def testSeekBackwardsAcrossStreams(self): self.createTempFile(streams=2) with BZ2File(self.filename) as bz2f: readto = len(self.TEXT) + 100 while readto > 0: readto -= len(bz2f.read(readto)) bz2f.seek(-150, 1) self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT) def testSeekBackwardsFromEnd(self): self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(-150, 2) self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:]) def testSeekBackwardsFromEndAcrossStreams(self): self.createTempFile(streams=2) with BZ2File(self.filename) as bz2f: bz2f.seek(-1000, 2) self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:]) def testSeekPostEnd(self): self.createTempFile() with 
BZ2File(self.filename) as bz2f: bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT)) self.assertEqual(bz2f.read(), b"") def testSeekPostEndMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT) * 5) self.assertEqual(bz2f.read(), b"") def testSeekPostEndTwice(self): self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(150000) bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT)) self.assertEqual(bz2f.read(), b"") def testSeekPostEndTwiceMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: bz2f.seek(150000) bz2f.seek(150000) self.assertEqual(bz2f.tell(), len(self.TEXT) * 5) self.assertEqual(bz2f.read(), b"") def testSeekPreStart(self): self.createTempFile() with BZ2File(self.filename) as bz2f: bz2f.seek(-150) self.assertEqual(bz2f.tell(), 0) self.assertEqual(bz2f.read(), self.TEXT) def testSeekPreStartMultiStream(self): self.createTempFile(streams=2) with BZ2File(self.filename) as bz2f: bz2f.seek(-150) self.assertEqual(bz2f.tell(), 0) self.assertEqual(bz2f.read(), self.TEXT * 2) def testFileno(self): self.createTempFile() with open(self.filename, 'rb') as rawf: bz2f = BZ2File(rawf) try: self.assertEqual(bz2f.fileno(), rawf.fileno()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.fileno) def testSeekable(self): bz2f = BZ2File(BytesIO(self.DATA)) try: self.assertTrue(bz2f.seekable()) bz2f.read() self.assertTrue(bz2f.seekable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.seekable) bz2f = BZ2File(BytesIO(), "w") try: self.assertFalse(bz2f.seekable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.seekable) src = BytesIO(self.DATA) src.seekable = lambda: False bz2f = BZ2File(src) try: self.assertFalse(bz2f.seekable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.seekable) def testReadable(self): bz2f = BZ2File(BytesIO(self.DATA)) try: 
self.assertTrue(bz2f.readable()) bz2f.read() self.assertTrue(bz2f.readable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.readable) bz2f = BZ2File(BytesIO(), "w") try: self.assertFalse(bz2f.readable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.readable) def testWritable(self): bz2f = BZ2File(BytesIO(self.DATA)) try: self.assertFalse(bz2f.writable()) bz2f.read() self.assertFalse(bz2f.writable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.writable) bz2f = BZ2File(BytesIO(), "w") try: self.assertTrue(bz2f.writable()) finally: bz2f.close() self.assertRaises(ValueError, bz2f.writable) def testOpenDel(self): self.createTempFile() for i in range(10000): o = BZ2File(self.filename) del o def testOpenNonexistent(self): self.assertRaises(OSError, BZ2File, "/non/existent") def testReadlinesNoNewline(self): # Issue #1191043: readlines() fails on a file containing no newline. data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t' with open(self.filename, "wb") as f: f.write(data) with BZ2File(self.filename) as bz2f: lines = bz2f.readlines() self.assertEqual(lines, [b'Test']) with BZ2File(self.filename) as bz2f: xlines = list(bz2f.readlines()) self.assertEqual(xlines, [b'Test']) def testContextProtocol(self): f = None with BZ2File(self.filename, "wb") as f: f.write(b"xxx") f = BZ2File(self.filename, "rb") f.close() try: with f: pass except ValueError: pass else: self.fail("__enter__ on a closed file didn't raise an exception") try: with BZ2File(self.filename, "wb") as f: 1/0 except ZeroDivisionError: pass else: self.fail("1/0 didn't raise an exception") @unittest.skipUnless(threading, 'Threading required for this test.') def testThreading(self): # Issue #7205: Using a BZ2File from several threads shouldn't deadlock. 
data = b"1" * 2**20 nthreads = 10 with BZ2File(self.filename, 'wb') as f: def comp(): for i in range(5): f.write(data) threads = [threading.Thread(target=comp) for i in range(nthreads)] for t in threads: t.start() for t in threads: t.join() def testWithoutThreading(self): module = support.import_fresh_module("bz2", blocked=("threading",)) with module.BZ2File(self.filename, "wb") as f: f.write(b"abc") with module.BZ2File(self.filename, "rb") as f: self.assertEqual(f.read(), b"abc") def testMixedIterationAndReads(self): self.createTempFile() linelen = len(self.TEXT_LINES[0]) halflen = linelen // 2 with BZ2File(self.filename) as bz2f: bz2f.read(halflen) self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:]) self.assertEqual(bz2f.read(), self.TEXT[linelen:]) with BZ2File(self.filename) as bz2f: bz2f.readline() self.assertEqual(next(bz2f), self.TEXT_LINES[1]) self.assertEqual(bz2f.readline(), self.TEXT_LINES[2]) with BZ2File(self.filename) as bz2f: bz2f.readlines() self.assertRaises(StopIteration, next, bz2f) self.assertEqual(bz2f.readlines(), []) def testMultiStreamOrdering(self): # Test the ordering of streams when reading a multi-stream archive. data1 = b"foo" * 1000 data2 = b"bar" * 1000 with BZ2File(self.filename, "w") as bz2f: bz2f.write(data1) with BZ2File(self.filename, "a") as bz2f: bz2f.write(data2) with BZ2File(self.filename) as bz2f: self.assertEqual(bz2f.read(), data1 + data2) def testOpenBytesFilename(self): str_filename = self.filename try: bytes_filename = str_filename.encode("ascii") except UnicodeEncodeError: self.skipTest("Temporary file name needs to be ASCII") with BZ2File(bytes_filename, "wb") as f: f.write(self.DATA) with BZ2File(bytes_filename, "rb") as f: self.assertEqual(f.read(), self.DATA) # Sanity check that we are actually operating on the right file. 
with BZ2File(str_filename, "rb") as f: self.assertEqual(f.read(), self.DATA) # Tests for a BZ2File wrapping another file object: def testReadBytesIO(self): with BytesIO(self.DATA) as bio: with BZ2File(bio) as bz2f: self.assertRaises(TypeError, bz2f.read, None) self.assertEqual(bz2f.read(), self.TEXT) self.assertFalse(bio.closed) def testPeekBytesIO(self): with BytesIO(self.DATA) as bio: with BZ2File(bio) as bz2f: pdata = bz2f.peek() self.assertNotEqual(len(pdata), 0) self.assertTrue(self.TEXT.startswith(pdata)) self.assertEqual(bz2f.read(), self.TEXT) def testWriteBytesIO(self): with BytesIO() as bio: with BZ2File(bio, "w") as bz2f: self.assertRaises(TypeError, bz2f.write) bz2f.write(self.TEXT) self.assertEqual(self.decompress(bio.getvalue()), self.TEXT) self.assertFalse(bio.closed) def testSeekForwardBytesIO(self): with BytesIO(self.DATA) as bio: with BZ2File(bio) as bz2f: self.assertRaises(TypeError, bz2f.seek) bz2f.seek(150) self.assertEqual(bz2f.read(), self.TEXT[150:]) def testSeekBackwardsBytesIO(self): with BytesIO(self.DATA) as bio: with BZ2File(bio) as bz2f: bz2f.read(500) bz2f.seek(-150, 1) self.assertEqual(bz2f.read(), self.TEXT[500-150:]) def test_read_truncated(self): # Drop the eos_magic field (6 bytes) and CRC (4 bytes). truncated = self.DATA[:-10] with BZ2File(BytesIO(truncated)) as f: self.assertRaises(EOFError, f.read) with BZ2File(BytesIO(truncated)) as f: self.assertEqual(f.read(len(self.TEXT)), self.TEXT) self.assertRaises(EOFError, f.read, 1) # Incomplete 4-byte file header, and block header of at least 146 bits. 
for i in range(22): with BZ2File(BytesIO(truncated[:i])) as f: self.assertRaises(EOFError, f.read, 1) class BZ2CompressorTest(BaseTest): def testCompress(self): bz2c = BZ2Compressor() self.assertRaises(TypeError, bz2c.compress) data = bz2c.compress(self.TEXT) data += bz2c.flush() self.assertEqual(self.decompress(data), self.TEXT) def testCompressEmptyString(self): bz2c = BZ2Compressor() data = bz2c.compress(b'') data += bz2c.flush() self.assertEqual(data, self.EMPTY_DATA) def testCompressChunks10(self): bz2c = BZ2Compressor() n = 0 data = b'' while True: str = self.TEXT[n*10:(n+1)*10] if not str: break data += bz2c.compress(str) n += 1 data += bz2c.flush() self.assertEqual(self.decompress(data), self.TEXT) @bigmemtest(size=_4G + 100, memuse=2) def testCompress4G(self, size): # "Test BZ2Compressor.compress()/flush() with >4GiB input" bz2c = BZ2Compressor() data = b"x" * size try: compressed = bz2c.compress(data) compressed += bz2c.flush() finally: data = None # Release memory data = bz2.decompress(compressed) try: self.assertEqual(len(data), size) self.assertEqual(len(data.strip(b"x")), 0) finally: data = None def testPickle(self): with self.assertRaises(TypeError): pickle.dumps(BZ2Compressor()) class BZ2DecompressorTest(BaseTest): def test_Constructor(self): self.assertRaises(TypeError, BZ2Decompressor, 42) def testDecompress(self): bz2d = BZ2Decompressor() self.assertRaises(TypeError, bz2d.decompress) text = bz2d.decompress(self.DATA) self.assertEqual(text, self.TEXT) def testDecompressChunks10(self): bz2d = BZ2Decompressor() text = b'' n = 0 while True: str = self.DATA[n*10:(n+1)*10] if not str: break text += bz2d.decompress(str) n += 1 self.assertEqual(text, self.TEXT) def testDecompressUnusedData(self): bz2d = BZ2Decompressor() unused_data = b"this is unused data" text = bz2d.decompress(self.DATA+unused_data) self.assertEqual(text, self.TEXT) self.assertEqual(bz2d.unused_data, unused_data) def testEOFError(self): bz2d = BZ2Decompressor() text = 
bz2d.decompress(self.DATA) self.assertRaises(EOFError, bz2d.decompress, b"anything") self.assertRaises(EOFError, bz2d.decompress, b"") @bigmemtest(size=_4G + 100, memuse=3.3) def testDecompress4G(self, size): # "Test BZ2Decompressor.decompress() with >4GiB input" blocksize = 10 * 1024 * 1024 block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little') try: data = block * (size // blocksize + 1) compressed = bz2.compress(data) bz2d = BZ2Decompressor() decompressed = bz2d.decompress(compressed) self.assertTrue(decompressed == data) finally: data = None compressed = None decompressed = None def testPickle(self): with self.assertRaises(TypeError): pickle.dumps(BZ2Decompressor()) class CompressDecompressTest(BaseTest): def testCompress(self): data = bz2.compress(self.TEXT) self.assertEqual(self.decompress(data), self.TEXT) def testCompressEmptyString(self): text = bz2.compress(b'') self.assertEqual(text, self.EMPTY_DATA) def testDecompress(self): text = bz2.decompress(self.DATA) self.assertEqual(text, self.TEXT) def testDecompressEmpty(self): text = bz2.decompress(b"") self.assertEqual(text, b"") def testDecompressToEmptyString(self): text = bz2.decompress(self.EMPTY_DATA) self.assertEqual(text, b'') def testDecompressIncomplete(self): self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10]) def testDecompressBadData(self): self.assertRaises(OSError, bz2.decompress, self.BAD_DATA) def testDecompressMultiStream(self): text = bz2.decompress(self.DATA * 5) self.assertEqual(text, self.TEXT * 5) def testDecompressTrailingJunk(self): text = bz2.decompress(self.DATA + self.BAD_DATA) self.assertEqual(text, self.TEXT) def testDecompressMultiStreamTrailingJunk(self): text = bz2.decompress(self.DATA * 5 + self.BAD_DATA) self.assertEqual(text, self.TEXT * 5) class OpenTest(BaseTest): "Test the open function." 
def open(self, *args, **kwargs): return bz2.open(*args, **kwargs) def test_binary_modes(self): for mode in ("wb", "xb"): if mode == "xb": unlink(self.filename) with self.open(self.filename, mode) as f: f.write(self.TEXT) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()) self.assertEqual(file_data, self.TEXT) with self.open(self.filename, "rb") as f: self.assertEqual(f.read(), self.TEXT) with self.open(self.filename, "ab") as f: f.write(self.TEXT) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()) self.assertEqual(file_data, self.TEXT * 2) def test_implicit_binary_modes(self): # Test implicit binary modes (no "b" or "t" in mode string). for mode in ("w", "x"): if mode == "x": unlink(self.filename) with self.open(self.filename, mode) as f: f.write(self.TEXT) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()) self.assertEqual(file_data, self.TEXT) with self.open(self.filename, "r") as f: self.assertEqual(f.read(), self.TEXT) with self.open(self.filename, "a") as f: f.write(self.TEXT) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()) self.assertEqual(file_data, self.TEXT * 2) def test_text_modes(self): text = self.TEXT.decode("ascii") text_native_eol = text.replace("\n", os.linesep) for mode in ("wt", "xt"): if mode == "xt": unlink(self.filename) with self.open(self.filename, mode) as f: f.write(text) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()).decode("ascii") self.assertEqual(file_data, text_native_eol) with self.open(self.filename, "rt") as f: self.assertEqual(f.read(), text) with self.open(self.filename, "at") as f: f.write(text) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()).decode("ascii") self.assertEqual(file_data, text_native_eol * 2) def test_x_mode(self): for mode in ("x", "xb", "xt"): unlink(self.filename) with self.open(self.filename, mode) as f: pass with self.assertRaises(FileExistsError): with 
self.open(self.filename, mode) as f: pass def test_fileobj(self): with self.open(BytesIO(self.DATA), "r") as f: self.assertEqual(f.read(), self.TEXT) with self.open(BytesIO(self.DATA), "rb") as f: self.assertEqual(f.read(), self.TEXT) text = self.TEXT.decode("ascii") with self.open(BytesIO(self.DATA), "rt") as f: self.assertEqual(f.read(), text) def test_bad_params(self): # Test invalid parameter combinations. self.assertRaises(ValueError, self.open, self.filename, "wbt") self.assertRaises(ValueError, self.open, self.filename, "xbt") self.assertRaises(ValueError, self.open, self.filename, "rb", encoding="utf-8") self.assertRaises(ValueError, self.open, self.filename, "rb", errors="ignore") self.assertRaises(ValueError, self.open, self.filename, "rb", newline="\n") def test_encoding(self): # Test non-default encoding. text = self.TEXT.decode("ascii") text_native_eol = text.replace("\n", os.linesep) with self.open(self.filename, "wt", encoding="utf-16-le") as f: f.write(text) with open(self.filename, "rb") as f: file_data = self.decompress(f.read()).decode("utf-16-le") self.assertEqual(file_data, text_native_eol) with self.open(self.filename, "rt", encoding="utf-16-le") as f: self.assertEqual(f.read(), text) def test_encoding_error_handler(self): # Test with non-default encoding error handler. with self.open(self.filename, "wb") as f: f.write(b"foo\xffbar") with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \ as f: self.assertEqual(f.read(), "foobar") def test_newline(self): # Test with explicit newline (universal newline mode disabled). 
text = self.TEXT.decode("ascii") with self.open(self.filename, "wt", newline="\n") as f: f.write(text) with self.open(self.filename, "rt", newline="\r") as f: self.assertEqual(f.readlines(), [text]) def test_main(): support.run_unittest( BZ2FileTest, BZ2CompressorTest, BZ2DecompressorTest, CompressDecompressTest, OpenTest, ) support.reap_children() if __name__ == '__main__': test_main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lgpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475178"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">chylli/phantomjs</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">src/qt/qtwebkit/Source/WebCore/inspector/compile-front-end.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">116</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">15388</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os import os.path import generate_protocol_externs import shutil import sys import tempfile inspector_path = "Source/WebCore/inspector" inspector_frontend_path = inspector_path + "/front-end" protocol_externs_path = inspector_frontend_path + "/protocol-externs.js" generate_protocol_externs.generate_protocol_externs(protocol_externs_path, inspector_path + "/Inspector.json") jsmodule_name_prefix = "jsmodule_" modules = [ { "name": "common", "dependencies": [], "sources": [ "Color.js", "DOMExtension.js", "Object.js", "ParsedURL.js", "Progress.js", "Settings.js", "UIString.js", "UserMetrics.js", "utilities.js", ] }, { "name": "sdk", "dependencies": ["common"], "sources": [ "ApplicationCacheModel.js", "CompilerScriptMapping.js", "ConsoleModel.js", "ContentProvider.js", "ContentProviderBasedProjectDelegate.js", "ContentProviders.js", "CookieParser.js", "CSSMetadata.js", "CSSStyleModel.js", "BreakpointManager.js", "Database.js", "DOMAgent.js", "DOMStorage.js", "DebuggerModel.js", "DebuggerScriptMapping.js", "FileManager.js", "FileMapping.js", "FileSystemMapping.js", "FileSystemModel.js", "FileSystemProjectDelegate.js", "FileUtils.js", "HAREntry.js", "IndexedDBModel.js", "InspectorBackend.js", "IsolatedFileSystemManager.js", "IsolatedFileSystem.js", "Linkifier.js", "NetworkLog.js", "NetworkUISourceCodeProvider.js", "PresentationConsoleMessageHelper.js", "RuntimeModel.js", "SASSSourceMapping.js", "Script.js", "ScriptFormatter.js", "ScriptSnippetModel.js", "SimpleWorkspaceProvider.js", "SnippetStorage.js", "SourceMapping.js", "StylesSourceMapping.js", "TimelineManager.js", "RemoteObject.js", "Resource.js", "DefaultScriptMapping.js", "ResourceScriptMapping.js", "LiveEditSupport.js", "ResourceTreeModel.js", "ResourceType.js", "ResourceUtils.js", "SourceMap.js", "NetworkManager.js", "NetworkRequest.js", "UISourceCode.js", "UserAgentSupport.js", "Workspace.js", "protocol-externs.js", ] }, { "name": "ui", "dependencies": ["common"], "sources": [ "Checkbox.js", 
"ContextMenu.js", "DOMSyntaxHighlighter.js", "DataGrid.js", "DefaultTextEditor.js", "Dialog.js", "DockController.js", "Drawer.js", "EmptyView.js", "GoToLineDialog.js", "HelpScreen.js", "InspectorView.js", "KeyboardShortcut.js", "OverviewGrid.js", "Panel.js", "PanelEnablerView.js", "Placard.js", "Popover.js", "ProgressIndicator.js", "PropertiesSection.js", "SearchController.js", "Section.js", "SidebarPane.js", "SidebarTreeElement.js", "ShortcutsScreen.js", "ShowMoreDataGridNode.js", "SidebarOverlay.js", "SoftContextMenu.js", "SourceTokenizer.js", "Spectrum.js", "SplitView.js", "SidebarView.js", "StatusBarButton.js", "SuggestBox.js", "TabbedPane.js", "TextEditor.js", "TextEditorHighlighter.js", "TextEditorModel.js", "TextPrompt.js", "TextUtils.js", "TimelineGrid.js", "Toolbar.js", "UIUtils.js", "View.js", "ViewportControl.js", "treeoutline.js", ] }, { "name": "components", "dependencies": ["sdk", "ui"], "sources": [ "AdvancedSearchController.js", "HandlerRegistry.js", "ConsoleMessage.js", "CookiesTable.js", "DOMBreakpointsSidebarPane.js", "DOMPresentationUtils.js", "ElementsTreeOutline.js", "FontView.js", "ImageView.js", "NativeBreakpointsSidebarPane.js", "InspectElementModeController.js", "ObjectPopoverHelper.js", "ObjectPropertiesSection.js", "SourceFrame.js", "ResourceView.js", ] }, { "name": "elements", "dependencies": ["components"], "sources": [ "CSSNamedFlowCollectionsView.js", "CSSNamedFlowView.js", "ElementsPanel.js", "ElementsPanelDescriptor.js", "EventListenersSidebarPane.js", "MetricsSidebarPane.js", "PropertiesSidebarPane.js", "StylesSidebarPane.js", ] }, { "name": "network", "dependencies": ["components"], "sources": [ "NetworkItemView.js", "RequestCookiesView.js", "RequestHeadersView.js", "RequestHTMLView.js", "RequestJSONView.js", "RequestPreviewView.js", "RequestResponseView.js", "RequestTimingView.js", "RequestView.js", "ResourceWebSocketFrameView.js", "NetworkPanel.js", "NetworkPanelDescriptor.js", ] }, { "name": "resources", "dependencies": 
["components"], "sources": [ "ApplicationCacheItemsView.js", "CookieItemsView.js", "DatabaseQueryView.js", "DatabaseTableView.js", "DirectoryContentView.js", "DOMStorageItemsView.js", "FileContentView.js", "FileSystemView.js", "IndexedDBViews.js", "ResourcesPanel.js", ] }, { "name": "workers", "dependencies": ["components"], "sources": [ "WorkerManager.js", ] }, { "name": "scripts", "dependencies": ["components", "workers"], "sources": [ "BreakpointsSidebarPane.js", "CallStackSidebarPane.js", "FilteredItemSelectionDialog.js", "JavaScriptSourceFrame.js", "NavigatorOverlayController.js", "NavigatorView.js", "RevisionHistoryView.js", "ScopeChainSidebarPane.js", "ScriptsNavigator.js", "ScriptsPanel.js", "ScriptsPanelDescriptor.js", "ScriptsSearchScope.js", "SnippetJavaScriptSourceFrame.js", "StyleSheetOutlineDialog.js", "TabbedEditorContainer.js", "UISourceCodeFrame.js", "WatchExpressionsSidebarPane.js", "WorkersSidebarPane.js", ] }, { "name": "console", "dependencies": ["components"], "sources": [ "ConsoleView.js", "ConsolePanel.js", ] }, { "name": "timeline", "dependencies": ["components"], "sources": [ "DOMCountersGraph.js", "MemoryStatistics.js", "NativeMemoryGraph.js", "TimelineModel.js", "TimelineOverviewPane.js", "TimelinePanel.js", "TimelinePanelDescriptor.js", "TimelinePresentationModel.js", "TimelineFrameController.js" ] }, { "name": "audits", "dependencies": ["components"], "sources": [ "AuditCategories.js", "AuditController.js", "AuditFormatters.js", "AuditLauncherView.js", "AuditResultView.js", "AuditRules.js", "AuditsPanel.js", ] }, { "name": "extensions", "dependencies": ["components"], "sources": [ "ExtensionAPI.js", "ExtensionAuditCategory.js", "ExtensionPanel.js", "ExtensionRegistryStub.js", "ExtensionServer.js", "ExtensionView.js", ] }, { "name": "settings", "dependencies": ["components", "extensions"], "sources": [ "SettingsScreen.js", "OverridesView.js", ] }, { "name": "tests", "dependencies": ["components"], "sources": [ "TestController.js", ] }, 
{ "name": "profiler", "dependencies": ["components", "workers"], "sources": [ "BottomUpProfileDataGridTree.js", "CPUProfileView.js", "CSSSelectorProfileView.js", "FlameChart.js", "HeapSnapshot.js", "HeapSnapshotDataGrids.js", "HeapSnapshotGridNodes.js", "HeapSnapshotLoader.js", "HeapSnapshotProxy.js", "HeapSnapshotView.js", "HeapSnapshotWorker.js", "HeapSnapshotWorkerDispatcher.js", "JSHeapSnapshot.js", "NativeHeapSnapshot.js", "ProfileDataGridTree.js", "ProfilesPanel.js", "ProfilesPanelDescriptor.js", "ProfileLauncherView.js", "TopDownProfileDataGridTree.js", "CanvasProfileView.js", ] }, { "name": "host_stub", "dependencies": ["components", "profiler", "timeline"], "sources": [ "InspectorFrontendAPI.js", "InspectorFrontendHostStub.js", ] } ] modules_by_name = {} for module in modules: modules_by_name[module["name"]] = module def dump_module(name, recursively, processed_modules): if name in processed_modules: return "" processed_modules[name] = True module = modules_by_name[name] command = "" if recursively: for dependency in module["dependencies"]: command += dump_module(dependency, recursively, processed_modules) command += " \\\n --module " + jsmodule_name_prefix + module["name"] + ":" command += str(len(module["sources"])) firstDependency = True for dependency in module["dependencies"]: if firstDependency: command += ":" else: command += "," firstDependency = False command += jsmodule_name_prefix + dependency for script in module["sources"]: command += " \\\n --js " + inspector_frontend_path + "/" + script return command modules_dir = tempfile.mkdtemp() compiler_command = "java -jar ~/closure/compiler.jar --summary_detail_level 3 --compilation_level SIMPLE_OPTIMIZATIONS --warning_level VERBOSE --language_in ECMASCRIPT5 --accept_const_keyword --module_output_path_prefix %s/ \\\n" % modules_dir process_recursively = len(sys.argv) > 1 if process_recursively: module_name = sys.argv[1] if module_name != "all": modules = [] for i in range(1, len(sys.argv)): 
modules.append(modules_by_name[sys.argv[i]]) for module in modules: command = compiler_command command += " --externs " + inspector_frontend_path + "/externs.js" command += dump_module(module["name"], True, {}) print "Compiling \"" + module["name"] + "\"" os.system(command) else: command = compiler_command command += " --externs " + inspector_frontend_path + "/externs.js" for module in modules: command += dump_module(module["name"], False, {}) os.system(command) if not process_recursively: print "Compiling InjectedScriptSource.js..." os.system("echo \"var injectedScriptValue = \" > " + inspector_path + "/" + "InjectedScriptSourceTmp.js") os.system("cat " + inspector_path + "/" + "InjectedScriptSource.js" + " >> " + inspector_path + "/" + "InjectedScriptSourceTmp.js") command = compiler_command command += " --externs " + inspector_path + "/" + "InjectedScriptExterns.js" + " \\\n" command += " --externs " + protocol_externs_path + " \\\n" command += " --module " + jsmodule_name_prefix + "injected_script" + ":" + "1" + " \\\n" command += " --js " + inspector_path + "/" + "InjectedScriptSourceTmp.js" + " \\\n" command += "\n" os.system(command) os.system("rm " + inspector_path + "/" + "InjectedScriptSourceTmp.js") print "Compiling InjectedScriptCanvasModuleSource.js..." 
os.system("echo \"var injectedScriptCanvasModuleValue = \" > " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js") os.system("cat " + inspector_path + "/" + "InjectedScriptCanvasModuleSource.js" + " >> " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js") command = compiler_command command += " --externs " + inspector_path + "/" + "InjectedScriptExterns.js" + " \\\n" command += " --externs " + protocol_externs_path + " \\\n" command += " --module " + jsmodule_name_prefix + "injected_script" + ":" + "1" + " \\\n" command += " --js " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js" + " \\\n" command += "\n" os.system(command) os.system("rm " + inspector_path + "/" + "InjectedScriptCanvasModuleSourceTmp.js") shutil.rmtree(modules_dir) #os.system("rm " + protocol_externs_path) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475179"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">lxml/lxml</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">src/lxml/tests/test_relaxng.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">8434</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 
"><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- """ Test cases related to RelaxNG parsing and validation """ from __future__ import absolute_import import unittest from .common_imports import ( etree, BytesIO, _bytes, HelperTestCase, fileInTestDir, make_doctest, skipif ) try: import rnc2rng except ImportError: rnc2rng = None class ETreeRelaxNGTestCase(HelperTestCase): def test_relaxng(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(schema) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertFalse(schema.validate(tree_invalid)) self.assertTrue(schema.error_log.filter_from_errors()) self.assertTrue(schema.validate(tree_valid)) # repeat valid self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid def test_relaxng_stringio(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema_file = BytesIO('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(file=schema_file) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) def test_relaxng_elementtree_error(self): self.assertRaises(ValueError, etree.RelaxNG, etree.ElementTree()) def test_relaxng_error(self): tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') schema = etree.RelaxNG(schema) self.assertFalse(schema.validate(tree_invalid)) errors = schema.error_log self.assertTrue([log for log in errors if log.level_name == "ERROR"]) 
self.assertTrue([log for log in errors if "not expect" in log.message]) def test_relaxng_generic_error(self): tree_invalid = self.parse('''\ <test> <reference id="my-ref">This is my unique ref.</reference> <data ref="my-ref">Valid data</data> <data ref="myref">Invalid data</data> </test> ''') schema = self.parse('''\ <grammar datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes" xmlns="http://relaxng.org/ns/structure/1.0"> <define name="by-ref"> <data type="IDREF"/> </define> <start> <element name="test"> <zeroOrMore> <element name="reference"> <attribute name="id"> <data type="ID"/> </attribute> <text/> </element> </zeroOrMore> <zeroOrMore> <element name="data"> <attribute name="ref"> <data type="IDREF"/> </attribute> <text/> </element> </zeroOrMore> </element> </start> </grammar> ''') schema = etree.RelaxNG(schema) self.assertFalse(schema.validate(tree_invalid)) errors = schema.error_log self.assertTrue(errors) self.assertTrue([log for log in errors if "IDREF" in log.message]) self.assertTrue([log for log in errors if "myref" in log.message]) def test_relaxng_invalid_schema(self): schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b" /> </zeroOrMore> </element> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema2(self): schema = self.parse('''\ <grammar xmlns="http://relaxng.org/ns/structure/1.0" /> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema3(self): schema = self.parse('''\ <grammar xmlns="http://relaxng.org/ns/structure/1.0"> <define name="test"> <element name="test"/> </define> </grammar> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_invalid_schema4(self): # segfault schema = self.parse('''\ <element name="a" xmlns="mynamespace" /> ''') self.assertRaises(etree.RelaxNGParseError, etree.RelaxNG, schema) def test_relaxng_include(self): # this 
will only work if we access the file through path or # file object.. f = open(fileInTestDir('test1.rng'), 'rb') try: schema = etree.RelaxNG(file=f) finally: f.close() def test_relaxng_shortcut(self): tree_valid = self.parse('<a><b></b></a>') tree_invalid = self.parse('<a><c></c></a>') schema = self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <zeroOrMore> <element name="b"> <text /> </element> </zeroOrMore> </element> ''') self.assertTrue(tree_valid.relaxng(schema)) self.assertFalse(tree_invalid.relaxng(schema)) def test_multiple_elementrees(self): tree = self.parse('<a><b>B</b><c>C</c></a>') schema = etree.RelaxNG( self.parse('''\ <element name="a" xmlns="http://relaxng.org/ns/structure/1.0"> <element name="b"> <text /> </element> <element name="c"> <text /> </element> </element> ''') ) self.assertTrue(schema.validate(tree)) self.assertFalse(schema.error_log.filter_from_errors()) self.assertTrue(schema.validate(tree)) # repeat valid self.assertFalse(schema.error_log.filter_from_errors()) # repeat valid schema = etree.RelaxNG( self.parse('''\ <element name="b" xmlns="http://relaxng.org/ns/structure/1.0"> <text /> </element> ''') ) c_tree = etree.ElementTree(tree.getroot()[1]) self.assertEqual(self._rootstring(c_tree), _bytes('<c>C</c>')) self.assertFalse(schema.validate(c_tree)) self.assertTrue(schema.error_log.filter_from_errors()) b_tree = etree.ElementTree(tree.getroot()[0]) self.assertEqual(self._rootstring(b_tree), _bytes('<b>B</b>')) self.assertTrue(schema.validate(b_tree)) self.assertFalse(schema.error_log.filter_from_errors()) class RelaxNGCompactTestCase(HelperTestCase): pytestmark = skipif('rnc2rng is None') def test_relaxng_compact(self): tree_valid = self.parse('<a><b>B</b><c>C</c></a>') tree_invalid = self.parse('<a><b></b></a>') schema = etree.RelaxNG(file=fileInTestDir('test.rnc')) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) def test_relaxng_compact_file_obj(self): with 
open(fileInTestDir('test.rnc'), 'r') as f: schema = etree.RelaxNG(file=f) tree_valid = self.parse('<a><b>B</b><c>C</c></a>') tree_invalid = self.parse('<a><b></b></a>') self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) def test_relaxng_compact_str(self): tree_valid = self.parse('<a><b>B</b></a>') tree_invalid = self.parse('<a><b>X</b></a>') rnc_str = 'element a { element b { "B" } }' schema = etree.RelaxNG.from_rnc_string(rnc_str) self.assertTrue(schema.validate(tree_valid)) self.assertFalse(schema.validate(tree_invalid)) def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.makeSuite(ETreeRelaxNGTestCase)]) suite.addTests( [make_doctest('../../../doc/validation.txt')]) if rnc2rng is not None: suite.addTests([unittest.makeSuite(RelaxNGCompactTestCase)]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475180"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">robertwb/incubator-beam</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query1.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span 
class="block ">1608</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Nexmark Query 1: Convert bid prices from dollars to euros. The Nexmark suite is a series of queries (streaming pipelines) performed on a simulation of auction events. This query converts bid prices from dollars to euros. It illustrates a simple map. 
""" # pytype: skip-file import apache_beam as beam from apache_beam.testing.benchmarks.nexmark.models import nexmark_model from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util USD_TO_EURO = 0.89 def load(events, metadata=None, pipeline_options=None): return ( events | nexmark_query_util.JustBids() | 'ConvertToEuro' >> beam.Map( lambda bid: nexmark_model.Bid( bid.auction, bid.bidder, bid.price * USD_TO_EURO, bid.date_time, bid.extra))) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475181"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jiaweizhou/kubernetes</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">cluster/juju/charms/trusty/kubernetes-master/hooks/kubernetes_installer.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">213</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4138</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shlex import subprocess from path import path def run(command, shell=False): """ A convience method for executing all the commands. """ print(command) if shell is False: command = shlex.split(command) output = subprocess.check_output(command, shell=shell) print(output) return output class KubernetesInstaller(): """ This class contains the logic needed to install kuberentes binary files. """ def __init__(self, arch, version, output_dir): """ Gather the required variables for the install. """ # The kubernetes-master charm needs certain commands to be aliased. self.aliases = {'kube-apiserver': 'apiserver', 'kube-controller-manager': 'controller-manager', 'kube-proxy': 'kube-proxy', 'kube-scheduler': 'scheduler', 'kubectl': 'kubectl', 'kubelet': 'kubelet'} self.arch = arch self.version = version self.output_dir = path(output_dir) def build(self, branch): """ Build kubernetes from a github repository using the Makefile. """ # Remove any old build artifacts. make_clean = 'make clean' run(make_clean) # Always checkout the master to get the latest repository information. git_checkout_cmd = 'git checkout master' run(git_checkout_cmd) # When checking out a tag, delete the old branch (not master). if branch != 'master': git_drop_branch = 'git branch -D {0}'.format(self.version) print(git_drop_branch) rc = subprocess.call(git_drop_branch.split()) if rc != 0: print('returned: %d' % rc) # Make sure the git repository is up-to-date. 
git_fetch = 'git fetch origin {0}'.format(branch) run(git_fetch) if branch == 'master': git_reset = 'git reset --hard origin/master' run(git_reset) else: # Checkout a branch of kubernetes so the repo is correct. checkout = 'git checkout -b {0} {1}'.format(self.version, branch) run(checkout) # Create an environment with the path to the GO binaries included. go_path = ('/usr/local/go/bin', os.environ.get('PATH', '')) go_env = os.environ.copy() go_env['PATH'] = ':'.join(go_path) print(go_env['PATH']) # Compile the binaries with the make command using the WHAT variable. make_what = "make all WHAT='cmd/kube-apiserver cmd/kubectl "\ "cmd/kube-controller-manager plugin/cmd/kube-scheduler "\ "cmd/kubelet cmd/kube-proxy'" print(make_what) rc = subprocess.call(shlex.split(make_what), env=go_env) def install(self, install_dir=path('/usr/local/bin')): """ Install kubernetes binary files from the output directory. """ if not install_dir.isdir(): install_dir.makedirs_p() # Create the symbolic links to the real kubernetes binaries. 
for key, value in self.aliases.iteritems(): target = self.output_dir / key if target.exists(): link = install_dir / value if link.exists(): link.remove() target.symlink(link) else: print('Error target file {0} does not exist.'.format(target)) exit(1) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475182"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">loopCM/chromium</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">media/tools/constrained_network_server/cn.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">186</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4311</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A script for configuring constraint networks. Sets up a constrained network configuration on a specific port. Traffic on this port will be redirected to another local server port. The configuration includes bandwidth, latency, and packet loss. 
""" import collections import logging import optparse import traffic_control # Default logging is ERROR. Use --verbose to enable DEBUG logging. _DEFAULT_LOG_LEVEL = logging.ERROR Dispatcher = collections.namedtuple('Dispatcher', ['dispatch', 'requires_ports', 'desc']) # Map of command names to traffic_control functions. COMMANDS = { # Adds a new constrained network configuration. 'add': Dispatcher(traffic_control.CreateConstrainedPort, requires_ports=True, desc='Add a new constrained port.'), # Deletes an existing constrained network configuration. 'del': Dispatcher(traffic_control.DeleteConstrainedPort, requires_ports=True, desc='Delete a constrained port.'), # Deletes all constrained network configurations. 'teardown': Dispatcher(traffic_control.TearDown, requires_ports=False, desc='Teardown all constrained ports.') } def _ParseArgs(): """Define and parse command-line arguments. Returns: tuple as (command, configuration): command: one of the possible commands to setup, delete or teardown the constrained network. configuration: a map of constrained network properties to their values. """ parser = optparse.OptionParser() indent_first = parser.formatter.indent_increment opt_width = parser.formatter.help_position - indent_first cmd_usage = [] for s in COMMANDS: cmd_usage.append('%*s%-*s%s' % (indent_first, '', opt_width, s, COMMANDS[s].desc)) parser.usage = ('usage: %%prog {%s} [options]\n\n%s' % ('|'.join(COMMANDS.keys()), '\n'.join(cmd_usage))) parser.add_option('--port', type='int', help='The port to apply traffic control constraints to.') parser.add_option('--server-port', type='int', help='Port to forward traffic on --port to.') parser.add_option('--bandwidth', type='int', help='Bandwidth of the network in kbit/s.') parser.add_option('--latency', type='int', help=('Latency (delay) added to each outgoing packet in ' 'ms.')) parser.add_option('--loss', type='int', help='Packet-loss percentage on outgoing packets. 
') parser.add_option('--interface', type='string', help=('Interface to setup constraints on. Use "lo" for a ' 'local client.')) parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='Turn on verbose output.') options, args = parser.parse_args() _SetLogger(options.verbose) # Check a valid command was entered if not args or args[0].lower() not in COMMANDS: parser.error('Please specify a command {%s}.' % '|'.join(COMMANDS.keys())) user_cmd = args[0].lower() # Check if required options are available if COMMANDS[user_cmd].requires_ports: if not (options.port and options.server_port): parser.error('Please provide port and server-port values.') config = { 'port': options.port, 'server_port': options.server_port, 'interface': options.interface, 'latency': options.latency, 'bandwidth': options.bandwidth, 'loss': options.loss } return user_cmd, config def _SetLogger(verbose): log_level = _DEFAULT_LOG_LEVEL if verbose: log_level = logging.DEBUG logging.basicConfig(level=log_level, format='%(message)s') def Main(): """Get the command and configuration of the network to set up.""" user_cmd, config = _ParseArgs() try: COMMANDS[user_cmd].dispatch(config) except traffic_control.TrafficControlError as e: logging.error('Error: %s\n\nOutput: %s', e.msg, e.error) if __name__ == '__main__': Main() </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475183"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">muntasirsyed/intellij-community</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div 
class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">plugins/hg4idea/testData/bin/hgext/largefiles/wirestore.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">97</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1336</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright 2010-2011 Fog Creek Software # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. '''largefile store working over Mercurial's wire protocol''' import lfutil import remotestore class wirestore(remotestore.remotestore): def __init__(self, ui, repo, remote): cap = remote.capable('largefiles') if not cap: raise lfutil.storeprotonotcapable([]) storetypes = cap.split(',') if 'serve' not in storetypes: raise lfutil.storeprotonotcapable(storetypes) self.remote = remote super(wirestore, self).__init__(ui, repo, remote.url()) def _put(self, hash, fd): return self.remote.putlfile(hash, fd) def _get(self, hash): return self.remote.getlfile(hash) def _stat(self, hashes): '''For each hash, return 0 if it is available, other values if not. 
It is usually 2 if the largefile is missing, but might be 1 the server has a corrupted copy.''' batch = self.remote.batch() futures = {} for hash in hashes: futures[hash] = batch.statlfile(hash) batch.submit() retval = {} for hash in hashes: retval[hash] = futures[hash].value return retval </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475184"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">komsas/OpenUpgrade</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/crm/wizard/crm_phonecall_to_phonecall.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">40</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4562</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ import time class crm_phonecall2phonecall(osv.osv_memory): _name = 'crm.phonecall2phonecall' _description = 'Phonecall To Phonecall' _columns = { 'name' : fields.char('Call summary', size=64, required=True, select=1), 'user_id' : fields.many2one('res.users',"Assign To"), 'contact_name':fields.char('Contact', size=64), 'phone':fields.char('Phone', size=64), 'categ_id': fields.many2one('crm.case.categ', 'Category', \ domain="['|',('section_id','=',False),('section_id','=',section_id),\ ('object_id.model', '=', 'crm.phonecall')]"), 'date': fields.datetime('Date'), 'section_id':fields.many2one('crm.case.section','Sales Team'), 'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True), 'partner_id' : fields.many2one('res.partner', "Partner"), 'note':fields.text('Note') } def action_cancel(self, cr, uid, ids, context=None): """ Closes Phonecall to Phonecall form """ return {'type':'ir.actions.act_window_close'} def action_schedule(self, cr, uid, ids, context=None): value = {} if context is None: context = {} phonecall = self.pool.get('crm.phonecall') phonecall_ids = context and context.get('active_ids') or [] for this in self.browse(cr, uid, ids, context=context): phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \ this.user_id and this.user_id.id or False, \ this.section_id and this.section_id.id or False, \ this.categ_id and 
this.categ_id.id or False, \ action=this.action, context=context) return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context) def default_get(self, cr, uid, fields, context=None): """ This function gets default values """ res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context) record_id = context and context.get('active_id', False) or False res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')}) if record_id: phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context) categ_id = False data_obj = self.pool.get('ir.model.data') try: res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2') categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id except ValueError: pass if 'name' in fields: res.update({'name': phonecall.name}) if 'user_id' in fields: res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False}) if 'date' in fields: res.update({'date': False}) if 'section_id' in fields: res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False}) if 'categ_id' in fields: res.update({'categ_id': categ_id}) if 'partner_id' in fields: res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False}) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475185"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">fernandoacorreia/DjangoWAWSLogging</span></div> </div></div> </td><td class="min-w-fit 
max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">DjangoWAWSLogging/env/Lib/site-packages/django/contrib/flatpages/tests/views.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">77</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6226</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os from django.conf import settings from django.contrib.auth.models import User from django.contrib.flatpages.models import FlatPage from django.test import TestCase class FlatpageViewTests(TestCase): fixtures = ['sample_flatpages'] urls = 'django.contrib.flatpages.tests.urls' def setUp(self): self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware' if flatpage_middleware_class in settings.MIDDLEWARE_CLASSES: settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class) self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = ( os.path.join( os.path.dirname(__file__), 'templates' ), ) self.old_LOGIN_URL = settings.LOGIN_URL settings.LOGIN_URL = '/accounts/login/' def tearDown(self): settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS settings.LOGIN_URL = self.old_LOGIN_URL def test_view_flatpage(self): "A flatpage can be served through a view" response = self.client.get('/flatpage_root/flatpage/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it flat!</p>") def test_view_non_existent_flatpage(self): "A non-existent flatpage raises 404 when served through 
a view" response = self.client.get('/flatpage_root/no_such_flatpage/') self.assertEqual(response.status_code, 404) def test_view_authenticated_flatpage(self): "A flatpage served through a view can require authentication" response = self.client.get('/flatpage_root/sekrit/') self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/') User.objects.create_user('testuser', 'test@example.com', 's3krit') self.client.login(username='testuser',password='s3krit') response = self.client.get('/flatpage_root/sekrit/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it sekrit!</p>") def test_fallback_flatpage(self): "A fallback flatpage won't be served if the middleware is disabled" response = self.client.get('/flatpage/') self.assertEqual(response.status_code, 404) def test_fallback_non_existent_flatpage(self): "A non-existent flatpage won't be served if the fallback middleware is disabled" response = self.client.get('/no_such_flatpage/') self.assertEqual(response.status_code, 404) def test_view_flatpage_special_chars(self): "A flatpage with special chars in the URL can be served through a view" fp = FlatPage.objects.create( url="/some.very_special~chars-here/", title="A very special page", content="Isn't it special!", enable_comments=False, registration_required=False, ) fp.sites.add(settings.SITE_ID) response = self.client.get('/flatpage_root/some.very_special~chars-here/') self.assertEqual(response.status_code, 200) self.assertContains(response, "<p>Isn't it special!</p>") class FlatpageViewAppendSlashTests(TestCase): fixtures = ['sample_flatpages'] urls = 'django.contrib.flatpages.tests.urls' def setUp(self): self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware' if flatpage_middleware_class in 
settings.MIDDLEWARE_CLASSES: settings.MIDDLEWARE_CLASSES = tuple(m for m in settings.MIDDLEWARE_CLASSES if m != flatpage_middleware_class) self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS settings.TEMPLATE_DIRS = ( os.path.join( os.path.dirname(__file__), 'templates' ), ) self.old_LOGIN_URL = settings.LOGIN_URL settings.LOGIN_URL = '/accounts/login/' self.old_APPEND_SLASH = settings.APPEND_SLASH settings.APPEND_SLASH = True def tearDown(self): settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS settings.LOGIN_URL = self.old_LOGIN_URL settings.APPEND_SLASH = self.old_APPEND_SLASH def test_redirect_view_flatpage(self): "A flatpage can be served through a view and should add a slash" response = self.client.get('/flatpage_root/flatpage') self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301) def test_redirect_view_non_existent_flatpage(self): "A non-existent flatpage raises 404 when served through a view and should not add a slash" response = self.client.get('/flatpage_root/no_such_flatpage') self.assertEqual(response.status_code, 404) def test_redirect_fallback_flatpage(self): "A fallback flatpage won't be served if the middleware is disabled and should not add a slash" response = self.client.get('/flatpage') self.assertEqual(response.status_code, 404) def test_redirect_fallback_non_existent_flatpage(self): "A non-existent flatpage won't be served if the fallback middleware is disabled and should not add a slash" response = self.client.get('/no_such_flatpage') self.assertEqual(response.status_code, 404) def test_redirect_view_flatpage_special_chars(self): "A flatpage with special chars in the URL can be served through a view and should add a slash" fp = FlatPage.objects.create( url="/some.very_special~chars-here/", title="A very special page", content="Isn't it special!", enable_comments=False, registration_required=False, ) fp.sites.add(1) response = 
self.client.get('/flatpage_root/some.very_special~chars-here') self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475186"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">turbokongen/home-assistant</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/components/nx584/test_binary_sensor.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">7175</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">"""The tests for the nx584 sensor platform.""" from unittest import mock from nx584 import client as nx584_client import pytest import requests from homeassistant.components.nx584 import binary_sensor as nx584 from homeassistant.setup import async_setup_component class StopMe(Exception): """Stop helper.""" @pytest.fixture def fake_zones(): """Fixture for fake zones. Returns: list: List of fake zones """ return [ {"name": "front", "number": 1}, {"name": "back", "number": 2}, {"name": "inside", "number": 3}, ] @pytest.fixture def client(fake_zones): """Fixture for client. 
Args: fake_zones (list): Fixture of fake zones Yields: MagicMock: Client Mock """ with mock.patch.object(nx584_client, "Client") as _mock_client: client = nx584_client.Client.return_value client.list_zones.return_value = fake_zones client.get_version.return_value = "1.1" yield _mock_client @pytest.mark.usefixtures("client") @mock.patch("homeassistant.components.nx584.binary_sensor.NX584Watcher") @mock.patch("homeassistant.components.nx584.binary_sensor.NX584ZoneSensor") def test_nx584_sensor_setup_defaults(mock_nx, mock_watcher, hass, fake_zones): """Test the setup with no configuration.""" add_entities = mock.MagicMock() config = { "host": nx584.DEFAULT_HOST, "port": nx584.DEFAULT_PORT, "exclude_zones": [], "zone_types": {}, } assert nx584.setup_platform(hass, config, add_entities) mock_nx.assert_has_calls([mock.call(zone, "opening") for zone in fake_zones]) assert add_entities.called assert nx584_client.Client.call_count == 1 assert nx584_client.Client.call_args == mock.call("http://localhost:5007") @pytest.mark.usefixtures("client") @mock.patch("homeassistant.components.nx584.binary_sensor.NX584Watcher") @mock.patch("homeassistant.components.nx584.binary_sensor.NX584ZoneSensor") def test_nx584_sensor_setup_full_config(mock_nx, mock_watcher, hass, fake_zones): """Test the setup with full configuration.""" config = { "host": "foo", "port": 123, "exclude_zones": [2], "zone_types": {3: "motion"}, } add_entities = mock.MagicMock() assert nx584.setup_platform(hass, config, add_entities) mock_nx.assert_has_calls( [ mock.call(fake_zones[0], "opening"), mock.call(fake_zones[2], "motion"), ] ) assert add_entities.called assert nx584_client.Client.call_count == 1 assert nx584_client.Client.call_args == mock.call("http://foo:123") assert mock_watcher.called async def _test_assert_graceful_fail(hass, config): """Test the failing.""" assert not await async_setup_component(hass, "nx584", config) @pytest.mark.usefixtures("client") @pytest.mark.parametrize( "config", [ 
({"exclude_zones": ["a"]}), ({"zone_types": {"a": "b"}}), ({"zone_types": {1: "notatype"}}), ({"zone_types": {"notazone": "motion"}}), ], ) async def test_nx584_sensor_setup_bad_config(hass, config): """Test the setup with bad configuration.""" await _test_assert_graceful_fail(hass, config) @pytest.mark.usefixtures("client") @pytest.mark.parametrize( "exception_type", [ pytest.param(requests.exceptions.ConnectionError, id="connect_failed"), pytest.param(IndexError, id="no_partitions"), ], ) async def test_nx584_sensor_setup_with_exceptions(hass, exception_type): """Test the setup handles exceptions.""" nx584_client.Client.return_value.list_zones.side_effect = exception_type await _test_assert_graceful_fail(hass, {}) @pytest.mark.usefixtures("client") async def test_nx584_sensor_setup_version_too_old(hass): """Test if version is too old.""" nx584_client.Client.return_value.get_version.return_value = "1.0" await _test_assert_graceful_fail(hass, {}) @pytest.mark.usefixtures("client") def test_nx584_sensor_setup_no_zones(hass): """Test the setup with no zones.""" nx584_client.Client.return_value.list_zones.return_value = [] add_entities = mock.MagicMock() assert nx584.setup_platform(hass, {}, add_entities) assert not add_entities.called def test_nx584_zone_sensor_normal(): """Test for the NX584 zone sensor.""" zone = {"number": 1, "name": "foo", "state": True} sensor = nx584.NX584ZoneSensor(zone, "motion") assert "foo" == sensor.name assert not sensor.should_poll assert sensor.is_on assert sensor.device_state_attributes["zone_number"] == 1 zone["state"] = False assert not sensor.is_on @mock.patch.object(nx584.NX584ZoneSensor, "schedule_update_ha_state") def test_nx584_watcher_process_zone_event(mock_update): """Test the processing of zone events.""" zone1 = {"number": 1, "name": "foo", "state": True} zone2 = {"number": 2, "name": "bar", "state": True} zones = { 1: nx584.NX584ZoneSensor(zone1, "motion"), 2: nx584.NX584ZoneSensor(zone2, "motion"), } watcher = 
nx584.NX584Watcher(None, zones) watcher._process_zone_event({"zone": 1, "zone_state": False}) assert not zone1["state"] assert mock_update.call_count == 1 @mock.patch.object(nx584.NX584ZoneSensor, "schedule_update_ha_state") def test_nx584_watcher_process_zone_event_missing_zone(mock_update): """Test the processing of zone events with missing zones.""" watcher = nx584.NX584Watcher(None, {}) watcher._process_zone_event({"zone": 1, "zone_state": False}) assert not mock_update.called def test_nx584_watcher_run_with_zone_events(): """Test the zone events.""" empty_me = [1, 2] def fake_get_events(): """Return nothing twice, then some events.""" if empty_me: empty_me.pop() else: return fake_events client = mock.MagicMock() fake_events = [ {"zone": 1, "zone_state": True, "type": "zone_status"}, {"zone": 2, "foo": False}, ] client.get_events.side_effect = fake_get_events watcher = nx584.NX584Watcher(client, {}) @mock.patch.object(watcher, "_process_zone_event") def run(fake_process): """Run a fake process.""" fake_process.side_effect = StopMe with pytest.raises(StopMe): watcher._run() assert fake_process.call_count == 1 assert fake_process.call_args == mock.call(fake_events[0]) run() assert 3 == client.get_events.call_count @mock.patch("time.sleep") def test_nx584_watcher_run_retries_failures(mock_sleep): """Test the retries with failures.""" empty_me = [1, 2] def fake_run(): """Fake runner.""" if empty_me: empty_me.pop() raise requests.exceptions.ConnectionError() raise StopMe() watcher = nx584.NX584Watcher(None, {}) with mock.patch.object(watcher, "_run") as mock_inner: mock_inner.side_effect = fake_run with pytest.raises(StopMe): watcher.run() assert 3 == mock_inner.call_count mock_sleep.assert_has_calls([mock.call(10), mock.call(10)]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group 
cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475187"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">GabrielNicolasAvellaneda/chemlab</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">chemlab/db/toxnetdb.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">6</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2107</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">'''Database for toxnet''' from .base import AbstractDB, EntryNotFound # Python 2-3 compatibility try: from urllib.parse import quote_plus from urllib.request import urlopen except ImportError: from urllib import quote_plus from urllib2 import urlopen import re class ToxNetDB(AbstractDB): def __init__(self): self.baseurl = 'http://toxgate.nlm.nih.gov' def get(self, feature, query): searchurl = self.baseurl + '/cgi-bin/sis/search/x?dbs+hsdb:%s'%quote_plus(query) result = urlopen(searchurl).read() try: result= str(result, 'utf-8') except TypeError: pass if not result: raise EntryNotFound() #print result firstresult = re.findall(r'\<Id>(.*?)\</Id>', result)[0].split()[0] retrieveurl = self.baseurl + '/cgi-bin/sis/search/r?dbs+hsdb:@term+@DOCNO+%s'%firstresult result = urlopen(retrieveurl).read() try: result = str(result, 'utf-8') except TypeError: pass tocregex = 'SRC="(.*?)"' basesearch = re.findall(tocregex, result)[0] basesearch = ':'.join(basesearch.split(':')[:-1]) if feature 
== 'boiling point': bprequest = urlopen(self.baseurl + basesearch + ':bp').read() # Massaging this request is not easy try: # python3 bprequest = str(bprequest, 'utf-8') except TypeError: pass res = re.findall(r">\s*(.*?)\s*deg C", bprequest) #print res return float(res[0]) if feature == 'melting point': bprequest = urlopen(self.baseurl + basesearch + ':mp').read() try: # python3 bprequest = str(bprequest, 'utf-8') except TypeError: pass # Massaging this request is not easy res = re.findall(r">\s*(.*?)\s*deg C", bprequest) return float(res[0]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475188"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">akozumpl/dnf</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/test_commands.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">39804</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (C) 2012-2014 Red Hat, Inc. 
# # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # from __future__ import absolute_import from __future__ import unicode_literals from tests import support from tests.support import mock import dnf.cli.commands import dnf.cli.commands.group import dnf.cli.commands.install import dnf.cli.commands.reinstall import dnf.cli.commands.upgrade import dnf.repo import itertools import logging import tests.support import unittest logger = logging.getLogger('dnf') class CommandsCliTest(support.TestCase): def setUp(self): self.base = support.MockBase() self.cli = self.base.mock_cli() def test_erase_configure(self): erase_cmd = dnf.cli.commands.EraseCommand(self.cli) erase_cmd.configure([]) self.assertTrue(self.cli.demands.allow_erasing) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) def test_history_get_error_output_rollback_transactioncheckerror(self): """Test get_error_output with the history rollback and a TransactionCheckError.""" cmd = dnf.cli.commands.HistoryCommand(self.cli) self.base.basecmd = 'history' self.base.extcmds = ('rollback', '1') lines = 
cmd.get_error_output(dnf.exceptions.TransactionCheckError()) self.assertEqual( lines, ('Cannot rollback transaction 1, doing so would result in an ' 'inconsistent package database.',)) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) def test_history_get_error_output_undo_transactioncheckerror(self): """Test get_error_output with the history undo and a TransactionCheckError.""" cmd = dnf.cli.commands.HistoryCommand(self.cli) self.base.basecmd = 'history' self.base.extcmds = ('undo', '1') lines = cmd.get_error_output(dnf.exceptions.TransactionCheckError()) self.assertEqual( lines, ('Cannot undo transaction 1, doing so would result in an ' 'inconsistent package database.',)) @staticmethod @mock.patch('dnf.Base.fill_sack') def _do_makecache(cmd, fill_sack): return cmd.run(['timer']) def assertLastInfo(self, logger, msg): self.assertEqual(logger.info.mock_calls[-1], mock.call(msg)) @mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.util.on_ac_power', return_value=True) def test_makecache_timer(self, _on_ac_power, logger): cmd = dnf.cli.commands.MakeCacheCommand(self.cli) self.base.conf.metadata_timer_sync = 0 self.assertFalse(self._do_makecache(cmd)) self.assertLastInfo(logger, u'Metadata timer caching disabled.') self.base.conf.metadata_timer_sync = 5 # resync after 5 seconds self.base._persistor.since_last_makecache = mock.Mock(return_value=3) self.assertFalse(self._do_makecache(cmd)) self.assertLastInfo(logger, u'Metadata cache refreshed recently.') self.base._persistor.since_last_makecache = mock.Mock(return_value=10) self.base._sack = 'nonempty' r = support.MockRepo("glimpse", None) self.base.repos.add(r) # regular case 1: metadata is already expired: r.metadata_expire_in = mock.Mock(return_value=(False, 0)) r.sync_strategy = dnf.repo.SYNC_TRY_CACHE self.assertTrue(self._do_makecache(cmd)) self.assertLastInfo(logger, 
u'Metadata cache created.') self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED) # regular case 2: metadata is cached and will expire later than # metadata_timer_sync: r.metadata_expire_in = mock.Mock(return_value=(True, 100)) r.sync_strategy = dnf.repo.SYNC_TRY_CACHE self.assertTrue(self._do_makecache(cmd)) self.assertLastInfo(logger, u'Metadata cache created.') self.assertEqual(r.sync_strategy, dnf.repo.SYNC_TRY_CACHE) # regular case 3: metadata is cached but will expire before # metadata_timer_sync: r.metadata_expire_in = mock.Mock(return_value=(True, 4)) r.sync_strategy = dnf.repo.SYNC_TRY_CACHE self.assertTrue(self._do_makecache(cmd)) self.assertLastInfo(logger, u'Metadata cache created.') self.assertEqual(r.sync_strategy, dnf.repo.SYNC_EXPIRED) @mock.patch('dnf.cli.commands.logger', new_callable=tests.support.mock_logger) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.util.on_ac_power', return_value=False) def test_makecache_timer_battery(self, _on_ac_power, logger): cmd = dnf.cli.commands.MakeCacheCommand(self.cli) self.base.conf.metadata_timer_sync = 5 self.assertFalse(self._do_makecache(cmd)) msg = u'Metadata timer caching disabled when running on a battery.' 
self.assertLastInfo(logger, msg) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.util.on_ac_power', return_value=None) def test_makecache_timer_battery2(self, _on_ac_power): cmd = dnf.cli.commands.MakeCacheCommand(self.cli) self.base.conf.metadata_timer_sync = 5 self.assertTrue(self._do_makecache(cmd)) class CommandTest(support.TestCase): def test_canonical(self): cmd = dnf.cli.commands.upgrade.UpgradeCommand(None) (base, ext) = cmd.canonical(['update', 'cracker', 'filling']) self.assertEqual(base, 'upgrade') self.assertEqual(ext, ['cracker', 'filling']) class EraseCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.EraseCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(EraseCommandTest, self).setUp() base = support.BaseCliStub() base.init_sack() self.cmd = dnf.cli.commands.EraseCommand(base.mock_cli()) def test_run(self): """Test whether the package is installed.""" self.cmd.run(['pepper']) self.assertResult( self.cmd.base, self.cmd.base.sack.query().installed().filter(name__neq='pepper')) @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) def test_run_notfound(self): """Test whether it fails if the package cannot be found.""" stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self.cmd.run, ['non-existent']) self.assertEqual(stdout.getvalue(), 'No match for argument: non-existent\n') self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed()) class InstallCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.install.InstallCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(InstallCommandTest, self).setUp() base = support.BaseCliStub('main') base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH) base.init_sack() self._cmd = dnf.cli.commands.install.InstallCommand(base.mock_cli()) def test_configure(self): cli = 
self._cmd.cli self._cmd.configure([]) self.assertFalse(cli.demands.allow_erasing) self.assertTrue(cli.demands.sack_activation) def test_run_group(self): """Test whether a group is installed.""" self._cmd.run(['@Solid Ground']) base = self._cmd.cli.base self.assertResult(base, itertools.chain( base.sack.query().installed(), dnf.subject.Subject('trampoline').get_best_query(base.sack))) @mock.patch('dnf.cli.commands.install._', dnf.pycomp.NullTranslations().ugettext) def test_run_group_notfound(self): """Test whether it fails if the group cannot be found.""" stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['@non-existent']) self.assertEqual(stdout.getvalue(), "Warning: Group 'non-existent' does not exist.\n") self.assertResult(self._cmd.cli.base, self._cmd.cli.base.sack.query().installed()) def test_run_package(self): """Test whether a package is installed.""" self._cmd.run(['lotus']) base = self._cmd.cli.base self.assertResult(base, itertools.chain( base.sack.query().installed(), dnf.subject.Subject('lotus.x86_64').get_best_query(base.sack))) @mock.patch('dnf.cli.commands.install._', dnf.pycomp.NullTranslations().ugettext) def test_run_package_notfound(self): """Test whether it fails if the package cannot be found.""" stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['non-existent']) self.assertEqual(stdout.getvalue(), 'No package non-existent available.\n') self.assertResult(self._cmd.cli.base, self._cmd.cli.base.sack.query().installed()) class ReinstallCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.ReinstallCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(ReinstallCommandTest, self).setUp() base = support.BaseCliStub('main') base.init_sack() self._cmd = dnf.cli.commands.reinstall.ReinstallCommand(base.mock_cli()) def test_run(self): 
"""Test whether the package is installed.""" self._cmd.run(['pepper']) base = self._cmd.cli.base self.assertResult(base, itertools.chain( base.sack.query().installed().filter(name__neq='pepper'), dnf.subject.Subject('pepper.x86_64').get_best_query(base.sack) .available())) @mock.patch('dnf.cli.commands.reinstall._', dnf.pycomp.NullTranslations().ugettext) def test_run_notinstalled(self): """Test whether it fails if the package is not installed.""" stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['lotus']) self.assertEqual(stdout.getvalue(), 'No match for argument: lotus\n') self.assertResult(self._cmd.cli.base, self._cmd.cli.base.sack.query().installed()) @mock.patch('dnf.cli.commands.reinstall._', dnf.pycomp.NullTranslations().ugettext) def test_run_notavailable(self): """Test whether it fails if the package is not available.""" base = self._cmd.cli.base holes_query = dnf.subject.Subject('hole').get_best_query(base.sack) for pkg in holes_query.installed(): self._cmd.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self._cmd.base.yumdb.get_package(pkg).from_repo = 'unknown' stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self._cmd.run, ['hole']) self.assertEqual( stdout.getvalue(), 'Installed package hole-1-1.x86_64 (from unknown) not available.\n') self.assertResult(base, base.sack.query().installed()) class RepoPkgsCommandTest(unittest.TestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsCommandTest, self).setUp() cli = support.BaseCliStub().mock_cli() self.cmd = dnf.cli.commands.RepoPkgsCommand(cli) def test_configure_badargs(self): """Test whether the method does not fail even in case of wrong args.""" self.cmd.configure([]) class RepoPkgsCheckUpdateSubCommandTest(unittest.TestCase): """Tests of 
``dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsCheckUpdateSubCommandTest, self).setUp() base = support.BaseCliStub('main', 'updates', 'third_party') self.cli = base.mock_cli() @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) def test(self): """Test whether only upgrades in the repository are listed.""" cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('updates', []) self.assertEqual( stdout.getvalue(), u'\n' u'hole.x86_64 1-2' u' updates\n' u'hole.x86_64 2-1' u' updates\n' u'pepper.x86_64 20-1' u' updates\n' u'Obsoleting Packages\n' u'hole.i686 2-1' u' updates\n' u' tour.noarch 5-0' u' @System\n' u'hole.x86_64 2-1' u' updates\n' u' tour.noarch 5-0' u' @System\n') self.assertEqual(self.cli.demands.success_exit_status, 100) def test_not_found(self): """Test whether exit code differs if updates are not found.""" cmd = dnf.cli.commands.RepoPkgsCommand.CheckUpdateSubCommand(self.cli) cmd.run_on_repo('main', []) self.assertNotEqual(self.cli.demands.success_exit_status, 100) class RepoPkgsInfoSubCommandTest(unittest.TestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.InfoSubCommand`` class.""" AVAILABLE_TITLE = u'Available Packages\n' HOLE_I686_INFO = (u'Name : hole\n' u'Arch : i686\n' u'Epoch : 0\n' u'Version : 2\n' u'Release : 1\n' u'Size : 0.0 \n' u'Repo : updates\n' u'Summary : \n' u'License : \n' u'Description : \n' u'\n') HOLE_X86_64_INFO = (u'Name : hole\n' u'Arch : x86_64\n' u'Epoch : 0\n' u'Version : 2\n' u'Release : 1\n' u'Size : 0.0 \n' u'Repo : updates\n' u'Summary : \n' u'License : \n' u'Description : \n\n') INSTALLED_TITLE = u'Installed Packages\n' PEPPER_SYSTEM_INFO = (u'Name : pepper\n' u'Arch : x86_64\n' u'Epoch : 0\n' u'Version : 20\n' u'Release : 0\n' u'Size : 0.0 \n' u'Repo : @System\n' u'From repo : main\n' u'Summary : \n' u'License : \n' u'Description : 
\n\n') PEPPER_UPDATES_INFO = (u'Name : pepper\n' u'Arch : x86_64\n' u'Epoch : 0\n' u'Version : 20\n' u'Release : 1\n' u'Size : 0.0 \n' u'Repo : updates\n' u'Summary : \n' u'License : \n' u'Description : \n\n') def setUp(self): """Prepare the test fixture.""" super(RepoPkgsInfoSubCommandTest, self).setUp() base = support.BaseCliStub('main', 'updates', 'third_party') base.conf.recent = 7 self.cli = base.mock_cli() @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_all(self): """Test whether only packages related to the repository are listed.""" for pkg in self.cli.base.sack.query().installed().filter(name='pepper'): self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = 'main' cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('main', ['all', '*p*']) self.assertEqual( stdout.getvalue(), ''.join(( self.INSTALLED_TITLE, self.PEPPER_SYSTEM_INFO, self.AVAILABLE_TITLE, u'Name : pepper\n' u'Arch : src\n' u'Epoch : 0\n' u'Version : 20\n' u'Release : 0\n' u'Size : 0.0 \n' u'Repo : main\n' u'Summary : \n' u'License : \n' u'Description : \n' u'\n', u'Name : trampoline\n' u'Arch : noarch\n' u'Epoch : 0\n' u'Version : 2.1\n' u'Release : 1\n' u'Size : 0.0 \n' u'Repo : main\n' u'Summary : \n' u'License : \n' u'Description : \n' u'\n'))) @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_available(self): """Test whether only packages in the repository are listed.""" cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('updates', ['available']) self.assertEqual( stdout.getvalue(), ''.join(( self.AVAILABLE_TITLE, self.HOLE_I686_INFO, self.HOLE_X86_64_INFO, 
self.PEPPER_UPDATES_INFO))) @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_extras(self): """Test whether only extras installed from the repository are listed.""" for pkg in self.cli.base.sack.query().installed().filter(name='tour'): self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = 'unknown' cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('unknown', ['extras']) self.assertEqual( stdout.getvalue(), u'Extra Packages\n' u'Name : tour\n' u'Arch : noarch\n' u'Epoch : 0\n' u'Version : 5\n' u'Release : 0\n' u'Size : 0.0 \n' u'Repo : @System\n' u'From repo : unknown\n' u'Summary : \n' u'License : \n' u'Description : \n\n') @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_installed(self): """Test whether only packages installed from the repository are listed.""" for pkg in self.cli.base.sack.query().installed().filter(name='pepper'): self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = 'main' cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('main', ['installed']) self.assertEqual( stdout.getvalue(), ''.join((self.INSTALLED_TITLE, self.PEPPER_SYSTEM_INFO))) @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_obsoletes(self): """Test whether only obsoletes in the repository are listed.""" cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('updates', ['obsoletes']) self.assertEqual( 
stdout.getvalue(), ''.join(( u'Obsoleting Packages\n', self.HOLE_I686_INFO, self.HOLE_X86_64_INFO))) @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_recent(self): """Test whether only packages in the repository are listed.""" cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with mock.patch('time.time', return_value=0), \ support.patch_std_streams() as (stdout, _): cmd.run_on_repo('updates', ['recent']) self.assertEqual( stdout.getvalue(), ''.join(( u'Recently Added Packages\n', self.HOLE_I686_INFO, self.HOLE_X86_64_INFO, self.PEPPER_UPDATES_INFO))) @mock.patch('dnf.cli.cli._', dnf.pycomp.NullTranslations().ugettext) @mock.patch('dnf.cli.output._', dnf.pycomp.NullTranslations().ugettext) def test_info_upgrades(self): """Test whether only upgrades in the repository are listed.""" cmd = dnf.cli.commands.RepoPkgsCommand.InfoSubCommand(self.cli) with support.patch_std_streams() as (stdout, _): cmd.run_on_repo('updates', ['upgrades']) self.assertEqual( stdout.getvalue(), ''.join(( u'Upgraded Packages\n' u'Name : hole\n' u'Arch : x86_64\n' u'Epoch : 0\n' u'Version : 1\n' u'Release : 2\n' u'Size : 0.0 \n' u'Repo : updates\n' u'Summary : \n' u'License : \n' u'Description : \n' u'\n', self.HOLE_X86_64_INFO, self.PEPPER_UPDATES_INFO))) class RepoPkgsInstallSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.InstallSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsInstallSubCommandTest, self).setUp() base = support.BaseCliStub('main', 'third_party') base.repos['main'].metadata = mock.Mock(comps_fn=support.COMPS_PATH) base.repos['third_party'].enablegroups = False base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether all packages from the repository are installed.""" cmd = dnf.cli.commands.RepoPkgsCommand.InstallSubCommand(self.cli) cmd.run_on_repo('third_party', 
[]) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='hole'), self.cli.base.sack.query().available().filter(reponame='third_party', arch='x86_64'))) class RepoPkgsMoveToSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsMoveToSubCommandTest, self).setUp() base = support.BaseCliStub('distro', 'main') base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether only packages in the repository are installed.""" cmd = dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand(self.cli) cmd.run_on_repo('distro', []) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='tour'), dnf.subject.Subject('tour-5-0').get_best_query(self.cli.base.sack) .available())) class RepoPkgsReinstallOldSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsReinstallOldSubCommandTest, self).setUp() base = support.BaseCliStub('main') base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether all packages from the repository are reinstalled.""" for pkg in self.cli.base.sack.query().installed(): reponame = 'main' if pkg.name != 'pepper' else 'non-main' self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = reponame cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand(self.cli) cmd.run_on_repo('main', []) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='librita'), dnf.subject.Subject('librita.i686').get_best_query(self.cli.base.sack) .installed(), dnf.subject.Subject('librita').get_best_query(self.cli.base.sack) .available())) class 
RepoPkgsReinstallSubCommandTest(unittest.TestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsReinstallSubCommandTest, self).setUp() self.cli = support.BaseCliStub('main').mock_cli() self.mock = mock.Mock() old_run_patcher = mock.patch( 'dnf.cli.commands.RepoPkgsCommand.ReinstallOldSubCommand.run_on_repo', self.mock.reinstall_old_run) move_run_patcher = mock.patch( 'dnf.cli.commands.RepoPkgsCommand.MoveToSubCommand.run_on_repo', self.mock.move_to_run) old_run_patcher.start() self.addCleanup(old_run_patcher.stop) move_run_patcher.start() self.addCleanup(move_run_patcher.stop) def test_all_fails(self): """Test whether it fails if everything fails.""" self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test') self.mock.move_to_run.side_effect = dnf.exceptions.Error('test') cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli) self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'main', []) self.assertEqual(self.mock.mock_calls, [mock.call.reinstall_old_run('main', []), mock.call.move_to_run('main', [])]) def test_all_moveto(self): """Test whether reinstall-old is called first and move-to next.""" self.mock.reinstall_old_run.side_effect = dnf.exceptions.Error('test') cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli) cmd.run_on_repo('main', []) self.assertEqual(self.mock.mock_calls, [mock.call.reinstall_old_run('main', []), mock.call.move_to_run('main', [])]) def test_all_reinstallold(self): """Test whether only reinstall-old is called.""" cmd = dnf.cli.commands.RepoPkgsCommand.ReinstallSubCommand(self.cli) cmd.run_on_repo('main', []) self.assertEqual(self.mock.mock_calls, [mock.call.reinstall_old_run('main', [])]) class RepoPkgsRemoveOrDistroSyncSubCommandTest(support.ResultTestCase): """Tests of ``RemoveOrDistroSyncSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" 
super(RepoPkgsRemoveOrDistroSyncSubCommandTest, self).setUp() self.cli = support.BaseCliStub('distro').mock_cli() self.cli.base.init_sack() def test_run_on_repo_spec_sync(self): """Test running with a package which can be synchronized.""" for pkg in self.cli.base.sack.query().installed(): data = support.RPMDBAdditionalDataPackageStub() data.from_repo = 'non-distro' if pkg.name == 'pepper' else 'distro' self.cli.base.yumdb.db[str(pkg)] = data cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand( self.cli) cmd.run_on_repo('non-distro', ['pepper']) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='pepper'), dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack) .available())) def test_run_on_repo_spec_remove(self): """Test running with a package which must be removed.""" for pkg in self.cli.base.sack.query().installed(): data = support.RPMDBAdditionalDataPackageStub() data.from_repo = 'non-distro' if pkg.name == 'hole' else 'distro' self.cli.base.yumdb.db[str(pkg)] = data cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand( self.cli) cmd.run_on_repo('non-distro', ['hole']) self.assertResult( self.cli.base, self.cli.base.sack.query().installed().filter(name__neq='hole')) def test_run_on_repo_all(self): """Test running without a package specification.""" nondist = {'pepper', 'hole'} for pkg in self.cli.base.sack.query().installed(): data = support.RPMDBAdditionalDataPackageStub() data.from_repo = 'non-distro' if pkg.name in nondist else 'distro' self.cli.base.yumdb.db[str(pkg)] = data cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand( self.cli) cmd.run_on_repo('non-distro', []) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='pepper') .filter(name__neq='hole'), dnf.subject.Subject('pepper').get_best_query(self.cli.base.sack) .available())) @mock.patch('dnf.cli.commands._', 
dnf.pycomp.NullTranslations().ugettext) def test_run_on_repo_spec_notinstalled(self): """Test running with a package which is not installed.""" stdout = dnf.pycomp.StringIO() cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand( self.cli) with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'non-distro', ['not-installed']) self.assertIn('No match for argument: not-installed\n', stdout.getvalue(), 'mismatch not logged') @mock.patch('dnf.cli.commands._', dnf.pycomp.NullTranslations().ugettext) def test_run_on_repo_all_notinstalled(self): """Test running with a repository from which nothing is installed.""" stdout = dnf.pycomp.StringIO() cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrDistroSyncSubCommand( self.cli) with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'non-distro', []) self.assertIn('No package installed from the repository.\n', stdout.getvalue(), 'mismatch not logged') class RepoPkgsRemoveOrReinstallSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsRemoveOrReinstallSubCommandTest, self).setUp() base = support.BaseCliStub('distro') base.init_sack() self.cli = base.mock_cli() def test_all_not_installed(self): """Test whether it fails if no package is installed from the repository.""" cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand( self.cli) self.assertRaises(dnf.exceptions.Error, cmd.run_on_repo, 'non-distro', []) self.assertResult(self.cli.base, self.cli.base.sack.query().installed()) def test_all_reinstall(self): """Test whether all packages from the repository are reinstalled.""" for pkg in self.cli.base.sack.query().installed(): reponame = 'distro' if pkg.name != 'tour' else 'non-distro' self.cli.base.yumdb.db[str(pkg)] = 
support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = reponame cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand( self.cli) cmd.run_on_repo('non-distro', []) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='tour'), dnf.subject.Subject('tour').get_best_query(self.cli.base.sack) .available())) def test_all_remove(self): """Test whether all packages from the repository are removed.""" for pkg in self.cli.base.sack.query().installed(): reponame = 'distro' if pkg.name != 'hole' else 'non-distro' self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = reponame cmd = dnf.cli.commands.RepoPkgsCommand.RemoveOrReinstallSubCommand( self.cli) cmd.run_on_repo('non-distro', []) self.assertResult( self.cli.base, self.cli.base.sack.query().installed().filter(name__neq='hole')) class RepoPkgsRemoveSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsRemoveSubCommandTest, self).setUp() base = support.BaseCliStub('main') base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether only packages from the repository are removed.""" for pkg in self.cli.base.sack.query().installed(): reponame = 'main' if pkg.name == 'pepper' else 'non-main' self.cli.base.yumdb.db[str(pkg)] = support.RPMDBAdditionalDataPackageStub() self.cli.base.yumdb.get_package(pkg).from_repo = reponame cmd = dnf.cli.commands.RepoPkgsCommand.RemoveSubCommand(self.cli) cmd.run_on_repo('main', []) self.assertResult( self.cli.base, self.cli.base.sack.query().installed().filter(name__neq='pepper')) class RepoPkgsUpgradeSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" 
super(RepoPkgsUpgradeSubCommandTest, self).setUp() base = support.BaseCliStub('updates', 'third_party') base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether all packages from the repository are installed.""" cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeSubCommand(self.cli) cmd.run_on_repo('third_party', []) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='hole'), self.cli.base.sack.query().upgrades().filter(reponame='third_party', arch='x86_64'))) class RepoPkgsUpgradeToSubCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(RepoPkgsUpgradeToSubCommandTest, self).setUp() base = support.BaseCliStub('updates', 'third_party') base.init_sack() self.cli = base.mock_cli() def test_all(self): """Test whether the package from the repository is installed.""" cmd = dnf.cli.commands.RepoPkgsCommand.UpgradeToSubCommand(self.cli) cmd.run_on_repo('updates', ['hole-1-2']) self.assertResult(self.cli.base, itertools.chain( self.cli.base.sack.query().installed().filter(name__neq='hole'), dnf.subject.Subject('hole-1-2.x86_64').get_best_query(self.cli.base.sack) .filter(reponame='updates'))) class UpgradeCommandTest(support.ResultTestCase): """Tests of ``dnf.cli.commands.upgrade.UpgradeCommand`` class.""" def setUp(self): """Prepare the test fixture.""" super(UpgradeCommandTest, self).setUp() base = support.BaseCliStub('updates') base.init_sack() self.cmd = dnf.cli.commands.upgrade.UpgradeCommand(base.mock_cli()) def test_run(self): """Test whether a package is updated.""" self.cmd.run(['pepper']) self.assertResult(self.cmd.base, itertools.chain( self.cmd.base.sack.query().installed().filter(name__neq='pepper'), self.cmd.base.sack.query().upgrades().filter(name='pepper'))) @mock.patch('dnf.cli.commands.upgrade._', dnf.pycomp.NullTranslations().ugettext) def 
test_updatePkgs_notfound(self): """Test whether it fails if the package cannot be found.""" stdout = dnf.pycomp.StringIO() with support.wiretap_logs('dnf', logging.INFO, stdout): self.assertRaises(dnf.exceptions.Error, self.cmd.run, ['non-existent']) self.assertEqual(stdout.getvalue(), 'No match for argument: non-existent\n') self.assertResult(self.cmd.base, self.cmd.base.sack.query().installed()) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475189"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jrief/easy-thumbnails</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">easy_thumbnails/tests/fields.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3792</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os from django.core.files.base import ContentFile from django.db import models from easy_thumbnails import test from easy_thumbnails.fields import ThumbnailerField, ThumbnailerImageField from easy_thumbnails.exceptions import InvalidImageFormatError class TestModel(models.Model): avatar = ThumbnailerField(upload_to='avatars') picture = 
ThumbnailerImageField(upload_to='pictures', resize_source=dict(size=(10, 10))) class ThumbnailerFieldTest(test.BaseTest): def setUp(self): super(ThumbnailerFieldTest, self).setUp() self.storage = test.TemporaryStorage() # Save a test image. self.create_image(self.storage, 'avatars/avatar.jpg') # Set the test model to use the current temporary storage. TestModel._meta.get_field('avatar').storage = self.storage TestModel._meta.get_field('avatar').thumbnail_storage = self.storage def tearDown(self): self.storage.delete_temporary_storage() super(ThumbnailerFieldTest, self).tearDown() def test_generate_thumbnail(self): instance = TestModel(avatar='avatars/avatar.jpg') thumb = instance.avatar.generate_thumbnail({'size': (300, 300)}) self.assertEqual((thumb.width, thumb.height), (300, 225)) def test_generate_thumbnail_type_error(self): text_file = ContentFile("Lorem ipsum dolor sit amet. Not an image.") self.storage.save('avatars/invalid.jpg', text_file) instance = TestModel(avatar='avatars/invalid.jpg') generate = lambda: instance.avatar.generate_thumbnail( {'size': (300, 300)}) self.assertRaises(InvalidImageFormatError, generate) def test_delete(self): instance = TestModel(avatar='avatars/avatar.jpg') source_path = instance.avatar.path thumb_paths = ( instance.avatar.get_thumbnail({'size': (300, 300)}).path, instance.avatar.get_thumbnail({'size': (200, 200)}).path, instance.avatar.get_thumbnail({'size': (100, 100)}).path, ) self.assertTrue(os.path.exists(source_path)) for path in thumb_paths: self.assertTrue(os.path.exists(path)) instance.avatar.delete(save=False) self.assertFalse(os.path.exists(source_path)) for path in thumb_paths: self.assertFalse(os.path.exists(path)) def test_delete_thumbnails(self): instance = TestModel(avatar='avatars/avatar.jpg') source_path = instance.avatar.path thumb_paths = ( instance.avatar.get_thumbnail({'size': (300, 300)}).path, instance.avatar.get_thumbnail({'size': (200, 200)}).path, instance.avatar.get_thumbnail({'size': (100, 
100)}).path, ) self.assertTrue(os.path.exists(source_path)) for path in thumb_paths: self.assertTrue(os.path.exists(path)) instance.avatar.delete_thumbnails() self.assertTrue(os.path.exists(source_path)) for path in thumb_paths: self.assertFalse(os.path.exists(path)) def test_get_thumbnails(self): instance = TestModel(avatar='avatars/avatar.jpg') instance.avatar.get_thumbnail({'size': (300, 300)}) instance.avatar.get_thumbnail({'size': (200, 200)}) self.assertEqual(len(list(instance.avatar.get_thumbnails())), 2) def test_saving_image_field_with_resize_source(self): # Ensure that saving ThumbnailerImageField with resize_source enabled # using instance.field.save() does not fail instance = TestModel(avatar='avatars/avatar.jpg') instance.picture.save( 'file.jpg', ContentFile(instance.avatar.file.read()), save=False) self.assertEqual(instance.picture.width, 10) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475190"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">watson-developer-cloud/discovery-starter-kit</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">server/python/server.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4953</span></div> </div></div> </td><td 
class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import os import sys import json from helpers import get_constants, get_questions from flask import Flask, jsonify, render_template, request from flask_sslify import SSLify from flask_cors import CORS from flask_limiter import Limiter from flask_limiter.util import get_remote_address from requests.exceptions import HTTPError from dotenv import load_dotenv, find_dotenv import watson_developer_cloud.natural_language_understanding.features.v1 as features # noqa from watson_developer_cloud import DiscoveryV1, NaturalLanguageUnderstandingV1 import metrics_tracker_client try: load_dotenv(find_dotenv()) except IOError: print('warning: no .env file loaded') # Emit Bluemix deployment event if not a demo deploy if not(os.getenv('DEMO_DEPLOY')): metrics_tracker_client.track() app = Flask( __name__, static_folder="../../client/knowledge_base_search/build/static", template_folder="../../client/knowledge_base_search/build" ) # force SSL sslify = SSLify(app) # Limit requests limiter = Limiter( app, key_func=get_remote_address, default_limits=['240 per minute', '4 per second'], headers_enabled=True ) CORS(app, resources={r"/api/*": {"origins": "*"}}) # Discovery discovery = DiscoveryV1( url=os.getenv('DISCOVERY_URL'), username=os.getenv('DISCOVERY_USERNAME'), password=os.getenv('DISCOVERY_PASSWORD'), version="2016-12-01" ) # NLU nlu = NaturalLanguageUnderstandingV1( url=os.getenv('NLU_URL'), username=os.getenv('NLU_USERNAME'), password=os.getenv('NLU_PASSWORD'), version="2017-02-27" ) """ retrieve the following: { environment_id: env_id, collection_id: { passages: passages_id, regular: regular_id, trained: trained_id } } """ constants = get_constants( discovery, passages_name=os.getenv( 'DISCOVERY_PASSAGES_COLLECTION_NAME', 'knowledge_base_regular' ), regular_name=os.getenv( 'DISCOVERY_REGULAR_COLLECTION_NAME', 'knowledge_base_regular' ), 
trained_name=os.getenv( 'DISCOVERY_TRAINED_COLLECTION_NAME', 'knowledge_base_trained' ) ) try: total_questions = int(os.getenv('DISCOVERY_QUESTION_COUNT', 5000)) except ValueError: sys.exit('DISCOVERY_QUESTION_COUNT not an integer, terminating...') passages_question_cache = get_questions( discovery=discovery, constants=constants, question_count=total_questions, feature_type='passages') trained_question_cache = get_questions( discovery=discovery, constants=constants, question_count=total_questions, feature_type='trained') @app.route('/') @limiter.exempt def index(): return render_template('index.html') @app.route('/api/query/<collection_type>', methods=['POST']) def query(collection_type): query_options = json.loads(request.data) query_options['return'] = 'text' if collection_type == 'passages': query_options['passages'] = True # retrieve more results for regular so that we can compare original rank if collection_type == 'regular': query_options['count'] = 100 return jsonify( discovery.query( environment_id=constants['environment_id'], collection_id=constants['collection_id'][collection_type], query_options=query_options ) ) @app.route('/api/questions/<feature_type>', methods=['GET']) def questions(feature_type): if feature_type == 'passages': return jsonify(passages_question_cache) else: return jsonify(trained_question_cache) @app.errorhandler(429) def ratelimit_handler(e): return jsonify( error="API Rate Limit exceeded: %s" % e.description, code=429), 429 @app.errorhandler(Exception) def handle_error(e): code = 500 error = 'Error processing the request' if isinstance(e, HTTPError): code = e.code error = str(e.message) return jsonify(error=error, code=code), code if __name__ == '__main__': # If we are in the Bluemix environment PRODUCTION = True if os.getenv('VCAP_APPLICATION') else False # set port to 0.0.0.0, otherwise set it to localhost (127.0.0.1) HOST = '0.0.0.0' if PRODUCTION else '127.0.0.1' # Get port from the Bluemix environment, or default to 5000 
PORT_NUMBER = int(os.getenv('PORT', '5000')) app.run(host=HOST, port=PORT_NUMBER, debug=not(PRODUCTION)) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">mit</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475191"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">jordanemedlock/psychtruths</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">temboo/Library/Microsoft/OAuth/RefreshToken.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">4495</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- ############################################################################### # # RefreshToken # Retrieves a new refresh token and access token by exchanging the refresh token that is associated with the expired access token. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class RefreshToken(Choreography): def __init__(self, temboo_session): """ Create a new instance of the RefreshToken Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(RefreshToken, self).__init__(temboo_session, '/Library/Microsoft/OAuth/RefreshToken') def new_input_set(self): return RefreshTokenInputSet() def _make_result_set(self, result, path): return RefreshTokenResultSet(result, path) def _make_execution(self, session, exec_id, path): return RefreshTokenChoreographyExecution(session, exec_id, path) class RefreshTokenInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the RefreshToken Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_ClientID(self, value): """ Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Microsoft after registering your application.) """ super(RefreshTokenInputSet, self)._set_input('ClientID', value) def set_ClientSecret(self, value): """ Set the value of the ClientSecret input for this Choreo. ((required, string) The Client Secret provided by Microsoft after registering your application.) 
""" super(RefreshTokenInputSet, self)._set_input('ClientSecret', value) def set_RefreshToken(self, value): """ Set the value of the RefreshToken input for this Choreo. ((required, string) An OAuth Refresh Token used to generate a new access token when the original token is expired.) """ super(RefreshTokenInputSet, self)._set_input('RefreshToken', value) def set_Resource(self, value): """ Set the value of the Resource input for this Choreo. ((conditional, string) The App ID URI of the web API (secured resource). See Choreo notes for details.) """ super(RefreshTokenInputSet, self)._set_input('Resource', value) class RefreshTokenResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the RefreshToken Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Microsoft.) """ return self._output.get('Response', None) def get_Expires(self): """ Retrieve the value for the "Expires" output from this Choreo execution. ((integer) The remaining lifetime of the short-lived access token.) """ return self._output.get('Expires', None) def get_NewRefreshToken(self): """ Retrieve the value for the "NewRefreshToken" output from this Choreo execution. ((string) The new Refresh Token which can be used the next time your app needs to get a new Access Token.) 
""" return self._output.get('NewRefreshToken', None) class RefreshTokenChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return RefreshTokenResultSet(response, path) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475192"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">huahbo/src</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">book/Recipes/m1d.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">5</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">10916</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from rsf.proj import * from decimal import * # --- User set --- # # model model = { 'X' : 2000, # meter 'dx': 10.0, 'dt': 0.001, 'SelT': 0.25, # selected time for snapshot show 'snpintvl': 1.0, # nterval of snapshot output 'size' : 8, # FD order 'frqcut' : 1.0, 'pml' : 240, } # source & receiver srp = { 'bgn' : 0.1, # s, time of maximum ricker 'frq' : 10.0, # source domain frequence 'srcmms' : 'n', # MMS 'inject': 'n', # if y, inject; if n, Initiate conditon 'slx' : 1000.0, # source location (x), meter 'gdep' : 800 # receiver location (z), meter } # 
------------------------------------------------------------------------------ def mgraph(fin, title): Result(fin, ''' put label1="Depth" unit1="m" | transp plane=23 | graph screenratio=0.5 title="%s" '''%str(title)) # ------------------------------------------------------------------------------ def setpar(mdl, srp): dx = mdl['dx'] dt = mdl['dt'] objpar = { 'vel' : mdl['vel'], 'dvel': mdl['dvel'], 'den' : mdl['den'], 'nx' : mdl['X']/dx+1, 'SelT': mdl['SelT'], 'nt' : int(Decimal(str(mdl['T']))/Decimal(str(dt)))+1, 'snt' : int(Decimal(str(mdl['SelT']))/Decimal(str(dt))/ \ Decimal(str(mdl['snpintvl']))), 'snpi': mdl['snpintvl'], #snap interval 'dt' : dt, 'iwdt': dt*1000, #dt for iwave 'dx' : dx, 'dxhf': 0.5*dx, 'ox' : 0.0, 'ot' : 0.0, # source 'frq' : srp['frq'], 'wavfrq': srp['frq']/3.0, 'bgnp' : srp['bgn']/dt+1, 'slx' : srp['slx'], 'spx' : srp['slx']/dx+1, 'gdep' : srp['gdep'], 'gp' : int(srp['gdep']/dx+0.5), 'srcmms': srp['srcmms'], # MMS 'inject': srp['inject'], # if y, inject; if n, Initiate conditon # fd 'size' : mdl['size'], 'fdsize': mdl['size']/2, 'frqcut': mdl['frqcut'], 'pml' : mdl['pml'], 'bd' : mdl['pml']+int((mdl['size']+1)/2) } return objpar def buildmodel(par, denname, velname, dvelname, denval, velval, dvelval): name = { 'den' : denname, 'vel' : velname, 'dvel': dvelname } value = { 'den' : denval, 'vel' : velval, 'dvel': dvelval } label = { 'den': 'Density', 'vel': 'Velocity', 'dvel': 'Velocity' } unit = { 'den': 'lg/m\^3\_', 'vel': 'm/s', 'dvel': 'm/s' } for m in ['den','vel','dvel']: Flow(name[m],None, ''' spike d1=%(dx)g n1=%(nx)d o1=0.0 label1=Depth unit1=m | '''%par + ''' math output="%s" '''%value[m]) pml = name[m]+'_pml' pmlt = name[m]+'_pmlt' pmlb = name[m]+'_pmlb' Flow(pmlt, name[m], 'window n1=1 f1= 0 |spray axis=1 n=%(bd)d' %par) Flow(pmlb, name[m], 'window n1=1 f1=-1 |spray axis=1 n=%(bd)d' %par) Flow(pml,[pmlt, name[m], pmlb],'cat ${SOURCES[1]} ${SOURCES[2]} axis=1') for m in ['den','vel','dvel']: Flow(name[m]+'hf',None, ''' spike 
d1=%(dx)g n1=%(nx)d o1=%(dxhf)g label1=Depth unit1=m | '''%par + ''' math output="%s" '''%value[m]) def buildic(par, ic): Flow(ic,None, ''' spike n1=%(nx)d d1=%(dx)g k1=%(spx)d| ricker1 frequency=%(wavfrq)g | scale axis=1 | put lable1="Depth" unit1="m" label2="Amplitude" unit2="" '''%par) def buildsrcp(par, srcp): Flow(srcp, None, ''' spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g | ricker1 frequency=%(frq)g | scale axis=1 |math output=input*400 '''%par) def buildsrcd(par, srcd, prefix, subfix): _pf = str(prefix) sf_ = str(subfix) spike = '%sspike%s' %(_pf, sf_) ricker = '%sricker%s' %(_pf, sf_) Flow(spike,None, ''' spike n1=%(nx)d n2=%(nt)d d1=%(dx)g d2=%(dt)g k1=%(spx)d k2=1 '''%par) Flow(ricker,None, ''' spike n1=%(nt)d d1=%(dt)g k1=%(bgnp)g | ricker1 frequency=%(frq)g |scale axis=1 '''%par) Flow(srcd,[spike,ricker], ''' convft other=${SOURCES[1]} axis=2 | window n2=%(nt)d | math output=input*400 '''%par) def buildmms(par, mms, psrc, vsrc, pint, vint, vel, dvel, den, velhf, dvelhf, denhf): #beta = 2*3.14159265359*par['frq'] alpha = 2*3.1415926*par['frq']/4.0 alpha = alpha*alpha Flow([mms, psrc, vsrc, pint, vint], [vel, dvel, den, velhf, dvelhf, denhf], ''' sfmms1dexp nt=%d dt=%g slx=%g alpha=%g dvel=${SOURCES[1]} den=${SOURCES[2]} presrc=${TARGETS[1]} velsrc=${TARGETS[2]} preinit=${TARGETS[3]} velinit=${TARGETS[4]} velhf=${SOURCES[3]} dvelhf=${SOURCES[4]} denhf=${SOURCES[5]}| put label1="Depth" unit1="km" label2="Time" unit2="s" '''%(par['nt'],par['dt'],par['slx'],alpha)) # ------------------------------------------------------------------------------ def lrmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix): _pf = str(prefix) sf_ = str(suffix) fft = '%sfft%s' %(_pf, sf_) rt = '%srt%s' %(_pf, sf_) lt = '%slt%s' %(_pf, sf_) Flow(fft, vel, 'fft1') Flow([rt, lt], [vel, fft], ''' isolrsg1 seed=2010 dt=%(dt)g fft=${SOURCES[1]} left=${TARGETS[1]} '''%par) if (mmsfiles == {}): Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic], ''' sfsglr1 verb=y rec=${TARGETS[1]} 
left=${SOURCES[1]} right=${SOURCES[2]} vel=${SOURCES[3]} den=${SOURCES[4]} fft=${SOURCES[5]} ic=${SOURCES[6]} gdep=%(gdep)g slx=%(slx)g inject=%(inject)s srcmms=%(srcmms)s '''%par) else : psrc = mmsfiles['presrc'] vsrc = mmsfiles['velsrc'] pint = mmsfiles['preinit'] vint = mmsfiles['velinit'] Flow([fwf,frec], [src, lt, rt, vel, den, fft, ic, psrc, vsrc, pint, vint], ''' sfsglr1 verb=y rec=${TARGETS[1]} left=${SOURCES[1]} right=${SOURCES[2]} vel=${SOURCES[3]} den=${SOURCES[4]} fft=${SOURCES[5]} ic=${SOURCES[6]} presrc=${SOURCES[7]} velsrc=${SOURCES[8]} preinit=${SOURCES[9]} velinit=${SOURCES[10]} gdep=%(gdep)g slx=%(slx)g inject=%(inject)s srcmms=%(srcmms)s '''%par) def lfdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par, prefix, suffix): _pf = str(prefix) sf_ = str(suffix) G = '%sG%s' %(_pf, sf_) sx = '%ssx%s' %(_pf, sf_) Flow([G,sx],vel, ''' sfsglfdc1 dt=%(dt)g eps=0.00001 npk=20 seed=2012 sx=${TARGETS[1]} size=%(size)d wavnumcut=%(frqcut)g ''' %par) if mmsfiles == {}: Flow([fwf, frec], [src, ic, vel, den, G, sx], ''' sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]} vel=${SOURCES[2]} den=${SOURCES[3]} G=${SOURCES[4]} sx=${SOURCES[5]} pmld0=20 gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d inject=%(inject)s srcmms=%(srcmms)s verb=y snapinter=1 ''' %par) else: psrc = mmsfiles['presrc'] vsrc = mmsfiles['velsrc'] pint = mmsfiles['preinit'] vint = mmsfiles['velinit'] Flow([fwf, frec], [src, ic, vel, den, G, sx, psrc, vsrc, pint, vint], ''' sfsglfd1pml rec=${TARGETS[1]} ic=${SOURCES[1]} vel=${SOURCES[2]} den=${SOURCES[3]} G=${SOURCES[4]} sx=${SOURCES[5]} presrc=${SOURCES[6]} velsrc=${SOURCES[7]} preinit=${SOURCES[8]} velinit=${SOURCES[9]} pmld0=20 gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d inject=%(inject)s srcmms=%(srcmms)s verb=y snapinter=1 ''' %par) def fdmodel(fwf, frec, src, ic, vel, den, mmsfiles, par): if (mmsfiles == {}): Flow([fwf, frec], [src, ic, vel, den], ''' sfsgfd1 ic=${SOURCES[1]} vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]} pmld0=20 size=%(fdsize)d 
gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d inject=%(inject)s verb=y snapinter=1 ''' %par ) else : psrc = mmsfiles['presrc'] vsrc = mmsfiles['velsrc'] pint = mmsfiles['preinit'] vint = mmsfiles['velinit'] Flow([fwf, frec], [src, ic, vel, den, psrc, vsrc, pint, vint], ''' sfsgfd1 ic=${SOURCES[1]} vel=${SOURCES[2]} den=${SOURCES[3]} rec=${TARGETS[1]} presrc=${SOURCES[4]} velsrc=${SOURCES[5]} preinit=${SOURCES[6]} velinit=${SOURCES[7]} pmld0=20 size=%(fdsize)d gdep=%(gdep)g slx=%(slx)g pmlsize=%(pml)d inject=%(inject)s srcmms=%(srcmms)s verb=y snapinter=1 ''' %par ) # ------------------------------------------------------------------------------ def analyticslt(fout, par, vel, prefix, subfix): _pf = str(prefix) sf_ = str(subfix) spx = par['spx'] selt= par['SelT'] dx = par['dx'] leftp = spx - round(vel*selt/dx) rightp = spx + round(vel*selt/dx) left = '%sleft%s' %(_pf, sf_) right= '%sright%s'%(_pf, sf_) for fi in [left, right]: p = (leftp, rightp)[fi==right] Flow(fi,None, ''' spike n1=%d d1=%g k1=%d| ricker1 frequency=%g | math output="input" '''%(par['nx'],par['dx'],p,par['wavfrq'])) Flow(fout,[left,right], ''' math t=${SOURCES[1]} output="input+t" | scale axis=2 | scale rscale=0.5 | put label1="Distance" unit1="km" ''') </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475193"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">stewartsmith/bzr</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bzrlib/tests/http_utils.py</span></div> </div></div> 
</td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">20784</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># Copyright (C) 2005-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from cStringIO import StringIO import re import urllib2 from bzrlib import ( errors, osutils, tests, transport, ) from bzrlib.smart import ( medium, ) from bzrlib.tests import http_server from bzrlib.transport import chroot class HTTPServerWithSmarts(http_server.HttpServer): """HTTPServerWithSmarts extends the HttpServer with POST methods that will trigger a smart server to execute with a transport rooted at the rootdir of the HTTP server. """ def __init__(self, protocol_version=None): http_server.HttpServer.__init__(self, SmartRequestHandler, protocol_version=protocol_version) class SmartRequestHandler(http_server.TestingHTTPRequestHandler): """Extend TestingHTTPRequestHandler to support smart client POSTs. XXX: This duplicates a fair bit of the logic in bzrlib.transport.http.wsgi. 
""" def do_POST(self): """Hand the request off to a smart server instance.""" backing = transport.get_transport_from_path( self.server.test_case_server._home_dir) chroot_server = chroot.ChrootServer(backing) chroot_server.start_server() try: t = transport.get_transport_from_url(chroot_server.get_url()) self.do_POST_inner(t) finally: chroot_server.stop_server() def do_POST_inner(self, chrooted_transport): self.send_response(200) self.send_header("Content-type", "application/octet-stream") if not self.path.endswith('.bzr/smart'): raise AssertionError( 'POST to path not ending in .bzr/smart: %r' % (self.path,)) t = chrooted_transport.clone(self.path[:-len('.bzr/smart')]) # if this fails, we should return 400 bad request, but failure is # failure for now - RBC 20060919 data_length = int(self.headers['Content-Length']) # TODO: We might like to support streaming responses. 1.0 allows no # Content-length in this case, so for integrity we should perform our # own chunking within the stream. # 1.1 allows chunked responses, and in this case we could chunk using # the HTTP chunking as this will allow HTTP persistence safely, even if # we have to stop early due to error, but we would also have to use the # HTTP trailer facility which may not be widely available. request_bytes = self.rfile.read(data_length) protocol_factory, unused_bytes = medium._get_protocol_factory_for_bytes( request_bytes) out_buffer = StringIO() smart_protocol_request = protocol_factory(t, out_buffer.write, '/') # Perhaps there should be a SmartServerHTTPMedium that takes care of # feeding the bytes in the http request to the smart_protocol_request, # but for now it's simpler to just feed the bytes directly. 
smart_protocol_request.accept_bytes(unused_bytes) if not (smart_protocol_request.next_read_size() == 0): raise errors.SmartProtocolError( "not finished reading, but all data sent to protocol.") self.send_header("Content-Length", str(len(out_buffer.getvalue()))) self.end_headers() self.wfile.write(out_buffer.getvalue()) class TestCaseWithWebserver(tests.TestCaseWithTransport): """A support class that provides readonly urls that are http://. This is done by forcing the readonly server to be an http one. This will currently fail if the primary transport is not backed by regular disk files. """ # These attributes can be overriden or parametrized by daughter clasess if # needed, but must exist so that the create_transport_readonly_server() # method (or any method creating an http(s) server) can propagate it. _protocol_version = None _url_protocol = 'http' def setUp(self): super(TestCaseWithWebserver, self).setUp() self.transport_readonly_server = http_server.HttpServer def create_transport_readonly_server(self): server = self.transport_readonly_server( protocol_version=self._protocol_version) server._url_protocol = self._url_protocol return server class TestCaseWithTwoWebservers(TestCaseWithWebserver): """A support class providing readonly urls on two servers that are http://. We set up two webservers to allows various tests involving proxies or redirections from one server to the other. """ def setUp(self): super(TestCaseWithTwoWebservers, self).setUp() self.transport_secondary_server = http_server.HttpServer self.__secondary_server = None def create_transport_secondary_server(self): """Create a transport server from class defined at init. This is mostly a hook for daughter classes. 
""" server = self.transport_secondary_server( protocol_version=self._protocol_version) server._url_protocol = self._url_protocol return server def get_secondary_server(self): """Get the server instance for the secondary transport.""" if self.__secondary_server is None: self.__secondary_server = self.create_transport_secondary_server() self.start_server(self.__secondary_server) return self.__secondary_server def get_secondary_url(self, relpath=None): base = self.get_secondary_server().get_url() return self._adjust_url(base, relpath) def get_secondary_transport(self, relpath=None): t = transport.get_transport_from_url(self.get_secondary_url(relpath)) self.assertTrue(t.is_readonly()) return t class ProxyServer(http_server.HttpServer): """A proxy test server for http transports.""" proxy_requests = True class RedirectRequestHandler(http_server.TestingHTTPRequestHandler): """Redirect all request to the specified server""" def parse_request(self): """Redirect a single HTTP request to another host""" valid = http_server.TestingHTTPRequestHandler.parse_request(self) if valid: tcs = self.server.test_case_server code, target = tcs.is_redirected(self.path) if code is not None and target is not None: # Redirect as instructed self.send_response(code) self.send_header('Location', target) # We do not send a body self.send_header('Content-Length', '0') self.end_headers() return False # The job is done else: # We leave the parent class serve the request pass return valid class HTTPServerRedirecting(http_server.HttpServer): """An HttpServer redirecting to another server """ def __init__(self, request_handler=RedirectRequestHandler, protocol_version=None): http_server.HttpServer.__init__(self, request_handler, protocol_version=protocol_version) # redirections is a list of tuples (source, target, code) # - source is a regexp for the paths requested # - target is a replacement for re.sub describing where # the request will be redirected # - code is the http error code associated to the 
# redirection (301 permanent, 302 temporarry, etc self.redirections = [] def redirect_to(self, host, port): """Redirect all requests to a specific host:port""" self.redirections = [('(.*)', r'http://%s:%s\1' % (host, port) , 301)] def is_redirected(self, path): """Is the path redirected by this server. :param path: the requested relative path :returns: a tuple (code, target) if a matching redirection is found, (None, None) otherwise. """ code = None target = None for (rsource, rtarget, rcode) in self.redirections: target, match = re.subn(rsource, rtarget, path) if match: code = rcode break # The first match wins else: target = None return code, target class TestCaseWithRedirectedWebserver(TestCaseWithTwoWebservers): """A support class providing redirections from one server to another. We set up two webservers to allows various tests involving redirections. The 'old' server is redirected to the 'new' server. """ def setUp(self): super(TestCaseWithRedirectedWebserver, self).setUp() # The redirections will point to the new server self.new_server = self.get_readonly_server() # The requests to the old server will be redirected to the new server self.old_server = self.get_secondary_server() def create_transport_secondary_server(self): """Create the secondary server redirecting to the primary server""" new = self.get_readonly_server() redirecting = HTTPServerRedirecting( protocol_version=self._protocol_version) redirecting.redirect_to(new.host, new.port) redirecting._url_protocol = self._url_protocol return redirecting def get_old_url(self, relpath=None): base = self.old_server.get_url() return self._adjust_url(base, relpath) def get_old_transport(self, relpath=None): t = transport.get_transport_from_url(self.get_old_url(relpath)) self.assertTrue(t.is_readonly()) return t def get_new_url(self, relpath=None): base = self.new_server.get_url() return self._adjust_url(base, relpath) def get_new_transport(self, relpath=None): t = 
transport.get_transport_from_url(self.get_new_url(relpath)) self.assertTrue(t.is_readonly()) return t class AuthRequestHandler(http_server.TestingHTTPRequestHandler): """Requires an authentication to process requests. This is intended to be used with a server that always and only use one authentication scheme (implemented by daughter classes). """ # The following attributes should be defined in the server # - auth_header_sent: the header name sent to require auth # - auth_header_recv: the header received containing auth # - auth_error_code: the error code to indicate auth required def _require_authentication(self): # Note that we must update test_case_server *before* # sending the error or the client may try to read it # before we have sent the whole error back. tcs = self.server.test_case_server tcs.auth_required_errors += 1 self.send_response(tcs.auth_error_code) self.send_header_auth_reqed() # We do not send a body self.send_header('Content-Length', '0') self.end_headers() return def do_GET(self): if self.authorized(): return http_server.TestingHTTPRequestHandler.do_GET(self) else: return self._require_authentication() def do_HEAD(self): if self.authorized(): return http_server.TestingHTTPRequestHandler.do_HEAD(self) else: return self._require_authentication() class BasicAuthRequestHandler(AuthRequestHandler): """Implements the basic authentication of a request""" def authorized(self): tcs = self.server.test_case_server if tcs.auth_scheme != 'basic': return False auth_header = self.headers.get(tcs.auth_header_recv, None) if auth_header: scheme, raw_auth = auth_header.split(' ', 1) if scheme.lower() == tcs.auth_scheme: user, password = raw_auth.decode('base64').split(':') return tcs.authorized(user, password) return False def send_header_auth_reqed(self): tcs = self.server.test_case_server self.send_header(tcs.auth_header_sent, 'Basic realm="%s"' % tcs.auth_realm) # FIXME: We could send an Authentication-Info header too when # the authentication is succesful 
class DigestAuthRequestHandler(AuthRequestHandler): """Implements the digest authentication of a request. We need persistence for some attributes and that can't be achieved here since we get instantiated for each request. We rely on the DigestAuthServer to take care of them. """ def authorized(self): tcs = self.server.test_case_server auth_header = self.headers.get(tcs.auth_header_recv, None) if auth_header is None: return False scheme, auth = auth_header.split(None, 1) if scheme.lower() == tcs.auth_scheme: auth_dict = urllib2.parse_keqv_list(urllib2.parse_http_list(auth)) return tcs.digest_authorized(auth_dict, self.command) return False def send_header_auth_reqed(self): tcs = self.server.test_case_server header = 'Digest realm="%s", ' % tcs.auth_realm header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce, 'MD5') self.send_header(tcs.auth_header_sent,header) class DigestAndBasicAuthRequestHandler(DigestAuthRequestHandler): """Implements a digest and basic authentication of a request. I.e. the server proposes both schemes and the client should choose the best one it can handle, which, in that case, should be digest, the only scheme accepted here. """ def send_header_auth_reqed(self): tcs = self.server.test_case_server self.send_header(tcs.auth_header_sent, 'Basic realm="%s"' % tcs.auth_realm) header = 'Digest realm="%s", ' % tcs.auth_realm header += 'nonce="%s", algorithm="%s", qop="auth"' % (tcs.auth_nonce, 'MD5') self.send_header(tcs.auth_header_sent,header) class AuthServer(http_server.HttpServer): """Extends HttpServer with a dictionary of passwords. This is used as a base class for various schemes which should all use or redefined the associated AuthRequestHandler. Note that no users are defined by default, so add_user should be called before issuing the first request. """ # The following attributes should be set dy daughter classes # and are used by AuthRequestHandler. 
auth_header_sent = None auth_header_recv = None auth_error_code = None auth_realm = "Thou should not pass" def __init__(self, request_handler, auth_scheme, protocol_version=None): http_server.HttpServer.__init__(self, request_handler, protocol_version=protocol_version) self.auth_scheme = auth_scheme self.password_of = {} self.auth_required_errors = 0 def add_user(self, user, password): """Declare a user with an associated password. password can be empty, use an empty string ('') in that case, not None. """ self.password_of[user] = password def authorized(self, user, password): """Check that the given user provided the right password""" expected_password = self.password_of.get(user, None) return expected_password is not None and password == expected_password # FIXME: There is some code duplication with # _urllib2_wrappers.py.DigestAuthHandler. If that duplication # grows, it may require a refactoring. Also, we don't implement # SHA algorithm nor MD5-sess here, but that does not seem worth # it. class DigestAuthServer(AuthServer): """A digest authentication server""" auth_nonce = 'now!' 
def __init__(self, request_handler, auth_scheme, protocol_version=None): AuthServer.__init__(self, request_handler, auth_scheme, protocol_version=protocol_version) def digest_authorized(self, auth, command): nonce = auth['nonce'] if nonce != self.auth_nonce: return False realm = auth['realm'] if realm != self.auth_realm: return False user = auth['username'] if not self.password_of.has_key(user): return False algorithm= auth['algorithm'] if algorithm != 'MD5': return False qop = auth['qop'] if qop != 'auth': return False password = self.password_of[user] # Recalculate the response_digest to compare with the one # sent by the client A1 = '%s:%s:%s' % (user, realm, password) A2 = '%s:%s' % (command, auth['uri']) H = lambda x: osutils.md5(x).hexdigest() KD = lambda secret, data: H("%s:%s" % (secret, data)) nonce_count = int(auth['nc'], 16) ncvalue = '%08x' % nonce_count cnonce = auth['cnonce'] noncebit = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, H(A2)) response_digest = KD(H(A1), noncebit) return response_digest == auth['response'] class HTTPAuthServer(AuthServer): """An HTTP server requiring authentication""" def init_http_auth(self): self.auth_header_sent = 'WWW-Authenticate' self.auth_header_recv = 'Authorization' self.auth_error_code = 401 class ProxyAuthServer(AuthServer): """A proxy server requiring authentication""" def init_proxy_auth(self): self.proxy_requests = True self.auth_header_sent = 'Proxy-Authenticate' self.auth_header_recv = 'Proxy-Authorization' self.auth_error_code = 407 class HTTPBasicAuthServer(HTTPAuthServer): """An HTTP server requiring basic authentication""" def __init__(self, protocol_version=None): HTTPAuthServer.__init__(self, BasicAuthRequestHandler, 'basic', protocol_version=protocol_version) self.init_http_auth() class HTTPDigestAuthServer(DigestAuthServer, HTTPAuthServer): """An HTTP server requiring digest authentication""" def __init__(self, protocol_version=None): DigestAuthServer.__init__(self, DigestAuthRequestHandler, 
'digest', protocol_version=protocol_version) self.init_http_auth() class HTTPBasicAndDigestAuthServer(DigestAuthServer, HTTPAuthServer): """An HTTP server requiring basic or digest authentication""" def __init__(self, protocol_version=None): DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler, 'basicdigest', protocol_version=protocol_version) self.init_http_auth() # We really accept Digest only self.auth_scheme = 'digest' class ProxyBasicAuthServer(ProxyAuthServer): """A proxy server requiring basic authentication""" def __init__(self, protocol_version=None): ProxyAuthServer.__init__(self, BasicAuthRequestHandler, 'basic', protocol_version=protocol_version) self.init_proxy_auth() class ProxyDigestAuthServer(DigestAuthServer, ProxyAuthServer): """A proxy server requiring basic authentication""" def __init__(self, protocol_version=None): ProxyAuthServer.__init__(self, DigestAuthRequestHandler, 'digest', protocol_version=protocol_version) self.init_proxy_auth() class ProxyBasicAndDigestAuthServer(DigestAuthServer, ProxyAuthServer): """An proxy server requiring basic or digest authentication""" def __init__(self, protocol_version=None): DigestAuthServer.__init__(self, DigestAndBasicAuthRequestHandler, 'basicdigest', protocol_version=protocol_version) self.init_proxy_auth() # We really accept Digest only self.auth_scheme = 'digest' </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475194"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">radicalbit/ambari</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div 
class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ambari-common/src/main/python/ambari_commons/get_ambari_version.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">3</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1589</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import os import ConfigParser from resource_management.core.logger import Logger """ returns the ambari version on an agent host """ def get_ambari_version_agent(): ambari_version = None AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini' if os.path.exists(AMBARI_AGENT_CONF): try: ambari_agent_config = ConfigParser.RawConfigParser() ambari_agent_config.read(AMBARI_AGENT_CONF) data_dir = ambari_agent_config.get('agent', 'prefix') ver_file = os.path.join(data_dir, 'version') with open(ver_file, "r") as f: ambari_version = f.read().strip() except Exception, e: Logger.info('Unable to determine ambari version from the agent version file.') Logger.debug('Exception: %s' % str(e)) pass pass return ambari_version </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475195"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">abomyi/django</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/shortcuts/views.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">87</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2274</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from django.shortcuts import render, render_to_response 
from django.template import RequestContext def render_to_response_view(request): return render_to_response('shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }) def render_to_response_view_with_multiple_templates(request): return render_to_response([ 'shortcuts/no_such_template.html', 'shortcuts/render_test.html', ], { 'foo': 'FOO', 'bar': 'BAR', }) def render_to_response_view_with_content_type(request): return render_to_response('shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }, content_type='application/x-rendertest') def render_to_response_view_with_status(request): return render_to_response('shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }, status=403) def render_to_response_view_with_using(request): using = request.GET.get('using') return render_to_response('shortcuts/using.html', using=using) def context_processor(request): return {'bar': 'context processor output'} def render_to_response_with_context_instance_misuse(request): context_instance = RequestContext(request, {}, processors=[context_processor]) # Incorrect -- context_instance should be passed as a keyword argument. 
return render_to_response('shortcuts/render_test.html', context_instance) def render_view(request): return render(request, 'shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }) def render_view_with_multiple_templates(request): return render(request, [ 'shortcuts/no_such_template.html', 'shortcuts/render_test.html', ], { 'foo': 'FOO', 'bar': 'BAR', }) def render_view_with_content_type(request): return render(request, 'shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }, content_type='application/x-rendertest') def render_view_with_status(request): return render(request, 'shortcuts/render_test.html', { 'foo': 'FOO', 'bar': 'BAR', }, status=403) def render_view_with_using(request): using = request.GET.get('using') return render(request, 'shortcuts/using.html', using=using) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475196"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">wisechengyi/pants</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">tests/python/pants_test/repo_scripts/test_git_hooks.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">8029</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 
"><div class="" dir="auto"> <div> <span class="block "># Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import datetime import os import shutil import subprocess import unittest from contextlib import contextmanager from pathlib import Path from textwrap import dedent from typing import Optional, Sequence from pants.testutil.git_util import initialize_repo from pants.util.contextutil import temporary_dir from pants.util.dirutil import safe_file_dump, safe_mkdir_for class PreCommitHookTest(unittest.TestCase): @contextmanager def _create_tiny_git_repo(self, *, copy_files: Optional[Sequence[Path]] = None): with temporary_dir() as gitdir, temporary_dir() as worktree: # A tiny little fake git repo we will set up. initialize_repo() requires at least one file. Path(worktree, "README").touch() # The contextmanager interface is only necessary if an explicit gitdir is not provided. with initialize_repo(worktree, gitdir=gitdir) as git: if copy_files is not None: for fp in copy_files: new_fp = Path(worktree, fp) safe_mkdir_for(str(new_fp)) shutil.copy(fp, new_fp) yield git, worktree, gitdir def _assert_subprocess_error(self, worktree, cmd, expected_excerpt): result = subprocess.run( cmd, cwd=worktree, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", ) self.assertNotEqual(0, result.returncode) self.assertIn(expected_excerpt, f"{result.stdout}\n{result.stderr}") def _assert_subprocess_success(self, worktree, cmd, **kwargs): self.assertEqual(0, subprocess.check_call(cmd, cwd=worktree, **kwargs)) def _assert_subprocess_success_with_output(self, worktree, cmd, full_expected_output): stdout = subprocess.run( cmd, cwd=worktree, check=True, stdout=subprocess.PIPE, encoding="utf-8" ).stdout self.assertEqual(full_expected_output, stdout) def test_check_packages(self): package_check_script = "build-support/bin/check_packages.sh" with 
self._create_tiny_git_repo(copy_files=[Path(package_check_script)]) as ( _, worktree, _, ): init_py_path = os.path.join(worktree, "subdir/__init__.py") # Check that an invalid __init__.py errors. safe_file_dump(init_py_path, "asdf") self._assert_subprocess_error( worktree, [package_check_script, "subdir"], """\ ERROR: All '__init__.py' files should be empty or else only contain a namespace declaration, but the following contain code: --- subdir/__init__.py """, ) # Check that a valid empty __init__.py succeeds. safe_file_dump(init_py_path, "") self._assert_subprocess_success(worktree, [package_check_script, "subdir"]) # Check that a valid __init__.py with `pkg_resources` setup succeeds. safe_file_dump(init_py_path, '__import__("pkg_resources").declare_namespace(__name__)') self._assert_subprocess_success(worktree, [package_check_script, "subdir"]) # TODO: consider testing the degree to which copies (-C) and moves (-M) are detected by making # some small edits to a file, then moving it, and seeing if it is detected as a new file! That's # more testing git functionality, but since it's not clear how this is measured, it could be # useful if correctly detecting copies and moves ever becomes a concern. def test_added_files_correctly_detected(self): get_added_files_script = "build-support/bin/get_added_files.sh" with self._create_tiny_git_repo(copy_files=[Path(get_added_files_script)]) as ( git, worktree, _, ): # Create a new file. new_file = os.path.join(worktree, "wow.txt") safe_file_dump(new_file, "") # Stage the file. rel_new_file = os.path.relpath(new_file, worktree) git.add(rel_new_file) self._assert_subprocess_success_with_output( worktree, [get_added_files_script], # This should be the only entry in the index, and it is a newly added file. 
full_expected_output=f"{rel_new_file}\n", ) def test_check_headers(self): header_check_script = "build-support/bin/check_header.py" cur_year_num = datetime.datetime.now().year cur_year = str(cur_year_num) with self._create_tiny_git_repo( copy_files=[Path(header_check_script), "build-support/bin/common.py"] ) as (_, worktree, _): new_py_path = os.path.join(worktree, "subdir/file.py") def assert_header_check(added_files, expected_excerpt): self._assert_subprocess_error( worktree=worktree, cmd=[header_check_script, "subdir", "--files-added"] + added_files, expected_excerpt=expected_excerpt, ) # Check that a file with an empty header fails. safe_file_dump(new_py_path, "") assert_header_check( added_files=[], expected_excerpt="subdir/file.py: missing the expected header" ) # Check that a file with a random header fails. safe_file_dump(new_py_path, "asdf") assert_header_check( added_files=[], expected_excerpt="subdir/file.py: missing the expected header" ) # Check that a file with a typo in the header fails safe_file_dump( new_py_path, dedent( f"""\ # Copyright {cur_year} Pants project contributors (see CONTRIBUTORS.md). # Licensed under the MIT License, Version 3.3 (see LICENSE). """ ), ) assert_header_check( added_files=[], expected_excerpt="subdir/file.py: header does not match the expected header", ) # Check that a file without a valid copyright year fails. safe_file_dump( new_py_path, dedent( """\ # Copyright YYYY Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). """ ), ) assert_header_check( added_files=[], expected_excerpt=( r"subdir/file.py: copyright year must match '20\d\d' (was YYYY): " f"current year is {cur_year}" ), ) # Check that a newly added file must have the current year. last_year = str(cur_year_num - 1) safe_file_dump( new_py_path, dedent( f"""\ # Copyright {last_year} Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). 
""" ), ) rel_new_py_path = os.path.relpath(new_py_path, worktree) assert_header_check( added_files=[rel_new_py_path], expected_excerpt=f"subdir/file.py: copyright year must be {cur_year} (was {last_year})", ) # Check that a file isn't checked against the current year if it is not passed as an # arg to the script. # Use the same file as last time, with last year's copyright date. self._assert_subprocess_success(worktree, [header_check_script, "subdir"]) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apache-2.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475197"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">apbard/scipy</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">scipy/sparse/linalg/dsolve/tests/test_linsolve.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">1</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">24049</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from __future__ import division, print_function, absolute_import import threading import numpy as np from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix import numpy.random as random from numpy.testing import ( assert_array_almost_equal, assert_raises, assert_almost_equal, 
assert_equal, assert_array_equal, assert_, assert_allclose, assert_warns) import pytest from scipy._lib._numpy_compat import assert_raises_regex import scipy.linalg from scipy.linalg import norm, inv from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix) from scipy.sparse.linalg import SuperLU from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu, MatrixRankWarning, _superlu, spsolve_triangular, factorized) from scipy._lib._numpy_compat import suppress_warnings sup_sparse_efficiency = suppress_warnings() sup_sparse_efficiency.filter(SparseEfficiencyWarning) # scikits.umfpack is not a SciPy dependency but it is optionally used in # dsolve, so check whether it's available try: import scikits.umfpack as umfpack has_umfpack = True except ImportError: has_umfpack = False def toarray(a): if isspmatrix(a): return a.toarray() else: return a class TestFactorized(object): def setup_method(self): n = 5 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc() random.seed(1234) def _check_singular(self): A = csc_matrix((5,5), dtype='d') b = ones(5) assert_array_almost_equal(0. 
* b, factorized(A)(b)) def _check_non_singular(self): # Make a diagonal dominant, to make sure it is not singular n = 5 a = csc_matrix(random.rand(n, n)) b = ones(n) expected = splu(a).solve(b) assert_array_almost_equal(factorized(a)(b), expected) def test_singular_without_umfpack(self): use_solver(useUmfpack=False) assert_raises_regex(RuntimeError, "Factor is exactly singular", self._check_singular) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_singular_with_umfpack(self): use_solver(useUmfpack=True) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") assert_warns(umfpack.UmfpackWarning, self._check_singular) def test_non_singular_without_umfpack(self): use_solver(useUmfpack=False) self._check_non_singular() @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_non_singular_with_umfpack(self): use_solver(useUmfpack=True) self._check_non_singular() def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): use_solver(useUmfpack=False) assert_raises_regex(ValueError, "can only factor square matrices", factorized, self.A[:,:4]) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_factorizes_nonsquare_matrix_with_umfpack(self): use_solver(useUmfpack=True) # does not raise factorized(self.A[:,:4]) def test_call_with_incorrectly_sized_matrix_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) B = random.rand(4, 3) BB = random.rand(self.n, 3, 9) assert_raises_regex(ValueError, "is of incompatible size", solve, b) assert_raises_regex(ValueError, "is of incompatible size", solve, B) assert_raises_regex(ValueError, "object too deep for desired array", solve, BB) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_incorrectly_sized_matrix_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = random.rand(4) B = random.rand(4, 3) 
BB = random.rand(self.n, 3, 9) # does not raise solve(b) assert_raises_regex(ValueError, "object too deep for desired array", solve, B) assert_raises_regex(ValueError, "object too deep for desired array", solve, BB) def test_call_with_cast_to_complex_without_umfpack(self): use_solver(useUmfpack=False) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: assert_raises_regex(TypeError, "Cannot cast array data", solve, b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_call_with_cast_to_complex_with_umfpack(self): use_solver(useUmfpack=True) solve = factorized(self.A) b = random.rand(4) for t in [np.complex64, np.complex128]: assert_warns(np.ComplexWarning, solve, b.astype(t)) @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_assume_sorted_indices_flag(self): # a sparse matrix with unsorted indices unsorted_inds = np.array([2, 0, 1, 0]) data = np.array([10, 16, 5, 0.4]) indptr = np.array([0, 1, 2, 4]) A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) b = ones(3) # should raise when incorrectly assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=True) assert_raises_regex(RuntimeError, "UMFPACK_ERROR_invalid_matrix", factorized, A) # should sort indices and succeed when not assuming indices are sorted use_solver(useUmfpack=True, assumeSortedIndices=False) expected = splu(A.copy()).solve(b) assert_equal(A.has_sorted_indices, 0) assert_array_almost_equal(factorized(A)(b), expected) assert_equal(A.has_sorted_indices, 1) class TestLinsolve(object): def setup_method(self): use_solver(useUmfpack=False) def test_singular(self): A = csc_matrix((5,5), dtype='d') b = array([1, 2, 3, 4, 5],dtype='d') with suppress_warnings() as sup: sup.filter(MatrixRankWarning, "Matrix is exactly singular") x = spsolve(A, b) assert_(not np.isfinite(x).any()) def test_singular_gh_3312(self): # "Bad" test case that leads SuperLU to call LAPACK with invalid # arguments. 
Check that it fails moderately gracefully. ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) A = csc_matrix((v, ij.T), shape=(20, 20)) b = np.arange(20) try: # should either raise a runtimeerror or return value # appropriate for singular input x = spsolve(A, b) assert_(not np.isfinite(x).any()) except RuntimeError: pass def test_twodiags(self): A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) b = array([1, 2, 3, 4, 5]) # condition number of A cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2) for t in ['f','d','F','D']: eps = finfo(t).eps # floating point epsilon b = b.astype(t) for format in ['csc','csr']: Asp = A.astype(t).asformat(format) x = spsolve(Asp,b) assert_(norm(b - Asp*x) < 10 * cond_A * eps) def test_bvector_smoketest(self): Adense = matrix([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3) b = As*x x2 = spsolve(As, b) assert_array_almost_equal(x, x2) def test_bmatrix_smoketest(self): Adense = matrix([[0., 1., 1.], [1., 0., 1.], [0., 0., 1.]]) As = csc_matrix(Adense) random.seed(1234) x = random.randn(3, 4) Bdense = As.dot(x) Bs = csc_matrix(Bdense) x2 = spsolve(As, Bs) assert_array_almost_equal(x, x2.todense()) @sup_sparse_efficiency def test_non_square(self): # A is not square. A = ones((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve, A, b) # A2 and b2 have incompatible shapes. 
A2 = csc_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve, A2, b2) @sup_sparse_efficiency def test_example_comparison(self): row = array([0,0,1,2,2,2]) col = array([0,2,2,0,1,2]) data = array([1,2,3,-4,5,6]) sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) M = sM.todense() row = array([0,0,1,1,0,0]) col = array([0,2,1,1,0,0]) data = array([1,1,1,1,1,1]) sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) N = sN.todense() sX = spsolve(sM, sN) X = scipy.linalg.solve(M, N) assert_array_almost_equal(X, sX.todense()) @sup_sparse_efficiency @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") def test_shape_compatibility(self): use_solver(useUmfpack=True) A = csc_matrix([[1., 0], [0, 2]]) bs = [ [1, 6], array([1, 6]), [[1], [6]], array([[1], [6]]), csc_matrix([[1], [6]]), csr_matrix([[1], [6]]), dok_matrix([[1], [6]]), bsr_matrix([[1], [6]]), array([[1., 2., 3.], [6., 8., 10.]]), csc_matrix([[1., 2., 3.], [6., 8., 10.]]), csr_matrix([[1., 2., 3.], [6., 8., 10.]]), dok_matrix([[1., 2., 3.], [6., 8., 10.]]), bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), ] for b in bs: x = np.linalg.solve(A.toarray(), toarray(b)) for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: x1 = spsolve(spmattype(A), b, use_umfpack=True) x2 = spsolve(spmattype(A), b, use_umfpack=False) # check solution if x.ndim == 2 and x.shape[1] == 1: # interprets also these as "vectors" x = x.ravel() assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1))) assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2))) # dense vs. 
sparse output ("vectors" are always dense) if isspmatrix(b) and x.ndim > 1: assert_(isspmatrix(x1), repr((b, spmattype, 1))) assert_(isspmatrix(x2), repr((b, spmattype, 2))) else: assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) # check output shape if x.ndim == 1: # "vector" assert_equal(x1.shape, (A.shape[1],)) assert_equal(x2.shape, (A.shape[1],)) else: # "matrix" assert_equal(x1.shape, x.shape) assert_equal(x2.shape, x.shape) A = csc_matrix((3, 3)) b = csc_matrix((1, 3)) assert_raises(ValueError, spsolve, A, b) @sup_sparse_efficiency def test_ndarray_support(self): A = array([[1., 2.], [2., 0.]]) x = array([[1., 1.], [0.5, -0.5]]) b = array([[2., 0.], [2., 2.]]) assert_array_almost_equal(x, spsolve(A, b)) def test_gssv_badinput(self): N = 10 d = arange(N) + 1.0 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) for spmatrix in (csc_matrix, csr_matrix): A = spmatrix(A) b = np.arange(N) def not_c_contig(x): return x.repeat(2)[::2] def not_1dim(x): return x[:,None] def bad_type(x): return x.astype(bool) def too_short(x): return x[:-1] badops = [not_c_contig, not_1dim, bad_type, too_short] for badop in badops: msg = "%r %r" % (spmatrix, badop) # Not C-contiguous assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, badop(A.data), A.indices, A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, badop(A.indices), A.indptr, b, int(spmatrix == csc_matrix), err_msg=msg) assert_raises((ValueError, TypeError), _superlu.gssv, N, A.nnz, A.data, A.indices, badop(A.indptr), b, int(spmatrix == csc_matrix), err_msg=msg) def test_sparsity_preservation(self): ident = csc_matrix([ [1, 0, 0], [0, 1, 0], [0, 0, 1]]) b = csc_matrix([ [0, 1], [1, 0], [0, 0]]) x = spsolve(ident, b) assert_equal(ident.nnz, 3) assert_equal(b.nnz, 2) assert_equal(x.nnz, 2) assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12) def test_dtype_cast(self): 
A_real = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) A_complex = scipy.sparse.csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5 + 1j]]) b_real = np.array([1,1,1]) b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) x = spsolve(A_real, b_real) assert_(np.issubdtype(x.dtype, np.floating)) x = spsolve(A_real, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_real) assert_(np.issubdtype(x.dtype, np.complexfloating)) x = spsolve(A_complex, b_complex) assert_(np.issubdtype(x.dtype, np.complexfloating)) class TestSplu(object): def setup_method(self): use_solver(useUmfpack=False) n = 40 d = arange(n) + 1 self.n = n self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) random.seed(1234) def _smoketest(self, spxlu, check, dtype): if np.issubdtype(dtype, np.complexfloating): A = self.A + 1j*self.A.T else: A = self.A A = A.astype(dtype) lu = spxlu(A) rng = random.RandomState(1234) # Input shapes for k in [None, 1, 2, self.n, self.n+2]: msg = "k=%r" % (k,) if k is None: b = rng.rand(self.n) else: b = rng.rand(self.n, k) if np.issubdtype(dtype, np.complexfloating): b = b + 1j*rng.rand(*b.shape) b = b.astype(dtype) x = lu.solve(b) check(A, b, x, msg) x = lu.solve(b, 'T') check(A.T, b, x, msg) x = lu.solve(b, 'H') check(A.T.conj(), b, x, msg) @sup_sparse_efficiency def test_splu_smoketest(self): self._internal_test_splu_smoketest() def _internal_test_splu_smoketest(self): # Check that splu works at all def check(A, b, x, msg=""): eps = np.finfo(A.dtype).eps r = A * x assert_(abs(r - b).max() < 1e3*eps, msg) self._smoketest(splu, check, np.float32) self._smoketest(splu, check, np.float64) self._smoketest(splu, check, np.complex64) self._smoketest(splu, check, np.complex128) @sup_sparse_efficiency def test_spilu_smoketest(self): self._internal_test_spilu_smoketest() def _internal_test_spilu_smoketest(self): errors = [] def check(A, b, x, msg=""): r = A * x err = abs(r - b).max() assert_(err < 1e-2, msg) if b.dtype in (np.float64, 
np.complex128): errors.append(err) self._smoketest(spilu, check, np.float32) self._smoketest(spilu, check, np.float64) self._smoketest(spilu, check, np.complex64) self._smoketest(spilu, check, np.complex128) assert_(max(errors) > 1e-5) @sup_sparse_efficiency def test_spilu_drop_rule(self): # Test passing in the drop_rule argument to spilu. A = identity(2) rules = [ b'basic,area'.decode('ascii'), # unicode b'basic,area', # ascii [b'basic', b'area'.decode('ascii')] ] for rule in rules: # Argument should be accepted assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) def test_splu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, splu, A) def test_spilu_nnz0(self): A = csc_matrix((5,5), dtype='d') assert_raises(RuntimeError, spilu, A) def test_splu_basic(self): # Test basic splu functionality. n = 30 rng = random.RandomState(12) a = rng.rand(n, n) a[a < 0.95] = 0 # First test with a singular matrix a[:, 0] = 0 a_ = csc_matrix(a) # Matrix is exactly singular assert_raises(RuntimeError, splu, a_) # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) b = ones(n) x = lu.solve(b) assert_almost_equal(dot(a, x), b) def test_splu_perm(self): # Test the permutation vectors exposed by splu. n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # Check that the permutation indices do belong to [0, n-1]. for perm in (lu.perm_r, lu.perm_c): assert_(all(perm > -1)) assert_(all(perm < n)) assert_equal(len(unique(perm)), len(perm)) # Now make a symmetric, and test that the two permutation vectors are # the same # Note: a += a.T relies on undefined behavior. a = a + a.T a_ = csc_matrix(a) lu = splu(a_) assert_array_equal(lu.perm_r, lu.perm_c) def test_lu_refcount(self): # Test that we are keeping track of the reference count with splu. 
n = 30 a = random.random((n, n)) a[a < 0.95] = 0 # Make a diagonal dominant, to make sure it is not singular a += 4*eye(n) a_ = csc_matrix(a) lu = splu(a_) # And now test that we don't have a refcount bug import sys rc = sys.getrefcount(lu) for attr in ('perm_r', 'perm_c'): perm = getattr(lu, attr) assert_equal(sys.getrefcount(lu), rc + 1) del perm assert_equal(sys.getrefcount(lu), rc) def test_bad_inputs(self): A = self.A.tocsc() assert_raises(ValueError, splu, A[:,:4]) assert_raises(ValueError, spilu, A[:,:4]) for lu in [splu(A), spilu(A)]: b = random.rand(42) B = random.rand(42, 3) BB = random.rand(self.n, 3, 9) assert_raises(ValueError, lu.solve, b) assert_raises(ValueError, lu.solve, B) assert_raises(ValueError, lu.solve, BB) assert_raises(TypeError, lu.solve, b.astype(np.complex64)) assert_raises(TypeError, lu.solve, b.astype(np.complex128)) @sup_sparse_efficiency def test_superlu_dlamch_i386_nan(self): # SuperLU 4.3 calls some functions returning floats without # declaring them. On i386@linux call convention, this fails to # clear floating point registers after call. As a result, NaN # can appear in the next floating point operation made. # # Here's a test case that triggered the issue. 
n = 8 d = np.arange(n) + 1 A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) A = A.astype(np.float32) spilu(A) A = A + 1j*A B = A.A assert_(not np.isnan(B).any()) @sup_sparse_efficiency def test_lu_attr(self): def check(dtype, complex_2=False): A = self.A.astype(dtype) if complex_2: A = A + 1j*A.T n = A.shape[0] lu = splu(A) # Check that the decomposition is as advertized Pc = np.zeros((n, n)) Pc[np.arange(n), lu.perm_c] = 1 Pr = np.zeros((n, n)) Pr[lu.perm_r, np.arange(n)] = 1 Ad = A.toarray() lhs = Pr.dot(Ad).dot(Pc) rhs = (lu.L * lu.U).toarray() eps = np.finfo(dtype).eps assert_allclose(lhs, rhs, atol=100*eps) check(np.float32) check(np.float64) check(np.complex64) check(np.complex128) check(np.complex64, True) check(np.complex128, True) @sup_sparse_efficiency def test_threads_parallel(self): oks = [] def worker(): try: self.test_splu_basic() self._internal_test_splu_smoketest() self._internal_test_spilu_smoketest() oks.append(True) except: pass threads = [threading.Thread(target=worker) for k in range(20)] for t in threads: t.start() for t in threads: t.join() assert_equal(len(oks), 20) class TestSpsolveTriangular(object): def setup_method(self): use_solver(useUmfpack=False) def test_singular(self): n = 5 A = csr_matrix((n, n)) b = np.arange(n) for lower in (True, False): assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower) @sup_sparse_efficiency def test_bad_shape(self): # A is not square. A = np.zeros((3, 4)) b = ones((4, 1)) assert_raises(ValueError, spsolve_triangular, A, b) # A2 and b2 have incompatible shapes. 
A2 = csr_matrix(eye(3)) b2 = array([1.0, 2.0]) assert_raises(ValueError, spsolve_triangular, A2, b2) @sup_sparse_efficiency def test_input_types(self): A = array([[1., 0.], [1., 2.]]) b = array([[2., 0.], [2., 2.]]) for matrix_type in (array, csc_matrix, csr_matrix): x = spsolve_triangular(matrix_type(A), b, lower=True) assert_array_almost_equal(A.dot(x), b) @sup_sparse_efficiency def test_random(self): def random_triangle_matrix(n, lower=True): A = scipy.sparse.random(n, n, density=0.1, format='coo') if lower: A = scipy.sparse.tril(A) else: A = scipy.sparse.triu(A) A = A.tocsr(copy=False) for i in range(n): A[i, i] = np.random.rand() + 1 return A np.random.seed(1234) for lower in (True, False): for n in (10, 10**2, 10**3): A = random_triangle_matrix(n, lower=lower) for m in (1, 10): for b in (np.random.rand(n, m), np.random.randint(-9, 9, (n, m)), np.random.randint(-9, 9, (n, m)) + np.random.randint(-9, 9, (n, m)) * 1j): x = spsolve_triangular(A, b, lower=lower) assert_array_almost_equal(A.dot(x), b) </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">bsd-3-clause</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475198"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">microcom/odoo</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">addons/survey/wizard/survey_email_compose_message.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">29</span></div> </div></div> </td><td class="min-w-fit max-w-sm 
break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">10120</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.osv import osv from openerp.osv import fields from openerp.tools.translate import _ from datetime import datetime from openerp.exceptions import UserError import re import uuid import urlparse emails_split = re.compile(r"[;,\n\r]+") class survey_mail_compose_message(osv.TransientModel): _name = 'survey.mail.compose.message' _inherit = 'mail.compose.message' _description = 'Email composition wizard for Survey' _log_access = True def _get_public_url(self, cr, uid, ids, name, arg, context=None): res = dict((id, 0) for id in ids) survey_obj = self.pool.get('survey.survey') for wizard in self.browse(cr, uid, ids, context=context): res[wizard.id] = wizard.survey_id.public_url return res def _get_public_url_html(self, cr, uid, ids, name, arg, context=None): """ Compute if the message is unread by the current user """ urls = self._get_public_url(cr, uid, ids, name, arg, context=context) for key, url in urls.items(): urls[key] = '<a href="%s">%s</a>' % (url, _("Click here to start survey")) return urls _columns = { 'survey_id': fields.many2one('survey.survey', 'Survey', required=True), 'public': fields.selection([('public_link', 'Share the public web link to your audience.'), ('email_public_link', 'Send by email the public web link to your audience.'), ('email_private', 'Send private invitation to your audience (only one response per recipient and per invitation).')], string='Share options', required=True), 'public_url': fields.function(_get_public_url, string="Public url", type="char"), 'public_url_html': fields.function(_get_public_url_html, string="Public HTML web link", type="char"), 'partner_ids': 
fields.many2many('res.partner', 'survey_mail_compose_message_res_partner_rel', 'wizard_id', 'partner_id', 'Existing contacts'), 'attachment_ids': fields.many2many('ir.attachment', 'survey_mail_compose_message_ir_attachments_rel', 'wizard_id', 'attachment_id', 'Attachments'), 'multi_email': fields.text(string='List of emails', help="This list of emails of recipients will not converted in contacts. Emails separated by commas, semicolons or newline."), 'date_deadline': fields.date(string="Deadline to which the invitation to respond is valid", help="Deadline to which the invitation to respond for this survey is valid. If the field is empty, the invitation is still valid."), } _defaults = { 'public': 'public_link', 'survey_id': lambda self, cr, uid, ctx={}: ctx.get('model') == 'survey.survey' and ctx.get('res_id') or None } def default_get(self, cr, uid, fields, context=None): res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context) if context.get('active_model') == 'res.partner' and context.get('active_ids'): res.update({'partner_ids': context.get('active_ids')}) return res def onchange_multi_email(self, cr, uid, ids, multi_email, context=None): emails = list(set(emails_split.split(multi_email or ""))) emails_checked = [] error_message = "" for email in emails: email = email.strip() if email: if not re.search(r"^[^@]+@[^@]+$", email): error_message += "\n'%s'" % email else: emails_checked.append(email) if error_message: raise UserError(_("One email at least is incorrect: %s") % error_message) emails_checked.sort() values = {'multi_email': '\n'.join(emails_checked)} return {'value': values} def onchange_survey_id(self, cr, uid, ids, survey_id, context=None): """ Compute if the message is unread by the current user. 
""" if survey_id: survey = self.pool.get('survey.survey').browse(cr, uid, survey_id, context=context) return { 'value': { 'subject': survey.title, 'public_url': survey.public_url, 'public_url_html': '<a href="%s">%s</a>' % (survey.public_url, _("Click here to take survey")), }} else: txt = _("Please select a survey") return { 'value': { 'public_url': txt, 'public_url_html': txt, }} #------------------------------------------------------ # Wizard validation and send #------------------------------------------------------ def send_mail(self, cr, uid, ids, auto_commit=False, context=None): """ Process the wizard content and proceed with sending the related email(s), rendering any template patterns on the fly if needed """ if context is None: context = {} survey_response_obj = self.pool.get('survey.user_input') partner_obj = self.pool.get('res.partner') mail_mail_obj = self.pool.get('mail.mail') try: model, anonymous_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'portal', 'group_anonymous') except ValueError: anonymous_id = None def create_response_and_send_mail(wizard, token, partner_id, email): """ Create one mail by recipients and replace __URL__ by link with identification token """ #set url url = wizard.survey_id.public_url url = urlparse.urlparse(url).path[1:] # dirty hack to avoid incorrect urls if token: url = url + '/' + token # post the message values = { 'model': None, 'res_id': None, 'subject': wizard.subject, 'body': wizard.body.replace("__URL__", url), 'body_html': wizard.body.replace("__URL__", url), 'parent_id': None, 'attachment_ids': wizard.attachment_ids and [(6, 0, wizard.attachment_ids.ids)] or None, 'email_from': wizard.email_from or None, 'auto_delete': True, } if partner_id: values['recipient_ids'] = [(4, partner_id)] else: values['email_to'] = email mail_id = mail_mail_obj.create(cr, uid, values, context=context) mail_mail_obj.send(cr, uid, [mail_id], context=context) def create_token(wizard, partner_id, email): if 
context.get("survey_resent_token"): response_ids = survey_response_obj.search(cr, uid, [('survey_id', '=', wizard.survey_id.id), ('state', 'in', ['new', 'skip']), '|', ('partner_id', '=', partner_id), ('email', '=', email)], context=context) if response_ids: return survey_response_obj.read(cr, uid, response_ids, ['token'], context=context)[0]['token'] if wizard.public != 'email_private': return None else: token = uuid.uuid4().__str__() # create response with token survey_response_obj.create(cr, uid, { 'survey_id': wizard.survey_id.id, 'deadline': wizard.date_deadline, 'date_create': datetime.now(), 'type': 'link', 'state': 'new', 'token': token, 'partner_id': partner_id, 'email': email}, context=context) return token for wizard in self.browse(cr, uid, ids, context=context): # check if __URL__ is in the text if wizard.body.find("__URL__") < 0: raise UserError(_("The content of the text don't contain '__URL__'. \ __URL__ is automaticaly converted into the special url of the survey.")) if not wizard.multi_email and not wizard.partner_ids and (context.get('default_partner_ids') or context.get('default_multi_email')): wizard.multi_email = context.get('default_multi_email') wizard.partner_ids = context.get('default_partner_ids') # quick check of email list emails_list = [] if wizard.multi_email: emails = list(set(emails_split.split(wizard.multi_email)) - set([partner.email for partner in wizard.partner_ids])) for email in emails: email = email.strip() if re.search(r"^[^@]+@[^@]+$", email): emails_list.append(email) # remove public anonymous access partner_list = [] for partner in wizard.partner_ids: if not anonymous_id or not partner.user_ids or anonymous_id not in [x.id for x in partner.user_ids[0].groups_id]: partner_list.append({'id': partner.id, 'email': partner.email}) if not len(emails_list) and not len(partner_list): if wizard.model == 'res.partner' and wizard.res_id: return False raise UserError(_("Please enter at least one valid recipient.")) for email in 
emails_list: partner_id = partner_obj.search(cr, uid, [('email', '=', email)], context=context) partner_id = partner_id and partner_id[0] or None token = create_token(wizard, partner_id, email) create_response_and_send_mail(wizard, token, partner_id, email) for partner in partner_list: token = create_token(wizard, partner['id'], partner['email']) create_response_and_send_mail(wizard, token, partner['id'], partner['email']) return {'type': 'ir.actions.act_window_close'} </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">agpl-3.0</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="475199"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">freephys/python_ase</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ase/transport/calculators.py</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">2</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">11688</span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np from numpy import linalg from ase.transport.selfenergy import LeadSelfEnergy, BoxProbe from ase.transport.greenfunction import GreenFunction from ase.transport.tools import subdiagonalize, cutcoupling, tri2full, dagger class TransportCalculator: """Determine transport properties of 
device sandwiched between semi-infinite leads using nonequillibrium Green function methods. """ def __init__(self, **kwargs): """Bla Bla XXX energies is the energy grid on which the transport properties should be determined. h1 (h2) is a matrix representation of the Hamiltonian of two principal layers of the left (right) lead, and the coupling between such layers. h is a matrix representation of the Hamiltonian of the scattering region. This must include at least one lead principal layer on each side. The coupling in (out) of the scattering region is by default assumed to be identical to the coupling between left (right) principal layers. However, these couplings can also be specified explicitly through hc1 and hc2. s, s1, and s2 are the overlap matrices corresponding to h, h1, and h2. Default is the identity operator. sc1 and sc2 are the overlap matrices corresponding to the optional couplings hc1 and hc2. align_bf specifies the principal layer basis index used to align the fermi levels of the lead and scattering regions. """ # The default values for all extra keywords self.input_parameters = {'energies': None, 'h': None, 'h1': None, 'h2': None, 's': None, 's1': None, 's2': None, 'hc1': None, 'hc2': None, 'sc1': None, 'sc2': None, 'box': None, 'align_bf': None, 'eta1': 1e-3, 'eta2': 1e-3, 'eta': 1e-3, 'logfile': None, # '-', 'eigenchannels': 0, 'dos': False, 'pdos': [], } self.initialized = False # Changed Hamiltonians? self.uptodate = False # Changed energy grid? 
self.set(**kwargs) def set(self, **kwargs): for key in kwargs: if key in ['h', 'h1', 'h2', 'hc1', 'hc2', 's', 's1', 's2', 'sc1', 'sc2', 'eta', 'eta1', 'eta2', 'align_bf', 'box']: self.initialized = False self.uptodate = False break elif key in ['energies', 'eigenchannels', 'dos', 'pdos']: self.uptodate = False elif key not in self.input_parameters: raise KeyError, '\'%s\' not a vaild keyword' % key self.input_parameters.update(kwargs) log = self.input_parameters['logfile'] if log is None: class Trash: def write(self, s): pass def flush(self): pass self.log = Trash() elif log == '-': from sys import stdout self.log = stdout elif 'logfile' in kwargs: self.log = open(log, 'w') def initialize(self): if self.initialized: return print >> self.log, '# Initializing calculator...' p = self.input_parameters if p['s1'] == None: p['s1'] = np.identity(len(p['h1'])) if p['s2'] == None: p['s2'] = np.identity(len(p['h2'])) if p['s'] == None: p['s'] = np.identity(len(p['h'])) h_mm = p['h'] s_mm = p['s'] pl1 = len(p['h1']) / 2 pl2 = len(p['h2']) / 2 h1_ii = p['h1'][:pl1, :pl1] h1_ij = p['h1'][:pl1, pl1:2 * pl1] s1_ii = p['s1'][:pl1, :pl1] s1_ij = p['s1'][:pl1, pl1:2 * pl1] h2_ii = p['h2'][:pl2, :pl2] h2_ij = p['h2'][pl2: 2 * pl2, :pl2] s2_ii = p['s2'][:pl2, :pl2] s2_ij = p['s2'][pl2: 2 * pl2, :pl2] if p['hc1'] is None: nbf = len(h_mm) h1_im = np.zeros((pl1, nbf), complex) s1_im = np.zeros((pl1, nbf), complex) h1_im[:pl1, :pl1] = h1_ij s1_im[:pl1, :pl1] = s1_ij else: h1_im = p['hc1'] if p['sc1'] is not None: s1_im = p['sc1'] else: s1_im = np.zeros(h1_im.shape, complex) if p['hc2'] is None: h2_im = np.zeros((pl2, nbf), complex) s2_im = np.zeros((pl2, nbf), complex) h2_im[-pl2:, -pl2:] = h2_ij s2_im[-pl2:, -pl2:] = s2_ij else: h2_im = p['hc2'] if p['sc2'] is not None: s2_im[:] = p['sc2'] else: s2_im = np.zeros(h2_im.shape, complex) align_bf = p['align_bf'] if align_bf != None: diff = (h_mm[align_bf, align_bf] - h1_ii[align_bf, align_bf]) \ / s_mm[align_bf, align_bf] print >> self.log, 
'# Aligning scat. H to left lead H. diff=', diff h_mm -= diff * s_mm #setup lead self-energies self.selfenergies = [LeadSelfEnergy((h1_ii, s1_ii), (h1_ij, s1_ij), (h1_im, s1_im), p['eta1']), LeadSelfEnergy((h2_ii, s2_ii), (h2_ij, s2_ij), (h2_im, s2_im), p['eta2'])] box = p['box'] if box is not None: print 'Using box probe!' self.selfenergies.append( BoxProbe(eta=box[0], a=box[1], b=box[2], energies=box[3], S=s_mm, T=0.3)) #setup scattering green function self.greenfunction = GreenFunction(selfenergies=self.selfenergies, H=h_mm, S=s_mm, eta=p['eta']) self.initialized = True def update(self): if self.uptodate: return p = self.input_parameters self.energies = p['energies'] nepts = len(self.energies) nchan = p['eigenchannels'] pdos = p['pdos'] self.T_e = np.empty(nepts) if p['dos']: self.dos_e = np.empty(nepts) if pdos != []: self.pdos_ne = np.empty((len(pdos), nepts)) if nchan > 0: self.eigenchannels_ne = np.empty((nchan, nepts)) for e, energy in enumerate(self.energies): Ginv_mm = self.greenfunction.retarded(energy, inverse=True) lambda1_mm = self.selfenergies[0].get_lambda(energy) lambda2_mm = self.selfenergies[1].get_lambda(energy) a_mm = linalg.solve(Ginv_mm, lambda1_mm) b_mm = linalg.solve(dagger(Ginv_mm), lambda2_mm) T_mm = np.dot(a_mm, b_mm) if nchan > 0: t_n = linalg.eigvals(T_mm).real self.eigenchannels_ne[:, e] = np.sort(t_n)[-nchan:] self.T_e[e] = np.sum(t_n) else: self.T_e[e] = np.trace(T_mm).real print >> self.log, energy, self.T_e[e] self.log.flush() if p['dos']: self.dos_e[e] = self.greenfunction.dos(energy) if pdos != []: self.pdos_ne[:, e] = np.take(self.greenfunction.pdos(energy), pdos) self.uptodate = True def print_pl_convergence(self): self.initialize() pl1 = len(self.input_parameters['h1']) / 2 h_ii = self.selfenergies[0].h_ii s_ii = self.selfenergies[0].s_ii ha_ii = self.greenfunction.H[:pl1, :pl1] sa_ii = self.greenfunction.S[:pl1, :pl1] c1 = np.abs(h_ii - ha_ii).max() c2 = np.abs(s_ii - sa_ii).max() print 'Conv (h,s)=%.2e, %2.e' % (c1, c2) def 
plot_pl_convergence(self): self.initialize() pl1 = len(self.input_parameters['h1']) / 2 hlead = self.selfenergies[0].h_ii.real.diagonal() hprincipal = self.greenfunction.H.real.diagonal[:pl1] import pylab as pl pl.plot(hlead, label='lead') pl.plot(hprincipal, label='principal layer') pl.axis('tight') pl.show() def get_transmission(self): self.initialize() self.update() return self.T_e def get_dos(self): self.initialize() self.update() return self.dos_e def get_eigenchannels(self, n=None): """Get ``n`` first eigenchannels.""" self.initialize() self.update() if n is None: n = self.input_parameters['eigenchannels'] return self.eigenchannels_ne[:n] def get_pdos(self): self.initialize() self.update() return self.pdos_ne def subdiagonalize_bfs(self, bfs): self.initialize() bfs = np.array(bfs) p = self.input_parameters h_pp = p['h'] s_pp = p['s'] ht_pp, st_pp, c_pp, e_p = subdiagonalize(h_pp, s_pp, bfs) c_pp = np.take(c_pp, bfs, axis=0) c_pp = np.take(c_pp, bfs, axis=1) return ht_pp, st_pp, e_p, c_pp def cutcoupling_bfs(self, bfs): self.initialize() bfs = np.array(bfs) p = self.input_parameters h_pp = p['h'].copy() s_pp = p['s'].copy() cutcoupling(h_pp, s_pp, bfs) return h_pp, s_pp def get_left_channels(self, energy, nchan=1): self.initialize() g_s_ii = self.greenfunction.retarded(energy) lambda_l_ii = self.selfenergies[0].get_lambda(energy) lambda_r_ii = self.selfenergies[1].get_lambda(energy) if self.greenfunction.S is None: s_s_qsrt_ii = s_s_isqrt = np.identity(len(g_s_ii)) else: s_mm = self.greenfunction.S s_s_i, s_s_ii = linalg.eig(s_mm) s_s_i = np.abs(s_s_i) s_s_sqrt_i = np.sqrt(s_s_i) # sqrt of eigenvalues s_s_sqrt_ii = np.dot(s_s_ii * s_s_sqrt_i, dagger(s_s_ii)) s_s_isqrt_ii = np.dot(s_s_ii / s_s_sqrt_i, dagger(s_s_ii)) lambdab_r_ii = np.dot(np.dot(s_s_isqrt_ii, lambda_r_ii),s_s_isqrt_ii) a_l_ii = np.dot(np.dot(g_s_ii, lambda_l_ii), dagger(g_s_ii)) ab_l_ii = np.dot(np.dot(s_s_sqrt_ii, a_l_ii), s_s_sqrt_ii) lambda_i, u_ii = linalg.eig(ab_l_ii) ut_ii = 
np.sqrt(lambda_i / (2.0 * np.pi)) * u_ii m_ii = 2 * np.pi * np.dot(np.dot(dagger(ut_ii), lambdab_r_ii),ut_ii) T_i,c_in = linalg.eig(m_ii) T_i = np.abs(T_i) channels = np.argsort(-T_i)[:nchan] c_in = np.take(c_in, channels, axis=1) T_n = np.take(T_i, channels) v_in = np.dot(np.dot(s_s_isqrt_ii, ut_ii), c_in) return T_n, v_in </span></div> </div></div> </td><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">gpl-3.0</span></div> </div></div> </td> </tr></tbody></table> </div> <div class="bg-linear-to-b from-gray-100 to-white dark:from-gray-950 dark:to-gray-900 rounded-b-lg"><hr class="flex-none -translate-y-px border-t border-dashed border-gray-300 bg-white dark:border-gray-700 dark:bg-gray-950"> <nav><ul class="flex select-none items-center justify-between space-x-2 text-gray-700 sm:justify-center py-1 text-center font-mono text-xs "><li><a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 " href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4750"><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M10 16L20 6l1.4 1.4l-8.6 8.6l8.6 8.6L20 26z" fill="currentColor"></path></svg> Previous</a></li> <li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=0">1</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 pointer-events-none cursor-default" href="#">...</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4749">4,750</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 
dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4750">4,751</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-900 dark:text-yellow-500 dark:ring-gray-900 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4751">4,752</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4752">4,753</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4753">4,754</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 pointer-events-none cursor-default" href="#">...</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4772">4,773</a> </li> <li><a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 " href="/datasets/transformersbook/codeparrot/viewer/default/train?p=4752">Next <svg class="ml-1.5 transform rotate-180" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M10 16L20 6l1.4 1.4l-8.6 8.6l8.6 8.6L20 26z" fill="currentColor"></path></svg></a></li></ul></nav></div></div> </div></div></div></div></div></div></div> <div class="hidden items-center md:flex"> <div class="mx-1 flex items-center justify-center"><div class="h-8 w-1 cursor-ew-resize rounded-full bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 max-sm:hidden" role="separator"></div></div> <div class="flex h-full flex-col" style="height: 
calc(100vh - 48px)"><div class="my-4 mr-4 h-full overflow-auto rounded-lg border shadow-lg dark:border-gray-800" style="width: 480px"><div class="flex h-full flex-col"><div class="flex flex-col "> <div class="px-4 md:mt-4"><div class="mb-4 flex justify-end"> <span class="inline-block w-full flex justify-center"><span class="contents"><div class="flex w-full flex-col rounded-lg border-slate-200 bg-white p-2 shadow-md ring-1 ring-slate-200 dark:border-slate-700 dark:bg-slate-800 dark:ring-slate-700"> <div class="mt-0 flex items-start gap-1"><div class="flex items-center rounded-md bg-slate-100 p-2 dark:bg-slate-700"><svg class="size-4 text-gray-700 dark:text-gray-300" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 11 11"><path fill="currentColor" d="M4.881 4.182c0 .101-.031.2-.087.283a.5.5 0 0 1-.242.18l-.65.217a1.3 1.3 0 0 0-.484.299 1.3 1.3 0 0 0-.298.484l-.222.639a.46.46 0 0 1-.18.242.5.5 0 0 1-.288.092.5.5 0 0 1-.294-.097.5.5 0 0 1-.175-.242l-.211-.644a1.26 1.26 0 0 0-.299-.48 1.14 1.14 0 0 0-.479-.298L.328 4.64a.48.48 0 0 1-.247-.18.515.515 0 0 1 .247-.758l.644-.21a1.28 1.28 0 0 0 .788-.789l.211-.634a.5.5 0 0 1 .165-.242.5.5 0 0 1 .283-.103.5.5 0 0 1 .294.083c.086.058.152.14.19.237l.217.659a1.28 1.28 0 0 0 .788.788l.644.222a.476.476 0 0 1 .237.18.5.5 0 0 1 .092.288"></path><path fill="currentColor" d="M10.031 7.458a.5.5 0 0 1-.098.314.5.5 0 0 1-.267.196l-.881.293c-.272.09-.519.242-.721.443a1.8 1.8 0 0 0-.443.721l-.31.876a.5.5 0 0 1-.185.263.56.56 0 0 1-.319.098.515.515 0 0 1-.515-.366l-.294-.88a1.8 1.8 0 0 0-.443-.722c-.204-.2-.45-.353-.72-.448l-.881-.288a.57.57 0 0 1-.263-.191.56.56 0 0 1-.014-.64.5.5 0 0 1 .271-.194l.886-.294A1.82 1.82 0 0 0 6.01 5.465l.293-.87a.515.515 0 0 1 .49-.377c.11 0 .219.03.314.088a.56.56 0 0 1 .206.263l.298.896a1.82 1.82 0 0 0 1.175 1.174l.875.31a.5.5 0 0 1 .263.195c.07.09.108.2.108.314"></path><path 
fill="currentColor" d="M7.775 1.684a.5.5 0 0 0 .088-.262.45.45 0 0 0-.088-.263.5.5 0 0 0-.21-.155L7.24.896a.5.5 0 0 1-.165-.103.5.5 0 0 1-.103-.17l-.108-.33a.5.5 0 0 0-.165-.21A.5.5 0 0 0 6.426 0a.5.5 0 0 0-.252.098.5.5 0 0 0-.145.206l-.108.32a.5.5 0 0 1-.103.17.5.5 0 0 1-.17.102L5.334 1a.45.45 0 0 0-.216.155.5.5 0 0 0-.088.262c0 .094.029.186.083.263a.5.5 0 0 0 .216.16l.32.103q.095.03.164.103a.37.37 0 0 1 .103.165l.108.319c.031.09.088.17.165.227a.56.56 0 0 0 .252.077.42.42 0 0 0 .268-.093.5.5 0 0 0 .15-.2l.113-.325a.43.43 0 0 1 .268-.268l.32-.108a.42.42 0 0 0 .215-.155"></path></svg></div> <div class="flex min-w-0 flex-1"><textarea placeholder="Ask AI to help write your query..." class="max-h-64 min-h-8 w-full resize-none overflow-y-auto border-none bg-transparent py-1 text-sm leading-6 text-slate-700 placeholder-slate-400 [scrollbar-width:thin] focus:ring-0 dark:text-slate-200 dark:placeholder-slate-400" rows="1"></textarea> </div> </div> </div></span> </span></div> <div class="relative flex flex-col rounded-md bg-gray-100 pt-2 dark:bg-gray-800/50"> <div class="flex h-64 items-center justify-center "><svg class="animate-spin text-xs" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path class="opacity-75" fill-rule="evenodd" clip-rule="evenodd" d="M6 0C2.6862 0 0 2.6862 0 6H1.8C1.8 4.88609 2.2425 3.8178 3.03015 3.03015C3.8178 2.2425 4.88609 1.8 6 1.8V0ZM12 6C12 9.3138 9.3138 12 6 12V10.2C7.11391 10.2 8.1822 9.7575 8.96985 8.96985C9.7575 8.1822 10.2 7.11391 10.2 6H12Z" fill="currentColor"></path><path class="opacity-25" fill-rule="evenodd" clip-rule="evenodd" d="M3.03015 8.96985C3.8178 9.7575 4.88609 10.2 6 10.2V12C2.6862 12 0 9.3138 0 6H1.8C1.8 7.11391 2.2425 8.1822 3.03015 8.96985ZM7.60727 2.11971C7.0977 1.90864 6.55155 1.8 6 1.8V0C9.3138 0 12 2.6862 12 6H10.2C10.2 5.44845 10.0914 4.9023 9.88029 
4.39273C9.66922 3.88316 9.35985 3.42016 8.96985 3.03015C8.57984 2.64015 8.11684 2.33078 7.60727 2.11971Z" fill="currentColor"></path></svg></div></div> <div class="mt-2 flex flex-col gap-2"><div class="flex items-center justify-between max-sm:text-sm"><div class="flex w-full items-center justify-between gap-4"> <span class="flex flex-shrink-0 items-center gap-1"><span class="font-semibold">Subsets and Splits</span> <span class="inline-block "><span class="contents"><svg class="text-xs text-gray-500 dark:text-gray-400" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M17 22v-8h-4v2h2v6h-3v2h8v-2h-3z" fill="currentColor"></path><path d="M16 8a1.5 1.5 0 1 0 1.5 1.5A1.5 1.5 0 0 0 16 8z" fill="currentColor"></path><path d="M16 30a14 14 0 1 1 14-14a14 14 0 0 1-14 14zm0-26a12 12 0 1 0 12 12A12 12 0 0 0 16 4z" fill="currentColor"></path></svg></span> </span> </span> <div class="ml-4 flex flex-1 items-center justify-end gap-1"> </div></div></div> <div class="flex flex-nowrap gap-1 overflow-x-auto"></div></div> <button type="button" class="btn mt-2 h-10 w-full text-sm font-semibold md:text-base" ><span class="flex items-center gap-1.5"> <span>Run Query</span> <span class="shadow-xs ml-2 hidden items-center rounded-sm border bg-white px-0.5 text-xs font-medium text-gray-700 sm:inline-flex">Ctrl+↵</span></span></button></div> <div class="flex flex-col px-2 pb-4"></div></div> <div class="mt-auto pb-4"><div class="flex justify-center"><div class="w-full sm:px-4"><div class="mb-3"><ul class="flex gap-1 text-sm "><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 text-gray-500 hover:bg-gray-100 hover:text-gray-700 dark:hover:bg-gray-900 dark:hover:text-gray-300">Saved Queries </button> </li><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 bg-black text-white 
dark:bg-gray-800">Top Community Queries </button> </li></ul></div> <div class="h-48 overflow-y-auto"><div class="flex flex-col gap-2"><div class="flex h-48 flex-col items-center justify-center rounded border border-gray-200 bg-gray-50 p-4 text-center dark:border-gray-700/60 dark:bg-gray-900"><p class="mb-1 font-semibold text-gray-600 dark:text-gray-400">No community queries yet</p> <p class="max-w-xs text-xs text-gray-500 dark:text-gray-400">The top public SQL queries from the community will appear here once available.</p></div></div></div></div></div></div></div></div></div></div> </div></div></div></main> </div> <script data-cfasync="false" src="/cdn-cgi/scripts/5c5dd728/cloudflare-static/email-decode.min.js"></script><script> import("\/front\/build\/kube-bf554b6\/index.js"); window.moonSha = "kube-bf554b6\/"; window.__hf_deferred = {}; </script> <!-- Stripe --> <script> if (["hf.co", "huggingface.co"].includes(window.location.hostname)) { const script = document.createElement("script"); script.src = "https://js.stripe.com/v3/"; script.async = true; document.head.appendChild(script); } </script> </body> </html>