')\n text = soup.findAll('div', text=True)\n self.failUnless(len(text) == 2)\n self.failUnless(text[0] == u'Text 01')\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475214,"cells":{"repo_name":{"kind":"string","value":"mdeejay/kernel_huawei_omap4"},"path":{"kind":"string","value":"tools/perf/scripts/python/syscall-counts.py"},"copies":{"kind":"string","value":"11181"},"size":{"kind":"string","value":"1522"},"content":{"kind":"string","value":"# system call counts\n# (c) 2010, Tom Zanussi \n# Licensed under the terms of the GNU GPL License version 2\n#\n# Displays system-wide system call totals, broken down by syscall.\n# If a [comm] arg is specified, only syscalls called by [comm] are displayed.\n\nimport os\nimport sys\n\nsys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n\nfrom perf_trace_context import *\nfrom Core import *\nfrom Util import syscall_name\n\nusage = \"perf script -s syscall-counts.py [comm]\\n\";\n\nfor_comm = None\n\nif len(sys.argv) > 2:\n\tsys.exit(usage)\n\nif len(sys.argv) > 1:\n\tfor_comm = sys.argv[1]\n\nsyscalls = autodict()\n\ndef trace_begin():\n\tprint \"Press control+C to stop and show the summary\"\n\ndef trace_end():\n\tprint_syscall_totals()\n\ndef raw_syscalls__sys_enter(event_name, context, common_cpu,\n\tcommon_secs, common_nsecs, common_pid, common_comm,\n\tid, args):\n\tif for_comm is not None:\n\t\tif common_comm != for_comm:\n\t\t\treturn\n\ttry:\n\t\tsyscalls[id] += 1\n\texcept TypeError:\n\t\tsyscalls[id] = 1\n\ndef print_syscall_totals():\n if for_comm is not None:\n\t print \"\\nsyscall events for %s:\\n\\n\" % (for_comm),\n else:\n\t print \"\\nsyscall events:\\n\\n\",\n\n print \"%-40s %10s\\n\" % (\"event\", \"count\"),\n print \"%-40s %10s\\n\" % (\"----------------------------------------\", \\\n \"-----------\"),\n\n for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \\\n\t\t\t\t reverse = True):\n\t print \"%-40s %10d\\n\" % (syscall_name(id), val),\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475215,"cells":{"repo_name":{"kind":"string","value":"valkyriesavage/invenio"},"path":{"kind":"string","value":"modules/bibupload/lib/batchuploader_engine.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"22837"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##\n## This file is part of Invenio.\n## Copyright (C) 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"\nBatch Uploader core functions. 
Uploading metadata and documents.\n\"\"\"\n\nimport os\nimport pwd, grp\nimport time\nimport tempfile\n\nfrom invenio.dbquery import run_sql\nfrom invenio.access_control_engine import acc_authorize_action\nfrom invenio.webuser import collect_user_info\nfrom invenio.config import CFG_BINDIR, CFG_TMPDIR, CFG_LOGDIR, \\\n CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \\\n CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \\\n CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \\\n CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \\\n CFG_BATCHUPLOADER_WEB_ROBOT_AGENT, \\\n CFG_PREFIX, CFG_SITE_LANG\nfrom invenio.webinterface_handler_wsgi_utils import Field\nfrom invenio.textutils import encode_for_xml\nfrom invenio.bibtask import task_low_level_submission\nfrom invenio.messages import gettext_set_language\n\nPERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',\n '--insert', '--replace', '--correct', '--append']\n\ndef cli_allocate_record(req):\n req.content_type = \"text/plain\"\n req.send_http_header()\n\n # check IP and useragent:\n if not _check_client_ip(req):\n msg = \"[ERROR] Sorry, client IP %s cannot use the service.\" % _get_client_ip(req)\n _log(msg)\n return _write(req, msg)\n if not _check_client_useragent(req):\n msg = \"[ERROR] Sorry, this useragent cannot use the service.\"\n _log(msg)\n return _write(req, msg)\n\n recid = run_sql(\"insert into bibrec (creation_date,modification_date) values(NOW(),NOW())\")\n return recid\n\ndef cli_upload(req, file_content=None, mode=None):\n \"\"\" Robot interface for uploading MARC files\n \"\"\"\n req.content_type = \"text/plain\"\n req.send_http_header()\n\n # check IP and useragent:\n if not _check_client_ip(req):\n msg = \"[ERROR] Sorry, client IP %s cannot use the service.\" % _get_client_ip(req)\n _log(msg)\n return _write(req, msg)\n if not _check_client_useragent(req):\n msg = \"[ERROR] Sorry, this useragent cannot use the service.\"\n _log(msg)\n return _write(req, msg)\n\n arg_file = file_content\n arg_mode = mode\n if not arg_file:\n msg = \"[ERROR] Please specify file body to input.\"\n _log(msg)\n return _write(req, msg)\n if not arg_mode:\n msg = \"[ERROR] Please specify upload mode to use.\"\n _log(msg)\n return _write(req, msg)\n if not arg_mode in PERMITTED_MODES:\n msg = \"[ERROR] Invalid upload mode.\"\n _log(msg)\n return _write(req, msg)\n if isinstance(arg_file, Field):\n arg_file = arg_file.value\n\n # write temporary file:\n tempfile.tempdir = CFG_TMPDIR\n\n filename = tempfile.mktemp(prefix=\"batchupload_\" + \\\n time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\")\n\n filedesc = open(filename, 'w')\n filedesc.write(arg_file)\n filedesc.close()\n\n # check if this client can run this file:\n client_ip = _get_client_ip(req)\n permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]\n if permitted_dbcollids != ['*']: # wildcard\n allow = _check_client_can_submit_file(client_ip, filename, req, 0)\n if not allow:\n msg = \"[ERROR] Cannot submit such a file from this IP. (Wrong collection.)\"\n _log(msg)\n return _write(req, msg)\n\n # run upload command:\n cmd = CFG_BINDIR + '/bibupload -u batchupload ' + arg_mode + ' ' + filename\n os.system(cmd)\n msg = \"[INFO] %s\" % cmd\n _log(msg)\n return _write(req, msg)\n\ndef metadata_upload(req, metafile=None, mode=None, exec_date=None, exec_time=None, metafilename=None, ln=CFG_SITE_LANG):\n \"\"\"\n Metadata web upload service. 
Get upload parameters and exec bibupload for the given file.\n Finally, write upload history.\n @return: tuple (error code, message)\n error code: code that indicates if an error occurred\n message: message describing the error\n \"\"\"\n # start output:\n req.content_type = \"text/html\"\n req.send_http_header()\n\n # write temporary file:\n metafile = metafile.value\n user_info = collect_user_info(req)\n tempfile.tempdir = CFG_TMPDIR\n filename = tempfile.mktemp(prefix=\"batchupload_\" + \\\n user_info['nickname'] + \"_\" + time.strftime(\"%Y%m%d%H%M%S\",\n time.localtime()) + \"_\" + metafilename + \"_\")\n filedesc = open(filename, 'w')\n filedesc.write(metafile)\n filedesc.close()\n\n # check if this client can run this file:\n allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)\n if allow[0] != 0:\n return (allow[0], allow[1])\n\n # run upload command:\n if exec_date:\n date = \"\\'\" + exec_date + ' ' + exec_time + \"\\'\"\n jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, \"--name=\" + metafilename,\"-t\", date, filename)\n else:\n jobid = task_low_level_submission('bibupload', user_info['nickname'], mode, \"--name=\" + metafilename, filename)\n\n # write batch upload history\n run_sql(\"\"\"INSERT INTO hstBATCHUPLOAD (user, submitdate,\n filename, execdate, id_schTASK, batch_mode)\n VALUES (%s, NOW(), %s, %s, %s, \"metadata\")\"\"\",\n (user_info['nickname'], metafilename,\n exec_date != \"\" and (exec_date + ' ' + exec_time)\n or time.strftime(\"%Y-%m-%d %H:%M:%S\"), str(jobid), ))\n return (0, \"Task %s queued\" % str(jobid))\n\ndef document_upload(req=None, folder=\"\", matching=\"\", mode=\"\", exec_date=\"\", exec_time=\"\", ln=CFG_SITE_LANG):\n \"\"\" Take files from the given directory and upload them with the appropriate mode.\n @parameters:\n + folder: Folder where the files to upload are stored\n + matching: How to match file names with record fields (report number, barcode,...)\n + mode: Upload mode (append, revise, replace)\n @return: tuple (file, error code)\n file: file name causing the error to notify the user\n error code:\n 1 - More than one possible recID, ambiguous behaviour\n 2 - No records match that file name\n 3 - File already exists\n \"\"\"\n import sys\n if sys.hexversion < 0x2060000:\n from md5 import md5\n else:\n from hashlib import md5\n from invenio.bibdocfile import BibRecDocs, file_strip_ext\n import shutil\n from invenio.search_engine import perform_request_search, \\\n search_pattern, \\\n guess_collection_of_a_record\n _ = gettext_set_language(ln)\n errors = []\n info = [0, []] # Number of files read, name of the files\n try:\n files = os.listdir(folder)\n except OSError, error:\n errors.append((\"\", error))\n return errors, info\n err_desc = {1: _(\"More than one possible recID, ambiguous behaviour\"), 2: _(\"No records match that file name\"),\n 3: _(\"File already exists\"), 4: _(\"A file with the same name and format already exists\"),\n 5: _(\"No rights to upload to collection '%s'\")}\n # Create directory DONE/ if doesn't exist\n folder = (folder[-1] == \"/\") and folder or (folder + \"/\")\n files_done_dir = folder + \"DONE/\"\n try:\n os.mkdir(files_done_dir)\n except OSError:\n # Directory exists or no write permission\n pass\n for docfile in files:\n if os.path.isfile(os.path.join(folder, docfile)):\n info[0] += 1\n identifier = file_strip_ext(docfile)\n extension = docfile[len(identifier):]\n rec_id = None\n if identifier:\n rec_id = search_pattern(p=identifier, f=matching, 
m='e')\n if not rec_id:\n errors.append((docfile, err_desc[2]))\n continue\n elif len(rec_id) > 1:\n errors.append((docfile, err_desc[1]))\n continue\n else:\n rec_id = str(list(rec_id)[0])\n rec_info = BibRecDocs(rec_id)\n if rec_info.bibdocs:\n for bibdoc in rec_info.bibdocs:\n attached_files = bibdoc.list_all_files()\n file_md5 = md5(open(os.path.join(folder, docfile), \"rb\").read()).hexdigest()\n num_errors = len(errors)\n for attached_file in attached_files:\n if attached_file.checksum == file_md5:\n errors.append((docfile, err_desc[3]))\n break\n elif attached_file.fullname == docfile:\n errors.append((docfile, err_desc[4]))\n break\n if len(errors) > num_errors:\n continue\n # Check if user has rights to upload file\n file_collection = guess_collection_of_a_record(int(rec_id))\n auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)\n if auth_code != 0:\n error_msg = err_desc[5] % file_collection\n errors.append((docfile, error_msg))\n continue\n tempfile.tempdir = CFG_TMPDIR\n # Move document to be uploaded to temporary folder\n tmp_file = tempfile.mktemp(prefix=identifier + \"_\" + time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\", suffix=extension)\n shutil.copy(os.path.join(folder, docfile), tmp_file)\n # Create MARC temporary file with FFT tag and call bibupload\n filename = tempfile.mktemp(prefix=identifier + '_')\n filedesc = open(filename, 'w')\n marc_content = \"\"\" <record>\n <controlfield tag=\"001\">%(rec_id)s</controlfield>\n <datafield tag=\"FFT\" ind1=\" \" ind2=\" \">\n <subfield code=\"n\">%(name)s</subfield>\n <subfield code=\"a\">%(path)s</subfield>\n </datafield>\n </record> \"\"\" % {'rec_id': rec_id,\n 'name': encode_for_xml(identifier),\n 'path': encode_for_xml(tmp_file),\n }\n filedesc.write(marc_content)\n filedesc.close()\n info[1].append(docfile)\n user_info = collect_user_info(req)\n user = user_info['nickname']\n if not user:\n user = \"batchupload\"\n # Execute bibupload with the appropriate mode\n if exec_date:\n date = '--runtime=' + \"\\'\" + exec_date + ' ' + exec_time + \"\\'\"\n jobid = task_low_level_submission('bibupload', user, \"--\" + mode, \"--name=\" + docfile, date, filename)\n else:\n jobid = task_low_level_submission('bibupload', user, \"--\" + mode, \"--name=\" + docfile, filename)\n\n # write batch upload history\n run_sql(\"\"\"INSERT INTO hstBATCHUPLOAD (user, submitdate,\n filename, execdate, id_schTASK, batch_mode)\n VALUES (%s, NOW(), %s, %s, %s, \"document\")\"\"\",\n (user_info['nickname'], docfile,\n exec_date != \"\" and (exec_date + ' ' + exec_time)\n or time.strftime(\"%Y-%m-%d %H:%M:%S\"), str(jobid)))\n\n # Move file to DONE folder\n done_filename = docfile + \"_\" + time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\" + str(jobid)\n try:\n os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))\n except OSError:\n errors.append('MoveError')\n return errors, info\n\ndef get_user_metadata_uploads(req):\n \"\"\"Retrieve all metadata upload history information for a given user\"\"\"\n user_info = collect_user_info(req)\n upload_list = run_sql(\"\"\"SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \\\n h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \\\n s.status \\\n FROM hstBATCHUPLOAD h INNER JOIN schTASK s \\\n ON h.id_schTASK = s.id \\\n WHERE h.user=%s and h.batch_mode=\"metadata\"\n ORDER BY h.submitdate DESC\"\"\", (user_info['nickname'],))\n return upload_list\n\ndef get_user_document_uploads(req):\n \"\"\"Retrieve all document upload history information for a given user\"\"\"\n user_info = collect_user_info(req)\n upload_list = run_sql(\"\"\"SELECT DATE_FORMAT(h.submitdate, 
'%%Y-%%m-%%d %%H:%%i:%%S'), \\\n h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \\\n s.status \\\n FROM hstBATCHUPLOAD h INNER JOIN schTASK s \\\n ON h.id_schTASK = s.id \\\n WHERE h.user=%s and h.batch_mode=\"document\"\n ORDER BY h.submitdate DESC\"\"\", (user_info['nickname'],))\n return upload_list\n\ndef get_daemon_doc_files():\n \"\"\" Return all files found in batchuploader document folders \"\"\"\n files = {}\n for folder in ['/revise', '/append']:\n try:\n daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \\\n or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR\n directory = daemon_dir + '/documents' + folder\n files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]\n for file_instance, info in files[directory]:\n stat_info = os.lstat(os.path.join(directory, file_instance))\n info.append(\"%s\" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner\n info.append(\"%s\" % grp.getgrgid(stat_info.st_gid)[0]) # Group\n info.append(\"%d\" % stat_info.st_size) # Size\n time_stat = stat_info.st_mtime\n time_fmt = \"%Y-%m-%d %R\"\n info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified\n except OSError:\n pass\n return files\n\ndef get_daemon_meta_files():\n \"\"\" Return all files found in batchuploader metadata folders \"\"\"\n files = {}\n for folder in ['/correct', '/replace', '/insert', '/append']:\n try:\n daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \\\n or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR\n directory = daemon_dir + '/metadata' + folder\n files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]\n for file_instance, info in files[directory]:\n stat_info = os.lstat(os.path.join(directory, file_instance))\n info.append(\"%s\" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner\n info.append(\"%s\" % grp.getgrgid(stat_info.st_gid)[0]) # Group\n info.append(\"%d\" % stat_info.st_size) # Size\n time_stat = stat_info.st_mtime\n time_fmt = \"%Y-%m-%d %R\"\n info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified\n except OSError:\n pass\n return files\n\ndef _get_client_ip(req):\n \"\"\"Return client IP address from req object.\"\"\"\n return str(req.remote_ip)\n\ndef _check_client_ip(req):\n \"\"\"\n Is this client permitted to use the service?\n \"\"\"\n client_ip = _get_client_ip(req)\n if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():\n return True\n return False\n\ndef _check_client_useragent(req):\n \"\"\"\n Is this user agent permitted to use the service?\n \"\"\"\n user_info = collect_user_info(req)\n client_useragent = user_info['agent']\n if client_useragent in CFG_BATCHUPLOADER_WEB_ROBOT_AGENT:\n return True\n return False\n\ndef _check_client_can_submit_file(client_ip=\"\", metafile=\"\", req=None, webupload=0, ln=CFG_SITE_LANG):\n \"\"\"\n Is this client able to upload such a FILENAME?\n check 980 $a values and collection tags in the file to see if they are among the\n permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.\n Useful to make sure that the client does not override other records by\n mistake.\n \"\"\"\n from invenio.bibrecord import create_records\n\n _ = gettext_set_language(ln)\n recs = create_records(metafile, 0, 0)\n user_info = collect_user_info(req)\n\n filename_tag980_values = _detect_980_values_from_marcxml_file(recs)\n for 
filename_tag980_value in filename_tag980_values:\n if not filename_tag980_value:\n if not webupload:\n return False\n else:\n return(1, \"Invalid tag 980 value\")\n if not webupload:\n if not filename_tag980_value in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:\n return False\n else:\n auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)\n if auth_code != 0:\n error_msg = _(\"The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'\") % \\\n {'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}\n return (auth_code, error_msg)\n\n filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)\n\n for filename_rec_id_collection in filename_rec_id_collections:\n if not webupload:\n if not filename_rec_id_collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:\n return False\n else:\n auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)\n if auth_code != 0:\n error_msg = _(\"The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'\") % \\\n {'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}\n return (auth_code, error_msg)\n if not webupload:\n return True\n else:\n return (0, \" \")\n\ndef _detect_980_values_from_marcxml_file(recs):\n \"\"\"\n Read MARCXML file and return list of 980 $a values found in that file.\n Useful for checking rights.\n \"\"\"\n from invenio.bibrecord import record_get_field_values\n\n collection_tag = run_sql(\"SELECT value FROM tag, field_tag, field \\\n WHERE tag.id=field_tag.id_tag AND \\\n field_tag.id_field=field.id AND \\\n field.code='collection'\")\n collection_tag = collection_tag[0][0]\n dbcollids = {}\n for rec, dummy1, dummy2 in recs:\n if rec:\n for tag980 in record_get_field_values(rec,\n tag=collection_tag[:3],\n ind1=collection_tag[3],\n ind2=collection_tag[4],\n code=collection_tag[5]):\n dbcollids[tag980] = 1\n return dbcollids.keys()\n\n\ndef _detect_collections_from_marcxml_file(recs):\n \"\"\"\n Extract all possible recIDs from MARCXML file and guess collections\n for these recIDs.\n \"\"\"\n from invenio.bibrecord import record_get_field_values\n from invenio.search_engine import guess_collection_of_a_record\n from invenio.bibupload import find_record_from_sysno, \\\n find_records_from_extoaiid, \\\n find_record_from_oaiid\n\n dbcollids = {}\n sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG\n oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG\n oai_tag = CFG_OAI_ID_FIELD\n for rec, dummy1, dummy2 in recs:\n if rec:\n for tag001 in record_get_field_values(rec, '001'):\n collection = guess_collection_of_a_record(int(tag001))\n dbcollids[collection] = 1\n for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],\n ind1=sysno_tag[3],\n ind2=sysno_tag[4],\n code=sysno_tag[5]):\n record = find_record_from_sysno(tag_sysno)\n collection = guess_collection_of_a_record(int(record))\n dbcollids[collection] = 1\n for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],\n ind1=oaiid_tag[3],\n ind2=oaiid_tag[4],\n code=oaiid_tag[5]):\n record = find_records_from_extoaiid(tag_oaiid)\n collection = guess_collection_of_a_record(int(record))\n dbcollids[collection] = 1\n for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],\n ind1=oai_tag[3],\n ind2=oai_tag[4],\n code=oai_tag[5]):\n record = find_record_from_oaiid(tag_oai)\n collection = guess_collection_of_a_record(int(record))\n dbcollids[collection] = 1\n return dbcollids.keys()\n\n\n\ndef _log(msg, 
logfile=\"webupload.log\"):\n \"\"\"\n Log MSG into LOGFILE with timestamp.\n \"\"\"\n filedesc = open(CFG_LOGDIR + \"/\" + logfile, \"a\")\n filedesc.write(time.strftime(\"%Y-%m-%d %H:%M:%S\") + \" --> \" + msg + \"\\n\")\n filedesc.close()\n return\n\ndef _write(req, msg):\n \"\"\"\n Write MSG to the output stream for the end user.\n \"\"\"\n req.write(msg + \"\\n\")\n return\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475216,"cells":{"repo_name":{"kind":"string","value":"lo-co/atm-py"},"path":{"kind":"string","value":"atmPy/for_removal/LAS/LAS.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"8188"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 10 11:43:10 2014\n\n@author: htelg\n\"\"\"\n\nimport datetime\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pylab as plt\nfrom StringIO import StringIO as io\nfrom scipy.interpolate import UnivariateSpline\n\nfrom atmPy.aerosols.size_distr import sizedistribution\n\n\ndef read_csv(fname):\n las = _readFromFakeXLS(fname)\n sd,hk = _separate_sizedist_and_housekeep(las)\n bins = _get_bins(sd)\n dist = sizedistribution.SizeDist_TS(sd, bins, \"numberConcentration\")\n return dist\n\n\ndef _separate_sizedist_and_housekeep(las):\n \"\"\"Beside separating size distribution and housekeeping this\n function also converts the data to a numberconcentration (#/cc)\n\n Parameters\n ----------\n las: pandas.DataFrame\"\"\"\n\n sd = las.copy()\n hk = las.copy()\n k = sd.keys()\n where = np.argwhere(k == 'Flow sccm') + 1\n khk = k[: where]\n sd = sd.drop(khk, axis=1)\n hsd = k[where:]\n hk = hk.drop(hsd, axis=1)\n\n hk['Sample sccm'] = hk['Sample sccm'].astype(float)\n\n hk['Accum. Secs'] = hk['Accum. Secs'].astype(float)\n\n # normalize to time and flow\n sd = sd.mul(60./hk['Sample sccm'] / hk['Accum. Secs'], axis = 0 )\n return sd,hk\n\ndef _get_bins(frame, log=False):\n \"\"\"\n get the bins from the column labels of the size distribution DataFrame.\n \"\"\"\n frame = frame.copy()\n\n bins = np.zeros(frame.keys().shape[0]+1)\n for e, i in enumerate(frame.keys()):\n bin_s, bin_e = i.split(' ')\n bin_s = float(bin_s)\n bin_e = float(bin_e)\n bins[e] = bin_s\n bins[e+1] = bin_e\n return bins #binCenters\n\n\ndef _readFromFakeXLS(fname):\n \"\"\"reads and shapes a XLS file produced by the LAS instrument\"\"\"\n fr = pd.read_csv(fname, sep='\\t')\n newcolname = [fr.columns[e] + ' ' + str(fr.values[0][e]) for e, i in enumerate(fr.columns)]\n fr.columns = newcolname\n fr = fr.drop(fr.index[0])\n bla = pd.Series(fr['Date -'].values + ' ' + fr['Time -'].values)\n fr.index = bla.map(lambda x: datetime.datetime.strptime(x, '%m/%d/%Y %I:%M:%S.%f %p'))\n fr = fr.drop(['Date -', 'Time -'], axis=1)\n return fr\n\n\n\n\n# def _getBinCenters(frame, binedges=False, log=False):\n# \"\"\"\n# LAS gives the bin edges, this calculates the bin centers.\n# if log is True, the center will be with respect to the log10 ... 
log(d_{n+1})-log(d_{n})\n# if binedges is True, frame is not really a frame but the binedges (array with dtype=float)\n# Make sure you are running \"removeHousekeeping\" first\n# \"\"\"\n# frame = frame.copy()\n#\n# if binedges:\n# if log:\n# binCenters = 10**((np.log10(frame[:-1]) + np.log10(frame[1:]))/2.)\n# else:\n#\n# binCenters = (frame[:-1] + frame[1:])/2.\n# else:\n# binCenters = np.zeros(frame.keys().shape)\n# for e, i in enumerate(frame.keys()):\n# bin_s, bin_e = i.split(' ')\n# bin_s = float(bin_s)\n# bin_e = float(bin_e)\n# normTo = bin_e - bin_s\n# frame[i] = frame[i].divide(normTo)\n# if log:\n# binCenters[e] = 10**((np.log10(bin_e) + np.log10(bin_s))/2.)\n# else:\n# binCenters[e] = (bin_e + bin_s)/2.\n# return binCenters\n\n\n\n\n\n# def getTimeIntervalFromFrame(frame, start, end):\n# \"\"\"cutes out a particular time interval from frame.\n# e.g.: getTimeIntervalFromFrame(frame,'2014-10-31 18:10:00','2014-10-31 18:10:00')\"\"\"\n# frame = frame.copy()\n# if start:\n# frame = frame.truncate(before = start)\n#\n# if end:\n# frame = frame.truncate(after = end)\n#\n# return frame\n\n#\n# def frame2singleDistribution(frame):\n# frame = frame.copy()\n# singleHist = np.zeros(frame.shape[1])\n# for i in xrange(frame.shape[1]):\n# singleHist[i] = np.nansum(frame.values[:,i])\n# singleHist /= frame.shape[0]\n# return singleHist\n\n\ndef _string2Dataframe(data):\n sb = io(data)\n dataFrame = pd.read_csv(sb, sep=' ', names=('d', 'amp')).sort('d')\n return dataFrame\n\n\ndef read_Calibration_fromString(data):\n '''\n unit of diameter must be nm\ndata = \"\"\"140 88\n150 102\n173 175\n200 295\n233 480\n270 740\n315 880\n365 1130\n420 1350\n490 1930\n570 3050\n660 4200\n770 5100\n890 6300\n1040 8000\n1200 8300\n1400 10000\n1600 11500\n1880 16000\n2180 21000\n2500 28000\n3000 37000\"\"\"\n '''\n \n dataFrame = _string2Dataframe(data)\n calibrationInstance = calibration(dataFrame)\n return calibrationInstance\n\n\ndef save_Calibration(calibrationInstance, fname):\n \"\"\"should be saved hier cd ~/data/POPS_calibrations/\"\"\"\n calibrationInstance.data.to_csv(fname, index = False)\n return\n\n# def plot_distMap_LAS(fr_d,binEdgensLAS_d):\n# binCenters = getBinCenters(binEdgensLAS_d , binedges= True, log = True)\n# TIME_LAS,D_LAS,DNDP_LAS = frameToXYZ(fr_d, binCenters)\n# f,a = plt.subplots()\n# pcIm = a.pcolormesh(TIME_LAS,D_LAS,\n# DNDP_LAS,\n# norm = LogNorm(),#vmin = 3,vmax = distZoom.data.values.max()),#vmin = 1e-5),\n# # cmap=plt.cm.RdYlBu_r,\n# # cmap = plt.cm.terrain_r,\n# cmap = hm.get_colorMap_intensity(),#plt.cm.hot_r, #PuBuGn,\n# # shading='gouraud',\n# )\n# a.semilogy()\n# a.set_ylim((150,2500))\n# a.set_ylabel('Diameter (nm)')\n# a.set_xlabel('Time')\n# a.set_title('LAS')\n# cb = f.colorbar(pcIm)\n# cb.set_label(\"Particle number (cm$^{-3}\\,$s$^{-1}$)\")\n# f.autofmt_xdate()\n# # a.yaxis.set_minor_formatter(FormatStrFormatter(\"%i\"))\n# # a.yaxis.set_major_formatter(FormatStrFormatter(\"%i\"))\n\n \nclass calibration:\n def __init__(self,dataTabel):\n self.data = dataTabel\n self.calibrationFunction = self.get_calibrationFunctionSpline()\n\n def save_csv(self,fname):\n save_Calibration(self,fname)\n return\n \n def get_calibrationFunctionSpline(self, fitOrder=1):\n \"\"\"\n Performes a spline fit/smoothening (scipy.interpolate.UnivariateSpline) of d over amp (yes this way not the other way around).\n \n Returns (generates): creates a function self.spline which can later be used to calculate d from amp \n \n Optional Parameters:\n \\t s: int - oder of the spline function\n \\t 
noOfPts: int - length of generated graph\n \\t plot: boolean - if result is supposed to be plotted\n \"\"\"\n\n # The following two step method is necessary to get a smooth curve. \n #When I only do the second step on the cal_curve I get some wired whiggles\n ##### First Step\n if (self.data.amp.values[1:]-self.data.amp.values[:-1]).min() < 0:\n warnings.warn('The data represent a non injective function! This will not work. plot the calibration to see what I meen') \n\n sf = UnivariateSpline(self.data.d.values, self.data.amp.values, s=fitOrder)\n d = np.logspace(np.log10(self.data.d.values.min()), np.log10(self.data.d.values.max()), 500)\n amp = sf(d)\n \n # second step\n cal_function = UnivariateSpline(amp, d, s=fitOrder)\n return cal_function\n \n def plot_calibration(self):\n \"\"\"Plots the calibration function and data\n Arguments\n ------------\n cal: calibration instance\n \n Returns\n ------------\n figure\n axes\n calibration data graph\n calibration function graph\n \"\"\"\n cal_function = self.calibrationFunction\n amp = np.logspace(np.log10(self.data.amp.min()), np.log10(self.data.amp.max()), 500)\n d = cal_function(amp)\n \n f, a = plt.subplots()\n \n cal_data, = a.plot(self.data.d, self.data.amp, 'o', label='data',)\n cal_func, = a.plot(d, amp, label='function')\n \n a.loglog()\n \n a.set_xlim(0.9*self.data.d.min(), 1.1*self.data.d.max())\n a.set_xlabel('Diameter (nm)')\n \n a.set_ylim(0.9*self.data.amp.min(), 1.1*self.data.amp.max()) \n a.set_ylabel('Amplitude (digitizer bins)')\n \n a.set_title('Calibration curve')\n a.legend(loc = 2)\n return f, a, cal_data, cal_func"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475217,"cells":{"repo_name":{"kind":"string","value":"WhireCrow/openwrt-mt7620"},"path":{"kind":"string","value":"staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/urlparse.py"},"copies":{"kind":"string","value":"32"},"size":{"kind":"string","value":"14414"},"content":{"kind":"string","value":"\"\"\"Parse (absolute and relative) URLs.\n\nurlparse module is based upon the following RFC specifications.\n\nRFC 3986 (STD66): \"Uniform Resource Identifiers\" by T. Berners-Lee, R. Fielding\nand L. Masinter, January 2005.\n\nRFC 2732 : \"Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter\nand L.Masinter, December 1999.\n\nRFC 2396: \"Uniform Resource Identifiers (URI)\": Generic Syntax by T.\nBerners-Lee, R. Fielding, and L. Masinter, August 1998.\n\nRFC 2368: \"The mailto URL scheme\", by P.Hoffman , L Masinter, J. Zwinski, July 1998.\n\nRFC 1808: \"Relative Uniform Resource Locators\", by R. Fielding, UC Irvine, June\n1995.\n\nRFC 1738: \"Uniform Resource Locators (URL)\" by T. Berners-Lee, L. Masinter, M.\nMcCahill, December 1994\n\nRFC 3986 is considered the current standard and any future changes to\nurlparse module should conform with it. The urlparse module is\ncurrently not entirely compliant with this RFC due to defacto\nscenarios for parsing, and for backward compatibility purposes, some\nparsing quirks from older RFCs are retained. 
The testcases in\ntest_urlparse.py provide a good indicator of parsing behavior.\n\n\"\"\"\n\n__all__ = [\"urlparse\", \"urlunparse\", \"urljoin\", \"urldefrag\",\n \"urlsplit\", \"urlunsplit\", \"parse_qs\", \"parse_qsl\"]\n\n# A classification of schemes ('' means apply by default)\nuses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',\n 'wais', 'file', 'https', 'shttp', 'mms',\n 'prospero', 'rtsp', 'rtspu', '', 'sftp',\n 'svn', 'svn+ssh']\nuses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',\n 'imap', 'wais', 'file', 'mms', 'https', 'shttp',\n 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',\n 'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']\nnon_hierarchical = ['gopher', 'hdl', 'mailto', 'news',\n 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']\nuses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',\n 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',\n 'mms', '', 'sftp']\nuses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',\n 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']\nuses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',\n 'nntp', 'wais', 'https', 'shttp', 'snews',\n 'file', 'prospero', '']\n\n# Characters valid in scheme names\nscheme_chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789'\n '+-.')\n\nMAX_CACHE_SIZE = 20\n_parse_cache = {}\n\ndef clear_cache():\n \"\"\"Clear the parse cache.\"\"\"\n _parse_cache.clear()\n\n\nclass ResultMixin(object):\n \"\"\"Shared methods for the parsed result objects.\"\"\"\n\n @property\n def username(self):\n netloc = self.netloc\n if \"@\" in netloc:\n userinfo = netloc.rsplit(\"@\", 1)[0]\n if \":\" in userinfo:\n userinfo = userinfo.split(\":\", 1)[0]\n return userinfo\n return None\n\n @property\n def password(self):\n netloc = self.netloc\n if \"@\" in netloc:\n userinfo = netloc.rsplit(\"@\", 1)[0]\n if \":\" in userinfo:\n return userinfo.split(\":\", 1)[1]\n return None\n\n @property\n def hostname(self):\n netloc = self.netloc.split('@')[-1]\n if '[' in netloc and ']' in netloc:\n return netloc.split(']')[0][1:].lower()\n elif ':' in netloc:\n return netloc.split(':')[0].lower()\n elif netloc == '':\n return None\n else:\n return netloc.lower()\n\n @property\n def port(self):\n netloc = self.netloc.split('@')[-1].split(']')[-1]\n if ':' in netloc:\n port = netloc.split(':')[1]\n return int(port, 10)\n else:\n return None\n\nfrom collections import namedtuple\n\nclass SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):\n\n __slots__ = ()\n\n def geturl(self):\n return urlunsplit(self)\n\n\nclass ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):\n\n __slots__ = ()\n\n def geturl(self):\n return urlunparse(self)\n\n\ndef urlparse(url, scheme='', allow_fragments=True):\n \"\"\"Parse a URL into 6 components:\n <scheme>://<netloc>/<path>;<params>?<query>#<fragment>\n Return a 6-tuple: (scheme, netloc, path, params, query, fragment).\n Note that we don't break the components up in smaller bits\n (e.g. 
netloc is a single string) and we don't expand % escapes.\"\"\"\n tuple = urlsplit(url, scheme, allow_fragments)\n scheme, netloc, url, query, fragment = tuple\n if scheme in uses_params and ';' in url:\n url, params = _splitparams(url)\n else:\n params = ''\n return ParseResult(scheme, netloc, url, params, query, fragment)\n\ndef _splitparams(url):\n if '/' in url:\n i = url.find(';', url.rfind('/'))\n if i < 0:\n return url, ''\n else:\n i = url.find(';')\n return url[:i], url[i+1:]\n\ndef _splitnetloc(url, start=0):\n delim = len(url) # position of end of domain part of url, default is end\n for c in '/?#': # look for delimiters; the order is NOT important\n wdelim = url.find(c, start) # find first of this delim\n if wdelim >= 0: # if found\n delim = min(delim, wdelim) # use earliest delim position\n return url[start:delim], url[delim:] # return (domain, rest)\n\ndef urlsplit(url, scheme='', allow_fragments=True):\n \"\"\"Parse a URL into 5 components:\n <scheme>://<netloc>/<path>?<query>#<fragment>\n Return a 5-tuple: (scheme, netloc, path, query, fragment).\n Note that we don't break the components up in smaller bits\n (e.g. netloc is a single string) and we don't expand % escapes.\"\"\"\n allow_fragments = bool(allow_fragments)\n key = url, scheme, allow_fragments, type(url), type(scheme)\n cached = _parse_cache.get(key, None)\n if cached:\n return cached\n if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth\n clear_cache()\n netloc = query = fragment = ''\n i = url.find(':')\n if i > 0:\n if url[:i] == 'http': # optimize the common case\n scheme = url[:i].lower()\n url = url[i+1:]\n if url[:2] == '//':\n netloc, url = _splitnetloc(url, 2)\n if (('[' in netloc and ']' not in netloc) or\n (']' in netloc and '[' not in netloc)):\n raise ValueError(\"Invalid IPv6 URL\")\n if allow_fragments and '#' in url:\n url, fragment = url.split('#', 1)\n if '?' in url:\n url, query = url.split('?', 1)\n v = SplitResult(scheme, netloc, url, query, fragment)\n _parse_cache[key] = v\n return v\n for c in url[:i]:\n if c not in scheme_chars:\n break\n else:\n try:\n # make sure \"url\" is not actually a port number (in which case\n # \"scheme\" is really part of the path)\n _testportnum = int(url[i+1:])\n except ValueError:\n scheme, url = url[:i].lower(), url[i+1:]\n\n if url[:2] == '//':\n netloc, url = _splitnetloc(url, 2)\n if (('[' in netloc and ']' not in netloc) or\n (']' in netloc and '[' not in netloc)):\n raise ValueError(\"Invalid IPv6 URL\")\n if allow_fragments and scheme in uses_fragment and '#' in url:\n url, fragment = url.split('#', 1)\n if scheme in uses_query and '?' in url:\n url, query = url.split('?', 1)\n v = SplitResult(scheme, netloc, url, query, fragment)\n _parse_cache[key] = v\n return v\n\ndef urlunparse(data):\n \"\"\"Put a parsed URL back together again. This may result in a\n slightly different, but equivalent URL, if the URL that was parsed\n originally had redundant delimiters, e.g. a ? with an empty query\n (the draft states that these are equivalent).\"\"\"\n scheme, netloc, url, params, query, fragment = data\n if params:\n url = \"%s;%s\" % (url, params)\n return urlunsplit((scheme, netloc, url, query, fragment))\n\ndef urlunsplit(data):\n \"\"\"Combine the elements of a tuple as returned by urlsplit() into a\n complete URL as a string. The data argument can be any five-item iterable.\n This may result in a slightly different, but equivalent URL, if the URL that\n was parsed originally had unnecessary delimiters (for example, a ? 
with an\n empty query; the RFC states that these are equivalent).\"\"\"\n scheme, netloc, url, query, fragment = data\n if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):\n if url and url[:1] != '/': url = '/' + url\n url = '//' + (netloc or '') + url\n if scheme:\n url = scheme + ':' + url\n if query:\n url = url + '?' + query\n if fragment:\n url = url + '#' + fragment\n return url\n\ndef urljoin(base, url, allow_fragments=True):\n \"\"\"Join a base URL and a possibly relative URL to form an absolute\n interpretation of the latter.\"\"\"\n if not base:\n return url\n if not url:\n return base\n bscheme, bnetloc, bpath, bparams, bquery, bfragment = \\\n urlparse(base, '', allow_fragments)\n scheme, netloc, path, params, query, fragment = \\\n urlparse(url, bscheme, allow_fragments)\n if scheme != bscheme or scheme not in uses_relative:\n return url\n if scheme in uses_netloc:\n if netloc:\n return urlunparse((scheme, netloc, path,\n params, query, fragment))\n netloc = bnetloc\n if path[:1] == '/':\n return urlunparse((scheme, netloc, path,\n params, query, fragment))\n if not path and not params:\n path = bpath\n params = bparams\n if not query:\n query = bquery\n return urlunparse((scheme, netloc, path,\n params, query, fragment))\n segments = bpath.split('/')[:-1] + path.split('/')\n # XXX The stuff below is bogus in various ways...\n if segments[-1] == '.':\n segments[-1] = ''\n while '.' in segments:\n segments.remove('.')\n while 1:\n i = 1\n n = len(segments) - 1\n while i < n:\n if (segments[i] == '..'\n and segments[i-1] not in ('', '..')):\n del segments[i-1:i+1]\n break\n i = i+1\n else:\n break\n if segments == ['', '..']:\n segments[-1] = ''\n elif len(segments) >= 2 and segments[-1] == '..':\n segments[-2:] = ['']\n return urlunparse((scheme, netloc, '/'.join(segments),\n params, query, fragment))\n\ndef urldefrag(url):\n \"\"\"Removes any existing fragment from URL.\n\n Returns a tuple of the defragmented URL and the fragment. If\n the URL contained no fragments, the second element is the\n empty string.\n \"\"\"\n if '#' in url:\n s, n, p, a, q, frag = urlparse(url)\n defrag = urlunparse((s, n, p, a, q, ''))\n return defrag, frag\n else:\n return url, ''\n\n# unquote method for parse_qs and parse_qsl\n# Cannot use directly from urllib as it would create a circular reference\n# because urllib uses urlparse methods (urljoin). If you update this function,\n# update it also in urllib. This code duplication does not exist in Python3.\n\n_hexdig = '0123456789ABCDEFabcdef'\n_hextochr = dict((a+b, chr(int(a+b,16)))\n for a in _hexdig for b in _hexdig)\n\ndef unquote(s):\n \"\"\"unquote('abc%20def') -> 'abc def'.\"\"\"\n res = s.split('%')\n # fastpath\n if len(res) == 1:\n return s\n s = res[0]\n for item in res[1:]:\n try:\n s += _hextochr[item[:2]] + item[2:]\n except KeyError:\n s += '%' + item\n except UnicodeDecodeError:\n s += unichr(int(item[:2], 16)) + item[2:]\n return s\n\ndef parse_qs(qs, keep_blank_values=0, strict_parsing=0):\n \"\"\"Parse a query given as a string argument.\n\n Arguments:\n\n qs: percent-encoded query string to be parsed\n\n keep_blank_values: flag indicating whether blank values in\n percent-encoded queries should be treated as blank strings.\n A true value indicates that blanks should be retained as\n blank strings. 
The default false value indicates that\n blank values are to be ignored and treated as if they were\n not included.\n\n strict_parsing: flag indicating what to do with parsing errors.\n If false (the default), errors are silently ignored.\n If true, errors raise a ValueError exception.\n \"\"\"\n dict = {}\n for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):\n if name in dict:\n dict[name].append(value)\n else:\n dict[name] = [value]\n return dict\n\ndef parse_qsl(qs, keep_blank_values=0, strict_parsing=0):\n \"\"\"Parse a query given as a string argument.\n\n Arguments:\n\n qs: percent-encoded query string to be parsed\n\n keep_blank_values: flag indicating whether blank values in\n percent-encoded queries should be treated as blank strings. A\n true value indicates that blanks should be retained as blank\n strings. The default false value indicates that blank values\n are to be ignored and treated as if they were not included.\n\n strict_parsing: flag indicating what to do with parsing errors. If\n false (the default), errors are silently ignored. If true,\n errors raise a ValueError exception.\n\n Returns a list, as G-d intended.\n \"\"\"\n pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]\n r = []\n for name_value in pairs:\n if not name_value and not strict_parsing:\n continue\n nv = name_value.split('=', 1)\n if len(nv) != 2:\n if strict_parsing:\n raise ValueError, \"bad query field: %r\" % (name_value,)\n # Handle case of a control-name with no equal sign\n if keep_blank_values:\n nv.append('')\n else:\n continue\n if len(nv[1]) or keep_blank_values:\n name = unquote(nv[0].replace('+', ' '))\n value = unquote(nv[1].replace('+', ' '))\n r.append((name, value))\n\n return r\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475218,"cells":{"repo_name":{"kind":"string","value":"dpla/zen"},"path":{"kind":"string","value":"lib/akamod/__init__.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1845"},"content":{"kind":"string","value":"#zen.akamod\n\n#Some helper classes for accessing local Zen/Akara services\nimport urllib\n\nfrom amara.thirdparty import httplib2, json\nfrom akara import logger\nfrom akara import request\nfrom akara.caching import cache\nfrom akara import global_config\nfrom akara.util import find_peer_service\n\nclass geolookup_service(object):\n '''\n Convenience for calling the local/peer geolookup service.\n\n Can only be called from within an Akara module handler. E.g. the following sample module:\n \n -- %< --\nfrom akara.services import simple_service\nfrom zen.akamod import geolookup_service\n\ngeolookup = geolookup_service()\n\n@simple_service(\"GET\", \"http://testing/report.get\")\ndef s(place):\n return repr(geolookup('Superior,CO'))\n -- %< --\n\n Then test: curl -i \"http://localhost:8880/s?place=Superior,CO\"\n '''\n def __init__(self):\n self.GEOLOOKUP_URI = find_peer_service(u'http://purl.org/com/zepheira/services/geolookup.json')\n self.H = httplib2.Http('/tmp/.cache')\n return\n\n def __call__(self, place):\n if not place:\n return None\n if isinstance(place, unicode):\n place = place.encode('utf-8')\n\n if not self.GEOLOOKUP_URI: setup()\n logger.debug('geolookup' + repr((place, self.GEOLOOKUP_URI)))\n resp, body = self.H.request(self.GEOLOOKUP_URI + '?' 
+ urllib.urlencode({'place': place}))\n logger.debug('geolookup result: {0}'.format(repr(body)))\n try:\n result = json.loads(body)\n return result\n #latlong = json.loads(body).itervalues().next()\n #return latlong\n except (ValueError, StopIteration), e:\n logger.debug(\"Not found: \" + repr(place))\n return None\n\n\n#GEOLOOKUP_CACHE = cache(\n# 'http://purl.org/com/zepheira/services/geolookup.json', expires=24*60*60)\n\n\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475219,"cells":{"repo_name":{"kind":"string","value":"geminy/aidear"},"path":{"kind":"string","value":"oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"11616"},"content":{"kind":"string","value":"# Copyright (C) 2010, 2012 Google Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Unit tests for printing.py.\"\"\"\n\nimport StringIO\nimport optparse\nimport sys\nimport unittest\n\nfrom webkitpy.common.host_mock import MockHost\n\nfrom webkitpy.common.system import logtesting\nfrom webkitpy.layout_tests import port\nfrom webkitpy.layout_tests.controllers import manager\nfrom webkitpy.layout_tests.models import test_expectations\nfrom webkitpy.layout_tests.models import test_failures\nfrom webkitpy.layout_tests.models import test_results\nfrom webkitpy.layout_tests.views import printing\n\n\ndef get_options(args):\n print_options = printing.print_options()\n option_parser = optparse.OptionParser(option_list=print_options)\n return option_parser.parse_args(args)\n\n\nclass TestUtilityFunctions(unittest.TestCase):\n\n def test_print_options(self):\n options, _ = get_options([])\n self.assertIsNotNone(options)\n\n\nclass FakeRunResults(object):\n\n def __init__(self, total=1, expected=1, unexpected=0, fake_results=None):\n fake_results = fake_results or []\n self.total = total\n self.expected = expected\n self.expected_failures = 0\n self.unexpected = unexpected\n self.expected_skips = 0\n self.results_by_name = {}\n total_run_time = 0\n for result in fake_results:\n self.results_by_name[result.shard_name] = result\n total_run_time += result.total_run_time\n self.run_time = total_run_time + 1\n\n\nclass FakeShard(object):\n\n def __init__(self, shard_name, total_run_time):\n self.shard_name = shard_name\n self.total_run_time = total_run_time\n\n\nclass Testprinter(unittest.TestCase):\n\n def assertEmpty(self, stream):\n self.assertFalse(stream.getvalue())\n\n def assertNotEmpty(self, stream):\n self.assertTrue(stream.getvalue())\n\n def assertWritten(self, stream, contents):\n self.assertEqual(stream.buflist, contents)\n\n def reset(self, stream):\n stream.buflist = []\n stream.buf = ''\n\n def get_printer(self, args=None):\n args = args or []\n printing_options = printing.print_options()\n option_parser = optparse.OptionParser(option_list=printing_options)\n options, args = option_parser.parse_args(args)\n host = MockHost()\n self._port = host.port_factory.get('test', options)\n\n regular_output = StringIO.StringIO()\n printer = printing.Printer(self._port, options, regular_output)\n return printer, regular_output\n\n def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):\n failures = []\n if result_type == test_expectations.TIMEOUT:\n failures = [test_failures.FailureTimeout()]\n elif result_type == test_expectations.CRASH:\n failures = [test_failures.FailureCrash()]\n return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)\n\n def test_configure_and_cleanup(self):\n # This test verifies that calling cleanup repeatedly and deleting\n # the object is safe.\n printer, _ = self.get_printer()\n printer.cleanup()\n printer.cleanup()\n printer = None\n\n def test_print_config(self):\n printer, err = self.get_printer()\n # FIXME: Make it so these options don't have to be set directly.\n # 
pylint: disable=protected-access\n printer._options.pixel_tests = True\n printer._options.new_baseline = True\n printer._options.time_out_ms = 6000\n printer._options.slow_time_out_ms = 12000\n printer._options.order = 'random'\n printer._options.seed = 1234\n printer.print_config('/tmp')\n self.assertIn(\"Using port 'test-mac-mac10.10'\", err.getvalue())\n self.assertIn('Test configuration: ', err.getvalue())\n self.assertIn('View the test results at file:///tmp', err.getvalue())\n self.assertIn('View the archived results dashboard at file:///tmp', err.getvalue())\n self.assertIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())\n self.assertIn('Using Release build', err.getvalue())\n self.assertIn('Pixel tests enabled', err.getvalue())\n self.assertIn('Command line:', err.getvalue())\n self.assertIn('Regular timeout: ', err.getvalue())\n self.assertIn('Using random order with seed: 1234', err.getvalue())\n\n self.reset(err)\n printer._options.quiet = True\n printer.print_config('/tmp')\n self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())\n\n def test_print_directory_timings(self):\n printer, err = self.get_printer()\n printer._options.debug_rwt_logging = True\n\n run_results = FakeRunResults()\n run_results.results_by_name = {\n \"slowShard\": FakeShard(\"slowShard\", 16),\n \"borderlineShard\": FakeShard(\"borderlineShard\", 15),\n \"fastShard\": FakeShard(\"fastShard\", 1),\n }\n\n printer._print_directory_timings(run_results)\n self.assertWritten(err, ['Time to process slowest subdirectories:\\n',\n ' slowShard took 16.0 seconds to run 1 tests.\\n', '\\n'])\n\n printer, err = self.get_printer()\n printer._options.debug_rwt_logging = True\n\n run_results.results_by_name = {\n \"borderlineShard\": FakeShard(\"borderlineShard\", 15),\n \"fastShard\": FakeShard(\"fastShard\", 1),\n }\n\n printer._print_directory_timings(run_results)\n self.assertWritten(err, [])\n\n def test_print_one_line_summary(self):\n def run_test(total, exp, unexp, shards, result):\n printer, err = self.get_printer(['--timing'] if shards else None)\n fake_results = FakeRunResults(total, exp, unexp, shards)\n total_time = fake_results.run_time + 1\n printer._print_one_line_summary(total_time, fake_results)\n self.assertWritten(err, result)\n\n # Without times:\n run_test(1, 1, 0, [], [\"The test ran as expected.\\n\", \"\\n\"])\n run_test(2, 1, 1, [], [\"\\n\", \"1 test ran as expected, 1 didn't:\\n\", \"\\n\"])\n run_test(3, 2, 1, [], [\"\\n\", \"2 tests ran as expected, 1 didn't:\\n\", \"\\n\"])\n run_test(3, 2, 0, [], [\"\\n\", \"2 tests ran as expected (1 didn't run).\\n\", \"\\n\"])\n\n # With times:\n fake_shards = [FakeShard(\"foo\", 1), FakeShard(\"bar\", 2)]\n run_test(1, 1, 0, fake_shards, [\"The test ran as expected in 5.00s (2.00s in rwt, 1x).\\n\", \"\\n\"])\n run_test(2, 1, 1, fake_shards, [\"\\n\", \"1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\\n\", \"\\n\"])\n run_test(3, 2, 1, fake_shards, [\"\\n\", \"2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\\n\", \"\\n\"])\n run_test(3, 2, 0, fake_shards, [\"\\n\", \"2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\\n\", \"\\n\"])\n\n def test_test_status_line(self):\n printer, _ = self.get_printer()\n printer._meter.number_of_columns = lambda: 80\n actual = printer._test_status_line(\n 'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')\n self.assertEqual(80, 
len(actual))\n self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')\n\n printer._meter.number_of_columns = lambda: 89\n actual = printer._test_status_line(\n 'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')\n self.assertEqual(89, len(actual))\n self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')\n\n printer._meter.number_of_columns = lambda: sys.maxsize\n actual = printer._test_status_line(\n 'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')\n self.assertEqual(90, len(actual))\n self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')\n\n printer._meter.number_of_columns = lambda: 18\n actual = printer._test_status_line(\n 'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')\n self.assertEqual(18, len(actual))\n self.assertEqual(actual, '[0/0] f...l passed')\n\n printer._meter.number_of_columns = lambda: 10\n actual = printer._test_status_line(\n 'fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')\n self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')\n\n def test_details(self):\n printer, err = self.get_printer(['--details'])\n result = self.get_result('passes/image.html')\n printer.print_started_test('passes/image.html')\n printer.print_finished_test(result, expected=False, exp_str='', got_str='')\n self.assertNotEmpty(err)\n\n def test_print_found(self):\n printer, err = self.get_printer()\n\n printer.print_found(100, 10, 1, 1)\n self.assertWritten(err, [\"Found 100 tests; running 10, skipping 90.\\n\"])\n\n self.reset(err)\n printer.print_found(100, 10, 2, 3)\n self.assertWritten(err, [\"Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\\n\"])\n\n def test_debug_rwt_logging_is_throttled(self):\n printer, err = self.get_printer(['--debug-rwt-logging'])\n\n result = self.get_result('passes/image.html')\n printer.print_started_test('passes/image.html')\n printer.print_finished_test(result, expected=True, exp_str='', got_str='')\n\n printer.print_started_test('passes/text.html')\n result = self.get_result('passes/text.html')\n printer.print_finished_test(result, expected=True, exp_str='', got_str='')\n\n # Only the first test's start should be printed.\n lines = err.buflist\n self.assertEqual(len(lines), 1)\n self.assertTrue(lines[0].endswith('passes/image.html\\n'))\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475220,"cells":{"repo_name":{"kind":"string","value":"lakshayg/tensorflow"},"path":{"kind":"string","value":"tensorflow/python/debug/cli/cli_config_test.py"},"copies":{"kind":"string","value":"68"},"size":{"kind":"string","value":"5541"},"content":{"kind":"string","value":"# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for cli_config.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport shutil\nimport tempfile\n\nfrom tensorflow.python.debug.cli import cli_config\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import googletest\n\n\nclass CLIConfigTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._tmp_dir = tempfile.mkdtemp()\n self._tmp_config_path = os.path.join(self._tmp_dir, \".tfdbg_config\")\n self.assertFalse(gfile.Exists(self._tmp_config_path))\n super(CLIConfigTest, self).setUp()\n\n def tearDown(self):\n shutil.rmtree(self._tmp_dir)\n super(CLIConfigTest, self).tearDown()\n\n def testConstructCLIConfigWithoutFile(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n self.assertEqual(20, config.get(\"graph_recursion_depth\"))\n self.assertEqual(True, config.get(\"mouse_mode\"))\n with self.assertRaises(KeyError):\n config.get(\"property_that_should_not_exist\")\n self.assertTrue(gfile.Exists(self._tmp_config_path))\n\n def testCLIConfigForwardCompatibilityTest(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n with open(self._tmp_config_path, \"rt\") as f:\n config_json = json.load(f)\n # Remove a field to simulate forward compatibility test.\n del config_json[\"graph_recursion_depth\"]\n with open(self._tmp_config_path, \"wt\") as f:\n json.dump(config_json, f)\n\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n self.assertEqual(20, config.get(\"graph_recursion_depth\"))\n\n def testModifyConfigValue(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n config.set(\"graph_recursion_depth\", 9)\n config.set(\"mouse_mode\", False)\n self.assertEqual(9, config.get(\"graph_recursion_depth\"))\n self.assertEqual(False, config.get(\"mouse_mode\"))\n\n def testModifyConfigValueWithTypeCasting(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n config.set(\"graph_recursion_depth\", \"18\")\n config.set(\"mouse_mode\", \"false\")\n self.assertEqual(18, config.get(\"graph_recursion_depth\"))\n self.assertEqual(False, config.get(\"mouse_mode\"))\n\n def testModifyConfigValueWithTypeCastingFailure(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n with self.assertRaises(ValueError):\n config.set(\"mouse_mode\", \"maybe\")\n\n def testLoadFromModifiedConfigFile(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n config.set(\"graph_recursion_depth\", 9)\n config.set(\"mouse_mode\", False)\n config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n self.assertEqual(9, config2.get(\"graph_recursion_depth\"))\n self.assertEqual(False, 
config2.get(\"mouse_mode\"))\n\n def testSummarizeFromConfig(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n output = config.summarize()\n self.assertEqual(\n [\"Command-line configuration:\",\n \"\",\n \" graph_recursion_depth: %d\" % config.get(\"graph_recursion_depth\"),\n \" mouse_mode: %s\" % config.get(\"mouse_mode\")], output.lines)\n\n def testSummarizeFromConfigWithHighlight(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n output = config.summarize(highlight=\"mouse_mode\")\n self.assertEqual(\n [\"Command-line configuration:\",\n \"\",\n \" graph_recursion_depth: %d\" % config.get(\"graph_recursion_depth\"),\n \" mouse_mode: %s\" % config.get(\"mouse_mode\")], output.lines)\n self.assertEqual((2, 12, [\"underline\", \"bold\"]),\n output.font_attr_segs[3][0])\n self.assertEqual((14, 18, \"bold\"), output.font_attr_segs[3][1])\n\n def testSetCallback(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n\n test_value = {\"graph_recursion_depth\": -1}\n def callback(config):\n test_value[\"graph_recursion_depth\"] = config.get(\"graph_recursion_depth\")\n config.set_callback(\"graph_recursion_depth\", callback)\n\n config.set(\"graph_recursion_depth\", config.get(\"graph_recursion_depth\") - 1)\n self.assertEqual(test_value[\"graph_recursion_depth\"],\n config.get(\"graph_recursion_depth\"))\n\n def testSetCallbackInvalidPropertyName(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n\n with self.assertRaises(KeyError):\n config.set_callback(\"nonexistent_property_name\", print)\n\n def testSetCallbackNotCallable(self):\n config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)\n\n with self.assertRaises(TypeError):\n config.set_callback(\"graph_recursion_depth\", 1)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475221,"cells":{"repo_name":{"kind":"string","value":"caosmo/pip"},"path":{"kind":"string","value":"pip/_vendor/requests/packages/chardet/langgreekmodel.py"},"copies":{"kind":"string","value":"2763"},"size":{"kind":"string","value":"12628"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is Mozilla Communicator client code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 1998\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\n# 255: Control characters that usually does not exist in any text\n# 254: Carriage/Return\n# 253: symbol (punctuation) that does not belong to word\n# 252: 0 - 9\n\n# Character Mapping Table:\nLatin7_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40\n 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50\n253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60\n 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90\n253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0\n253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0\n110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0\n 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0\n124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0\n 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0\n)\n\nwin1253_CharToOrderMap = (\n255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10\n253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20\n252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30\n253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40\n 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50\n253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60\n 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80\n255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90\n253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0\n253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0\n110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0\n 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0\n124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0\n 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0\n)\n\n# Model Table:\n# total sequences: 100%\n# first 512 sequences: 98.2851%\n# first 1024 sequences:1.7001%\n# rest sequences: 0.0359%\n# negative sequences: 0.0148%\nGreekLangModel = 
(\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,\n3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,\n2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,\n0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,\n2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,\n0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,\n2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,\n2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,\n0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,\n2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,\n0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,\n3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,\n3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,\n2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,\n2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,\n0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,\n0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,\n0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,\n0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,\n0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,\n0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,\n0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,\n0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,\n0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,\n0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,\n0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,\n0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,\n0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,\n0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,\n0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,\n0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,\n0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,\n0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,\n0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,\n0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,\n0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,\n0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,\n0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,\n0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,\n0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,\n0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,\n0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,\n0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,\n0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,\n0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,\n0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,\n0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,\n0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,\n0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0
,0,0,2,0,0,0,0,0,0,\n0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,\n0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,\n0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,\n0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,\n0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,\n0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n)\n\nLatin7GreekModel = {\n 'charToOrderMap': Latin7_CharToOrderMap,\n 'precedenceMatrix': GreekLangModel,\n 'mTypicalPositiveRatio': 0.982851,\n 'keepEnglishLetter': False,\n 'charsetName': \"ISO-8859-7\"\n}\n\nWin1253GreekModel = {\n 'charToOrderMap': win1253_CharToOrderMap,\n 'precedenceMatrix': GreekLangModel,\n 'mTypicalPositiveRatio': 0.982851,\n 'keepEnglishLetter': False,\n 'charsetName': \"windows-1253\"\n}\n\n# flake8: noqa\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475222,"cells":{"repo_name":{"kind":"string","value":"marrow/web.dispatch.route"},"path":{"kind":"string","value":"test/test_router.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1820"},"content":{"kind":"string","value":"# encoding: utf-8\n\nimport pytest\n\nfrom web.dispatch.route.router import __DYNAMIC__, Router\n\nfrom sample import Root\n\n\n@pytest.fixture\ndef router():\n\treturn Router.from_object(Root)\n\n\ndef test_dynamic_repr():\n\tassert repr(__DYNAMIC__) == ''\n\n\ndef test_router_singleton():\n\tassert Router.from_object(Root) is Router.from_object(Root)\n\n\ndef test_invalid_route():\n\trouter = Router()\n\t\n\twith pytest.raises(ValueError):\n\t\trouter.parse(\"{bad:/}\")\n\n\nclass TestRouterSample(object):\n\tdef test_single_static(self, router):\n\t\tassert len(router.routes) == 1 # There's only a single top-level element.\n\t\tassert 'user' in router.routes # It's \"user\".\n\t\tassert len(router.routes['user']) == 2 # Which has a terminus and dynamic continuation.\n\t\tassert router.routes['user'][None] == Root.root # The terminus is the \"root\" method.\n\t\tassert router.routes['user'][None](Root()) == \"I'm all people.\" # It really is.\n\t\n\tdef test_dynamic_username(self, router):\n\t\tassert __DYNAMIC__ in router.routes['user']\n\t\t\n\t\tdynamic = router.routes['user'][__DYNAMIC__]\n\t\tassert len(dynamic) == 1\n\t\t\n\t\tassert list(dynamic.keys())[0].match(\"GothAlice\") # The regular expression matches.\n\t\t\n\t\tassert len(list(dynamic.values())[0]) == 2\n\t\t\n\t\tassert list(dynamic.values())[0][None] == Root.user\n\t\tassert list(dynamic.values())[0][None](Root(), \"GothAlice\") == \"Hi, I'm GothAlice\"\n\t\n\tdef 
test_dynamic_username_action(self, router):\n\t\tassert __DYNAMIC__ in router.routes['user']\n\t\t\n\t\tdynamic = router.routes['user'][__DYNAMIC__]\n\t\tassert len(dynamic) == 1\n\t\t\n\t\tassert list(dynamic.keys())[0].match(\"GothAlice\") # The regular expression matches.\n\t\t\n\t\tassert len(list(dynamic.values())[0]) == 2\n\t\t\n\t\tassert list(dynamic.values())[0][None] == Root.user\n\t\tassert list(dynamic.values())[0][None](Root(), \"GothAlice\") == \"Hi, I'm GothAlice\"\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475223,"cells":{"repo_name":{"kind":"string","value":"saeschdivara/ArangoPy"},"path":{"kind":"string","value":"arangodb/tests/api/collection.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2502"},"content":{"kind":"string","value":"import unittest\n\nfrom arangodb.api import Database, Collection\n\n\nclass CollectionTestCase(unittest.TestCase):\n def setUp(self):\n self.database_name = 'testcase_collection_123'\n self.db = Database.create(name=self.database_name)\n\n def tearDown(self):\n Database.remove(name=self.database_name)\n\n def test_create_and_delete_collection_without_extra_db(self):\n\n collection_name = 'test_foo_123'\n\n col = Collection.create(name=collection_name)\n\n self.assertIsNotNone(col)\n\n Collection.remove(name=collection_name)\n\n def test_get_collection(self):\n\n collection_name = 'test_foo_123'\n\n col = Collection.create(name=collection_name)\n\n self.assertIsNotNone(col)\n\n retrieved_col = Collection.get_loaded_collection(name=collection_name)\n\n self.assertEqual(col.id, retrieved_col.id)\n self.assertEqual(col.name, retrieved_col.name)\n self.assertEqual(col.type, retrieved_col.type)\n\n Collection.remove(name=collection_name)\n\n def test_getting_new_info_for_collection(self):\n\n collection_name = 'test_foo_123'\n\n col = Collection.create(name=collection_name)\n\n retrieved_col = Collection.get_loaded_collection(name=collection_name)\n retrieved_col.set_data(waitForSync=True)\n retrieved_col.save()\n\n col.get()\n\n self.assertEqual(col.waitForSync, True)\n\n Collection.remove(name=collection_name)\n\n def test_different_document_revisions(self):\n\n collection_name = 'test_revision_documents'\n\n col = Collection.create(name=collection_name)\n doc1 = col.create_document()\n doc1.save()\n\n all_documents = col.documents()\n self.assertEqual(len(all_documents), 1)\n doc = all_documents[0]\n\n self.assertEqual(doc.revision, doc1.revision)\n\n doc.foo = 'bar'\n doc.save()\n\n self.assertNotEqual(doc.revision, doc1.revision)\n\n Collection.remove(name=collection_name)\n\n def test_remove_document_from_collection(self):\n\n collection_name = 'test_remove_document_from_collection'\n\n col = Collection.create(name=collection_name)\n doc1 = col.create_document()\n doc1.save()\n\n all_documents = col.documents()\n self.assertEqual(len(all_documents), 1)\n doc = all_documents[0]\n\n doc.delete()\n\n all_documents = col.documents()\n self.assertEqual(len(all_documents), 0)\n\n Collection.remove(name=collection_name)"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475224,"cells":{"repo_name":{"kind":"string","value":"mintar/ros-infrastructure-rosdistro"},"path":{"kind":"string","value":"src/rosdistro/source_repository_specification.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2465"},"content":{"kind":"string","value":"# Software License Agreement (BSD License)\n#\n# Copyright (c) 2014, Open Source Robotics Foundation, Inc.\n# All rights reserved.\n#\n# 
Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Open Source Robotics Foundation, Inc. nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom .repository_specification import RepositorySpecification\n\n\nclass SourceRepositorySpecification(RepositorySpecification):\n\n def __init__(self, name, data):\n super(SourceRepositorySpecification, self).__init__(name, data)\n\n self.test_commits = None\n if 'test_commits' in data:\n self.test_commits = bool(data['test_commits'])\n\n self.test_pull_requests = None\n if 'test_pull_requests' in data:\n self.test_pull_requests = bool(data['test_pull_requests'])\n\n def get_data(self):\n data = self._get_data(skip_git_type=False)\n if self.test_commits is not None:\n data['test_commits'] = self.test_commits\n if self.test_pull_requests is not None:\n data['test_pull_requests'] = self.test_pull_requests\n return data\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475225,"cells":{"repo_name":{"kind":"string","value":"mgagne/nova"},"path":{"kind":"string","value":"nova/baserpc.py"},"copies":{"kind":"string","value":"10"},"size":{"kind":"string","value":"2562"},"content":{"kind":"string","value":"#\n# Copyright 2013 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"\nBase RPC client and server common to all services.\n\"\"\"\n\nfrom oslo.config import cfg\nfrom oslo import messaging\nfrom oslo.serialization import jsonutils\n\nfrom nova import rpc\n\n\nCONF = cfg.CONF\nrpcapi_cap_opt = cfg.StrOpt('baseapi',\n help='Set a version cap for messages sent to the base api in any '\n 'service')\nCONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')\n\n_NAMESPACE = 'baseapi'\n\n\nclass BaseAPI(object):\n \"\"\"Client side of the base rpc API.\n\n API version history:\n\n 1.0 - Initial version.\n 1.1 - Add get_backdoor_port\n \"\"\"\n\n VERSION_ALIASES = {\n # baseapi was added in havana\n }\n\n def __init__(self, topic):\n super(BaseAPI, self).__init__()\n target = messaging.Target(topic=topic,\n namespace=_NAMESPACE,\n version='1.0')\n version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,\n CONF.upgrade_levels.baseapi)\n self.client = rpc.get_client(target, version_cap=version_cap)\n\n def ping(self, context, arg, timeout=None):\n arg_p = jsonutils.to_primitive(arg)\n cctxt = self.client.prepare(timeout=timeout)\n return cctxt.call(context, 'ping', arg=arg_p)\n\n def get_backdoor_port(self, context, host):\n cctxt = self.client.prepare(server=host, version='1.1')\n return cctxt.call(context, 'get_backdoor_port')\n\n\nclass BaseRPCAPI(object):\n \"\"\"Server side of the base RPC API.\"\"\"\n\n target = messaging.Target(namespace=_NAMESPACE, version='1.1')\n\n def __init__(self, service_name, backdoor_port):\n self.service_name = service_name\n self.backdoor_port = backdoor_port\n\n def ping(self, context, arg):\n resp = {'service': self.service_name, 'arg': arg}\n return jsonutils.to_primitive(resp)\n\n def get_backdoor_port(self, context):\n return self.backdoor_port\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475226,"cells":{"repo_name":{"kind":"string","value":"CatsAndDogsbvba/odoo"},"path":{"kind":"string","value":"addons/l10n_tr/__openerp__.py"},"copies":{"kind":"string","value":"259"},"size":{"kind":"string","value":"2056"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n{\n 'name': 'Turkey - Accounting',\n 'version': '1.beta',\n 'category': 'Localization/Account Charts',\n 'description': \"\"\"\nTürkiye için Tek düzen hesap planı şablonu OpenERP Modülü.\n==========================================================\n\nBu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır\n * Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket, banka hesap\n bilgileriniz, ilgili para birimi gibi bilgiler isteyecek.\n \"\"\",\n 'author': 'Ahmet Altınışık',\n 'maintainer':'https://launchpad.net/~openerp-turkey',\n 'website':'https://launchpad.net/openerp-turkey',\n 'depends': [\n 'account',\n 'base_vat',\n 'account_chart',\n ],\n 'data': [\n 'account_code_template.xml',\n 'account_tdhp_turkey.xml',\n 'account_tax_code_template.xml',\n 'account_chart_template.xml',\n 'account_tax_template.xml',\n 'l10n_tr_wizard.xml',\n ],\n 'demo': [],\n 'installable': True,\n}\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475227,"cells":{"repo_name":{"kind":"string","value":"abdulla-alali/CRE-NS3"},"path":{"kind":"string","value":"src/fd-net-device/bindings/modulegen__gcc_LP64.py"},"copies":{"kind":"string","value":"6"},"size":{"kind":"string","value":"300516"},"content":{"kind":"string","value":"from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers\n\n\nimport pybindgen.settings\nimport warnings\n\nclass ErrorHandler(pybindgen.settings.ErrorHandler):\n def handle_error(self, wrapper, exception, traceback_):\n warnings.warn(\"exception %r in wrapper %s\" % (exception, wrapper))\n return True\npybindgen.settings.error_handler = ErrorHandler()\n\n\nimport sys\n\ndef module_init():\n root_module = Module('ns.fd_net_device', cpp_namespace='::ns3')\n return root_module\n\ndef register_types(module):\n root_module = module.get_root()\n \n ## address.h (module 'network'): ns3::Address [class]\n module.add_class('Address', import_from_module='ns.network')\n ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]\n module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')\n ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]\n module.add_class('AsciiTraceHelper', import_from_module='ns.network')\n ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]\n module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')\n ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]\n module.add_class('AttributeConstructionList', import_from_module='ns.core')\n ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]\n module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])\n ## buffer.h (module 'network'): ns3::Buffer [class]\n module.add_class('Buffer', import_from_module='ns.network')\n ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]\n module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])\n ## packet.h (module 'network'): ns3::ByteTagIterator [class]\n module.add_class('ByteTagIterator', import_from_module='ns.network')\n ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]\n module.add_class('Item', 
import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])\n ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]\n module.add_class('ByteTagList', import_from_module='ns.network')\n ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]\n module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])\n ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]\n module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])\n ## callback.h (module 'core'): ns3::CallbackBase [class]\n module.add_class('CallbackBase', import_from_module='ns.core')\n ## system-mutex.h (module 'core'): ns3::CriticalSection [class]\n module.add_class('CriticalSection', import_from_module='ns.core')\n ## data-rate.h (module 'network'): ns3::DataRate [class]\n module.add_class('DataRate', import_from_module='ns.network')\n ## event-id.h (module 'core'): ns3::EventId [class]\n module.add_class('EventId', import_from_module='ns.core')\n ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]\n module.add_class('Ipv4Address', import_from_module='ns.network')\n ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]\n root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])\n ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]\n module.add_class('Ipv4Mask', import_from_module='ns.network')\n ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]\n module.add_class('Ipv6Address', import_from_module='ns.network')\n ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]\n root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]\n module.add_class('Ipv6Prefix', import_from_module='ns.network')\n ## mac48-address.h (module 'network'): ns3::Mac48Address [class]\n module.add_class('Mac48Address', import_from_module='ns.network')\n ## mac48-address.h (module 'network'): ns3::Mac48Address [class]\n root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])\n ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]\n module.add_class('NetDeviceContainer', import_from_module='ns.network')\n ## node-container.h (module 'network'): ns3::NodeContainer [class]\n module.add_class('NodeContainer', import_from_module='ns.network')\n ## object-base.h (module 'core'): ns3::ObjectBase [class]\n module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')\n ## object.h (module 'core'): ns3::ObjectDeleter [struct]\n module.add_class('ObjectDeleter', import_from_module='ns.core')\n ## object-factory.h (module 'core'): ns3::ObjectFactory [class]\n module.add_class('ObjectFactory', import_from_module='ns.core')\n ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]\n module.add_class('PacketMetadata', import_from_module='ns.network')\n ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]\n module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])\n ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]\n module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')\n ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]\n 
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])\n ## packet.h (module 'network'): ns3::PacketTagIterator [class]\n module.add_class('PacketTagIterator', import_from_module='ns.network')\n ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]\n module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])\n ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]\n module.add_class('PacketTagList', import_from_module='ns.network')\n ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]\n module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])\n ## pcap-file.h (module 'network'): ns3::PcapFile [class]\n module.add_class('PcapFile', import_from_module='ns.network')\n ## trace-helper.h (module 'network'): ns3::PcapHelper [class]\n module.add_class('PcapHelper', import_from_module='ns.network')\n ## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]\n module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')\n ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]\n module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simulator.h (module 'core'): ns3::Simulator [class]\n module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')\n ## system-condition.h (module 'core'): ns3::SystemCondition [class]\n module.add_class('SystemCondition', import_from_module='ns.core')\n ## system-mutex.h (module 'core'): ns3::SystemMutex [class]\n module.add_class('SystemMutex', import_from_module='ns.core')\n ## tag.h (module 'network'): ns3::Tag [class]\n module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])\n ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]\n module.add_class('TagBuffer', import_from_module='ns.network')\n ## type-id.h (module 'core'): ns3::TypeId [class]\n module.add_class('TypeId', import_from_module='ns.core')\n ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]\n module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]\n module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]\n module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])\n ## empty.h (module 'core'): ns3::empty [class]\n module.add_class('empty', import_from_module='ns.core')\n ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]\n module.add_class('int64x64_t', import_from_module='ns.core')\n ## chunk.h 
(module 'network'): ns3::Chunk [class]\n module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])\n ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper [class]\n module.add_class('FdNetDeviceHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])\n ## header.h (module 'network'): ns3::Header [class]\n module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])\n ## object.h (module 'core'): ns3::Object [class]\n module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])\n ## object.h (module 'core'): ns3::Object::AggregateIterator [class]\n module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])\n ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]\n module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount > [class]\n module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))\n ## system-thread.h (module 'core'): ns3::SystemThread [class]\n module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter >'])\n ## nstime.h (module 'core'): ns3::Time [class]\n module.add_class('Time', import_from_module='ns.core')\n ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]\n module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')\n ## nstime.h (module 'core'): ns3::Time [class]\n root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])\n ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]\n module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter >'])\n ## trailer.h (module 'network'): ns3::Trailer [class]\n module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])\n ## attribute.h (module 'core'): ns3::AttributeAccessor [class]\n module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter >'])\n ## attribute.h (module 'core'): ns3::AttributeChecker [class]\n module.add_class('AttributeChecker', 
allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-rate.h (module 'network'): ns3::DataRateChecker [class]
    module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## data-rate.h (module 'network'): ns3::DataRateValue [class]
    module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper [class]
    module.add_class('EmuFdNetDeviceHelper', parent=root_module['ns3::FdNetDeviceHelper'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
    module.add_class('FdReader', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::PlanetLabFdNetDeviceHelper [class]
    module.add_class('PlanetLabFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
    ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper [class]
    module.add_class('TapFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice [class]
    module.add_class('FdNetDevice', parent=root_module['ns3::NetDevice'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode [enumeration]
    module.add_enum('EncapsulationMode', ['DIX', 'LLC', 'DIXPI'], outer_class=root_module['ns3::FdNetDevice'])
    ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader [class]
    module.add_class('FdNetDeviceFdReader', parent=root_module['ns3::FdReader'])

    ## Register a nested module for the namespace FatalImpl

    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)


def register_types_ns3_FatalImpl(module):
    root_module = module.get_root()


def register_methods(root_module):
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
    register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
    register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3FdNetDeviceHelper_methods(root_module, root_module['ns3::FdNetDeviceHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
    register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EmuFdNetDeviceHelper_methods(root_module, root_module['ns3::EmuFdNetDeviceHelper'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3PlanetLabFdNetDeviceHelper_methods(root_module, root_module['ns3::PlanetLabFdNetDeviceHelper'])
    register_Ns3TapFdNetDeviceHelper_methods(root_module, root_module['ns3::TapFdNetDeviceHelper'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3FdNetDevice_methods(root_module, root_module['ns3::FdNetDevice'])
    register_Ns3FdNetDeviceFdReader_methods(root_module, root_module['ns3::FdNetDeviceFdReader'])
    return


def register_Ns3Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
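
# ----------------------------------------------------------------------
# Editorial note, not generated by PyBindGen: the registrations above are
# consumed by ns-3's build to emit C++ glue code, after which the classes
# become importable from Python. A minimal usage sketch, assuming the
# standard ns.core / ns.network / ns.fd_net_device module layout produced
# by the ns-3 build (`Install` follows the C++ helper API, not shown here):
#
#   import ns.core
#   import ns.network
#   import ns.fd_net_device
#
#   nodes = ns.network.NodeContainer()
#   nodes.Create(1)
#   helper = ns.fd_net_device.FdNetDeviceHelper()
#   devices = helper.Install(nodes)
# ----------------------------------------------------------------------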

def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream',
                   'ns3::Ptr< ns3::OutputStreamWrapper >',
                   [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultDropSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return
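
# Editorial sketch (assumes the built ns.network module): CreateFileStream
# wraps an output file in an OutputStreamWrapper so one stream can be
# shared by many trace sinks; the filemode default is std::ios_base::out.
#
#   ascii = ns.network.AsciiTraceHelper()
#   stream = ascii.CreateFileStream('fd-net-device.tr')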

def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
                   is_pure_virtual=True, is_virtual=True)
    return
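
# Editorial sketch: every EnableAscii overload above funnels into the pure
# virtual EnableAsciiInternal, which concrete device helpers (e.g. the
# FdNetDeviceHelper from this module) override. Hypothetical usage,
# reusing `helper`, `devices` and `stream` from the sketches above:
#
#   helper.EnableAsciiAll('fd-net-device')      # one .tr file per device
#   helper.EnableAscii(stream, devices.Get(0))  # or share a single stream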

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'bool',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'bool',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy',
                   'ns3::Buffer',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
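
# Editorial sketch of the Buffer protocol registered above (hypothetical
# values): space is reserved first, then read or written through an
# iterator obtained from Begin()/End() (see Buffer::Iterator below).
#
#   buf = ns.network.Buffer()
#   buf.AddAtStart(4)             # reserve 4 bytes at the head
#   it = buf.Begin()
#   it.WriteHtonU32(0xdeadbeef)   # write in network byte order
#   assert buf.GetSize() == 4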

def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagIterator::Item',
                   [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagList::Iterator::Item',
                   [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return

def register_Ns3CriticalSection_methods(root_module, cls):
    ## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
    ## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
    cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
    return

def register_Ns3DataRate_methods(root_module, cls):
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('>=')
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor]
    cls.add_constructor([param('uint64_t', 'bps')])
    ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor]
    cls.add_constructor([param('std::string', 'rate')])
    ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function]
    cls.add_method('CalculateTxTime',
                   'double',
                   [param('uint32_t', 'bytes')],
                   is_const=True)
    ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function]
    cls.add_method('GetBitRate',
                   'uint64_t',
                   [],
                   is_const=True)
    return
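
# Editorial sketch: DataRate parses rate strings and exposes
# serialization-time arithmetic. With hypothetical values:
#
#   rate = ns.network.DataRate('5Mbps')
#   rate.GetBitRate()            # -> 5000000
#   rate.CalculateTxTime(1500)   # seconds for 1500 bytes: 1500*8/5e6 = 0.0024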

def register_Ns3EventId_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs',
                   'uint64_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl',
                   'ns3::EventImpl *',
                   [],
                   is_const=True)
    return
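
# Editorial sketch: EventId is the scheduler handle; it can be queried or
# cancelled before it fires. Hypothetical callback, assuming ns.core is
# built with Python callback support:
#
#   def expire():
#       pass
#   ev = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), expire)
#   ev.IsRunning()   # True until the event fires or is cancelled
#   ev.Cancel()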

def register_Ns3Ipv4Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv4Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv4Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast',
                   'ns3::Ipv4Address',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Address',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Address const &', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast',
                   'bool',
                   [param('ns3::Ipv4Mask const &', 'mask')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    return
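
# Editorial sketch: CombineMask and GetSubnetDirectedBroadcast implement
# the usual subnet arithmetic. With hypothetical addresses:
#
#   addr = ns.network.Ipv4Address('10.1.1.7')
#   mask = ns.network.Ipv4Mask('255.255.255.0')
#   addr.CombineMask(mask)                  # -> 10.1.1.0
#   addr.GetSubnetDirectedBroadcast(mask)   # -> 10.1.1.255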

def register_Ns3Ipv4Mask_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv4Mask',
                   [],
                   is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv4Mask', 'other')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint32_t', 'mask')])
    return
[member function]\n cls.add_method('GetAllNodesMulticast', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]\n cls.add_method('GetAllRoutersMulticast', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]\n cls.add_method('GetAny', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]\n cls.add_method('GetBytes', \n 'void', \n [param('uint8_t *', 'buf')], \n is_const=True)\n ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]\n cls.add_method('GetIpv4MappedAddress', \n 'ns3::Ipv4Address', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]\n cls.add_method('GetLoopback', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]\n cls.add_method('GetOnes', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]\n cls.add_method('GetZero', \n 'ns3::Ipv6Address', \n [], \n is_static=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]\n cls.add_method('IsAllHostsMulticast', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]\n cls.add_method('IsAllNodesMulticast', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]\n cls.add_method('IsAllRoutersMulticast', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]\n cls.add_method('IsAny', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]\n cls.add_method('IsEqual', \n 'bool', \n [param('ns3::Ipv6Address const &', 'other')], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]\n cls.add_method('IsIpv4MappedAddress', \n 'bool', \n [])\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]\n cls.add_method('IsLinkLocal', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]\n cls.add_method('IsLinkLocalMulticast', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]\n cls.add_method('IsLocalhost', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]\n cls.add_method('IsMatchingType', \n 'bool', \n [param('ns3::Address const &', 'address')], \n is_static=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]\n cls.add_method('IsMulticast', \n 'bool', \n [], \n is_const=True)\n ## 
ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]\n cls.add_method('IsSolicitedMulticast', \n 'bool', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]\n cls.add_method('MakeAutoconfiguredAddress', \n 'ns3::Ipv6Address', \n [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]\n cls.add_method('MakeAutoconfiguredLinkLocalAddress', \n 'ns3::Ipv6Address', \n [param('ns3::Mac48Address', 'mac')], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]\n cls.add_method('MakeIpv4MappedAddress', \n 'ns3::Ipv6Address', \n [param('ns3::Ipv4Address', 'addr')], \n is_static=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]\n cls.add_method('MakeSolicitedAddress', \n 'ns3::Ipv6Address', \n [param('ns3::Ipv6Address', 'addr')], \n is_static=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]\n cls.add_method('Print', \n 'void', \n [param('std::ostream &', 'os')], \n is_const=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]\n cls.add_method('Serialize', \n 'void', \n [param('uint8_t *', 'buf')], \n is_const=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]\n cls.add_method('Set', \n 'void', \n [param('char const *', 'address')])\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]\n cls.add_method('Set', \n 'void', \n [param('uint8_t *', 'address')])\n return\n\ndef register_Ns3Ipv6Prefix_methods(root_module, cls):\n cls.add_binary_comparison_operator('!=')\n cls.add_output_stream_operator()\n cls.add_binary_comparison_operator('==')\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]\n cls.add_constructor([])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]\n cls.add_constructor([param('uint8_t *', 'prefix')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]\n cls.add_constructor([param('char const *', 'prefix')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]\n cls.add_constructor([param('uint8_t', 'prefix')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]\n cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]\n cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])\n ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]\n cls.add_method('GetBytes', \n 'void', \n [param('uint8_t *', 'buf')], \n is_const=True)\n ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member 
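
# The register_Ns3* helpers above follow the usual PyBindGen apidef convention:
# a dispatcher earlier in the generated file looks each wrapped type up in
# root_module and passes it in. A hedged sketch of that wiring (the dispatcher
# itself is outside this excerpt, so the exact call site is an assumption):
#
#   register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
#   register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])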

def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

def register_Ns3NetDeviceContainer_methods(root_module, cls):
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*, std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*, std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
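
# A minimal usage sketch of the node/device containers wrapped in this file,
# assuming the generated bindings are importable as the conventional
# 'ns.network' module (the module name is an assumption, not shown here):
#
#   import ns.network
#   nodes = ns.network.NodeContainer()
#   nodes.Create(2)                  # NodeContainer::Create(uint32_t n)
#   first = nodes.Get(0)             # returns ns3::Ptr<ns3::Node>
#   devs = ns.network.NetDeviceContainer()
#   print(nodes.GetN(), devs.GetN())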

def register_Ns3NodeContainer_methods(root_module, cls):
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
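
# Sketch of how the ObjectFactory wrapper above is typically driven from
# Python (assumes the generated 'ns.core' module; the TypeId string is an
# arbitrary example of a registered type name, not taken from this file):
#
#   import ns.core
#   factory = ns.core.ObjectFactory()
#   factory.SetTypeId('ns3::DropTailQueue')   # any registered TypeId name
#   obj = factory.Create()                    # returns ns3::Ptr<ns3::Object>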

def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PcapFile_methods(root_module, cls):
    ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
    cls.add_constructor([])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
    cls.add_method('Close', 'void', [])
    ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
    cls.add_method('Diff', 'bool', [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')], is_static=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
    cls.add_method('Eof', 'bool', [], is_const=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
    cls.add_method('Fail', 'bool', [], is_const=True)
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType', 'uint32_t', [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
    cls.add_method('GetMagic', 'uint32_t', [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
    cls.add_method('GetSigFigs', 'uint32_t', [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
    cls.add_method('GetSnapLen', 'uint32_t', [])
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
    cls.add_method('GetSwapMode', 'bool', [])
    ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset', 'int32_t', [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor', 'uint16_t', [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor', 'uint16_t', [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
    cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
    cls.add_method('Read', 'void', [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
    cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<ns3::Packet const> p) [member function]
    cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
    cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
    ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
    cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
    return

def register_Ns3PcapHelper_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function]
    cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')])
    ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return

def register_Ns3PcapHelperForDevice_methods(root_module, cls):
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
    cls.add_method('EnablePcapAll', 'void', [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Simulator_methods(root_module, cls):
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy', 'void', [], is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished', 'bool', [], is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now', 'ns3::Time', [], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop', 'void', [], is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True)
    return
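
# Minimal sketch of driving the static Simulator wrappers above from Python
# (assumes the generated 'ns.core' module and its Seconds() time helper, both
# outside this excerpt):
#
#   import ns.core
#   ns.core.Simulator.Stop(ns.core.Seconds(10.0))
#   print(ns.core.Simulator.Now())
#   ns.core.Simulator.Destroy()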

def register_Ns3SystemCondition_methods(root_module, cls):
    ## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
    ## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor]
    cls.add_constructor([])
    ## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function]
    cls.add_method('Broadcast', 'void', [])
    ## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function]
    cls.add_method('GetCondition', 'bool', [])
    ## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function]
    cls.add_method('SetCondition', 'void', [param('bool', 'condition')])
    ## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function]
    cls.add_method('Signal', 'void', [])
    ## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
    cls.add_method('TimedWait', 'bool', [param('uint64_t', 'ns')])
    ## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function]
    cls.add_method('Wait', 'void', [])
    return

def register_Ns3SystemMutex_methods(root_module, cls):
    ## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
    ## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
    cls.add_constructor([])
    ## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
    cls.add_method('Lock', 'void', [])
    ## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
    cls.add_method('Unlock', 'void', [])
    return

def register_Ns3Tag_methods(root_module, cls):
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return
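
# Because Tag's Serialize/Deserialize/GetSerializedSize/Print are registered
# as pure virtuals above, user code subclasses the wrapper and fills them in
# via TagBuffer. A hedged sketch (class and field names are invented for
# illustration; assumes the generated 'ns.network' module):
#
#   class FlowIdTag(ns.network.Tag):
#       def GetSerializedSize(self):
#           return 4
#       def Serialize(self, buf):        # buf is an ns3::TagBuffer
#           buf.WriteU32(self.flowId)
#       def Deserialize(self, buf):
#           self.flowId = buf.ReadU32()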
[member function]\n cls.add_method('GetAttributeFullName', \n 'std::string', \n [param('uint32_t', 'i')], \n is_const=True)\n ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]\n cls.add_method('GetAttributeN', \n 'uint32_t', \n [], \n is_const=True)\n ## type-id.h (module 'core'): ns3::Callback ns3::TypeId::GetConstructor() const [member function]\n cls.add_method('GetConstructor', \n 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', \n [], \n is_const=True)\n ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]\n cls.add_method('GetGroupName', \n 'std::string', \n [], \n is_const=True)\n ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]\n cls.add_method('GetName', \n 'std::string', \n [], \n is_const=True)\n ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]\n cls.add_method('GetParent', \n 'ns3::TypeId', \n [], \n is_const=True)\n ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]\n cls.add_method('GetRegistered', \n 'ns3::TypeId', \n [param('uint32_t', 'i')], \n is_static=True)\n ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]\n cls.add_method('GetRegisteredN', \n 'uint32_t', \n [], \n is_static=True)\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]\n cls.add_method('GetTraceSource', \n 'ns3::TypeId::TraceSourceInformation', \n [param('uint32_t', 'i')], \n is_const=True)\n ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]\n cls.add_method('GetTraceSourceN', \n 'uint32_t', \n [], \n is_const=True)\n ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]\n cls.add_method('GetUid', \n 'uint16_t', \n [], \n is_const=True)\n ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]\n cls.add_method('HasConstructor', \n 'bool', \n [], \n is_const=True)\n ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]\n cls.add_method('HasParent', \n 'bool', \n [], \n is_const=True)\n ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]\n cls.add_method('HideFromDocumentation', \n 'ns3::TypeId', \n [])\n ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]\n cls.add_method('IsChildOf', \n 'bool', \n [param('ns3::TypeId', 'other')], \n is_const=True)\n ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]\n cls.add_method('LookupAttributeByName', \n 'bool', \n [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], \n is_const=True)\n ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]\n cls.add_method('LookupByName', \n 'ns3::TypeId', \n [param('std::string', 'name')], \n is_static=True)\n ## type-id.h (module 'core'): ns3::Ptr ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]\n cls.add_method('LookupTraceSourceByName', \n 'ns3::Ptr< ns3::TraceSourceAccessor const >', \n [param('std::string', 'name')], \n 
is_const=True)\n ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]\n cls.add_method('MustHideFromDocumentation', \n 'bool', \n [], \n is_const=True)\n ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr initialValue) [member function]\n cls.add_method('SetAttributeInitialValue', \n 'bool', \n [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])\n ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]\n cls.add_method('SetGroupName', \n 'ns3::TypeId', \n [param('std::string', 'groupName')])\n ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]\n cls.add_method('SetParent', \n 'ns3::TypeId', \n [param('ns3::TypeId', 'tid')])\n ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]\n cls.add_method('SetUid', \n 'void', \n [param('uint16_t', 'tid')])\n return\n\ndef register_Ns3TypeIdAttributeInformation_methods(root_module, cls):\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]\n cls.add_constructor([])\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]\n cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]\n cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]\n cls.add_instance_attribute('flags', 'uint32_t', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]\n cls.add_instance_attribute('help', 'std::string', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]\n cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]\n cls.add_instance_attribute('name', 'std::string', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]\n cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)\n return\n\ndef register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]\n cls.add_constructor([])\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]\n cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)\n ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]\n cls.add_instance_attribute('help', 'std::string', is_const=False)\n ## type-id.h (module 'core'): 
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return

def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return

def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return

def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3FdNetDeviceHelper_methods(root_module, cls):
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper(ns3::FdNetDeviceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FdNetDeviceHelper const &', 'arg0')])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::FdNetDeviceHelper::FdNetDeviceHelper() [constructor]
    cls.add_constructor([])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::Ptr node) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(std::string name) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('std::string', 'name')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::NetDeviceContainer ns3::FdNetDeviceHelper::Install(ns3::NodeContainer const & c) const [member function]
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer const &', 'c')], is_const=True, is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::SetAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
    ## fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr ns3::FdNetDeviceHelper::InstallPriv(ns3::Ptr node) const [member function]
    cls.add_method('InstallPriv', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='protected', is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnableAsciiInternal(ns3::Ptr stream, std::string prefix, ns3::Ptr nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True)
    ## fd-net-device-helper.h (module 'fd-net-device'): void ns3::FdNetDeviceHelper::EnablePcapInternal(std::string prefix, ns3::Ptr nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], visibility='private', is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3PcapFileWrapper_methods(root_module, cls):
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<uint32_t>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<uint32_t>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset', 'int32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType', 'uint32_t', [])
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

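# The register_Ns3SimpleRefCount__*__methods functions above and below each
# bind one instantiation of the C++ template ns3::SimpleRefCount<T, PARENT,
# DELETER>; the __lt__ and __gt__ tokens in the generated Python-side names
# stand in for the '<' and '>' of the template argument list, per the
# name-mangling convention PyBindGen applies when flattening template names
# (the full template arguments survive in the param(...) type strings).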
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

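# A sketch of how these per-class helpers are consumed. The register_methods
# driver itself sits earlier in this generated file and is assumed here; the
# root_module['...'] lookups follow the convention used throughout this file:
#
#   def register_methods(root_module):
#       register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
#       register_Ns3Time_methods(root_module, root_module['ns3::Time'])
#       ...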
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount >::SimpleRefCount(ns3::SimpleRefCount > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SystemThread_methods(root_module, cls):
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
    ## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback callback) [constructor]
    cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
    cls.add_method('Equals', 'bool', [param('pthread_t', 'id')], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
    cls.add_method('Join', 'void', [])
    ## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
    cls.add_method('Self', 'pthread_t', [], is_static=True)
    ## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
    cls.add_method('Start', 'void', [])
    return

def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Trailer_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3DataRateChecker_methods(root_module, cls):
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')])
    return

def register_Ns3DataRateValue_methods(root_module, cls):
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor]
    cls.add_constructor([])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')])
    ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor]
    cls.add_constructor([param('ns3::DataRate const &', 'value')])
    ## data-rate.h (module 'network'): ns3::Ptr ns3::DataRateValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function]
    cls.add_method('Get', 'ns3::DataRate', [], is_const=True)
    ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')])
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3EmuFdNetDeviceHelper_methods(root_module, cls):
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper(ns3::EmuFdNetDeviceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmuFdNetDeviceHelper const &', 'arg0')])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::EmuFdNetDeviceHelper::EmuFdNetDeviceHelper() [constructor]
    cls.add_constructor([])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): std::string ns3::EmuFdNetDeviceHelper::GetDeviceName() [member function]
    cls.add_method('GetDeviceName', 'std::string', [])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetDeviceName(std::string deviceName) [member function]
    cls.add_method('SetDeviceName', 'void', [param('std::string', 'deviceName')])
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): int ns3::EmuFdNetDeviceHelper::CreateFileDescriptor() const [member function]
    cls.add_method('CreateFileDescriptor', 'int', [], is_const=True, visibility='protected', is_virtual=True)
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr ns3::EmuFdNetDeviceHelper::InstallPriv(ns3::Ptr node) const [member function]
    cls.add_method('InstallPriv', 'ns3::Ptr< ns3::NetDevice >', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True, visibility='protected', is_virtual=True)
    ## emu-fd-net-device-helper.h (module 'fd-net-device'): void ns3::EmuFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr device) const [member function]
    cls.add_method('SetFileDescriptor', 'void', [param('ns3::Ptr< ns3::FdNetDevice >', 'device')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3EventImpl_methods(root_module, cls):
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3FdReader_methods(root_module, cls):
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
    cls.add_constructor([])
    ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback readCallback) [member function]
    cls.add_method('Start', 'void', [param('int', 'fd'), param('ns3::Callback< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
    ## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
    cls.add_method('Stop', 'void', [])
    ## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
    cls.add_method('DoRead', 'ns3::FdReader::Data', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
\n [param('ns3::Ipv4Mask const &', 'value')])\n return\n\ndef register_Ns3Ipv6AddressChecker_methods(root_module, cls):\n ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]\n cls.add_constructor([])\n ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])\n return\n\ndef register_Ns3Ipv6AddressValue_methods(root_module, cls):\n ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]\n cls.add_constructor([])\n ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]\n cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])\n ## ipv6-address.h (module 'network'): ns3::Ptr ns3::Ipv6AddressValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::Ipv6Address', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::Ipv6Address const &', 'value')])\n return\n\ndef register_Ns3Ipv6PrefixChecker_methods(root_module, cls):\n ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]\n cls.add_constructor([])\n ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])\n return\n\ndef register_Ns3Ipv6PrefixValue_methods(root_module, cls):\n ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]\n cls.add_constructor([])\n ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])\n ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]\n cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])\n ## ipv6-address.h (module 'network'): ns3::Ptr ns3::Ipv6PrefixValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## ipv6-address.h (module 'network'): bool 
ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::Ipv6Prefix', \n [], \n is_const=True)\n ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::Ipv6Prefix const &', 'value')])\n return\n\ndef register_Ns3Mac48AddressChecker_methods(root_module, cls):\n ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]\n cls.add_constructor([])\n ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])\n return\n\ndef register_Ns3Mac48AddressValue_methods(root_module, cls):\n ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]\n cls.add_constructor([])\n ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])\n ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]\n cls.add_constructor([param('ns3::Mac48Address const &', 'value')])\n ## mac48-address.h (module 'network'): ns3::Ptr ns3::Mac48AddressValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::Mac48Address', \n [], \n is_const=True)\n ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::Mac48Address const &', 'value')])\n return\n\ndef register_Ns3NetDevice_methods(root_module, cls):\n ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]\n cls.add_constructor([])\n ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])\n ## 
net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback callback) [member function]\n cls.add_method('AddLinkChangeCallback', \n 'void', \n [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]\n cls.add_method('GetAddress', \n 'ns3::Address', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]\n cls.add_method('GetBroadcast', \n 'ns3::Address', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Ptr ns3::NetDevice::GetChannel() const [member function]\n cls.add_method('GetChannel', \n 'ns3::Ptr< ns3::Channel >', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]\n cls.add_method('GetIfIndex', \n 'uint32_t', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]\n cls.add_method('GetMtu', \n 'uint16_t', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]\n cls.add_method('GetMulticast', \n 'ns3::Address', \n [param('ns3::Ipv4Address', 'multicastGroup')], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]\n cls.add_method('GetMulticast', \n 'ns3::Address', \n [param('ns3::Ipv6Address', 'addr')], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): ns3::Ptr ns3::NetDevice::GetNode() const [member function]\n cls.add_method('GetNode', \n 'ns3::Ptr< ns3::Node >', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]\n cls.add_method('GetTypeId', \n 'ns3::TypeId', \n [], \n is_static=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]\n cls.add_method('IsBridge', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]\n cls.add_method('IsBroadcast', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]\n cls.add_method('IsLinkUp', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]\n cls.add_method('IsMulticast', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]\n cls.add_method('IsPointToPoint', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]\n cls.add_method('NeedsArp', \n 
'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]\n cls.add_method('Send', \n 'bool', \n [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]\n cls.add_method('SendFrom', \n 'bool', \n [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]\n cls.add_method('SetAddress', \n 'void', \n [param('ns3::Address', 'address')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]\n cls.add_method('SetIfIndex', \n 'void', \n [param('uint32_t const', 'index')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]\n cls.add_method('SetMtu', \n 'bool', \n [param('uint16_t const', 'mtu')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr node) [member function]\n cls.add_method('SetNode', \n 'void', \n [param('ns3::Ptr< ns3::Node >', 'node')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]\n cls.add_method('SetPromiscReceiveCallback', \n 'void', \n [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]\n cls.add_method('SetReceiveCallback', \n 'void', \n [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], \n is_pure_virtual=True, is_virtual=True)\n ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]\n cls.add_method('SupportsSendFrom', \n 'bool', \n [], \n is_pure_virtual=True, is_const=True, is_virtual=True)\n return\n\ndef register_Ns3NixVector_methods(root_module, cls):\n cls.add_output_stream_operator()\n ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]\n cls.add_constructor([])\n ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]\n cls.add_constructor([param('ns3::NixVector const &', 'o')])\n ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, 
uint32_t numberOfBits) [member function]\n cls.add_method('AddNeighborIndex', \n 'void', \n [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]\n cls.add_method('BitCount', \n 'uint32_t', \n [param('uint32_t', 'numberOfNeighbors')], \n is_const=True)\n ## nix-vector.h (module 'network'): ns3::Ptr ns3::NixVector::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::NixVector >', \n [], \n is_const=True)\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]\n cls.add_method('Deserialize', \n 'uint32_t', \n [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]\n cls.add_method('ExtractNeighborIndex', \n 'uint32_t', \n [param('uint32_t', 'numberOfBits')])\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]\n cls.add_method('GetRemainingBits', \n 'uint32_t', \n [])\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]\n cls.add_method('GetSerializedSize', \n 'uint32_t', \n [], \n is_const=True)\n ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]\n cls.add_method('Serialize', \n 'uint32_t', \n [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], \n is_const=True)\n return\n\ndef register_Ns3Node_methods(root_module, cls):\n ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::Node const &', 'arg0')])\n ## node.h (module 'network'): ns3::Node::Node() [constructor]\n cls.add_constructor([])\n ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]\n cls.add_constructor([param('uint32_t', 'systemId')])\n ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr application) [member function]\n cls.add_method('AddApplication', \n 'uint32_t', \n [param('ns3::Ptr< ns3::Application >', 'application')])\n ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr device) [member function]\n cls.add_method('AddDevice', \n 'uint32_t', \n [param('ns3::Ptr< ns3::NetDevice >', 'device')])\n ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]\n cls.add_method('ChecksumEnabled', \n 'bool', \n [], \n is_static=True)\n ## node.h (module 'network'): ns3::Ptr ns3::Node::GetApplication(uint32_t index) const [member function]\n cls.add_method('GetApplication', \n 'ns3::Ptr< ns3::Application >', \n [param('uint32_t', 'index')], \n is_const=True)\n ## node.h (module 'network'): ns3::Ptr ns3::Node::GetDevice(uint32_t index) const [member function]\n cls.add_method('GetDevice', \n 'ns3::Ptr< ns3::NetDevice >', \n [param('uint32_t', 'index')], \n is_const=True)\n ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]\n cls.add_method('GetId', \n 'uint32_t', \n [], \n is_const=True)\n ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]\n cls.add_method('GetNApplications', \n 'uint32_t', \n [], \n is_const=True)\n ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]\n cls.add_method('GetNDevices', \n 
'uint32_t', \n [], \n is_const=True)\n ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]\n cls.add_method('GetSystemId', \n 'uint32_t', \n [], \n is_const=True)\n ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]\n cls.add_method('GetTypeId', \n 'ns3::TypeId', \n [], \n is_static=True)\n ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]\n cls.add_method('RegisterDeviceAdditionListener', \n 'void', \n [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])\n ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr device, bool promiscuous=false) [member function]\n cls.add_method('RegisterProtocolHandler', \n 'void', \n [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])\n ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]\n cls.add_method('UnregisterDeviceAdditionListener', \n 'void', \n [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])\n ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]\n cls.add_method('UnregisterProtocolHandler', \n 'void', \n [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])\n ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]\n cls.add_method('DoDispose', \n 'void', \n [], \n visibility='protected', is_virtual=True)\n ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]\n cls.add_method('DoInitialize', \n 'void', \n [], \n visibility='protected', is_virtual=True)\n return\n\ndef register_Ns3ObjectFactoryChecker_methods(root_module, cls):\n ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]\n cls.add_constructor([])\n ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])\n return\n\ndef register_Ns3ObjectFactoryValue_methods(root_module, cls):\n ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]\n cls.add_constructor([])\n ## object-factory.h (module 'core'): 
ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])\n ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]\n cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])\n ## object-factory.h (module 'core'): ns3::Ptr ns3::ObjectFactoryValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::ObjectFactory', \n [], \n is_const=True)\n ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::ObjectFactory const &', 'value')])\n return\n\ndef register_Ns3OutputStreamWrapper_methods(root_module, cls):\n ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])\n ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]\n cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])\n ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]\n cls.add_constructor([param('std::ostream *', 'os')])\n ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]\n cls.add_method('GetStream', \n 'std::ostream *', \n [])\n return\n\ndef register_Ns3Packet_methods(root_module, cls):\n cls.add_output_stream_operator()\n ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]\n cls.add_constructor([])\n ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]\n cls.add_constructor([param('ns3::Packet const &', 'o')])\n ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]\n cls.add_constructor([param('uint32_t', 'size')])\n ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]\n cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])\n ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]\n cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])\n ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr packet) [member function]\n cls.add_method('AddAtEnd', \n 
'void', \n [param('ns3::Ptr< ns3::Packet const >', 'packet')])\n ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]\n cls.add_method('AddByteTag', \n 'void', \n [param('ns3::Tag const &', 'tag')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]\n cls.add_method('AddHeader', \n 'void', \n [param('ns3::Header const &', 'header')])\n ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]\n cls.add_method('AddPacketTag', \n 'void', \n [param('ns3::Tag const &', 'tag')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]\n cls.add_method('AddPaddingAtEnd', \n 'void', \n [param('uint32_t', 'size')])\n ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]\n cls.add_method('AddTrailer', \n 'void', \n [param('ns3::Trailer const &', 'trailer')])\n ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]\n cls.add_method('BeginItem', \n 'ns3::PacketMetadata::ItemIterator', \n [], \n is_const=True)\n ## packet.h (module 'network'): ns3::Ptr ns3::Packet::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::Packet >', \n [], \n is_const=True)\n ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]\n cls.add_method('CopyData', \n 'uint32_t', \n [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]\n cls.add_method('CopyData', \n 'void', \n [param('std::ostream *', 'os'), param('uint32_t', 'size')], \n is_const=True)\n ## packet.h (module 'network'): ns3::Ptr ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]\n cls.add_method('CreateFragment', \n 'ns3::Ptr< ns3::Packet >', \n [param('uint32_t', 'start'), param('uint32_t', 'length')], \n is_const=True)\n ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]\n cls.add_method('EnableChecking', \n 'void', \n [], \n is_static=True)\n ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]\n cls.add_method('EnablePrinting', \n 'void', \n [], \n is_static=True)\n ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]\n cls.add_method('FindFirstMatchingByteTag', \n 'bool', \n [param('ns3::Tag &', 'tag')], \n is_const=True)\n ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]\n cls.add_method('GetByteTagIterator', \n 'ns3::ByteTagIterator', \n [], \n is_const=True)\n ## packet.h (module 'network'): ns3::Ptr ns3::Packet::GetNixVector() const [member function]\n cls.add_method('GetNixVector', \n 'ns3::Ptr< ns3::NixVector >', \n [], \n is_const=True)\n ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]\n cls.add_method('GetPacketTagIterator', \n 'ns3::PacketTagIterator', \n [], \n is_const=True)\n ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]\n cls.add_method('GetSerializedSize', \n 'uint32_t', \n [], \n is_const=True)\n ## packet.h (module 
'network'): uint32_t ns3::Packet::GetSize() const [member function]\n cls.add_method('GetSize', \n 'uint32_t', \n [], \n is_const=True)\n ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]\n cls.add_method('GetUid', \n 'uint64_t', \n [], \n is_const=True)\n ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]\n cls.add_method('PeekData', \n 'uint8_t const *', \n [], \n deprecated=True, is_const=True)\n ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]\n cls.add_method('PeekHeader', \n 'uint32_t', \n [param('ns3::Header &', 'header')], \n is_const=True)\n ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]\n cls.add_method('PeekPacketTag', \n 'bool', \n [param('ns3::Tag &', 'tag')], \n is_const=True)\n ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]\n cls.add_method('PeekTrailer', \n 'uint32_t', \n [param('ns3::Trailer &', 'trailer')])\n ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]\n cls.add_method('Print', \n 'void', \n [param('std::ostream &', 'os')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]\n cls.add_method('PrintByteTags', \n 'void', \n [param('std::ostream &', 'os')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]\n cls.add_method('PrintPacketTags', \n 'void', \n [param('std::ostream &', 'os')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]\n cls.add_method('RemoveAllByteTags', \n 'void', \n [])\n ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]\n cls.add_method('RemoveAllPacketTags', \n 'void', \n [])\n ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]\n cls.add_method('RemoveAtEnd', \n 'void', \n [param('uint32_t', 'size')])\n ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]\n cls.add_method('RemoveAtStart', \n 'void', \n [param('uint32_t', 'size')])\n ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]\n cls.add_method('RemoveHeader', \n 'uint32_t', \n [param('ns3::Header &', 'header')])\n ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]\n cls.add_method('RemovePacketTag', \n 'bool', \n [param('ns3::Tag &', 'tag')])\n ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]\n cls.add_method('RemoveTrailer', \n 'uint32_t', \n [param('ns3::Trailer &', 'trailer')])\n ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]\n cls.add_method('Serialize', \n 'uint32_t', \n [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], \n is_const=True)\n ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr arg0) [member function]\n cls.add_method('SetNixVector', \n 'void', \n [param('ns3::Ptr< ns3::NixVector >', 'arg0')])\n return\n\ndef register_Ns3PlanetLabFdNetDeviceHelper_methods(root_module, cls):\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): 
ns3::PlanetLabFdNetDeviceHelper::PlanetLabFdNetDeviceHelper(ns3::PlanetLabFdNetDeviceHelper const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::PlanetLabFdNetDeviceHelper const &', 'arg0')])\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::PlanetLabFdNetDeviceHelper::PlanetLabFdNetDeviceHelper() [constructor]\n cls.add_constructor([])\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetTapIpAddress(ns3::Ipv4Address address) [member function]\n cls.add_method('SetTapIpAddress', \n 'void', \n [param('ns3::Ipv4Address', 'address')])\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetTapMask(ns3::Ipv4Mask mask) [member function]\n cls.add_method('SetTapMask', \n 'void', \n [param('ns3::Ipv4Mask', 'mask')])\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): int ns3::PlanetLabFdNetDeviceHelper::CreateFileDescriptor() const [member function]\n cls.add_method('CreateFileDescriptor', \n 'int', \n [], \n is_const=True, visibility='protected', is_virtual=True)\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr ns3::PlanetLabFdNetDeviceHelper::InstallPriv(ns3::Ptr node) const [member function]\n cls.add_method('InstallPriv', \n 'ns3::Ptr< ns3::NetDevice >', \n [param('ns3::Ptr< ns3::Node >', 'node')], \n is_const=True, visibility='protected', is_virtual=True)\n ## planetlab-fd-net-device-helper.h (module 'fd-net-device'): void ns3::PlanetLabFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr device) const [member function]\n cls.add_method('SetFileDescriptor', \n 'void', \n [param('ns3::Ptr< ns3::FdNetDevice >', 'device')], \n is_const=True, visibility='protected', is_virtual=True)\n return\n\ndef register_Ns3TapFdNetDeviceHelper_methods(root_module, cls):\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper(ns3::TapFdNetDeviceHelper const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TapFdNetDeviceHelper const &', 'arg0')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::TapFdNetDeviceHelper::TapFdNetDeviceHelper() [constructor]\n cls.add_constructor([])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetModePi(bool pi) [member function]\n cls.add_method('SetModePi', \n 'void', \n [param('bool', 'pi')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Address(ns3::Ipv4Address address) [member function]\n cls.add_method('SetTapIpv4Address', \n 'void', \n [param('ns3::Ipv4Address', 'address')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv4Mask(ns3::Ipv4Mask mask) [member function]\n cls.add_method('SetTapIpv4Mask', \n 'void', \n [param('ns3::Ipv4Mask', 'mask')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Address(ns3::Ipv6Address address) [member function]\n cls.add_method('SetTapIpv6Address', \n 'void', \n [param('ns3::Ipv6Address', 'address')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapIpv6Prefix(int prefix) [member function]\n cls.add_method('SetTapIpv6Prefix', \n 'void', \n [param('int', 'prefix')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetTapMacAddress(ns3::Mac48Address mac) [member function]\n cls.add_method('SetTapMacAddress', \n 'void', \n 
[param('ns3::Mac48Address', 'mac')])\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): int ns3::TapFdNetDeviceHelper::CreateFileDescriptor() const [member function]\n cls.add_method('CreateFileDescriptor', \n 'int', \n [], \n is_const=True, visibility='protected', is_virtual=True)\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): ns3::Ptr ns3::TapFdNetDeviceHelper::InstallPriv(ns3::Ptr node) const [member function]\n cls.add_method('InstallPriv', \n 'ns3::Ptr< ns3::NetDevice >', \n [param('ns3::Ptr< ns3::Node >', 'node')], \n is_const=True, visibility='protected', is_virtual=True)\n ## tap-fd-net-device-helper.h (module 'fd-net-device'): void ns3::TapFdNetDeviceHelper::SetFileDescriptor(ns3::Ptr device) const [member function]\n cls.add_method('SetFileDescriptor', \n 'void', \n [param('ns3::Ptr< ns3::FdNetDevice >', 'device')], \n is_const=True, visibility='protected', is_virtual=True)\n return\n\ndef register_Ns3TimeChecker_methods(root_module, cls):\n ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]\n cls.add_constructor([])\n ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])\n return\n\ndef register_Ns3TimeValue_methods(root_module, cls):\n ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]\n cls.add_constructor([])\n ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])\n ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]\n cls.add_constructor([param('ns3::Time const &', 'value')])\n ## nstime.h (module 'core'): ns3::Ptr ns3::TimeValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::Time', \n [], \n is_const=True)\n ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::Time const &', 'value')])\n return\n\ndef register_Ns3TypeIdChecker_methods(root_module, cls):\n ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]\n cls.add_constructor([])\n ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])\n return\n\ndef register_Ns3TypeIdValue_methods(root_module, cls):\n ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]\n cls.add_constructor([])\n ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::TypeIdValue const &', 
'arg0')])\n ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]\n cls.add_constructor([param('ns3::TypeId const &', 'value')])\n ## type-id.h (module 'core'): ns3::Ptr ns3::TypeIdValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::TypeId', \n [], \n is_const=True)\n ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::TypeId const &', 'value')])\n return\n\ndef register_Ns3AddressChecker_methods(root_module, cls):\n ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]\n cls.add_constructor([])\n ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])\n return\n\ndef register_Ns3AddressValue_methods(root_module, cls):\n ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]\n cls.add_constructor([])\n ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])\n ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]\n cls.add_constructor([param('ns3::Address const &', 'value')])\n ## address.h (module 'network'): ns3::Ptr ns3::AddressValue::Copy() const [member function]\n cls.add_method('Copy', \n 'ns3::Ptr< ns3::AttributeValue >', \n [], \n is_const=True, is_virtual=True)\n ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr checker) [member function]\n cls.add_method('DeserializeFromString', \n 'bool', \n [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_virtual=True)\n ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]\n cls.add_method('Get', \n 'ns3::Address', \n [], \n is_const=True)\n ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr checker) const [member function]\n cls.add_method('SerializeToString', \n 'std::string', \n [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], \n is_const=True, is_virtual=True)\n ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]\n cls.add_method('Set', \n 'void', \n [param('ns3::Address const &', 'value')])\n return\n\ndef register_Ns3FdNetDevice_methods(root_module, cls):\n ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::FdNetDevice() [constructor]\n cls.add_constructor([])\n ## 
fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::AddLinkChangeCallback(ns3::Callback callback) [member function]\n cls.add_method('AddLinkChangeCallback', \n 'void', \n [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetAddress() const [member function]\n cls.add_method('GetAddress', \n 'ns3::Address', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetBroadcast() const [member function]\n cls.add_method('GetBroadcast', \n 'ns3::Address', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Ptr ns3::FdNetDevice::GetChannel() const [member function]\n cls.add_method('GetChannel', \n 'ns3::Ptr< ns3::Channel >', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDevice::EncapsulationMode ns3::FdNetDevice::GetEncapsulationMode() const [member function]\n cls.add_method('GetEncapsulationMode', \n 'ns3::FdNetDevice::EncapsulationMode', \n [], \n is_const=True)\n ## fd-net-device.h (module 'fd-net-device'): uint32_t ns3::FdNetDevice::GetIfIndex() const [member function]\n cls.add_method('GetIfIndex', \n 'uint32_t', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): uint16_t ns3::FdNetDevice::GetMtu() const [member function]\n cls.add_method('GetMtu', \n 'uint16_t', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]\n cls.add_method('GetMulticast', \n 'ns3::Address', \n [param('ns3::Ipv4Address', 'multicastGroup')], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Address ns3::FdNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]\n cls.add_method('GetMulticast', \n 'ns3::Address', \n [param('ns3::Ipv6Address', 'addr')], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): ns3::Ptr ns3::FdNetDevice::GetNode() const [member function]\n cls.add_method('GetNode', \n 'ns3::Ptr< ns3::Node >', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): static ns3::TypeId ns3::FdNetDevice::GetTypeId() [member function]\n cls.add_method('GetTypeId', \n 'ns3::TypeId', \n [], \n is_static=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBridge() const [member function]\n cls.add_method('IsBridge', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsBroadcast() const [member function]\n cls.add_method('IsBroadcast', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsLinkUp() const [member function]\n cls.add_method('IsLinkUp', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsMulticast() const [member function]\n cls.add_method('IsMulticast', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::IsPointToPoint() const [member function]\n cls.add_method('IsPointToPoint', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## 
fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::NeedsArp() const [member function]\n cls.add_method('NeedsArp', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::Send(ns3::Ptr packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]\n cls.add_method('Send', \n 'bool', \n [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SendFrom(ns3::Ptr packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]\n cls.add_method('SendFrom', \n 'bool', \n [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetAddress(ns3::Address address) [member function]\n cls.add_method('SetAddress', \n 'void', \n [param('ns3::Address', 'address')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetEncapsulationMode(ns3::FdNetDevice::EncapsulationMode mode) [member function]\n cls.add_method('SetEncapsulationMode', \n 'void', \n [param('ns3::FdNetDevice::EncapsulationMode', 'mode')])\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetFileDescriptor(int fd) [member function]\n cls.add_method('SetFileDescriptor', \n 'void', \n [param('int', 'fd')])\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIfIndex(uint32_t const index) [member function]\n cls.add_method('SetIfIndex', \n 'void', \n [param('uint32_t const', 'index')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsBroadcast(bool broadcast) [member function]\n cls.add_method('SetIsBroadcast', \n 'void', \n [param('bool', 'broadcast')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetIsMulticast(bool multicast) [member function]\n cls.add_method('SetIsMulticast', \n 'void', \n [param('bool', 'multicast')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SetMtu(uint16_t const mtu) [member function]\n cls.add_method('SetMtu', \n 'bool', \n [param('uint16_t const', 'mtu')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetNode(ns3::Ptr node) [member function]\n cls.add_method('SetNode', \n 'void', \n [param('ns3::Ptr< ns3::Node >', 'node')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetPromiscReceiveCallback(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]\n cls.add_method('SetPromiscReceiveCallback', \n 'void', \n [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::SetReceiveCallback(ns3::Callback, ns3::Ptr, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]\n cls.add_method('SetReceiveCallback', \n 'void', \n 
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], \n is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Start(ns3::Time tStart) [member function]\n cls.add_method('Start', \n 'void', \n [param('ns3::Time', 'tStart')])\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::Stop(ns3::Time tStop) [member function]\n cls.add_method('Stop', \n 'void', \n [param('ns3::Time', 'tStop')])\n ## fd-net-device.h (module 'fd-net-device'): bool ns3::FdNetDevice::SupportsSendFrom() const [member function]\n cls.add_method('SupportsSendFrom', \n 'bool', \n [], \n is_const=True, is_virtual=True)\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDevice::DoDispose() [member function]\n cls.add_method('DoDispose', \n 'void', \n [], \n visibility='protected', is_virtual=True)\n return\n\ndef register_Ns3FdNetDeviceFdReader_methods(root_module, cls):\n ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader(ns3::FdNetDeviceFdReader const & arg0) [copy constructor]\n cls.add_constructor([param('ns3::FdNetDeviceFdReader const &', 'arg0')])\n ## fd-net-device.h (module 'fd-net-device'): ns3::FdNetDeviceFdReader::FdNetDeviceFdReader() [constructor]\n cls.add_constructor([])\n ## fd-net-device.h (module 'fd-net-device'): void ns3::FdNetDeviceFdReader::SetBufferSize(uint32_t bufferSize) [member function]\n cls.add_method('SetBufferSize', \n 'void', \n [param('uint32_t', 'bufferSize')])\n ## fd-net-device.h (module 'fd-net-device'): ns3::FdReader::Data ns3::FdNetDeviceFdReader::DoRead() [member function]\n cls.add_method('DoRead', \n 'ns3::FdReader::Data', \n [], \n visibility='private', is_virtual=True)\n return\n\ndef register_functions(root_module):\n module = root_module\n register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)\n return\n\ndef register_functions_ns3_FatalImpl(module, root_module):\n return\n\ndef main():\n out = FileCodeSink(sys.stdout)\n root_module = module_init()\n register_types(root_module)\n register_methods(root_module)\n register_functions(root_module)\n root_module.generate(out)\n\nif __name__ == '__main__':\n main()\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},
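The ns-3 binding script that ends above is machine-generated for pybindgen. As a hedged orientation to the registration API it exercises (Module, add_class, add_method, param), here is a minimal, self-contained sketch; 'my_module' and 'MyClass' are hypothetical names, not part of the ns-3 bindings above, and this assumes pybindgen is installed:

# Minimal pybindgen sketch (hypothetical module/class; assumes pybindgen).
import sys
from pybindgen import Module, FileCodeSink, param

def main():
    mod = Module('my_module')
    mod.add_include('"my-class.h"')  # header that declares MyClass
    cls = mod.add_class('MyClass')
    cls.add_constructor([])
    # Same call shape as the generated script above: method name, return
    # type, parameter list, then flags such as is_const/is_virtual/visibility.
    cls.add_method('GetValue', 'int', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('int', 'value')])
    mod.generate(FileCodeSink(sys.stdout))  # emit CPython extension C code

if __name__ == '__main__':
    main()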
{"rowIdx":475228,"cells":{"repo_name":{"kind":"string","value":"aneeshusa/servo"},"path":{"kind":"string","value":"tests/wpt/web-platform-tests/tools/html5lib/html5lib/serializer/htmlserializer.py"},"copies":{"kind":"string","value":"423"},"size":{"kind":"string","value":"12897"},"content":{"kind":"string","value":"from __future__ import absolute_import, division, unicode_literals\nfrom six import text_type\n\nimport gettext\n_ = gettext.gettext\n\ntry:\n    from functools import reduce\nexcept ImportError:\n    pass\n\nfrom ..constants import voidElements, booleanAttributes, spaceCharacters\nfrom ..constants import rcdataElements, entities, xmlEntities\nfrom .. import utils\nfrom xml.sax.saxutils import escape\n\nspaceCharacters = \"\".join(spaceCharacters)\n\ntry:\n    from codecs import register_error, xmlcharrefreplace_errors\nexcept ImportError:\n    unicode_encode_errors = \"strict\"\nelse:\n    unicode_encode_errors = \"htmlentityreplace\"\n\n    encode_entity_map = {}\n    is_ucs4 = len(\"\\U0010FFFF\") == 1\n    for k, v in list(entities.items()):\n        # skip multi-character entities\n        if ((is_ucs4 and len(v) > 1) or\n                (not is_ucs4 and len(v) > 2)):\n            continue\n        if v != \"&\":\n            if len(v) == 2:\n                v = utils.surrogatePairToCodepoint(v)\n            else:\n                v = ord(v)\n            if v not in encode_entity_map or k.islower():\n                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.\n                encode_entity_map[v] = k\n\n    def htmlentityreplace_errors(exc):\n        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):\n            res = []\n            codepoints = []\n            skip = False\n            for i, c in enumerate(exc.object[exc.start:exc.end]):\n                if skip:\n                    skip = False\n                    continue\n                index = i + exc.start\n                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):\n                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])\n                    skip = True\n                else:\n                    codepoint = ord(c)\n                codepoints.append(codepoint)\n            for cp in codepoints:\n                e = encode_entity_map.get(cp)\n                if e:\n                    res.append(\"&\")\n                    res.append(e)\n                    if not e.endswith(\";\"):\n                        res.append(\";\")\n                else:\n                    res.append(\"&#x%s;\" % (hex(cp)[2:]))\n            return (\"\".join(res), exc.end)\n        else:\n            return xmlcharrefreplace_errors(exc)\n\n    register_error(unicode_encode_errors, htmlentityreplace_errors)\n\n    del register_error\n\n\nclass HTMLSerializer(object):\n\n    # attribute quoting options\n    quote_attr_values = False\n    quote_char = '\"'\n    use_best_quote_char = True\n\n    # tag syntax options\n    omit_optional_tags = True\n    minimize_boolean_attributes = True\n    use_trailing_solidus = False\n    space_before_trailing_solidus = True\n\n    # escaping options\n    escape_lt_in_attrs = False\n    escape_rcdata = False\n    resolve_entities = True\n\n    # miscellaneous options\n    alphabetical_attributes = False\n    inject_meta_charset = True\n    strip_whitespace = False\n    sanitize = False\n\n    options = (\"quote_attr_values\", \"quote_char\", \"use_best_quote_char\",\n               \"omit_optional_tags\", \"minimize_boolean_attributes\",\n               \"use_trailing_solidus\", \"space_before_trailing_solidus\",\n               \"escape_lt_in_attrs\", \"escape_rcdata\", \"resolve_entities\",\n               \"alphabetical_attributes\", \"inject_meta_charset\",\n               \"strip_whitespace\", \"sanitize\")\n\n    def __init__(self, **kwargs):\n        \"\"\"Initialize HTMLSerializer.\n\n        Keyword options (default given first unless specified) include:\n\n        inject_meta_charset=True|False\n          Whether to insert a meta element to define the character set of the\n          document.\n        quote_attr_values=True|False\n          Whether to quote attribute values that don't require quoting\n          per HTML5 parsing rules.\n        quote_char=u'\"'|u\"'\"\n          Use given quote character for attribute quoting. Default is to\n          use double quote unless attribute value contains a double quote,\n          in which case single quotes are used instead.\n        escape_lt_in_attrs=False|True\n          Whether to escape < in attribute values.\n        escape_rcdata=False|True\n          Whether to escape characters that need to be escaped within normal\n          elements within rcdata elements such as style.\n        resolve_entities=True|False\n          Whether to resolve named character entities that appear in the\n          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;\n          are unaffected by this setting.\n        strip_whitespace=False|True\n          Whether to remove semantically meaningless whitespace. (This\n          compresses all whitespace to a single space except within pre.)\n        minimize_boolean_attributes=True|False\n          Shortens boolean attributes to give just the attribute value,\n          for example <input disabled=\"disabled\"> becomes <input disabled>.\n        use_trailing_solidus=False|True\n          Includes a close-tag slash at the end of the start tag of void\n          elements (empty elements whose end tag is forbidden). E.g. <hr/>.\n        space_before_trailing_solidus=True|False\n          Places a space immediately before the closing slash in a tag\n          using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.\n        sanitize=False|True\n          Strip all unsafe or unknown constructs from output.\n          See `html5lib user documentation`_\n        omit_optional_tags=True|False\n          Omit start/end tags that are optional.\n        alphabetical_attributes=False|True\n          Reorder attributes to be in alphabetical order.\n\n        .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation\n        \"\"\"\n        if 'quote_char' in kwargs:\n            self.use_best_quote_char = False\n        for attr in self.options:\n            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))\n        self.errors = []\n        self.strict = False\n\n    def encode(self, string):\n        assert(isinstance(string, text_type))\n        if self.encoding:\n            return string.encode(self.encoding, unicode_encode_errors)\n        else:\n            return string\n\n    def encodeStrict(self, string):\n        assert(isinstance(string, text_type))\n        if self.encoding:\n            return string.encode(self.encoding, \"strict\")\n        else:\n            return string\n\n    def serialize(self, treewalker, encoding=None):\n        self.encoding = encoding\n        in_cdata = False\n        self.errors = []\n\n        if encoding and self.inject_meta_charset:\n            from ..filters.inject_meta_charset import Filter\n            treewalker = Filter(treewalker, encoding)\n        # WhitespaceFilter should be used before OptionalTagFilter\n        # for maximum efficiency of this latter filter\n        if self.strip_whitespace:\n            from ..filters.whitespace import Filter\n            treewalker = Filter(treewalker)\n        if self.sanitize:\n            from ..filters.sanitizer import Filter\n            treewalker = Filter(treewalker)\n        if self.omit_optional_tags:\n            from ..filters.optionaltags import Filter\n            treewalker = Filter(treewalker)\n        # Alphabetical attributes must be last, as other filters\n        # could add attributes and alter the order\n        if self.alphabetical_attributes:\n            from ..filters.alphabeticalattributes import Filter\n            treewalker = Filter(treewalker)\n\n        for token in treewalker:\n            type = token[\"type\"]\n            if type == \"Doctype\":\n                doctype = \"<!DOCTYPE %s\" % token[\"name\"]\n\n                if token[\"publicId\"]:\n                    doctype += ' PUBLIC \"%s\"' % token[\"publicId\"]\n                elif token[\"systemId\"]:\n                    doctype += \" SYSTEM\"\n                if token[\"systemId\"]:\n                    if token[\"systemId\"].find('\"') >= 0:\n                        if token[\"systemId\"].find(\"'\") >= 0:\n                            self.serializeError(_(\"System identifier contains both single and double quote characters\"))\n                        quote_char = \"'\"\n                    else:\n                        quote_char = '\"'\n                    doctype += \" %s%s%s\" % (quote_char, token[\"systemId\"], quote_char)\n\n                doctype += \">\"\n                yield self.encodeStrict(doctype)\n\n            elif type in (\"Characters\", \"SpaceCharacters\"):\n                if type == \"SpaceCharacters\" or in_cdata:\n                    if in_cdata and token[\"data\"].find(\"</\") >= 0:\n                        self.serializeError(_(\"Unexpected </ in CDATA\"))\n                    yield self.encode(token[\"data\"])\n                else:\n                    yield self.encode(escape(token[\"data\"]))\n\n            elif type in (\"StartTag\", \"EmptyTag\"):\n                name = token[\"name\"]\n                yield self.encodeStrict(\"<%s\" % name)\n                if name in rcdataElements and not self.escape_rcdata:\n                    in_cdata = True\n                elif in_cdata:\n                    self.serializeError(_(\"Unexpected child element of a CDATA element\"))\n                for (attr_namespace, attr_name), attr_value in token[\"data\"].items():\n                    # TODO: Add namespace support here\n                    k = attr_name\n                    v = attr_value\n                    yield self.encodeStrict(' ')\n\n                    yield self.encodeStrict(k)\n                    if not self.minimize_boolean_attributes or \\\n                        (k not in booleanAttributes.get(name, tuple())\n                         and k not in booleanAttributes.get(\"\", tuple())):\n                        yield self.encodeStrict(\"=\")\n                        if self.quote_attr_values or not v:\n                            quote_attr = True\n                        else:\n                            quote_attr = reduce(lambda x, y: x or (y in v),\n                                                spaceCharacters + \">\\\"'=\", False)\n                        v = v.replace(\"&\", \"&amp;\")\n                        if self.escape_lt_in_attrs:\n                            v = v.replace(\"<\", \"&lt;\")\n                        if quote_attr:\n                            quote_char = self.quote_char\n                            if self.use_best_quote_char:\n                                if \"'\" in v and '\"' not in v:\n                                    quote_char = '\"'\n                                elif '\"' in v and \"'\" not in v:\n                                    quote_char = \"'\"\n                            if quote_char == \"'\":\n                                v = v.replace(\"'\", \"&#39;\")\n                            else:\n                                v = v.replace('\"', \"&quot;\")\n                            yield self.encodeStrict(quote_char)\n                            yield self.encode(v)\n                            yield self.encodeStrict(quote_char)\n                        else:\n                            yield self.encode(v)\n                if name in voidElements and self.use_trailing_solidus:\n                    if self.space_before_trailing_solidus:\n                        yield self.encodeStrict(\" /\")\n                    else:\n                        yield self.encodeStrict(\"/\")\n                yield self.encode(\">\")\n\n            elif type == \"EndTag\":\n                name = token[\"name\"]\n                if name in rcdataElements:\n                    in_cdata = False\n                elif in_cdata:\n                    self.serializeError(_(\"Unexpected child element of a CDATA element\"))\n                yield self.encodeStrict(\"</%s>\" % name)\n\n            elif type == \"Comment\":\n                data = token[\"data\"]\n                if data.find(\"--\") >= 0:\n                    self.serializeError(_(\"Comment contains --\"))\n                yield self.encodeStrict(\"<!--%s-->\" % token[\"data\"])\n\n            elif type == \"Entity\":\n                name = token[\"name\"]\n                key = name + \";\"\n                if key not in entities:\n                    self.serializeError(_(\"Entity %s not recognized\" % name))\n                if self.resolve_entities and key not in xmlEntities:\n                    data = entities[key]\n                else:\n                    data = \"&%s;\" % name\n                yield self.encodeStrict(data)\n\n            else:\n                self.serializeError(token[\"data\"])\n\n    def render(self, treewalker, encoding=None):\n        if encoding:\n            return b\"\".join(list(self.serialize(treewalker, encoding)))\n        else:\n            return \"\".join(list(self.serialize(treewalker)))\n\n    def serializeError(self, data=\"XXX ERROR MESSAGE NEEDED\"):\n        # XXX The idea is to make data mandatory.\n        self.errors.append(data)\n        if self.strict:\n            raise SerializeError\n\n\nclass SerializeError(Exception):\n    \"\"\"Error in serialized tree\"\"\"\n    pass\n"},"license":{"kind":"string","value":"mpl-2.0"}}},
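A short, hedged usage sketch for the serializer module above (assumes html5lib and its six dependency are installed; the output in the comment is the expected result for these options):

# Serialize a parsed fragment with the HTMLSerializer defined above.
import html5lib
from html5lib.serializer.htmlserializer import HTMLSerializer

fragment = html5lib.parseFragment('<p class=x>a & b</p>')
walker = html5lib.getTreeWalker('etree')  # adapter yielding token dicts
serializer = HTMLSerializer(quote_attr_values=True, omit_optional_tags=False)
# Expected: <p class="x">a &amp; b</p>
print(serializer.render(walker(fragment)))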
--\"))\n yield self.encodeStrict(\"\" % token[\"data\"])\n\n elif type == \"Entity\":\n name = token[\"name\"]\n key = name + \";\"\n if key not in entities:\n self.serializeError(_(\"Entity %s not recognized\" % name))\n if self.resolve_entities and key not in xmlEntities:\n data = entities[key]\n else:\n data = \"&%s;\" % name\n yield self.encodeStrict(data)\n\n else:\n self.serializeError(token[\"data\"])\n\n def render(self, treewalker, encoding=None):\n if encoding:\n return b\"\".join(list(self.serialize(treewalker, encoding)))\n else:\n return \"\".join(list(self.serialize(treewalker)))\n\n def serializeError(self, data=\"XXX ERROR MESSAGE NEEDED\"):\n # XXX The idea is to make data mandatory.\n self.errors.append(data)\n if self.strict:\n raise SerializeError\n\n\ndef SerializeError(Exception):\n \"\"\"Error in serialized tree\"\"\"\n pass\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":475229,"cells":{"repo_name":{"kind":"string","value":"xifle/home-assistant"},"path":{"kind":"string","value":"homeassistant/components/climate/heatmiser.py"},"copies":{"kind":"string","value":"22"},"size":{"kind":"string","value":"3369"},"content":{"kind":"string","value":"\"\"\"\nSupport for the PRT Heatmiser themostats using the V3 protocol.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/climate.heatmiser/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA\nfrom homeassistant.const import (\n TEMP_CELSIUS, ATTR_TEMPERATURE, CONF_PORT, CONF_NAME, CONF_ID)\nimport homeassistant.helpers.config_validation as cv\n\nREQUIREMENTS = ['heatmiserV3==0.9.1']\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_IPADDRESS = 'ipaddress'\nCONF_TSTATS = 'tstats'\n\nTSTATS_SCHEMA = vol.Schema({\n vol.Required(CONF_ID): cv.string,\n vol.Required(CONF_NAME): cv.string,\n})\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_IPADDRESS): cv.string,\n vol.Required(CONF_PORT): cv.port,\n vol.Required(CONF_TSTATS, default={}):\n vol.Schema({cv.string: TSTATS_SCHEMA}),\n})\n\n\n# pylint: disable=unused-variable\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Setup the heatmiser thermostat.\"\"\"\n from heatmiserV3 import heatmiser, connection\n\n ipaddress = config.get(CONF_IPADDRESS)\n port = str(config.get(CONF_PORT))\n tstats = config.get(CONF_TSTATS)\n\n serport = connection.connection(ipaddress, port)\n serport.open()\n\n for thermostat, tstat in tstats.items():\n add_devices([\n HeatmiserV3Thermostat(\n heatmiser, tstat.get(CONF_ID), tstat.get(CONF_NAME), serport)\n ])\n return\n\n\nclass HeatmiserV3Thermostat(ClimateDevice):\n \"\"\"Representation of a HeatmiserV3 thermostat.\"\"\"\n\n def __init__(self, heatmiser, device, name, serport):\n \"\"\"Initialize the thermostat.\"\"\"\n self.heatmiser = heatmiser\n self.device = device\n self.serport = serport\n self._current_temperature = None\n self._name = name\n self._id = device\n self.dcb = None\n self.update()\n self._target_temperature = int(self.dcb.get('roomset'))\n\n @property\n def name(self):\n \"\"\"Return the name of the thermostat, if any.\"\"\"\n return self._name\n\n @property\n def temperature_unit(self):\n \"\"\"Return the unit of measurement which this thermostat uses.\"\"\"\n return TEMP_CELSIUS\n\n @property\n def current_temperature(self):\n \"\"\"Return the current temperature.\"\"\"\n if self.dcb is not None:\n low = self.dcb.get('floortemplow 
{"rowIdx":475230,"cells":{"repo_name":{"kind":"string","value":"dhruvsrivastava/OJ"},"path":{"kind":"string","value":"flask/lib/python2.7/site-packages/werkzeug/posixemulation.py"},"copies":{"kind":"string","value":"148"},"size":{"kind":"string","value":"3483"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nr\"\"\"\n    werkzeug.posixemulation\n    ~~~~~~~~~~~~~~~~~~~~~~~\n\n    Provides a POSIX emulation for some features that are relevant to\n    web applications.  The main purpose is to simplify support for\n    systems such as Windows NT that are not 100% POSIX compatible.\n\n    Currently this only implements a :func:`rename` function that\n    follows POSIX semantics.  Eg: if the target file already exists it\n    will be replaced without asking.\n\n    This module was introduced in 0.6.1 and is not a public interface.\n    It might become one in later versions of Werkzeug.\n\n    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n    :license: BSD, see LICENSE for more details.\n\"\"\"\nimport sys\nimport os\nimport errno\nimport time\nimport random\nfrom ._compat import to_unicode\n\n\ncan_rename_open_file = False\nif os.name == 'nt':  # pragma: no cover\n    _rename = lambda src, dst: False\n    _rename_atomic = lambda src, dst: False\n\n    try:\n        import ctypes\n\n        _MOVEFILE_REPLACE_EXISTING = 0x1\n        _MOVEFILE_WRITE_THROUGH = 0x8\n        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW\n\n        def _rename(src, dst):\n            src = to_unicode(src, sys.getfilesystemencoding())\n            dst = to_unicode(dst, sys.getfilesystemencoding())\n            if _rename_atomic(src, dst):\n                return True\n            retry = 0\n            rv = False\n            while not rv and retry < 100:\n                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |\n                                 _MOVEFILE_WRITE_THROUGH)\n                if not rv:\n                    time.sleep(0.001)\n                    retry += 1\n            return rv\n\n        # new in Vista and Windows Server 2008\n        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction\n        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction\n        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW\n        _CloseHandle = ctypes.windll.kernel32.CloseHandle\n        can_rename_open_file = True\n\n        def _rename_atomic(src, dst):\n            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')\n            if ta == -1:\n                return False\n            try:\n                retry = 0\n                rv = False\n                while not rv and retry < 100:\n                    rv = _MoveFileTransacted(src, dst, None, None,\n                                             _MOVEFILE_REPLACE_EXISTING |\n                                             _MOVEFILE_WRITE_THROUGH, ta)\n                    if rv:\n                        rv = _CommitTransaction(ta)\n                        break\n                    else:\n                        time.sleep(0.001)\n                        retry += 1\n                return rv\n            finally:\n                _CloseHandle(ta)\n    except Exception:\n        pass\n\n    def rename(src, dst):\n        # Try atomic or pseudo-atomic rename\n        if _rename(src, dst):\n            return\n        # Fall back to \"move away and replace\"\n        try:\n            os.rename(src, dst)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise\n            old = \"%s-%08x\" % (dst, random.randint(0, sys.maxint))\n            os.rename(dst, old)\n            os.rename(src, dst)\n            try:\n                os.unlink(old)\n            except Exception:\n                pass\nelse:\n    rename = os.rename\n    can_rename_open_file = True\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},
{"rowIdx":475231,"cells":{"repo_name":{"kind":"string","value":"vrv/tensorflow"},"path":{"kind":"string","value":"tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py"},"copies":{"kind":"string","value":"59"},"size":{"kind":"string","value":"4537"},"content":{"kind":"string","value":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for distributions KL mechanism.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.distributions import kullback_leibler\nfrom tensorflow.python.ops.distributions import normal\nfrom tensorflow.python.platform import test\n\n# pylint: disable=protected-access\n_DIVERGENCES = kullback_leibler._DIVERGENCES\n_registered_kl = kullback_leibler._registered_kl\n\n# pylint: enable=protected-access\n\n\nclass KLTest(test.TestCase):\n\n  def testRegistration(self):\n\n    class MyDist(normal.Normal):\n      pass\n\n    # Register KL to a lambda that spits out the name parameter\n    @kullback_leibler.RegisterKL(MyDist, MyDist)\n    def _kl(a, b, name=None):  # pylint: disable=unused-argument,unused-variable\n      return name\n\n    a = MyDist(loc=0.0, scale=1.0)\n    self.assertEqual(\"OK\", kullback_leibler.kl_divergence(a, a, name=\"OK\"))\n\n  def testDomainErrorExceptions(self):\n\n    class MyDistException(normal.Normal):\n      pass\n\n    # Register KL to a lambda that spits out the name parameter\n    @kullback_leibler.RegisterKL(MyDistException, MyDistException)\n    # pylint: disable=unused-argument,unused-variable\n    def _kl(a, b, name=None):\n      return array_ops.identity([float(\"nan\")])\n\n    # pylint: enable=unused-argument,unused-variable\n\n    with self.test_session():\n      a = MyDistException(loc=0.0, scale=1.0)\n      kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)\n      with self.assertRaisesOpError(\n          \"KL calculation between .* and .* returned NaN values\"):\n        kl.eval()\n      kl_ok = kullback_leibler.kl_divergence(a, a)\n      self.assertAllEqual([float(\"nan\")], kl_ok.eval())\n\n  def testRegistrationFailures(self):\n\n    class MyDist(normal.Normal):\n      pass\n\n    with self.assertRaisesRegexp(TypeError, \"must be callable\"):\n      kullback_leibler.RegisterKL(MyDist, MyDist)(\"blah\")\n\n    # First registration is OK\n    kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)\n\n    # Second registration fails\n    with self.assertRaisesRegexp(ValueError, \"has already been registered\"):\n      kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)\n\n  def testExactRegistrationsAllMatch(self):\n    for (k, v) in _DIVERGENCES.items():\n      self.assertEqual(v, _registered_kl(*k))\n\n  def testIndirectRegistration(self):\n\n    class Sub1(normal.Normal):\n      pass\n\n    class Sub2(normal.Normal):\n      pass\n\n    class Sub11(Sub1):\n      pass\n\n    # pylint: disable=unused-argument,unused-variable\n    @kullback_leibler.RegisterKL(Sub1, Sub1)\n    def _kl11(a, b, name=None):\n      return \"sub1-1\"\n\n    @kullback_leibler.RegisterKL(Sub1, Sub2)\n    def _kl12(a, b, name=None):\n      return \"sub1-2\"\n\n    @kullback_leibler.RegisterKL(Sub2, Sub1)\n    def _kl21(a, b, name=None):\n      return \"sub2-1\"\n\n    # pylint: enable=unused-argument,unused_variable\n\n    sub1 = Sub1(loc=0.0, scale=1.0)\n    sub2 = Sub2(loc=0.0, scale=1.0)\n    sub11 = Sub11(loc=0.0, scale=1.0)\n\n    self.assertEqual(\"sub1-1\", kullback_leibler.kl_divergence(sub1, sub1))\n    self.assertEqual(\"sub1-2\", kullback_leibler.kl_divergence(sub1, sub2))\n    self.assertEqual(\"sub2-1\", kullback_leibler.kl_divergence(sub2, sub1))\n    self.assertEqual(\"sub1-1\", kullback_leibler.kl_divergence(sub11, sub11))\n    self.assertEqual(\"sub1-1\", kullback_leibler.kl_divergence(sub11, sub1))\n    self.assertEqual(\"sub1-2\", kullback_leibler.kl_divergence(sub11, sub2))\n    self.assertEqual(\"sub1-1\", kullback_leibler.kl_divergence(sub11, sub1))\n    self.assertEqual(\"sub1-2\", kullback_leibler.kl_divergence(sub11, sub2))\n    self.assertEqual(\"sub2-1\", kullback_leibler.kl_divergence(sub2, sub11))\n    self.assertEqual(\"sub1-1\", kullback_leibler.kl_divergence(sub1, sub11))\n\n\nif __name__ == \"__main__\":\n  test.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},
{"rowIdx":475232,"cells":{"repo_name":{"kind":"string","value":"charles-cooper/raiden"},"path":{"kind":"string","value":"raiden/tests/smart_contracts/test_endpointregistry.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"1083"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom ethereum import tester\nfrom raiden.utils import get_contract_path\n\n\ndef test_endpointregistry(tester_state, tester_events):\n    account0 = tester.DEFAULT_ACCOUNT\n    sender = account0.encode('hex')\n\n    endpointregistry_path = get_contract_path('EndpointRegistry.sol')\n    registry_contract = tester_state.abi_contract(\n        None,\n        path=endpointregistry_path,\n        language='solidity',\n        log_listener=tester_events.append,\n    )\n\n    registry_contract.registerEndpoint('127.0.0.1:4001')\n    assert registry_contract.findAddressByEndpoint('127.0.0.1:4001') == sender\n    assert registry_contract.findEndpointByAddress(sender) == '127.0.0.1:4001'\n\n    registry_contract.registerEndpoint('192.168.0.1:4002')\n    assert registry_contract.findAddressByEndpoint('192.168.0.1:4002') == sender\n    assert registry_contract.findEndpointByAddress(sender) == '192.168.0.1:4002'\n\n    assert len(tester_events) == 2\n    assert tester_events[0]['_event_type'] == 'AddressRegistered'\n    assert tester_events[1]['_event_type'] == 'AddressRegistered'\n"},"license":{"kind":"string","value":"mit"}}},
{"rowIdx":475233,"cells":{"repo_name":{"kind":"string","value":"Kongsea/tensorflow"},"path":{"kind":"string","value":"tensorflow/contrib/quantize/python/graph_matcher.py"},"copies":{"kind":"string","value":"17"},"size":{"kind":"string","value":"6445"},"content":{"kind":"string","value":"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities that match patterns in a tf.Graph.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nclass OpTypePattern(object):\n  \"\"\"A tree pattern that matches TF expressions with certain op types.\"\"\"\n\n  def __init__(self, op_type, name=None, inputs=None):\n    \"\"\"Initializes an OpTypePattern.\n\n    Args:\n      op_type: string that specifies the allowed types of the root. It can be\n        (1) an op type, e.g. 'Conv2D',\n        (2) '*', i.e. wildcard, or\n        (3) multiple op types separated by '|', e.g., 'Relu|Relu6'.\n        We could use regex strings, which might be worthwhile when we have many\n        similar TF op types.\n      name: Optional string. The name of the pattern that can be looked up in\n        MatchResult.\n      inputs: Optional list of `OpTypePattern`s or strings that specify the\n        patterns for the inputs of a matching op. If None, this pattern accepts\n        any inputs of a matching op.\n    \"\"\"\n    self._op_type = op_type\n    self._name = name\n    if inputs is None:\n      inputs = []\n    self._inputs = [\n        input_pattern if isinstance(input_pattern, OpTypePattern) else\n        OpTypePattern(input_pattern) for input_pattern in inputs\n    ]\n\n  @property\n  def op_type(self):\n    return self._op_type\n\n  @property\n  def inputs(self):\n    return self._inputs\n\n  @property\n  def name(self):\n    return self._name\n\n\nclass MatchResult(object):\n  r\"\"\"Encapsulates the result of a match done by GraphMatcher.\n\n  MatchResult contains a map from OpTypePattern to the matching op and tensor.\n  When the matching op has multiple output tensors, the matching tensor is the\n  output tensor used by the matching op of the parent pattern. E.g., when we\n  match graph\n\n      -   +\n     / \\y0 y1/ \\\n    x    split    z\n          |\n          y (nodes are ops; edges are going up)\n\n  against add_pattern defined as\n\n    y1_pattern = OpTypePattern('*')\n    z_pattern = OpTypePattern('*')\n    add_pattern = OpTypePattern('+', inputs=[y1_pattern, z_pattern])\n\n  the matching op of `y1_pattern` is `split`, and the matching tensor of\n  `y1_pattern`\n  is `y1` not `y0`.\n  \"\"\"\n\n  def __init__(self):\n    self._pattern_to_op_tensor = {}\n    self._name_to_pattern = {}\n\n  def add(self, pattern, op, tensor):\n    self._pattern_to_op_tensor[pattern] = op, tensor\n    if pattern.name is not None:\n      if pattern.name in self._name_to_pattern:\n        raise ValueError(\n            'Name %s is already bound to another pattern' % pattern.name)\n      self._name_to_pattern[pattern.name] = pattern\n\n  def _to_pattern(self, pattern_or_name):\n    if isinstance(pattern_or_name, OpTypePattern):\n      return pattern_or_name\n\n    if isinstance(pattern_or_name, str):\n      return self._name_to_pattern[pattern_or_name]\n\n    raise ValueError('pattern_or_name has type %s. Expect OpTypePattern or str.'\n                     % type(pattern_or_name))\n\n  def get_op(self, pattern_or_name):\n    return self._pattern_to_op_tensor[self._to_pattern(pattern_or_name)][0]\n\n  def get_tensor(self, pattern_or_name):\n    return self._pattern_to_op_tensor[self._to_pattern(pattern_or_name)][1]\n\n\nclass GraphMatcher(object):\n  \"\"\"Checks if a particular subgraph matches a given pattern.\"\"\"\n\n  def __init__(self, pattern):\n    \"\"\"Initializes a GraphMatcher.\n\n    Args:\n      pattern: The `OpTypePattern` against which `GraphMatcher` matches\n        subgraphs.\n    \"\"\"\n    self._pattern = pattern\n\n  def _match_pattern(self, pattern, op, tensor):\n    \"\"\"Returns whether an TF expression rooted at `op` matches `pattern`.\n\n    If there is a match, adds to `self._match_result` the matching op and tensor\n    with key `pattern`.\n\n    Args:\n      pattern: An `OpTypePattern`.\n      op: A `tf.Operation` to match against the pattern.\n      tensor: the output `tf.Tensor` of `op` that is used by the matching op of\n        `pattern`'s parent. Can be None if `pattern` is already the root of the\n        pattern tree.\n\n    Returns:\n      True if an TF expression rooted at `op` matches `pattern`.\n    \"\"\"\n    if pattern.op_type != '*':\n      if op.type not in pattern.op_type.split('|'):\n        return False\n\n    self._match_result.add(pattern, op, tensor)\n\n    if not pattern.inputs:\n      # If pattern.inputs is empty, skips the rest and accepts all the inputs.\n      return True\n\n    return len(op.inputs) == len(pattern.inputs) and all([\n        self._match_pattern(input_pattern, input_tensor.op, input_tensor)\n        for input_tensor, input_pattern in zip(op.inputs, pattern.inputs)\n    ])\n\n  def match_op(self, op):\n    \"\"\"Matches `op` against `self._pattern`.\n\n    Args:\n      op: `tf.Operation` to match against the pattern.\n\n    Returns:\n      Returns a `MatchResult` if `op` matches the pattern; otherwise, returns\n      None.\n    \"\"\"\n    self._match_result = MatchResult()\n    if not self._match_pattern(self._pattern, op, tensor=None):\n      return None\n    return self._match_result\n\n  def match_ops(self, ops):\n    \"\"\"Matches each operation in `ops` against `self._pattern`.\n\n    Args:\n      ops: collection of `tf.Operation` to match against the pattern.\n\n    Yields:\n      `MatchResult` for each `tf.Operation` that matches the pattern.\n    \"\"\"\n    for op in ops:\n      match_result = self.match_op(op)\n      if match_result:\n        yield match_result\n\n  def match_graph(self, graph):\n    \"\"\"Matches each operation in `graph` against `self._pattern`.\n\n    Args:\n      graph: `tf.Graph` containing operations to match.\n\n    Yields:\n      `MatchResult` for each `tf.Operation` in `graph` that matches the pattern.\n    \"\"\"\n    # Python 3.3.2+ implements `yield from`, but for now:\n    for match_result in self.match_ops(graph.get_operations()):\n      yield match_result\n"},"license":{"kind":"string","value":"apache-2.0"}}},
{"rowIdx":475234,"cells":{"repo_name":{"kind":"string","value":"huawei-cloud/compass"},"path":{"kind":"string","value":"compass/tests/config_management/utils/test_config_reference.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"5733"},"content":{"kind":"string","value":"import unittest2\nfrom copy import deepcopy\n\nfrom compass.utils import util\nfrom compass.config_management.utils import config_reference\n\n\nclass TestConfigReference(unittest2.TestCase):\n    def test_init(self):\n        config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        config2 = {'5': {'6': 6}}\n        ref2 = config_reference.ConfigReference(config2['5'], ref, '5')\n        expected_config = deepcopy(config)\n        util.merge_dict(expected_config, config2)\n        self.assertEqual(ref.config, expected_config)\n        self.assertEqual(id(ref.config['5']), id(ref2.config))\n        config3 = {'5': {'7': 7}}\n        ref3 = config_reference.ConfigReference(config3['5'], ref, '5')\n        self.assertEqual(id(ref.config['5']), id(ref3.config))\n\n    def test_ref(self):\n        config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        self.assertRaises(KeyError, ref.ref, '')\n        self.assertRaises(KeyError, ref.ref, '/1/2/4')\n        self.assertEqual(ref.ref('.').config, config)\n        self.assertEqual(ref.ref('..').config, config)\n        self.assertEqual(ref.ref('/').config, config)\n        self.assertEqual(ref.ref('1').config, config['1'])\n        self.assertEqual(ref.ref('1/2').config, config['1']['2'])\n        self.assertEqual(ref.ref('1/2/.').config, config['1']['2'])\n        self.assertEqual(ref.ref('1/2/..').config, config['1'])\n        self.assertEqual(ref.ref('1/2//').config, config['1']['2'])\n        self.assertEqual(ref.ref('/1').config, config['1'])\n        self.assertEqual(ref.ref('/1/2').config, config['1']['2'])\n        subref = ref.ref('1')\n        self.assertEqual(subref.ref('2').config, config['1']['2'])\n        self.assertEqual(subref.ref('2/..').config, config['1'])\n        self.assertEqual(subref.ref('..').config, config)\n        self.assertEqual(subref.ref('../..').config, config)\n        self.assertEqual(subref.ref('/').config, config)\n        self.assertEqual(subref.ref('/4').config, config['4'])\n        self.assertRaises(KeyError, subref.ref, '/4/5')\n        self.assertRaises(KeyError, subref.ref, '/9')\n        subref2 = ref.ref('9', True)\n        self.assertEqual(ref.ref('9'), subref2)\n\n    def test_refs(self):\n        config = {'1': {'2': 3, '10': {}}, '4': [5, 6, 7], '8': 8, '88': 88}\n        ref = config_reference.ConfigReference(config)\n        refkeys = ref.ref_keys('1')\n        self.assertEqual(set(refkeys), set(['1']))\n        refkeys = ref.ref_keys('/1/*')\n        self.assertEqual(set(refkeys), set(['/1/2', '/1/10']))\n        refkeys = ref.ref_keys('*')\n        self.assertEqual(set(refkeys), set(['1', '4', '8', '88']))\n        refkeys = ref.ref_keys('8*')\n        self.assertEqual(set(refkeys), set(['8', '88']))\n        self.assertRaises(KeyError, ref.ref_keys, '')\n\n    def test_contains(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        self.assertIn('/1/2', ref)\n        self.assertIn('1/10/', ref)\n        self.assertIn('4/', ref)\n        self.assertIn('/1/2/..', ref)\n        self.assertNotIn('/1/3/7', ref)\n        self.assertNotIn('/1/2/3/..', ref)\n\n    def test_setitem(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        ref['/1/2'] = '6'\n        self.assertEqual(config['1']['2'], '6')\n        self.assertEqual(ref['/1/2'], '6')\n        ref['1/10/5'] = 7\n        self.assertEqual(config['1']['10']['5'], 7)\n        self.assertEqual(ref['1/10/5'], 7)\n        ref['3/6/8'] = [1, 3, 5]\n        self.assertEqual(config['3']['6']['8'], [1, 3, 5])\n        self.assertEqual(ref['3/6/8'], [1, 3, 5])\n\n    def test_del(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        del ref['/8']\n        self.assertNotIn('8', config)\n        del ref['1/2']\n        self.assertNotIn('2', config['1'])\n        del ref['1']\n        self.assertNotIn('1', config)\n        self.assertRaises(KeyError, ref.__delitem__, '9')\n\n    def test_get(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        self.assertEqual(ref.get('1/2'), config['1']['2'])\n        self.assertIsNone(ref.get('1/3'))\n        self.assertEqual(ref.get('1/3', 3), 3)\n        self.assertNotIn('3', config['1'])\n\n    def test_setdefault(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        self.assertEqual(ref.setdefault('1/2').config, config['1']['2'])\n        self.assertIsNone(ref.setdefault('1/3').config)\n        self.assertEqual(ref.setdefault('1/4', 4).config, 4)\n        self.assertEqual(4, config['1']['4'])\n\n    def test_update(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        expected_config = deepcopy(config)\n\n        ref = config_reference.ConfigReference(config)\n        config2 = {'9': 9, '10': {'10': 10}}\n        util.merge_dict(expected_config, config2)\n        ref.update(config2)\n        self.assertEqual(ref.config, expected_config)\n        ref.update(10, False)\n        self.assertEqual(ref.config, expected_config)\n        ref.update(10)\n        self.assertEqual(ref.config, 10)\n\n    def test_iter(self):\n        config = {'1': {'2': '3', '10': {}}, '4': [5, 6, 7], '8': 8}\n        ref = config_reference.ConfigReference(config)\n        keys = ref.keys()\n        self.assertEqual(set(keys), set(['1', '1/2', '1/10', '4', '8']))\n\n\nif __name__ == '__main__':\n    unittest2.main()\n"},"license":{"kind":"string","value":"apache-2.0"}}},
{"rowIdx":475235,"cells":{"repo_name":{"kind":"string","value":"ilexius/odoo"},"path":{"kind":"string","value":"addons/lunch/tests/test_lunch.py"},"copies":{"kind":"string","value":"47"},"size":{"kind":"string","value":"3259"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom openerp.tests import common\n\n\nclass Test_Lunch(common.TransactionCase):\n\n    def setUp(self):\n        \"\"\"*****setUp*****\"\"\"\n        super(Test_Lunch, self).setUp()\n\n        self.demo_user = self.env['res.users'].search([('name', '=', 'Demo User')])\n        self.product_bolognese_ref = self.env['ir.model.data'].get_object_reference('lunch', 'product_Bolognese')\n        self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False\n        self.new_id_order = self.env['lunch.order'].create({\n            'user_id': self.demo_user.id,\n            'order_line_ids': '[]',\n        })\n        self.new_id_order_line = self.env['lunch.order.line'].create({\n            'order_id': self.new_id_order.id,\n            'product_id': self.product_Bolognese_id,\n            'note': '+Emmental',\n            'cashmove': [],\n            'price': self.env['lunch.product'].browse(self.product_Bolognese_id).price,\n        })\n\n    def test_00_lunch_order(self):\n        \"\"\"Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line\"\"\"\n        self.order_one = self.new_id_order_line\n        #we check that our order_line is a 'new' one and that there are no cashmove linked to that order_line:\n        self.assertEqual(self.order_one.state, 'new')\n        self.assertEqual(list(self.order_one.cashmove), [])\n        #we order that orderline so it's state will be 'ordered'\n        self.order_one.order()\n        self.order_one = self.new_id_order_line\n        #we check that our order_line is a 'ordered' one and that there are no cashmove linked to that order_line:\n        self.assertEqual(self.order_one.state, 'ordered')\n        self.assertEqual(list(self.order_one.cashmove), [])\n\n    def test_01_lunch_order(self):\n        \"\"\"Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line\"\"\"\n        self.test_00_lunch_order()\n        #We receive the order so we confirm the order line so it's state will be 'confirmed'\n        #A cashmove will be created and we will test that the cashmove amount equals the order line price\n        self.order_one.confirm()\n        self.order_one = self.new_id_order_line\n        #we check that our order_line is a 'confirmed' one and that there are a cashmove linked to that order_line with an amount equals to the order line price:\n        self.assertEqual(self.order_one.state, 'confirmed')\n        self.assertTrue(self.order_one.cashmove)\n        self.assertTrue(self.order_one.cashmove[0].amount == -self.order_one.price)\n\n    def test_02_lunch_order(self):\n        \"\"\"Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted\"\"\"\n        self.test_01_lunch_order()\n        #We have a confirmed order with its associate cashmove\n        #We execute the cancel function\n        self.order_one.cancel()\n        self.order_one = self.new_id_order_line\n        #We check that the state is cancelled and that the cashmove has been deleted\n        self.assertEqual(self.order_one.state, 'cancelled')\n        self.assertFalse(self.order_one.cashmove)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},
{"rowIdx":475236,"cells":{"repo_name":{"kind":"string","value":"codecollision/DropboxToFlickr"},"path":{"kind":"string","value":"django/template/smartif.py"},"copies":{"kind":"string","value":"331"},"size":{"kind":"string","value":"6261"},"content":{"kind":"string","value":"\"\"\"\nParser and utilities for the smart 'if' tag\n\"\"\"\nimport operator\n\n# Using a simple top down parser, as described here:\n#    http://effbot.org/zone/simple-top-down-parsing.htm.\n# 'led' = left denotation\n# 'nud' = null denotation\n# 'bp' = binding power (left = lbp, right = rbp)\n\nclass TokenBase(object):\n    \"\"\"\n    Base class for operators and literals, mainly for debugging and for throwing\n    syntax errors.\n    \"\"\"\n    id = None  # node/token type name\n    value = None  # used by literals\n    first = second = None  # used by tree nodes\n\n    def nud(self, parser):\n        # Null denotation - called in prefix context\n        raise parser.error_class(\n            \"Not expecting '%s' in this position in if tag.\" % self.id\n        )\n\n    def led(self, left, parser):\n        # Left denotation - called in infix context\n        raise parser.error_class(\n            \"Not expecting '%s' as infix operator in if tag.\" % self.id\n        )\n\n    def display(self):\n        \"\"\"\n        Returns what to display in error messages for this node\n        \"\"\"\n        return self.id\n\n    def __repr__(self):\n        out = [str(x) for x in [self.id, self.first, self.second] if x is not None]\n        return \"(\" + \" \".join(out) + \")\"\n\n\ndef infix(bp, func):\n    \"\"\"\n    Creates an infix operator, given a binding power and a function that\n    evaluates the node\n    \"\"\"\n    class Operator(TokenBase):\n        lbp = bp\n\n        def led(self, left, parser):\n            self.first = left\n            self.second = parser.expression(bp)\n            return self\n\n        def eval(self, context):\n            try:\n                return func(context, self.first, self.second)\n            except Exception:\n                # Templates shouldn't throw exceptions when rendering.  We are\n                # most likely to get exceptions for things like {% if foo in bar\n                # %} where 'bar' does not support 'in', so default to False\n                return False\n\n    return Operator\n\n\ndef prefix(bp, func):\n    \"\"\"\n    Creates a prefix operator, given a binding power and a function that\n    evaluates the node.\n    \"\"\"\n    class Operator(TokenBase):\n        lbp = bp\n\n        def nud(self, parser):\n            self.first = parser.expression(bp)\n            self.second = None\n            return self\n\n        def eval(self, context):\n            try:\n                return func(context, self.first)\n            except Exception:\n                return False\n\n    return Operator\n\n\n# Operator precedence follows Python.\n# NB - we can get slightly more accurate syntax error messages by not using the\n# same object for '==' and '='.\n# We defer variable evaluation to the lambda to ensure that terms are\n# lazily evaluated using Python's boolean parsing logic.\nOPERATORS = {\n    'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),\n    'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),\n    'not': prefix(8, lambda context, x: not x.eval(context)),\n    'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),\n    'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),\n    '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),\n    '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),\n    '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),\n    '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),\n    '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),\n    '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),\n    '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),\n}\n\n# Assign 'id' to each:\nfor key, op in OPERATORS.items():\n    op.id = key\n\n\nclass Literal(TokenBase):\n    \"\"\"\n    A basic self-resolvable object similar to a Django template variable.\n    \"\"\"\n    # IfParser uses Literal in create_var, but TemplateIfParser overrides\n    # create_var so that a proper implementation that actually resolves\n    # variables, filters etc is used.\n    id = \"literal\"\n    lbp = 0\n\n    def __init__(self, value):\n        self.value = value\n\n    def display(self):\n        return repr(self.value)\n\n    def nud(self, parser):\n        return self\n\n    def eval(self, context):\n        return self.value\n\n    def __repr__(self):\n        return \"(%s %r)\" % (self.id, self.value)\n\n\nclass EndToken(TokenBase):\n    lbp = 0\n\n    def nud(self, parser):\n        raise parser.error_class(\"Unexpected end of expression in if tag.\")\n\nEndToken = EndToken()\n\n\nclass IfParser(object):\n    error_class = ValueError\n\n    def __init__(self, tokens):\n        # pre-pass necessary to turn 'not','in' into single token\n        l = len(tokens)\n        mapped_tokens = []\n        i = 0\n        while i < l:\n            token = tokens[i]\n            if token == \"not\" and i + 1 < l and tokens[i+1] == \"in\":\n                token = \"not in\"\n                i += 1  # skip 'in'\n            mapped_tokens.append(self.translate_token(token))\n            i += 1\n\n        self.tokens = mapped_tokens\n        self.pos = 0\n        self.current_token = self.next()\n\n    def translate_token(self, token):\n        try:\n            op = OPERATORS[token]\n        except (KeyError, TypeError):\n            return self.create_var(token)\n        else:\n            return op()\n\n    def next(self):\n        if self.pos >= len(self.tokens):\n            return EndToken\n        else:\n            retval = self.tokens[self.pos]\n            self.pos += 1\n            return retval\n\n    def parse(self):\n        retval = self.expression()\n        # Check that we have exhausted all the tokens\n        if self.current_token is not EndToken:\n            raise self.error_class(\"Unused '%s' at end of if expression.\" %\n                                   self.current_token.display())\n        return retval\n\n    def expression(self, rbp=0):\n        t = self.current_token\n        self.current_token = self.next()\n        left = t.nud(self)\n        while rbp < self.current_token.lbp:\n            t = self.current_token\n            self.current_token = self.next()\n            left = t.led(left, self)\n        return left\n\n    def create_var(self, value):\n        return Literal(value)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},
{"rowIdx":475237,"cells":{"repo_name":{"kind":"string","value":"patmcb/odoo"},"path":{"kind":"string","value":"addons/account/wizard/account_move_line_unreconcile_select.py"},"copies":{"kind":"string","value":"385"},"size":{"kind":"string","value":"1864"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    OpenERP, Open Source Management Solution\n#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU Affero General Public License as\n#    published by the Free Software Foundation, either version 3 of the\n#    License, or (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n#    GNU Affero General Public License for more details.\n#\n#    You should have received a copy of the GNU Affero General Public License\n#    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\nclass account_move_line_unreconcile_select(osv.osv_memory):\n    _name = \"account.move.line.unreconcile.select\"\n    _description = \"Unreconciliation\"\n    _columns ={\n        'account_id': fields.many2one('account.account','Account',required=True),\n    }\n    def action_open_window(self, cr, uid, ids, context=None):\n        data = self.read(cr, uid, ids, context=context)[0]\n        return {\n            'domain': \"[('account_id','=',%d),('reconcile_id','<>',False),('state','<>','draft')]\" % data['account_id'],\n            'name': 'Unreconciliation',\n            'view_type': 'form',\n            'view_mode': 'tree,form',\n            'view_id': False,\n            'res_model': 'account.move.line',\n            'type': 'ir.actions.act_window'\n        }\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},
{"rowIdx":475238,"cells":{"repo_name":{"kind":"string","value":"maellak/invenio"},"path":{"kind":"string","value":"modules/webauthorprofile/lib/webauthorprofile_cli.py"},"copies":{"kind":"string","value":"18"},"size":{"kind":"string","value":"1182"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##\n## This file is part of Invenio.\n## Copyright (C) 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"\nWebAuthorProfile command-line interface\n\"\"\"\n\nfrom invenio import bibauthorid_config as bconfig\n\n\ndef main():\n    \"\"\" Main function \"\"\"\n    try:\n        from invenio import webauthorprofile_daemon as daemon\n    except ImportError:\n        bconfig.LOGGER.error(\"Hmm...No Daemon process running.\")\n        return\n\n    daemon.webauthorprofile_daemon()\n\n\nif __name__ == '__main__':\n    main()\n"},"license":{"kind":"string","value":"gpl-2.0"}}},
{"rowIdx":475239,"cells":{"repo_name":{"kind":"string","value":"josefgrosch/Pyetree"},"path":{"kind":"string","value":"pyetree/PyetreeBandnames.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"28154"},"content":{"kind":"string","value":"# -----------------------------------------------------------------------\n#\n#                      < pyetree_bandnames.py >\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# File Name    : pyetree_bandnames.py\n#\n# Author       : Josef Grosch\n#\n# Date         : 15 October 2015\n#\n# Modification : Some\n#\n# Application  :\n#\n# Description  :\n#\n# Notes        :\n#\n# Functions    :\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# Copyright\n#\n#    Copyright (c) 2015 - 2016 Moose River LLC.\n#    < jgrosch@gmail.com >\n#\n#    All Rights Reserved\n#\n#    Deadicated to my brother Jerry Garcia,\n#    who passed from this life on August 9, 1995.\n#    Happy trails to you, Jerry\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# License\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\");\n#    you may not use this file except in compliance with the License.\n#    You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS,\n#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#    See the License for the specific language governing permissions and\n#    limitations under the License.\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# GPG Key\n#\n# pub   4096R/2C38BBFA 2016-05-21 [expires: 2017-05-21]\n#       Key fingerprint = A855 3BF0 544B 3B4E F06D 2B90 8DDC FDDA 2C38 BBFA\n# uid                  Josef Grosch <jgrosch@gmail.com>\n# sub   4096R/CC2D1F80 2016-05-21 [expires: 2017-05-21]\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# Contact Information\n#\n#    Moose River LLC.\n#    P.O. Box 9403\n#    Berkeley, Ca. 94709\n#\n#    http://www.mooseriver.com\n#\n# -----------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------\n#\n# Import\n#\n# -----------------------------------------------------------------------\nimport sys\nimport os\n\n\"\"\"\n\"\"\"\n\n#--start constants--\n\n__author__ = \"Josef Grosch\"\n__copyright__ = \"Copyright 2015 - 2016 Moose River, LLC.\"\n__license__ = \"Apache License v2.0\"\n__version__ = \"0.1\"\n__maintainer__ = \"Josef Grosch\"\n__email__ = \"jgrosch@gmail.com\"\n__status__ = \"Development\"\n\n#--end constants--\n\n\n# -----------------------------------------------------------------------\n#\n# Class pyetree_bandnames\n#\n# -----------------------------------------------------------------------\nclass pyetree_bandnames:\n    OddBandNames = {}\n    BandNames = {}\n    BandUrl = {}\n    BandDB = {}\n\n    OddBandNames = {'jm3' : 'John Mayer Trio',\n                    'jmayer3' : 'John Mayer Trio',\n                    'kk' : 'Kai Kln',\n                    'kk' : 'Kudzu Kings',\n                    'rr' : 'Robert Randolph & the Family Band',\n                    'rr' : 'Rusted Root',\n                    'sts9' : 'Sound Tribe Sector 9',\n                    'sy' : 'Seth Yacovone Band',\n                    'sy' : 'Sonic Youth',\n                    'willyp' : 'Willy Porter Band',\n                    'willyp' : 'Willy Porter'\n                    }\n\n    BandNames = {'3ah' : '3 Apples High',\n                 'D&T' : 'Dave Matthews & Tim Reynolds',\n                 'DM' : 'Dave Matthews (solo)',\n                 'FB' : 'Les Claypool\\'s Fearless Flying Frog Brigade',\n                 'Garcia' : 'Jerry Garcia',\n                 'abb' : 'Allman Brothers Band',\n                 'ah&f' : 'Alan Hertz and Friends',\n                 'ahf' : 'Alan Hertz and Friends',\n                 'ahp' : 'Alan Hertz and Friends',\n                 'amendola' : 'Scott Amendola Band',\n                 'amf' : 'Amfibian',\n                 'bc' : 'Black Crowes',\n                 'bcue' : 'Barbara Cue',\n                 'be' : 'Bockman\\'s Euphio',\n                 'beanland' : 'Beanlan',\n                 'bfft' : 'Bela Fleck & the Flecktones',\n                 'bfm' : 'Barefoot Manner',\n                 'bftt' : 'Bela Fleck & Tony Trischka',\n                 'bgug' : 'Blueground Undergrass',\n                 'bh' : 'Ben Harper',\n                 'bh' : 'Bruce Hornsby',\n                 'bhic' : 'Ben Harper and the Innocent Criminals',\n                 'bij' : 'Big In Japan',\n                 'bmelon' : 'Blind Melon',\n                 'bnb' : 'Burt Neilson Band',\n                 'bog' : 'A Band of Gypsys',\n                 'bp' : 'Brothers Past',\n                 'bruce' : 'Bruce Hornsby',\n                 'bs' : 'Big Smith',\n                 'bspear' : 'Burning Spear',\n                 'bt' : 'Blues Traveler',\n                 'bts' : 'Built to Spill',\n                 'buho' : 'El Buho',\n                 'cb' : 'Critters Buggin',\n                 'cbobb' : 'Col. Claypool\\'s Bucket of Bernie Brains',\n                 'cc' : 'Counting Crows',\n                 'ccity' : 'Cerulean City',\n                 'ch' : 'Charlie Hunter',\n                 'cj' : 'Cowboy Junkies',\n                 'claypool' : 'Les Claypool\\'s Fearless Flying Frog Brigade',\n                 'cracker' : 'Cracker',\n                 'crowes' : 'Black Crowes',\n                 'cvb' : 'Camper van Beethoven',\n                 'db' : 'Disco Biscuits',\n                 'dbb' : 'Deep Banana Blackout',\n                 'dbr' : 'Day by the River',\n                 'dbt' : 'Drive-By Truckers',\n                 'ddbb' : 'Dirty Dozen Brass Band',\n                 'dead' : 'The Dead',\n                 'dgray' : 'David Gray',\n                 'disp' : 'Dispatch',\n                 'djlogic' : 'DJ Logic',\n                 'dmb' : 'Dave Matthews Band',\n                 'dnb' : 'David Nelson Band',\n                 'dso' : 'Dark Star Orchestra',\n                 'dt' : 'Derek Trucks Band',\n                 'dtb' : 'Donna the Buffalo',\n                 'eb' : 'Edie Brickell',\n                 'eh' : 'Ekoostik Hookah',\n                 'farrah' : 'Farrah',\n                 'fffb' : 'Les Claypool\\'s Fearless Flying Frog Brigade',\n                 'fhg' : 'Fareed Haque Group',\n                 'gal' : 'Galactic',\n                 'garaj' : 'Garaj Mahal',\n                 'gba' : 'GreyBoy AllStars',\n                 'gd' : 'Grateful Dead',\n                 'glove' : 'G. Love and Special Sauce',\n                 'gm' : 'Gov\\'t Mule',\n                 'gsw' : 'God Street Wine',\n                 'gt' : 'Ghost Trane',\n                 'gtb' : 'Grand Theft Bus',\n                 'gus' : 'Guster',\n                 'guster' : 'Guster',\n                 'guymalone' : 'Guy Malone',\n                 'hday' : 'Howie Day',\n                 'hip' : 'The Tragically Hip',\n                 'ho' : 'Schleigho',\n                 'ht' : 'Hot Tuna',\n                 'ig' : 'Indigo Girls',\n                 'itf' : 'Indiana Trip Factory',\n                 'jg' : 'Jerry Garcia',\n                 'jgb' : 'Jerry Garcia Band',\n                 'jh' : 'Jimi Hendrix Experience',\n                 'jhbg' : 'A Band of Gypsys',\n                 'jhe' : 'Jimi Hendrix Experience',\n                 'jj' : 'Jack Johnson',\n                 'jjj' : 'Jerry Joseph & The Jackmormons',\n                 'jk' : 'Jorma Kaukonen',\n                 'jmayer' : 'John Mayer',\n                 'jmos' : 'Jerry Joseph & The Jackmormons',\n                 'jmp' : 'Jazz Mandolin Project',\n                 'jol' : 'Jolene',\n                 'jt' : 'Jeff Tweedy',\n                 'kaikln' : 'Kai Kln',\n                 'kdcw' : 'Karl Denson and Chris Wood',\n                 'kdtu' : 'Karl Denson\\'s Tiny Universe',\n                 'kdub' : 'Keller Williams',\n                 'kmck' : 'Steve Kimock & Friends',\n                 'kudzu' : 'Kudzu Kings',\n                 'kvhw' : 'KVHW',\n                 'kw' : 'Keller Williams',\n                 'kwi' : 'Keller Williams String Cheese Incident',\n                 'laf' : 'Life After Failing',\n                 'lcbbb' : 'Col. Claypool\\'s Bucket of Bernie Brains',\n                 'lcbobb' : 'Col. Claypool\\'s Bucket of Bernie Brains',\n                 'lcfffb' : 'Les Claypool\\'s Fearless Flying Frog Brigade',\n                 'ld' : 'Living Daylights',\n                 'lf' : 'Little Feat',\n                 'lfb' : 'Lo Faber Band',\n                 'logic' : 'DJ Logic',\n                 'lom' : 'Legion of Mary',\n                 'los' : 'Leftover Salmon',\n                 'ls' : 'Leftover Salmon',\n                 'lt' : 'Lake Trout',\n                 'mammals' : 'The Mammals',\n                 'marlow' : 'Marlow',\n                 'mcrx' : 'Mike Clark\\'s Prescription Renewal',\n                 'mel' : 'Marcus Eaton & The Lobby',\n                 'metheny' : 'Pat Metheny',\n                 'mf' : 'Marc Ford',\n                 'mfs' : 'Michael Franti & Spearhead',\n                 'mmw' : 'Medeski Martin & Wood',\n                 'moe' : 'moe.',\n                 'mule' : 'Gov\\'t Mule',\n                 'nd' : 'The New Deal',\n                 'nmas' : 'North Mississippi Allstars',\n                 'oar' : 'O.A.R. (Of A Revolution)',\n                 'oh' : 'Oysterhead',\n                 'or' : 'Oregon',\n                 'oregon' : 'Oregon',\n                 'oysterhead' : 'Oysterhead',\n                 'p&f' : 'Phil Lesh & Friends',\n                 'p+f' : 'Phil Lesh & Friends',\n                 'paf' : 'Phil Lesh & Friends',\n                 'par' : 'Particle',\n                 'particle' : 'Particle',\n                 'pb' : 'Psychedelic Breakfast',\n                 'pf' : 'Phil Lesh & Friends',\n                 'ph' : 'Phish',\n                 'phil' : 'Phil Lesh & Friends',\n                 'pj' : 'Pearl Jam',\n                 'pm' : 'Pat Metheny',\n                 'pmb' : 'Pat McGee Band',\n                 'pmg' : 'Pat Metheny Group',\n                 'pmt' : 'Pat Metheny Trio',\n                 'pnf' : 'Phil Lesh & Friends',\n                 'porter' : 'George Porter, Jr. & Runnin\\' Pardners',\n                 'rad' : 'Radiators',\n                 'rads' : 'Radiators',\n                 'raq' : 'Raq',\n                 'ratdog' : 'Ratdog',\n                 'rd' : 'Ratdog',\n                 'reservoir' : 'Reservoir',\n                 'rezi' : 'Rezi',\n                 'rg' : 'Reid Genauer',\n                 'rre' : 'Railroad Earth',\n                 'rrfb' : 'Robert Randolph & the Family Band',\n                 'rw2c' : 'Robert Walter\\'s 20th Congress',\n                 'rwtc' : 'Robert Walter\\'s 20th Congress',\n                 'schas' : 'Santa Cruz Hemp Allstars',\n                 'schwillb' : 'The Schwillbillies',\n                 'sci' : 'String Cheese Incident',\n                 'sco' : 'John Scofield',\n                 'sexmob' : 'Sex Mob',\n                 'sf' : 'Strangefolk',\n                 'sk' : 'Kimock',\n                 'skb' : 'Steve Kimock Band',\n                 'sl' : 'The Slip',\n                 'slip' : 'The Slip',\n                 'sonicyouth' : 'Sonic Youth',\n                 'soulive' : 'Soulive',\n                 'spin' : 'Spin Doctors',\n                 'spod' : 'Serial Pod',\n                 'spr' : 'Michael Franti & Spearhead',\n                 'ss' : 'Stockholm Syndrome',\n                 'st' : 'Shaking Tree',\n                 'stocksyn' : 'Stockholm Syndrome',\n                 'syb' : 'Seth Yacovone Band',\n                 'tab' : 'Trey Anastasio (Band)',\n                 'td' : 'The Dead',\n                 'tend' : 'Tenacious D',\n                 'tlg' : 'Tea Leaf Green',\n                 'tn' : 'The Nadas',\n                 'tnd' : 'The New Deal',\n                 'too' : 'The Other Ones',\n                 'tortoise' : 'Tortoise',\n                 'tr' : 'Tim Reynolds',\n                 'trey' : 'Trey Anastasio (Band)',\n                 'um' : 'Umphrey\\'s McGee',\n                 'umph' : 'Umphrey\\'s McGee',\n                 'us' : 'Uncle Sammy',\n                 'vb' : 'Vida Blue',\n                 'wb4t' : 'Will Bernard 4tet',\n                 'ween' : 'Ween',\n                 'wh' : 'Warren Haynes',\n                 'wilco' : 'Wilco',\n                 'word' : 'The Word',\n                 'wp' : 'Widespread Panic',\n                 'wsp' : 'Widespread Panic',\n                 'wu' : 'The Big Wu',\n                 'ymsb' : 'Yonder Mountain String Band',\n                 'zero' : 'Zero',\n                 'zm' : 'Zony Mash',\n                 'zwan' : 'Zwan'\n                 }\n\n    BandUrl = {'3ah' : 'www.myspace.com/3appleshighmusic', # '3 Apples High',\n               'D&T' : 'www.davematthewsband.com', # 'Dave Matthews & Tim Reynolds',\n               'DM' : 'www.davematthewsband.com', # 'Dave Matthews (solo)',\n               'FB' : 'www.lesclaypool.com', # 'Les Claypool\\'s Fearless Flying Frog Brigade',\n               'Garcia' : 'www.dead.net', # 'Jerry Garcia',\n               'abb' : 'www.allmanbrothersband.com', # 'Allman Brothers Band',\n               'ah&f' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',\n               'ahf' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',\n               'ahp' : 'www.garajmahaljams.com', # 'Alan Hertz and Friends',\n               'amendola' : 'www.scottamendola.com', # 'Scott Amendola Band',\n               'amf' : 'www.myspace.com/Amfibian', # 'Amfibian',\n               'bc' : 'www.blackcrowes.com', # 'Black Crowes',\n               'bcue' : 'www.myspace.com/bbcue', # 'Barbara Cue',\n               'be' : 'www.euphio.net', # 'Bockman\\'s Euphio',\n               'beanland' : 'www.beanland.net', # 'Beanlan',\n               'bfft' : 'www.belafleck.com', # 'Bela Fleck & the Flecktones',\n               'bfm' : 'www.myspace.com/barefootmanner', # 'Barefoot Manner',\n               'bftt' : 'www.belafleck.com', # 'Bela Fleck & Tony Trischka',\n               'bgug' : 'www.bluegroundundergrass.com', # 'Blueground Undergrass',\n               'bh' : 'www.benharper.com', # 'Ben Harper',\n               'bh' : 'www.brucehornsby.com', # 'Bruce Hornsby',\n               'bhic' : 'www.benharper.com', # 'Ben Harper and the Innocent Criminals',\n               'bij' : 'www.myspace.com/bigjapan', # 'Big In Japan',\n               'bmelon' : 'www.myspace.com/blindmelon', # 'Blind Melon',\n               'bnb' : 'www.myspace.com/burtneilsonband', # 'Burt Neilson Band',\n               'bog' : 'UNKNOWN', # 'A Band of Gypsys',\n               'bp' : 'www.brotherspast.com', # 'Brothers Past',\n               'bruce' : 'www.brucehornsby.com', # 'Bruce Hornsby',\n               'bs' : 'www.bigsmithband.com', # 'Big Smith',\n               'bspear' : 'www.burningspear.net', # 'Burning Spear',\n               'bt' : 'www.bluestraveler.com', # 'Blues Traveler',\n               'bts' : 'www.builttospill.com', # 'Built to Spill',\n               'buho' : 'www.elbuho.com', # 'El Buho',\n               'cb' : 'www.crittersbuggin.com', # 'Critters Buggin',\n               'cbobb' : 'www.lesclaypool.com', # 'Col. Claypool\\'s Bucket of Bernie Brains',\n               'cc' : 'www.countingcrows.com', # 'Counting Crows',\n               'ccity' : 'www.myspace.com/ceruleancity', # 'Cerulean City',\n               'ch' : 'www.charliehunter.com', # 'Charlie Hunter',\n               'cj' : 'latentrecordings.com/cowboyjunkies', # 'Cowboy Junkies',\n               'claypool' : 'www.lesclaypool.com', # 'Les Claypool\\'s Fearless Flying Frog Brigade',\n               'cracker' : 'www.crackersoul.com', # 'Cracker',\n               'crowes' : 'www.blackcrowes.com', # 'Black Crowes',\n               'cvb' : 'www.campervanbeethoven.com', # 'Camper van Beethoven',\n               'db' : 'www.discobiscuits.com', # 'Disco Biscuits',\n               'dbb' : 'www.deepbananablackout.com', # 'Deep Banana Blackout',\n               'dbr' : 'www.daybytheriver.org', # 'Day by the River',\n               'dbt' : 'www.drivebytruckers.com', # 'Drive-By Truckers',\n               'ddbb' : 'www.dirtydozenbrass.com', # 'Dirty Dozen Brass Band',\n               'dead' : 'www.dead.net', # 'The Dead',\n               'dgray' : 'www.davidgray.com', # 'David Gray',\n               'disp' : 'www.dispatchmusic.com', # 'Dispatch',\n               'djlogic' : 'www.djlogic.com', # 'DJ Logic',\n               'dmb' : 'www.davematthewsband.com', # 'Dave Matthews Band',\n               'dnb' : 'www.nelsonband.com', # 'David Nelson Band',\n               'dso' : 'www.darkstarorchestra.net', # 'Dark Star Orchestra',\n               'dt' : 'www.derektrucks.com', # 'Derek Trucks Band',\n               'dtb' : 'www.donnathebuffalo.com', # 'Donna the Buffalo',\n               'eb' : 'www.ediebrickell.com', # 'Edie Brickell',\n               'eh' : 'www.ekoostik.com', # 'Ekoostik Hookah',\n               'farrah' : 'UNKNOWN', # 'Farrah',\n               'fffb' : 'www.lesclaypool.com', # 'Les Claypool\\'s Fearless Flying Frog Brigade',\n               'fhg' : 'UNKNOWN', # 'Fareed Haque Group',\n               'gal' : 'UNKNOWN', # 'Galactic',\n               'garaj' : 'www.garajmahaljams.com', # 'Garaj Mahal',\n               'gba' : 'UNKNOWN', # 'GreyBoy AllStars',\n               'gd' : 'www.dead.net', # 'Grateful Dead',\n               'glove' : 'UNKNOWN', # 'G. Love and Special Sauce',\n               'gm' : 'www.mule.net', # 'Gov\\'t Mule',\n               'gsw' : 'UNKNOWN', # 'God Street Wine',\n               'gt' : 'UNKNOWN', # 'Ghost Trane',\n               'gtb' : 'UNKNOWN', # 'Grand Theft Bus',\n               'gus' : 'UNKNOWN', # 'Guster',\n               'guster' : 'UNKNOWN', # 'Guster',\n               'guymalone' : 'UNKNOWN', # 'Guy Malone',\n               'hday' : 'UNKNOWN', # 'Howie Day',\n               'hip' : 'www.thehip.com', # 'The Tragically Hip',\n               'ho' : 'UNKNOWN', # 'Schleigho',\n               'ht' : 'www.hottuna.com', # 'Hot Tuna',\n               'ig' : 'UNKNOWN', # 'Indigo Girls',\n               'itf' : 'UNKNOWN', # 'Indiana Trip Factory',\n               'jg' : 'www.dead.net', # 'Jerry Garcia',\n               'jgb' : 'www.dead.net', # 'Jerry Garcia Band',\n               'jh' : 'www.jimi-hendrix.com', # 'Jimi Hendrix Experience',\n               'jhbg' : 'UNKNOWN', # 'A Band of Gypsys',\n               'jhe' : 'www.jimi-hendrix.com', # 'Jimi Hendrix Experience',\n               'jj' : 'UNKNOWN', # 'Jack Johnson',\n               'jjj' : 'UNKNOWN', # 'Jerry Joseph & The Jackmormons',\n               'jk' : 'www.hottuna.com', # 'Jorma Kaukonen',\n               'jmayer' : 'UNKNOWN', # 'John Mayer',\n               'jmos' : 'UNKNOWN', # 'Jerry Joseph & The Jackmormons',\n               'jmp' : 'UNKNOWN', # 'Jazz Mandolin Project',\n               'jol' : 'UNKNOWN', # 'Jolene',\n               'jt' : 'www.wilcoworld.net', # 'Jeff Tweedy',\n               'kaikln' : 'UNKNOWN', # 'Kai Kln',\n               'kdcw' : 'UNKNOWN', # 'Karl Denson and Chris Wood',\n               'kdtu' : 'UNKNOWN', # 'Karl Denson\\'s Tiny Universe',\n               'kdub' : 'UNKNOWN', # 'Keller Williams',\n               'kmck' : 'UNKNOWN', # 'Steve Kimock & Friends',\n               'kudzu' : 'UNKNOWN', # 'Kudzu Kings',\n               'kvhw' : 'UNKNOWN', # 'KVHW',\n               'kw' : 'UNKNOWN', # 'Keller Williams',\n               'kwi' : 'UNKNOWN', # 'Keller Williams String Cheese Incident',\n               'laf' : 'UNKNOWN', # 'Life After Failing',\n               'lcbbb' : 'www.lesclaypool.com', # 'Col. Claypool\\'s Bucket of Bernie Brains',\n               'lcbobb' : 'www.lesclaypool.com', # 'Col. Claypool\\'s Bucket of Bernie Brains',\n               'lcfffb' : 'www.lesclaypool.com', # 'Les Claypool\\'s Fearless Flying Frog Brigade',\n               'ld' : 'UNKNOWN', # 'Living Daylights',\n               'lf' : 'UNKNOWN', # 'Little Feat',\n               'lfb' : 'UNKNOWN', # 'Lo Faber Band',\n               'logic' : 'UNKNOWN', # 'DJ Logic',\n               'lom' : 'www.dead.net', # 'Legion of Mary',\n               'los' : 'www.leftoversalmon.com', # 'Leftover Salmon',\n               'ls' : 'www.leftoversalmon.com', # 'Leftover Salmon',\n               'lt' : 'UNKNOWN', # 'Lake Trout',\n               'mammals' : 'UNKNOWN', # 'The Mammals',\n               'marlow' : 'UNKNOWN', # 'Marlow',\n               'mcrx' : 'UNKNOWN', # 'Mike Clark\\'s Prescription Renewal',\n               'mel' : 'UNKNOWN', # 'Marcus Eaton & The Lobby',\n               'metheny' : 'UNKNOWN', # 'Pat Metheny',\n               'mf' : 'UNKNOWN', # 'Marc Ford',\n               'mfs' : 'UNKNOWN', # 'Michael Franti & Spearhead',\n               'mmw' : 'UNKNOWN', # 'Medeski Martin & Wood',\n               'moe' : 'www.moe.org', # 'moe.',\n               'mule' : 'www.mule.net', # 'Gov\\'t Mule',\n               'nd' : 'UNKNOWN', # 'The New Deal',\n               'nmas' : 'UNKNOWN', # 'North Mississippi Allstars',\n               'oar' : 'UNKNOWN', # 'O.A.R. (Of A Revolution)',\n               'oh' : 'www.oysterhead.com', # 'Oysterhead',\n               'or' : 'www.oregonband.com', # 'Oregon',\n               'oregon' : 'www.oregonband.com', # 'Oregon',\n               'oysterhead' : 'www.oysterhead.com', # 'Oysterhead',\n               'p&f' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'p+f' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'paf' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'par' : 'www.particlepeople.com', # 'Particle',\n               'particle' : 'www.particlepeople.com', # 'Particle',\n               'pb' : 'www.thebreakfast.org', # 'Psychedelic Breakfast',\n               'pf' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'ph' : 'www.phish.com', # 'Phish',\n               'phil' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'pj' : 'UNKNOWN', # 'Pearl Jam',\n               'pm' : 'UNKNOWN', # 'Pat Metheny',\n               'pmb' : 'UNKNOWN', # 'Pat McGee Band',\n               'pmg' : 'UNKNOWN', # 'Pat Metheny Group',\n               'pmt' : 'UNKNOWN', # 'Pat Metheny Trio',\n               'pnf' : 'www.phillesh.net', # 'Phil Lesh & Friends',\n               'porter' : 'UNKNOWN', # 'George Porter, Jr. & Runnin\\' Pardners',\n               'rad' : 'www.theradiators.org', # 'Radiators',\n               'rads' : 'www.theradiators.org', # 'Radiators',\n               'raq' : 'UNKNOWN', # 'Raq',\n               'ratdog' : 'www.rat-dog.com', # 'Ratdog',\n               'rd' : 'www.rat-dog.com', # 'Ratdog',\n               'reservoir' : 'UNKNOWN', # 'Reservoir',\n               'rezi' : 'UNKNOWN', # 'Rezi',\n               'rg' : 'UNKNOWN', # 'Reid Genauer',\n               'rre' : 'UNKNOWN', # 'Railroad Earth',\n               'rrfb' : 'UNKNOWN', # 'Robert Randolph & the Family Band',\n               'rw2c' : 'UNKNOWN', # 'Robert Walter\\'s 20th Congress',\n               'rwtc' : 'UNKNOWN', # 'Robert Walter\\'s 20th Congress',\n               'schas' : 'UNKNOWN', # 'Santa Cruz Hemp Allstars',\n               'schwillb' : 'UNKNOWN', # 'The Schwillbillies',\n               'sci' : 'UNKNOWN', # 'String Cheese Incident',\n               'sco' : 'UNKNOWN', # 'John Scofield',\n               'sexmob' : 'UNKNOWN', # 'Sex Mob',\n               'sf' : 'UNKNOWN', # 'Strangefolk',\n               'sk' : 'www.kimock.com', # 'Kimock',\n               'skb' : 'www.kimock.com', # 'Steve Kimock Band',\n               'sl' : 'UNKNOWN', # 'The Slip',\n               'slip' : 'UNKNOWN', # 'The Slip',\n               'sonicyouth' : 'www.sonicyouth.com', # 'Sonic Youth',\n               'soulive' : 'UNKNOWN', # 'Soulive',\n               'spin' : 'UNKNOWN', # 'Spin Doctors',\n               'spod' : 'UNKNOWN', # 'Serial Pod',\n               'spr' : 'UNKNOWN', # 'Michael Franti & Spearhead',\n               'ss' : 'UNKNOWN', # 'Stockholm Syndrome',\n               'st' : 'UNKNOWN', # 'Shaking Tree',\n               'stocksyn' : 'UNKNOWN', # 'Stockholm Syndrome',\n               'syb' : 'UNKNOWN', # 'Seth Yacovone Band',\n               'tab' : 'UNKNOWN', # 'Trey Anastasio (Band)',\n               'td' : 'www.dead.net', # 'The Dead',\n               'tend' : 'UNKNOWN', # 'Tenacious D',\n               'tlg' : 'UNKNOWN', # 'Tea Leaf Green',\n               'tn' : 'UNKNOWN', # 'The Nadas',\n               'tnd' : 'UNKNOWN', # 'The New Deal',\n               'too' : 'www.dead.net', # 'The Other Ones',\n               'tortoise' : 'UNKNOWN', # 'Tortoise',\n               'tr' : 'UNKNOWN', # 'Tim Reynolds',\n               'trey' : 'UNKNOWN', # 'Trey Anastasio (Band)',\n               'um' : 'www.umphreys.com', # 'Umphrey\\'s McGee',\n               'umph' : 'www.umphreys.com', # 'Umphrey\\'s McGee',\n               'us' : 'UNKNOWN', # 'Uncle Sammy',\n               'vb' : 'UNKNOWN', # 'Vida Blue',\n               'wb4t' : 'UNKNOWN', # 'Will Bernard 4tet',\n               'ween' : 'UNKNOWN', # 'Ween',\n               'wh' : 'UNKNOWN', # 'Warren Haynes',\n               'wilco' : 'www.wilcoworld.net', # 'Wilco',\n               'word' : 'UNKNOWN', # 'The Word',\n               'wp' : 'www.widespreadpanic.com', # 'Widespread Panic',\n               'wsp' : 'www.widespreadpanic.com', # 'Widespread Panic',\n               'wu' : 'UNKNOWN', # 'The Big Wu',\n               'ymsb' : 'UNKNOWN', # 'Yonder Mountain String Band',\n               'zero' : 'www.zerolive.com', # 'Zero',\n               'zm' : 'UNKNOWN', # 'Zony Mash',\n               'zwan' : 'UNKNOWN' # 'Zwan'\n               }\n\n    # -----------------------------------------------------------------------\n    #\n    # __init__\n    #\n    # -----------------------------------------------------------------------\n    def __init__(self):\n        \"\"\"\n        Initializes this class with the following variables\n\n        debug = False\n        \"\"\"\n\n        self.debug = False\n\n    #\n    # End of __init__\n    #\n\n    # -----------------------------------------------------------------------\n    #\n    # turnOnDebug\n    #\n    # -----------------------------------------------------------------------\n    def turnOnDebug(self):\n        \"\"\"\n        Set the debug flag to True\n        \"\"\"\n\n        self.debug = True\n\n        return\n    #\n    # End of turnOnDebug\n    #\n\n    # -----------------------------------------------------------------------\n    #\n    # turnOffDebug\n    #\n    # -----------------------------------------------------------------------\n    def turnOffDebug(self):\n        \"\"\"\n        Set the debug flag to False\n        \"\"\"\n\n        self.debug = False\n\n        return\n    #\n    # End of turnOffDebug\n    #\n\n    # -----------------------------------------------------------------------\n    #\n    # loadBandDB\n    #\n    # -----------------------------------------------------------------------\n    def loadBandDB(self):\n        \"\"\"Populate BandDB from the BandNames and BandUrl tables.\"\"\"\n        # D = {}\n        # D['a'] = {'name': 'bob', 'age': 43}\n        # name = D['a']['name']\n\n        for name in self.BandNames:\n            full_name = self.BandNames[name]\n            url = self.BandUrl[name]\n            self.BandDB[name] = {'full_name': full_name, 'url': url}\n\n        return\n    #\n    # End of loadBandDB\n    #\n\n    def createBandDB(self, D):\n        fqp = D['fqp']\n        dbh = D['dbh']\n\n        return\n    #\n    # End of createBandDB\n    #\n\n\n    #\n    # End of class pyetree_bandnames\n    #\n\n# -----------------------------------------------------------------------\n#\n# < End of pyetree_bandnames >\n#\n# -----------------------------------------------------------------------\n"},"license":{"kind":"string","value":"apache-2.0"}}},
-----------------------------------------------------------------------\n #\n # __init__\n #\n # -----------------------------------------------------------------------\n def __init__(self):\n \"\"\"\n Initializes this class with the following variables\n\n debug = False\n \"\"\"\n\n self.debug = False\n\n #\n # End of __init__\n #\n\n # -----------------------------------------------------------------------\n #\n # turnOnDebug\n #\n # -----------------------------------------------------------------------\n def turnOnDebug(self):\n \"\"\"\n Set the debug flag to True\n \"\"\"\n\n self.debug = True\n\n return\n #\n # End of turnOnDebug\n #\n\n \n # -----------------------------------------------------------------------\n #\n # turnOffDebug\n #\n # -----------------------------------------------------------------------\n def turnOffDebug(self):\n \"\"\"\n Set the debug flag to False\n \"\"\"\n\n self.debug = False\n\n return\n #\n # End of turnOffDebug\n #\n\n # -----------------------------------------------------------------------\n #\n # loadBandDB\n #\n # -----------------------------------------------------------------------\n def loadBandDB(self):\n # D = {}\n # D['a'] = {'name': 'bob', 'age': 43}\n # name = D['a']['name']\n\n for name in BandNames:\n full_name = BandNames[name]\n url = BandUrl[name]\n BandDB[name] = {'full_name': full_name, 'url': url}\n\n return\n #\n # End of loadBandDB\n #\n\n def createBandDB(self, D):\n fqp = D['fqp']\n dbh = D['dbh']\n \n return\n #\n # End of createBandDB\n #\n\n\n #\n # End of class pyetree_bandnames\n #\n\n# -----------------------------------------------------------------------\n#\n# < End of pyetree_bandnames >\n#\n# -----------------------------------------------------------------------\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475240,"cells":{"repo_name":{"kind":"string","value":"ivanhorvath/openshift-tools"},"path":{"kind":"string","value":"openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/lib/volume.py"},"copies":{"kind":"string","value":"64"},"size":{"kind":"string","value":"2002"},"content":{"kind":"string","value":"# pylint: skip-file\n# flake8: noqa\n\nclass Volume(object):\n ''' Class to represent an openshift volume object'''\n volume_mounts_path = {\"pod\": \"spec.containers[0].volumeMounts\",\n \"dc\": \"spec.template.spec.containers[0].volumeMounts\",\n \"rc\": \"spec.template.spec.containers[0].volumeMounts\",\n }\n volumes_path = {\"pod\": \"spec.volumes\",\n \"dc\": \"spec.template.spec.volumes\",\n \"rc\": \"spec.template.spec.volumes\",\n }\n\n @staticmethod\n def create_volume_structure(volume_info):\n ''' return a properly structured volume '''\n volume_mount = None\n volume = {'name': volume_info['name']}\n volume_type = volume_info['type'].lower()\n if volume_type == 'secret':\n volume['secret'] = {}\n volume[volume_info['type']] = {'secretName': volume_info['secret_name']}\n volume_mount = {'mountPath': volume_info['path'],\n 'name': volume_info['name']}\n elif volume_type == 'emptydir':\n volume['emptyDir'] = {}\n volume_mount = {'mountPath': volume_info['path'],\n 'name': volume_info['name']}\n elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':\n volume['persistentVolumeClaim'] = {}\n volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']\n volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']\n elif volume_type == 'hostpath':\n volume['hostPath'] = {}\n volume['hostPath']['path'] = volume_info['path']\n 
elif volume_type == 'configmap':\n volume['configMap'] = {}\n volume['configMap']['name'] = volume_info['configmap_name']\n volume_mount = {'mountPath': volume_info['path'],\n 'name': volume_info['name']}\n\n return (volume, volume_mount)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475241,"cells":{"repo_name":{"kind":"string","value":"varunarya10/boto"},"path":{"kind":"string","value":"tests/integration/ec2/test_cert_verification.py"},"copies":{"kind":"string","value":"126"},"size":{"kind":"string","value":"1549"},"content":{"kind":"string","value":"# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nCheck that all of the certs on all service endpoints validate.\n\"\"\"\nimport unittest\n\nfrom tests.integration import ServiceCertVerificationTest\n\nimport boto.ec2\n\n\nclass EC2CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):\n ec2 = True\n regions = boto.ec2.regions()\n\n def sample_service_call(self, conn):\n conn.get_all_reservations()\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475242,"cells":{"repo_name":{"kind":"string","value":"npiganeau/odoo"},"path":{"kind":"string","value":"addons/base_gengo/res_company.py"},"copies":{"kind":"string","value":"24"},"size":{"kind":"string","value":"1784"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Business Applications\n# Copyright (C) 2004-2012 OpenERP S.A. ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
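An aside on the openshift-ansible Volume helper that closes just above: create_volume_structure returns a (volume, volume_mount) pair shaped for the volumes/volumeMounts paths listed in its class attributes. A small usage sketch; the dict values are hypothetical, and it assumes the Volume class above is importable:

# Hypothetical driver for Volume.create_volume_structure from the row above.
volume, volume_mount = Volume.create_volume_structure({
    'name': 'app-secret-vol',
    'type': 'secret',            # compared lowercased, but also reused verbatim as a dict key
    'path': '/etc/app/secrets',
    'secret_name': 'app-secret',
})
assert volume == {'name': 'app-secret-vol', 'secret': {'secretName': 'app-secret'}}
assert volume_mount == {'mountPath': '/etc/app/secrets', 'name': 'app-secret-vol'}

# pvc/persistentvolumeclaim volumes get no mount from this helper:
pvc_volume, pvc_mount = Volume.create_volume_structure({
    'name': 'data', 'type': 'pvc', 'claimName': 'data-claim', 'claimSize': '1Gi',
})
assert pvc_mount is None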
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\n\n\nclass res_company(osv.Model):\n    _name = \"res.company\"\n    _inherit = \"res.company\"\n    _columns = {\n        \"gengo_private_key\": fields.text(\"Gengo Private Key\", copy=False),\n        \"gengo_public_key\": fields.text(\"Gengo Public Key\", copy=False),\n        \"gengo_comment\": fields.text(\"Comments\", help=\"This comment will automatically be enclosed in each and every request sent to Gengo\"),\n        \"gengo_auto_approve\": fields.boolean(\"Auto Approve Translation?\", help=\"Jobs are automatically approved by Gengo.\"),\n        \"gengo_sandbox\": fields.boolean(\"Sandbox Mode\", help=\"Check this box if you're using the sandbox mode of Gengo, mainly used for testing purposes.\"),\n    }\n\n    _defaults = {\n        \"gengo_auto_approve\": True,\n    }\n"},"license":{"kind":"string","value":"agpl-3.0"}}},
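A hedged consumption sketch for the Gengo fields defined above: integrations such as base_gengo typically read these columns off the user's company record before opening a Gengo API session. The function name and error message below are invented for illustration:

# Illustrative only: validating the res.company Gengo fields defined above.
def get_gengo_configuration(company):
    """company is a res.company browse record (OpenERP 7 style)."""
    if not (company.gengo_public_key and company.gengo_private_key):
        raise ValueError('Gengo keys are not configured for company %s' % company.name)
    # gengo_sandbox switches the client to Gengo's test endpoint;
    # gengo_auto_approve tells Gengo to auto-approve finished jobs.
    return (company.gengo_public_key, company.gengo_private_key,
            company.gengo_sandbox, company.gengo_auto_approve)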
{"rowIdx":475243,"cells":{"repo_name":{"kind":"string","value":"trondhindenes/ansible"},"path":{"kind":"string","value":"test/units/modules/network/netscaler/test_netscaler_gslb_site.py"},"copies":{"kind":"string","value":"68"},"size":{"kind":"string","value":"24193"},"content":{"kind":"string","value":"\n# Copyright (c) 2017 Citrix Systems\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.\n#\n\nfrom units.compat.mock import patch, Mock, MagicMock, call\nfrom units.modules.utils import set_module_args\nfrom .netscaler_module import TestModule, nitro_base_patcher\n\nimport sys\n\nif sys.version_info[:2] != (2, 6):\n    import requests\n\n\nclass TestNetscalerGSLBSiteModule(TestModule):\n\n    @classmethod\n    def setUpClass(cls):\n        class MockException(Exception):\n            pass\n\n        cls.MockException = MockException\n\n        m = MagicMock()\n        nssrc_modules_mock = {\n            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,\n            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite': m,\n            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite.gslbsite': m,\n        }\n\n        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)\n        cls.nitro_base_patcher = nitro_base_patcher\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.nitro_base_patcher.stop()\n        cls.nitro_specific_patcher.stop()\n\n    def setUp(self):\n        super(TestNetscalerGSLBSiteModule, self).setUp()\n\n        self.nitro_base_patcher.start()\n        self.nitro_specific_patcher.start()\n\n        # Setup minimal required arguments to pass AnsibleModule argument parsing\n\n    def tearDown(self):\n        super(TestNetscalerGSLBSiteModule, self).tearDown()\n\n        self.nitro_base_patcher.stop()\n        self.nitro_specific_patcher.stop()\n\n    def test_graceful_nitro_api_import_error(self):\n        # Stop nitro api patching to cause ImportError\n        set_module_args(dict(\n            nitro_user='user',\n            nitro_pass='pass',\n            nsip='192.0.2.1',\n            state='present',\n        ))\n        self.nitro_base_patcher.stop()\n        self.nitro_specific_patcher.stop()\n        from ansible.modules.network.netscaler import netscaler_gslb_site\n        self.module = netscaler_gslb_site\n        result = self.failed()\n        self.assertEqual(result['msg'], 'Could not load nitro python sdk')\n\n    def test_graceful_nitro_error_on_login(self):\n        set_module_args(dict(\n            nitro_user='user',\n            nitro_pass='pass',\n            nsip='192.0.2.1',\n            state='present',\n        ))\n        from ansible.modules.network.netscaler import netscaler_gslb_site\n\n        class MockException(Exception):\n            def __init__(self, *args, **kwargs):\n                self.errorcode = 0\n                self.message = ''\n\n        client_mock = Mock()\n        client_mock.login = Mock(side_effect=MockException)\n        m = Mock(return_value=client_mock)\n        with patch('ansible.modules.network.netscaler.netscaler_gslb_site.get_nitro_client', m):\n            with patch('ansible.modules.network.netscaler.netscaler_gslb_site.nitro_exception', MockException):\n                self.module = netscaler_gslb_site\n                result = self.failed()\n                self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')\n\n    def test_graceful_no_connection_error(self):\n\n        if sys.version_info[:2] == (2, 6):\n            self.skipTest('requests library not available under python2.6')\n        set_module_args(dict(\n            nitro_user='user',\n            nitro_pass='pass',\n            nsip='192.0.2.1',\n            state='present',\n        ))\n        from ansible.modules.network.netscaler import netscaler_gslb_site\n\n        class MockException(Exception):\n            pass\n        client_mock = Mock()\n        attrs = {'login.side_effect': requests.exceptions.ConnectionError}\n        client_mock.configure_mock(**attrs)\n        m = Mock(return_value=client_mock)\n        with patch.multiple(\n            'ansible.modules.network.netscaler.netscaler_gslb_site',\n            get_nitro_client=m,\n            nitro_exception=MockException,\n        ):\n            self.module = netscaler_gslb_site\n            result = self.failed()\n            self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')\n\n    def test_graceful_login_error(self):\n        set_module_args(dict(\n
nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n if sys.version_info[:2] == (2, 6):\n self.skipTest('requests library not available under python2.6')\n\n class MockException(Exception):\n pass\n client_mock = Mock()\n attrs = {'login.side_effect': requests.exceptions.SSLError}\n client_mock.configure_mock(**attrs)\n m = Mock(return_value=client_mock)\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n nitro_exception=MockException,\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')\n\n def test_ensure_feature_is_enabled_called(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n gslb_site_proxy_mock = Mock()\n ensure_feature_is_enabled_mock = Mock()\n client_mock = Mock()\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=Mock(return_value=client_mock),\n gslb_site_exists=Mock(side_effect=[False, True]),\n gslb_site_identical=Mock(side_effect=[True]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=ensure_feature_is_enabled_mock,\n ConfigProxy=Mock(return_value=gslb_site_proxy_mock),\n ):\n self.module = netscaler_gslb_site\n self.exited()\n ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')\n\n def test_save_config_called_on_state_present(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n gslb_site_proxy_mock = Mock()\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n gslb_site_exists=Mock(side_effect=[False, True]),\n gslb_site_identical=Mock(side_effect=[True]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=Mock(return_value=gslb_site_proxy_mock),\n ):\n self.module = netscaler_gslb_site\n self.exited()\n self.assertIn(call.save_config(), client_mock.mock_calls)\n\n def test_save_config_called_on_state_absent(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n gslb_site_proxy_mock = Mock()\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n gslb_site_exists=Mock(side_effect=[True, False]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=Mock(return_value=gslb_site_proxy_mock),\n ):\n self.module = netscaler_gslb_site\n self.exited()\n self.assertIn(call.save_config(), client_mock.mock_calls)\n\n def test_save_config_not_called_on_state_present(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n save_config=False,\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n gslb_site_proxy_mock = Mock()\n\n with patch.multiple(\n 
'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n gslb_site_exists=Mock(side_effect=[False, True]),\n gslb_site_identical=Mock(side_effect=[True]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=Mock(return_value=gslb_site_proxy_mock),\n ):\n self.module = netscaler_gslb_site\n self.exited()\n self.assertNotIn(call.save_config(), client_mock.mock_calls)\n\n def test_save_config_not_called_on_state_absent(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n save_config=False,\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n gslb_site_proxy_mock = Mock()\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n gslb_site_exists=Mock(side_effect=[True, False]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=Mock(return_value=gslb_site_proxy_mock),\n ):\n self.module = netscaler_gslb_site\n self.exited()\n self.assertNotIn(call.save_config(), client_mock.mock_calls)\n\n def test_new_gslb_site_execution_flow(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n gslb_site_exists=Mock(side_effect=[False, True]),\n gslb_site_identical=Mock(side_effect=[True]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n self.exited()\n gslb_site_proxy_mock.assert_has_calls([call.add()])\n\n def test_modified_gslb_site_execution_flow(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[True, True]),\n gslb_site_identical=Mock(side_effect=[False, True]),\n ensure_feature_is_enabled=Mock(),\n nitro_exception=self.MockException,\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n self.exited()\n gslb_site_proxy_mock.assert_has_calls([call.update()])\n\n def test_absent_gslb_site_execution_flow(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n 
gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[True, False]),\n gslb_site_identical=Mock(side_effect=[False, True]),\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n self.exited()\n gslb_site_proxy_mock.assert_has_calls([call.delete()])\n\n def test_present_gslb_site_identical_flow(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[True, True]),\n gslb_site_identical=Mock(side_effect=[True, True]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n self.exited()\n gslb_site_proxy_mock.assert_not_called()\n\n def test_absent_gslb_site_noop_flow(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[False, False]),\n gslb_site_identical=Mock(side_effect=[False, False]),\n nitro_exception=self.MockException,\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n self.exited()\n gslb_site_proxy_mock.assert_not_called()\n\n def test_present_gslb_site_failed_update(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n nitro_exception=self.MockException,\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[True, True]),\n gslb_site_identical=Mock(side_effect=[False, False]),\n 
ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertEqual(result['msg'], 'GSLB site differs from configured')\n self.assertTrue(result['failed'])\n\n def test_present_gslb_site_failed_create(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n nitro_exception=self.MockException,\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[False, False]),\n gslb_site_identical=Mock(side_effect=[False, False]),\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertEqual(result['msg'], 'GSLB site does not exist')\n self.assertTrue(result['failed'])\n\n def test_present_gslb_site_update_immutable_attribute(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n nitro_exception=self.MockException,\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=['domain']),\n gslb_site_exists=Mock(side_effect=[True, True]),\n gslb_site_identical=Mock(side_effect=[False, False]),\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertEqual(result['msg'], 'Cannot update immutable attributes [\\'domain\\']')\n self.assertTrue(result['failed'])\n\n def test_absent_gslb_site_failed_delete(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n client_mock = Mock()\n\n m = Mock(return_value=client_mock)\n\n glsb_site_proxy_attrs = {\n 'diff_object.return_value': {},\n }\n gslb_site_proxy_mock = Mock()\n gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)\n config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)\n\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n nitro_exception=self.MockException,\n get_nitro_client=m,\n diff_list=Mock(return_value={}),\n get_immutables_intersection=Mock(return_value=[]),\n gslb_site_exists=Mock(side_effect=[True, True]),\n gslb_site_identical=Mock(side_effect=[False, False]),\n ensure_feature_is_enabled=Mock(),\n ConfigProxy=config_proxy_mock,\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertEqual(result['msg'], 'GSLB site still exists')\n 
self.assertTrue(result['failed'])\n\n def test_graceful_nitro_exception_state_present(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='present',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n class MockException(Exception):\n def __init__(self, *args, **kwargs):\n self.errorcode = 0\n self.message = ''\n\n m = Mock(side_effect=MockException)\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n gslb_site_exists=m,\n ensure_feature_is_enabled=Mock(),\n nitro_exception=MockException\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertTrue(\n result['msg'].startswith('nitro exception'),\n msg='Nitro exception not caught on operation absent'\n )\n\n def test_graceful_nitro_exception_state_absent(self):\n set_module_args(dict(\n nitro_user='user',\n nitro_pass='pass',\n nsip='192.0.2.1',\n state='absent',\n ))\n from ansible.modules.network.netscaler import netscaler_gslb_site\n\n class MockException(Exception):\n def __init__(self, *args, **kwargs):\n self.errorcode = 0\n self.message = ''\n\n m = Mock(side_effect=MockException)\n with patch.multiple(\n 'ansible.modules.network.netscaler.netscaler_gslb_site',\n gslb_site_exists=m,\n ensure_feature_is_enabled=Mock(),\n nitro_exception=MockException\n ):\n self.module = netscaler_gslb_site\n result = self.failed()\n self.assertTrue(\n result['msg'].startswith('nitro exception'),\n msg='Nitro exception not caught on operation absent'\n )\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475244,"cells":{"repo_name":{"kind":"string","value":"vishnuvaradaraj/firefox-ios"},"path":{"kind":"string","value":"scripts/export-xliff.py"},"copies":{"kind":"string","value":"49"},"size":{"kind":"string","value":"1380"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n#\n# xliff-export.py xcodeproj-path l10n-path\n#\n# Export all locales that are present in the l10n directory. We use xcodebuild\n# to export and write to l10n-directory/$locale/firefox-ios.xliff so that it\n# can be easily imported into svn. 
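One idiom carries the whole NetScaler GSLB test suite above: Mock(side_effect=[...]) makes consecutive calls to the same existence or identity check return different values, which is how "absent before create, present after" is modeled. Distilled to a self-contained sketch, with plain unittest.mock standing in for the units.compat.mock shim:

from unittest.mock import Mock

# side_effect consumes one list entry per call, which is how the tests above
# model a resource that is absent before create() and present afterwards.
gslb_site_exists = Mock(side_effect=[False, True])
create = Mock()

def ensure_present(exists, create):
    if not exists():                 # 1st call -> False: resource is missing
        create()
    if not exists():                 # 2nd call -> True: creation is verified
        raise RuntimeError('resource was not created')

ensure_present(gslb_site_exists, create)
create.assert_called_once_with()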
(which is a manual step)\n#\n# Example:\n#\n# cd firefox-ios\n# ./xliff-export.py Client.xcodeproj ../firefox-ios-l10n\n#\n\nimport glob\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nLOCALES_TO_SKIP = ['pl']\n\ndef available_locales(l10n_path):\n for xliff_path in glob.glob(l10n_path + \"/*/firefox-ios.xliff\"):\n parts = xliff_path.split(os.sep)\n yield parts[-2]\n\nif __name__ == \"__main__\":\n project_path = sys.argv[1]\n l10n_path = sys.argv[2]\n\n for locale in available_locales(l10n_path):\n if locale in LOCALES_TO_SKIP:\n continue\n command = [\n \"xcodebuild\",\n \"-exportLocalizations\",\n \"-localizationPath\", \"/tmp/xliff\",\n \"-project\", project_path,\n \"-exportLanguage\", locale\n ]\n\n print \"Exporting '%s' to '/tmp/xliff/%s.xliff'\" % (locale, locale)\n subprocess.call(command)\n\n src_path = \"/tmp/xliff/%s.xliff\" % locale\n dst_path = \"%s/%s/firefox-ios.xliff\" % (l10n_path, locale)\n print \"Copying '%s' to '%s'\" % (src_path, dst_path)\n shutil.copy(src_path, dst_path)\n"},"license":{"kind":"string","value":"mpl-2.0"}}},{"rowIdx":475245,"cells":{"repo_name":{"kind":"string","value":"runt18/mod_python"},"path":{"kind":"string","value":"lib/python/mod_python/Cookie.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"12964"},"content":{"kind":"string","value":" # vim: set sw=4 expandtab :\n #\n # Copyright (C) 2000, 2001, 2013 Gregory Trubetskoy\n # Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apache Software Foundation\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\"); you\n # may not use this file except in compliance with the License. You\n # may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n # implied. See the License for the specific language governing\n # permissions and limitations under the License.\n #\n # Originally developed by Gregory Trubetskoy.\n #\n\n\"\"\"\n\nThis module contains classes to support HTTP State Management\nMechanism, also known as Cookies. The classes provide simple\nways for creating, parsing and digitally signing cookies, as\nwell as the ability to store simple Python objects in Cookies\n(using marshalling).\n\nThe behaviour of the classes is designed to be most useful\nwithin mod_python applications.\n\nThe current state of HTTP State Management standardization is\nrather unclear. It appears that the de-facto standard is the\noriginal Netscape specification, even though already two RFC's\nhave been put out (RFC2109 (1997) and RFC2965 (2000)). The\nRFC's add a couple of useful features (e.g. using Max-Age instead\nof Expires, but my limited tests show that Max-Age is ignored\nby the two browsers tested (IE and Safari). 
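The exporter above trusts the glob in available_locales to decide what to export before shelling out to xcodebuild. A dry-run helper along the same lines can list what a given l10n checkout would produce, using the same glob and skip list with no side effects:

# Dry run for the exporter above: show which locales would be exported.
import glob
import os

def preview_locales(l10n_path, skip=('pl',)):
    for xliff_path in sorted(glob.glob(os.path.join(l10n_path, '*', 'firefox-ios.xliff'))):
        locale = xliff_path.split(os.sep)[-2]
        print('%s %s' % ('skip  ' if locale in skip else 'export', locale))

# preview_locales('../firefox-ios-l10n')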
As a result of this,\nperhaps trying to be RFC-compliant (by automatically providing\nMax-Age and Version) could be a waste of cookie space...\n\n\"\"\"\n\nimport sys\nimport time\nimport re\nimport hmac\nimport marshal\nimport base64\n\nPY2 = sys.version[0] == '2'\n\nclass CookieError(Exception):\n pass\n\nclass metaCookie(type):\n\n def __new__(cls, clsname, bases, clsdict):\n\n _valid_attr = (\n \"version\", \"path\", \"domain\", \"secure\",\n \"comment\", \"expires\", \"max_age\",\n # RFC 2965\n \"commentURL\", \"discard\", \"port\",\n # Microsoft Extension\n \"httponly\" )\n\n # _valid_attr + property values\n # (note __slots__ is a new Python feature, it\n # prevents any other attribute from being set)\n __slots__ = _valid_attr + (\"name\", \"value\", \"_value\",\n \"_expires\", \"__data__\")\n\n clsdict[\"_valid_attr\"] = _valid_attr\n clsdict[\"__slots__\"] = __slots__\n\n def set_expires(self, value):\n\n if type(value) == type(\"\"):\n # if it's a string, it should be\n # valid format as per Netscape spec\n try:\n t = time.strptime(value, \"%a, %d-%b-%Y %H:%M:%S GMT\")\n except ValueError:\n raise ValueError(\"Invalid expires time: %s\" % value)\n t = time.mktime(t)\n else:\n # otherwise assume it's a number\n # representing time as from time.time()\n t = value\n value = time.strftime(\"%a, %d-%b-%Y %H:%M:%S GMT\",\n time.gmtime(t))\n\n self._expires = \"%s\" % value\n\n def get_expires(self):\n return self._expires\n\n clsdict[\"expires\"] = property(fget=get_expires, fset=set_expires)\n\n return type.__new__(cls, clsname, bases, clsdict)\n\n# metaclass= workaround, see\n# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/#using-the-metaclass-in-python-2-x-and-3-x\n_metaCookie = metaCookie('Cookie', (object, ), {})\n\nclass Cookie(_metaCookie):\n \"\"\"\n This class implements the basic Cookie functionality. Note that\n unlike the Python Standard Library Cookie class, this class represents\n a single cookie (not a list of Morsels).\n \"\"\"\n\n DOWNGRADE = 0\n IGNORE = 1\n EXCEPTION = 3\n\n def parse(Class, str, **kw):\n \"\"\"\n Parse a Cookie or Set-Cookie header value, and return\n a dict of Cookies. Note: the string should NOT include the\n header name, only the value.\n \"\"\"\n\n dict = _parse_cookie(str, Class, **kw)\n return dict\n\n parse = classmethod(parse)\n\n def __init__(self, name, value, **kw):\n\n \"\"\"\n This constructor takes at least a name and value as the\n arguments, as well as optionally any of allowed cookie attributes\n as defined in the existing cookie standards.\n \"\"\"\n self.name, self.value = name, value\n\n for k in kw:\n setattr(self, k.lower(), kw[k])\n\n # subclasses can use this for internal stuff\n self.__data__ = {}\n\n\n def __str__(self):\n\n \"\"\"\n Provides the string representation of the Cookie suitable for\n sending to the browser. Note that the actual header name will\n not be part of the string.\n\n This method makes no attempt to automatically double-quote\n strings that contain special characters, even though the RFC's\n dictate this. 
This is because doing so seems to confuse most\n browsers out there.\n \"\"\"\n\n result = [\"%s=%s\" % (self.name, self.value)]\n for name in self._valid_attr:\n if hasattr(self, name):\n if name in (\"secure\", \"discard\", \"httponly\"):\n result.append(name)\n else:\n result.append(\"%s=%s\" % (name, getattr(self, name)))\n return \"; \".join(result)\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__,\n str(self))\n\n\nclass SignedCookie(Cookie):\n \"\"\"\n This is a variation of Cookie that provides automatic\n cryptographic signing of cookies and verification. It uses\n the HMAC support in the Python standard library. This ensures\n that the cookie has not been tamprered with on the client side.\n\n Note that this class does not encrypt cookie data, thus it\n is still plainly visible as part of the cookie.\n \"\"\"\n\n def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw):\n\n dict = _parse_cookie(s, Class, **kw)\n\n del_list = []\n for k in dict:\n c = dict[k]\n try:\n c.unsign(secret)\n except CookieError:\n if mismatch == Cookie.EXCEPTION:\n raise\n elif mismatch == Cookie.IGNORE:\n del_list.append(k)\n else:\n # downgrade to Cookie\n dict[k] = Cookie.parse(Cookie.__str__(c))[k]\n\n for k in del_list:\n del dict[k]\n\n return dict\n\n parse = classmethod(parse)\n\n def __init__(self, name, value, secret=None, **kw):\n Cookie.__init__(self, name, value, **kw)\n\n self.__data__[\"secret\"] = secret\n\n def hexdigest(self, str):\n if not self.__data__[\"secret\"]:\n raise CookieError(\"Cannot sign without a secret\")\n _hmac = hmac.new(self.__data__[\"secret\"], self.name)\n _hmac.update(str)\n if PY2:\n return _hmac.hexdigest()\n else:\n return _hmac.hexdigest().decode()\n\n def __str__(self):\n\n result = [\"%s=%s%s\" % (self.name, self.hexdigest(self.value),\n self.value)]\n for name in self._valid_attr:\n if hasattr(self, name):\n if name in (\"secure\", \"discard\", \"httponly\"):\n result.append(name)\n else:\n result.append(\"%s=%s\" % (name, getattr(self, name)))\n return \"; \".join(result)\n\n def unsign(self, secret):\n\n sig, val = self.value[:32], self.value[32:]\n\n mac = hmac.new(secret, self.name)\n mac.update(val)\n\n if mac.hexdigest() == sig:\n self.value = val\n self.__data__[\"secret\"] = secret\n else:\n raise CookieError(\"Incorrectly Signed Cookie: %s=%s\" % (self.name, self.value))\n\n\nclass MarshalCookie(SignedCookie):\n\n \"\"\"\n This is a variation of SignedCookie that can store more than\n just strings. It will automatically marshal the cookie value,\n therefore any marshallable object can be used as value.\n\n The standard library Cookie module provides the ability to pickle\n data, which is a major security problem. 
It is believed that unmarshalling\n    (as opposed to unpickling) is safe, yet we still err on the side of caution\n    which is why this class is a subclass of SignedCookie making sure what\n    we are about to unmarshal passes the digital signature test.\n\n    Here is a link to a suggestion that marshalling is safer than unpickling\n    http://groups.google.com/groups?hl=en&lr=&ie=UTF-8&selm=7xn0hcugmy.fsf%40ruckus.brouhaha.com\n    \"\"\"\n\n    def parse(Class, s, secret, mismatch=Cookie.DOWNGRADE, **kw):\n\n        dict = _parse_cookie(s, Class, **kw)\n\n        del_list = []\n        for k in dict:\n            c = dict[k]\n            try:\n                c.unmarshal(secret)\n            except CookieError:\n                if mismatch == Cookie.EXCEPTION:\n                    raise\n                elif mismatch == Cookie.IGNORE:\n                    del_list.append(k)\n                else:\n                    # downgrade to Cookie\n                    dict[k] = Cookie.parse(Cookie.__str__(c))[k]\n\n        for k in del_list:\n            del dict[k]\n\n        return dict\n\n    parse = classmethod(parse)\n\n    def __str__(self):\n\n        m = base64.encodestring(marshal.dumps(self.value))\n        # on long cookies, the base64 encoding can contain multiple lines\n        # separated by \\n or \\r\\n\n        m = ''.join(m.split())\n\n        result = [\"%s=%s%s\" % (self.name, self.hexdigest(m), m)]\n        for name in self._valid_attr:\n            if hasattr(self, name):\n                if name in (\"secure\", \"discard\", \"httponly\"):\n                    result.append(name)\n                else:\n                    result.append(\"%s=%s\" % (name, getattr(self, name)))\n        return \"; \".join(result)\n\n    def unmarshal(self, secret):\n\n        self.unsign(secret)\n\n        try:\n            data = base64.decodestring(self.value)\n        except:\n            raise CookieError(\"Cannot base64 Decode Cookie: %s=%s\" % (self.name, self.value))\n\n        try:\n            self.value = marshal.loads(data)\n        except (EOFError, ValueError, TypeError):\n            raise CookieError(\"Cannot Unmarshal Cookie: %s=%s\" % (self.name, self.value))\n\n\n# This is a simplified and in some places corrected\n# (at least I think it is) pattern from standard lib Cookie.py\n\n_cookiePattern = re.compile(\n    r\"(?x)\"                       # Verbose pattern\n    r\"[,\\ ]*\"                     # space/comma (RFC2616 4.2) before attr-val is eaten\n    r\"(?P<key>\"                   # Start of group 'key'\n    r\"[^;\\ =]+\"                   # anything but ';', ' ' or '='\n    r\")\"                          # End of group 'key'\n    r\"\\ *(=\\ *)?\"                 # a space, then may be \"=\", more space\n    r\"(?P<val>\"                   # Start of group 'val'\n    r'\"(?:[^\\\\\"]|\\\\.)*\"'          # a doublequoted string\n    r\"|\"                          # or\n    r\"[^;]*\"                      # any word or empty string\n    r\")\"                          # End of group 'val'\n    r\"\\s*;?\"                      # probably ending in a semi-colon\n    )\n\ndef _parse_cookie(str, Class, names=None):\n    # XXX problem is we should allow duplicate\n    # strings\n    result = {}\n\n    matchIter = _cookiePattern.finditer(str)\n\n    for match in matchIter:\n        key, val = match.group(\"key\"), match.group(\"val\")\n\n        # We just ditch the cookies names which start with a dollar sign since\n        # those are in fact RFC2965 cookies attributes. See bug [#MODPYTHON-3].\n        if key[0] != '$' and (names is None or key in names):\n            result[key] = Class(key, val)\n\n    return result\n\ndef add_cookie(req, cookie, value=\"\", **kw):\n    \"\"\"\n    Sets a cookie in outgoing headers and adds a cache\n    directive so that caches don't cache the cookie.\n    \"\"\"\n\n    # is this a cookie?\n    if not isinstance(cookie, Cookie):\n\n        # make a cookie\n        cookie = Cookie(cookie, value, **kw)\n\n    if \"Set-Cookie\" not in req.headers_out:\n        req.headers_out.add(\"Cache-Control\", 'no-cache=\"set-cookie\"')\n\n    req.headers_out.add(\"Set-Cookie\", str(cookie))\n\ndef get_cookies(req, Class=Cookie, **kw):\n    \"\"\"\n    A shorthand for retrieving and parsing cookies given\n    a Cookie class. 
The class must be one of the classes from\n this module.\n \"\"\"\n\n if \"cookie\" not in req.headers_in:\n return {}\n\n cookies = req.headers_in[\"cookie\"]\n if type(cookies) == type([]):\n cookies = '; '.join(cookies)\n\n return Class.parse(cookies, **kw)\n\ndef get_cookie(req, name, Class=Cookie, **kw):\n cookies = get_cookies(req, Class, names=[name], **kw)\n if name in cookies:\n return cookies[name]\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475246,"cells":{"repo_name":{"kind":"string","value":"kirmani/lockman"},"path":{"kind":"string","value":"MC/RPi.GPIO-0.1.0/build/lib.linux-armv6l-2.7/RPi/GPIO/__init__.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"3646"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright (c) 2012 Ben Croston\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport atexit\n\n# pins start from 1, tuple index starts from 0\n_GPIO_PINS = (None, None, None, '0', None, '1', None, '4', '14', None, '15', '17', '18', '21', None, '22', '23', None, '24', '10', None, '9', '25', '11', '8', None, '7')\n\nIN = 'in'\nOUT = 'out'\n\n_ExportedIds = {}\n\nclass InvalidPinException(Exception):\n \"\"\"The pin sent is invalid on a Raspberry Pi\"\"\"\n pass\n\nclass InvalidDirectionException(Exception):\n \"\"\"An invalid direction was passed to setup()\"\"\"\n pass\n\nclass WrongDirectionException(Exception):\n \"\"\"The GPIO channel has not been set up or is set up in the wrong direction\"\"\"\n pass\n\ndef _GetValidId(pin):\n try:\n value = _GPIO_PINS[int(pin)]\n except:\n raise InvalidPinException\n if value is None or pin < 1:\n raise InvalidPinException\n return value\n\ndef setup(pin, direction):\n \"\"\"\n Set up the GPIO channel and direction\n pin - RPi board GPIO pin number (not SOC pin number). 
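The three cookie tiers above (Cookie, SignedCookie, MarshalCookie) compose as plain classes, so they can be exercised without an Apache request object. A Python 2-era round-trip sketch; the module predates the mandatory digestmod argument of Python 3's hmac:

# Round-trip through SignedCookie; no attributes, so parse() sees one pair.
secret = 'not-a-real-secret'

c = SignedCookie('session', 'user42', secret=secret)
header_value = str(c)             # 'session=' + 32-char HMAC hex digest + 'user42'

parsed = SignedCookie.parse(header_value, secret)
assert parsed['session'].value == 'user42'

# A wrong secret downgrades to a plain, unverified Cookie by default
# (mismatch=Cookie.EXCEPTION would raise CookieError instead).
downgraded = SignedCookie.parse(header_value, 'wrong-secret')
assert type(downgraded['session']) is Cookie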
Pins start from 1\n direction - IN or OUT\n \"\"\"\n id = _GetValidId(pin)\n if direction != IN and direction != OUT:\n raise InvalidDirectionException\n\n # unexport if it exists\n if os.path.exists('/sys/class/gpio/gpio%s'%id):\n with open('/sys/class/gpio/unexport', 'w') as f:\n f.write(id)\n\n # export\n with open('/sys/class/gpio/export', 'w') as f:\n f.write(id)\n\n # set i/o direction\n with open('/sys/class/gpio/gpio%s/direction'%id, 'w') as f:\n f.write(direction)\n _ExportedIds[id] = direction\n\ndef output(pin, value):\n \"\"\"Write to a GPIO channel\"\"\"\n id = _GetValidId(pin)\n if id not in _ExportedIds or _ExportedIds[id] != OUT:\n raise WrongDirectionException\n with open('/sys/class/gpio/gpio%s/value'%id, 'w') as f:\n f.write('1' if value else '0')\n\ndef input(pin):\n \"\"\"Read from a GPIO channel\"\"\"\n id = _GetValidId(pin)\n if id not in _ExportedIds or _ExportedIds[id] != IN:\n raise WrongDirectionException\n with open('/sys/class/gpio/gpio%s/value'%id, 'r') as f:\n return f.read(1) == '1'\n\n# clean up routine\ndef _unexport():\n \"\"\"Clean up by unexporting evey channel that we have set up\"\"\"\n for id in _ExportedIds:\n if os.path.exists('/sys/class/gpio/gpio%s'%id):\n with open('/sys/class/gpio/unexport', 'w') as f:\n f.write(id)\natexit.register(_unexport)\n\nif __name__ == '__main__':\n # assumes pin 11 INPUT\n # pin 12 OUTPUT\n setup(11, IN)\n setup(12, OUT)\n print(input(11))\n output(12, True)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475247,"cells":{"repo_name":{"kind":"string","value":"zakuro9715/lettuce"},"path":{"kind":"string","value":"tests/integration/lib/Django-1.2.5/tests/regressiontests/many_to_one_regress/tests.py"},"copies":{"kind":"string","value":"92"},"size":{"kind":"string","value":"4466"},"content":{"kind":"string","value":"from django.db import models\nfrom django.test import TestCase\n\nfrom models import First, Second, Third, Parent, Child, Category, Record, Relation\n\nclass ManyToOneRegressionTests(TestCase):\n def test_object_creation(self):\n Third.objects.create(id='3', name='An example')\n parent = Parent(name='fred')\n parent.save()\n Child.objects.create(name='bam-bam', parent=parent)\n\n def test_fk_assignment_and_related_object_cache(self):\n # Tests of ForeignKey assignment and the related-object cache (see #6886).\n\n p = Parent.objects.create(name=\"Parent\")\n c = Child.objects.create(name=\"Child\", parent=p)\n\n # Look up the object again so that we get a \"fresh\" object.\n c = Child.objects.get(name=\"Child\")\n p = c.parent\n\n # Accessing the related object again returns the exactly same object.\n self.assertTrue(c.parent is p)\n\n # But if we kill the cache, we get a new object.\n del c._parent_cache\n self.assertFalse(c.parent is p)\n\n # Assigning a new object results in that object getting cached immediately.\n p2 = Parent.objects.create(name=\"Parent 2\")\n c.parent = p2\n self.assertTrue(c.parent is p2)\n\n # Assigning None succeeds if field is null=True.\n p.bestchild = None\n self.assertTrue(p.bestchild is None)\n\n # bestchild should still be None after saving.\n p.save()\n self.assertTrue(p.bestchild is None)\n\n # bestchild should still be None after fetching the object again.\n p = Parent.objects.get(name=\"Parent\")\n self.assertTrue(p.bestchild is None)\n\n # Assigning None fails: Child.parent is null=False.\n self.assertRaises(ValueError, setattr, c, \"parent\", None)\n\n # You also can't assign an object of the wrong type here\n self.assertRaises(ValueError, setattr, c, \"parent\", 
First(id=1, second=1))\n\n # Nor can you explicitly assign None to Child.parent during object\n # creation (regression for #9649).\n self.assertRaises(ValueError, Child, name='xyzzy', parent=None)\n self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)\n\n # Creation using keyword argument should cache the related object.\n p = Parent.objects.get(name=\"Parent\")\n c = Child(parent=p)\n self.assertTrue(c.parent is p)\n\n # Creation using keyword argument and unsaved related instance (#8070).\n p = Parent()\n c = Child(parent=p)\n self.assertTrue(c.parent is p)\n\n # Creation using attname keyword argument and an id will cause the\n # related object to be fetched.\n p = Parent.objects.get(name=\"Parent\")\n c = Child(parent_id=p.id)\n self.assertFalse(c.parent is p)\n self.assertEqual(c.parent, p)\n\n def test_multiple_foreignkeys(self):\n # Test of multiple ForeignKeys to the same model (bug #7125).\n c1 = Category.objects.create(name='First')\n c2 = Category.objects.create(name='Second')\n c3 = Category.objects.create(name='Third')\n r1 = Record.objects.create(category=c1)\n r2 = Record.objects.create(category=c1)\n r3 = Record.objects.create(category=c2)\n r4 = Record.objects.create(category=c2)\n r5 = Record.objects.create(category=c3)\n r = Relation.objects.create(left=r1, right=r2)\n r = Relation.objects.create(left=r3, right=r4)\n r = Relation.objects.create(left=r1, right=r3)\n r = Relation.objects.create(left=r5, right=r2)\n r = Relation.objects.create(left=r3, right=r2)\n\n q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])\n self.assertQuerysetEqual(q1, [\"\"])\n\n q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')\n self.assertQuerysetEqual(q2, [\"\", \"\"])\n\n p = Parent.objects.create(name=\"Parent\")\n c = Child.objects.create(name=\"Child\", parent=p)\n self.assertRaises(ValueError, Child.objects.create, name=\"Grandchild\", parent=c)\n\n def test_fk_instantiation_outside_model(self):\n # Regression for #12190 -- Should be able to instantiate a FK outside\n # of a model, and interrogate its related field.\n cat = models.ForeignKey(Category)\n self.assertEqual('id', cat.rel.get_related_field().name)\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475248,"cells":{"repo_name":{"kind":"string","value":"Ant-OS/android_packages_apps_OTAUpdates"},"path":{"kind":"string","value":"jni/boost_1_57_0/libs/python/test/operators.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"1166"},"content":{"kind":"string","value":"# Copyright David Abrahams 2004. Distributed under the Boost\n# Software License, Version 1.0. 
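Two rows up, the sysfs-based RPi.GPIO shim is driven entirely through setup/output/input, with cleanup handled by the atexit hook it registers. A blink sketch against that exact surface, using board pin numbers from its _GPIO_PINS table; this needs root on a real Raspberry Pi:

# Blink board pin 12 using the sysfs RPi.GPIO module from the row above.
import time
import RPi.GPIO as GPIO

GPIO.setup(12, GPIO.OUT)          # exports the channel and sets its direction
for _ in range(5):
    GPIO.output(12, True)
    time.sleep(0.5)
    GPIO.output(12, False)
    time.sleep(0.5)
# No explicit cleanup: the module unexports every channel via atexit.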
(See accompanying\n# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n'''\n>>> from operators_ext import *\n\n Check __nonzero__ support\n \n>>> assert X(2)\n>>> assert not X(0)\n\n ----\n \n>>> x = X(42)\n>>> x.value()\n42\n>>> y = x - X(5)\n>>> y.value()\n37\n>>> y = x - 4\n>>> y.value()\n38\n>>> y = 3 - x\n>>> y.value()\n-39\n>>> (-y).value()\n39\n\n>>> (x + y).value()\n3\n\n>>> abs(y).value()\n39\n\n>>> x < 10\n0\n>>> x < 43\n1\n\n>>> 10 < x\n1\n>>> 43 < x\n0\n\n>>> x < y\n0\n>>> y < x\n1\n\n ------\n>>> x > 10\n1\n>>> x > 43\n0\n\n>>> 10 > x\n0\n>>> 43 > x\n1\n\n>>> x > y\n1\n>>> y > x\n0\n\n>>> y = x - 5\n>>> x -= y\n>>> x.value()\n5\n>>> str(x)\n'5'\n\n>>> z = Z(10)\n>>> int(z)\n10\n>>> float(z)\n10.0\n>>> complex(z)\n(10+0j)\n\n>>> pow(2,x)\n32\n>>> pow(x,2).value()\n25\n>>> pow(X(2),x).value()\n32\n'''\n\ndef run(args = None):\n import sys\n import doctest\n\n if args is not None:\n sys.argv = args\n return doctest.testmod(sys.modules.get(__name__))\n \nif __name__ == '__main__':\n print \"running...\"\n import sys\n status = run()[0]\n if (status == 0): print \"Done.\"\n sys.exit(status)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475249,"cells":{"repo_name":{"kind":"string","value":"mistercrunch/airflow"},"path":{"kind":"string","value":"airflow/migrations/versions/45ba3f1493b9_add_k8s_yaml_to_rendered_templates.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"1689"},"content":{"kind":"string","value":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
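The operators_ext doctest above relies on the run() helper pattern: doctest.testmod over the module's own docstring, with the failure count as the exit status. The same harness reduced to a reusable shim; testmod returns a (failed, attempted) pair:

# Generic version of the run()/doctest.testmod harness used above.
import doctest
import sys

def run_doctests(module=None):
    failed, attempted = doctest.testmod(module or sys.modules[__name__])
    print('ran %d examples, %d failed' % (attempted, failed))
    return failed

if __name__ == '__main__':
    sys.exit(run_doctests())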
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"add-k8s-yaml-to-rendered-templates\n\nRevision ID: 45ba3f1493b9\nRevises: 364159666cbd\nCreate Date: 2020-10-23 23:01:52.471442\n\n\"\"\"\n\nimport sqlalchemy_jsonfield\nfrom alembic import op\nfrom sqlalchemy import Column\n\nfrom airflow.settings import json\n\n# revision identifiers, used by Alembic.\nrevision = '45ba3f1493b9'\ndown_revision = '364159666cbd'\nbranch_labels = None\ndepends_on = None\n\n__tablename__ = \"rendered_task_instance_fields\"\nk8s_pod_yaml = Column('k8s_pod_yaml', sqlalchemy_jsonfield.JSONField(json=json), nullable=True)\n\n\ndef upgrade():\n \"\"\"Apply add-k8s-yaml-to-rendered-templates\"\"\"\n with op.batch_alter_table(__tablename__, schema=None) as batch_op:\n batch_op.add_column(k8s_pod_yaml)\n\n\ndef downgrade():\n \"\"\"Unapply add-k8s-yaml-to-rendered-templates\"\"\"\n with op.batch_alter_table(__tablename__, schema=None) as batch_op:\n batch_op.drop_column('k8s_pod_yaml')\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475250,"cells":{"repo_name":{"kind":"string","value":"haoch/incubator-eagle"},"path":{"kind":"string","value":"eagle-external/eagle-ambari/lib/EAGLE/package/scripts/eagle_userprofile_topology.py"},"copies":{"kind":"string","value":"21"},"size":{"kind":"string","value":"3337"},"content":{"kind":"string","value":"#!/usr/bin/python\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
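The migration above is the canonical Alembic batch pattern: op.batch_alter_table recreates the table where needed, so the column add and drop also work on SQLite, whose ALTER TABLE support is limited. The same skeleton for any new nullable column; the table and column names below are hypothetical:

# Hypothetical migration skeleton mirroring the add/drop pair above.
import sqlalchemy as sa
from alembic import op

def upgrade():
    with op.batch_alter_table('task_instance', schema=None) as batch_op:
        batch_op.add_column(sa.Column('example_note', sa.Text(), nullable=True))

def downgrade():
    with op.batch_alter_table('task_instance', schema=None) as batch_op:
        batch_op.drop_column('example_note')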
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nfrom resource_management import *\nfrom resource_management.libraries.script.script import Script\nfrom resource_management.libraries.functions import get_unique_id_and_date\nfrom resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version\nfrom resource_management.libraries.functions.security_commons import build_expectations, \\\n cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \\\n FILE_TYPE_JAAS_CONF\nfrom resource_management.core.shell import call\nfrom resource_management.core.logger import Logger\nfrom resource_management.core.resources.system import Execute\nfrom resource_management.libraries.functions.check_process_status import check_process_status\nfrom resource_management.libraries.functions.format import format\nfrom resource_management.libraries.functions.validate import call_and_match_output\n\nfrom actions import *\n\nclass EagleUserProfileTopology(Script):\n # def get_stack_to_component(self):\n # return {\"HDP\": \"EAGLE-TOPOLOGY\"}\n\n def install(self, env):\n Logger.info('Install Eagle DAM UserProfile Topology')\n # self.install_packages(env)\n import params\n env.set_params(params)\n self.configure(env)\n # eagle_hdfs_topology_exec(action = 'init')\n\n\n def configure(self,env):\n Logger.info(\"Configure Eagle DAM UserProfile Topology\")\n import params\n env.set_params(params)\n # eagle_hdfs_topology_exec(action = 'init')\n\n def pre_rolling_restart(self,env):\n Logger.info(\"Executing rolling pre-restart Eagle DAM UserProfile Topology\")\n import params\n env.set_params(params)\n\n # if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:\n # Execute(format(\"hdp-select set eagle-topology {version}\"))\n\n def stop(self, env):\n Logger.info('Stopping Eagle DAM UserProfile Topology')\n import params\n env.set_params(params)\n self.configure(env)\n eagle_userprofile_topology_exec(action = 'stop')\n\n def start(self, env):\n Logger.info('Starting Eagle DAM UserProfile Topology')\n import params\n env.set_params(params)\n self.configure(env)\n\n eagle_userprofile_topology_exec(action = 'init')\n eagle_userprofile_topology_exec(action = 'start')\n\n def status(self, env):\n Logger.info('Checking Eagle DAM UserProfile Topology status')\n import params\n env.set_params(params)\n self.configure(env)\n eagle_userprofile_topology_exec(action = 'status')\n\nif __name__ == \"__main__\":\n EagleUserProfileTopology().execute()"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475251,"cells":{"repo_name":{"kind":"string","value":"DailyActie/Surrogate-Model"},"path":{"kind":"string","value":"01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/geom.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2622"},"content":{"kind":"string","value":"from openmdao.main.interfaces import IParametricGeometry, implements\n\n\nclass ParametricGeometry(object):\n \"\"\"A base class for objects implementing the IParametricGeometry\n interface.\n \"\"\"\n\n implements(IParametricGeometry)\n\n def 
__init__(self):\n self._callbacks = []\n\n def regen_model(self):\n \"\"\"Rebuid the geometry based on the current values of the input parameters.\"\"\"\n raise NotImplementedError(\"regen_model\")\n\n def list_parameters(self):\n \"\"\"Return a list of (name, meta) where `name` is the name of the parameter\n and `meta` is a metadata dict.\n \"\"\"\n raise NotImplementedError('list_parameters')\n\n def set_parameter(self, name, value):\n \"\"\"Set the value of the named input parameter.\"\"\"\n raise NotImplementedError(\"set_parameter\")\n\n def get_parameters(self, names):\n \"\"\"Return a list of values of the named parameters.\"\"\"\n raise NotImplementedError(\"get_parameters\")\n\n def get_static_geometry(self):\n \"\"\"Return a 'static' instance of this geometry.\"\"\"\n raise NotImplementedError(\"get_static_geometry\")\n\n def register_param_list_changedCB(self, callback):\n \"\"\"Register a callback that will be called when self.invoke_callbacks() is called.\n self.invoke_callbacks() should be called from the inheriting class whenever any\n parameters are added, removed, or change their type.\n \"\"\"\n self._callbacks.append(callback)\n\n def invoke_callbacks(self):\n \"\"\"Invokes any callbacks that have been registered via register_param_list_changedCB.\"\"\"\n\n for cb in self._callbacks:\n cb()\n\n def get_attributes(self, io_only=True):\n \"\"\"Return an attribute dict for use by the openmdao GUI. You only need to\n override this if you have inputs that the user must set directly into your\n ParametricGeometry object. For example, GEMParametricGeometry has a model_file\n input that the user can set in the GUI to change the .csm file that supplies\n the geometry.\n \"\"\"\n\n # the commented out section below shows an example of how you would report \n # the existence of an input named 'model_file' to the OpenMDAO GUI. \n return {\n 'type': type(self).__name__,\n 'Inputs': [\n # {\n # 'name': 'model_file',\n # 'id': 'model_file',\n # 'type': type(self._model_file).__name__,\n # 'value': self._model_file,\n # 'connected': '',\n # }\n ]\n }\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475252,"cells":{"repo_name":{"kind":"string","value":"bytor99999/vertx-web"},"path":{"kind":"string","value":"src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py"},"copies":{"kind":"string","value":"2994"},"size":{"kind":"string","value":"1676"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is mozilla.org code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 1998\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n# \n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n# \n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\nfrom .mbcharsetprober import MultiByteCharSetProber\nfrom .codingstatemachine import CodingStateMachine\nfrom .chardistribution import EUCTWDistributionAnalysis\nfrom .mbcssm import EUCTWSMModel\n\nclass EUCTWProber(MultiByteCharSetProber):\n def __init__(self):\n MultiByteCharSetProber.__init__(self)\n self._mCodingSM = CodingStateMachine(EUCTWSMModel)\n self._mDistributionAnalyzer = EUCTWDistributionAnalysis()\n self.reset()\n\n def get_charset_name(self):\n return \"EUC-TW\"\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475253,"cells":{"repo_name":{"kind":"string","value":"eoghan2t9/Oppo-Find5-4.2-Kernel"},"path":{"kind":"string","value":"arch/ia64/scripts/unwcheck.py"},"copies":{"kind":"string","value":"13143"},"size":{"kind":"string","value":"1714"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Usage: unwcheck.py FILE\n#\n# This script checks the unwind info of each function in file FILE\n# and verifies that the sum of the region-lengths matches the total\n# length of the function.\n#\n# Based on a shell/awk script originally written by Harish Patil,\n# which was converted to Perl by Matthew Chapman, which was converted\n# to Python by David Mosberger.\n#\nimport os\nimport re\nimport sys\n\nif len(sys.argv) != 2:\n print \"Usage: %s FILE\" % sys.argv[0]\n sys.exit(2)\n\nreadelf = os.getenv(\"READELF\", \"readelf\")\n\nstart_pattern = re.compile(\"<([^>]*)>: \\[0x([0-9a-f]+)-0x([0-9a-f]+)\\]\")\nrlen_pattern = re.compile(\".*rlen=([0-9]+)\")\n\ndef check_func (func, slots, rlen_sum):\n if slots != rlen_sum:\n global num_errors\n num_errors += 1\n if not func: func = \"[%#x-%#x]\" % (start, end)\n print \"ERROR: %s: %lu slots, total region length = %lu\" % (func, slots, rlen_sum)\n return\n\nnum_funcs = 0\nnum_errors = 0\nfunc = False\nslots = 0\nrlen_sum = 0\nfor line in os.popen(\"%s -u %s\" % (readelf, sys.argv[1])):\n m = start_pattern.match(line)\n if m:\n check_func(func, slots, rlen_sum)\n\n func = m.group(1)\n start = long(m.group(2), 16)\n end = long(m.group(3), 16)\n slots = 3 * (end - start) / 16\n rlen_sum = 0L\n num_funcs += 1\n else:\n m = rlen_pattern.match(line)\n if m:\n rlen_sum += long(m.group(1))\ncheck_func(func, slots, rlen_sum)\n\nif num_errors == 0:\n print \"No errors detected in %u functions.\" % num_funcs\nelse:\n if num_errors > 1:\n err=\"errors\"\n else:\n err=\"error\"\n print \"%u %s detected in %u functions.\" % (num_errors, err, num_funcs)\n sys.exit(1)\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475254,"cells":{"repo_name":{"kind":"string","value":"dungeonsnd/test-code"},"path":{"kind":"string","value":"dev_examples/pyserver/examples/server.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1790"},"content":{"kind":"string","value":"\r\n\r\nclass Actor(gevent.Greenlet):\r\n\r\n def __init__(self):\r\n self.inbox = Queue()\r\n Greenlet.__init__(self)\r\n\r\n def onRecv(self, message):\r\n \"\"\"\r\n Define in your subclass.\r\n \"\"\"\r\n raise NotImplemented()\r\n\r\n def _run(self):\r\n self.running = True\r\n\r\n while self.running:\r\n message = self.inbox.get()\r\n self.onRecv(message)\r\n \r\nimport gevent\r\nfrom 
gevent.queue import Queue\r\nfrom gevent import Greenlet\r\n\r\nclass loginActor(Actor):\r\n def onRecv(self, message):\r\n print(message)\r\n chatAct.inbox.put('loginAct')\r\n gevent.sleep(0)\r\n\r\nclass chatActor(Actor):\r\n def onRecv(self, message):\r\n print(message)\r\n loginAct.inbox.put('chatAct')\r\n gevent.sleep(0)\r\n\r\n\r\ndef Dispatch(line):\r\n # route one request line to its actor and return the reply text (None closes the connection)\r\n line = line.strip()\r\n if line == 'loginAct':\r\n loginAct.inbox.put('login act returned.')\r\n return 'login act returned.'\r\n elif line == 'chatActor':\r\n chatAct.inbox.put('chat act returned.')\r\n return 'chat act returned.'\r\n else:\r\n return None\r\n\r\n\r\nimport socket\r\nimport gevent.server\r\nfrom gevent.pool import Pool\r\n\r\ndef handler(sock, address):\r\n fp = sock.makefile()\r\n while True:\r\n line = fp.readline()\r\n result = Dispatch(line)\r\n if result:\r\n fp.write(result)\r\n fp.flush()\r\n else:\r\n break\r\n sock.shutdown(socket.SHUT_WR)\r\n sock.close()\r\n\r\nif __name__ == '__main__':\r\n\r\n queueSize = 4\r\n queueList = [Queue() for i in xrange(queueSize)]\r\n\r\n # Dispatch and the actor callbacks above expect these module-level instances\r\n loginAct = loginActor()\r\n chatAct = chatActor()\r\n loginAct.start()\r\n chatAct.start()\r\n\r\n server = gevent.server.StreamServer(('0.0.0.0', 18600), handler, backlog=1024, spawn=Pool(50000)) # do not accept more than 50000 concurrent connections\r\n server.serve_forever()\r\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475255,"cells":{"repo_name":{"kind":"string","value":"drpjk/Arduino"},"path":{"kind":"string","value":"arduino-core/src/processing/app/i18n/python/requests/packages/charade/sbcsgroupprober.py"},"copies":{"kind":"string","value":"2936"},"size":{"kind":"string","value":"3291"},"content":{"kind":"string","value":"######################## BEGIN LICENSE BLOCK ########################\n# The Original Code is Mozilla Universal charset detector code.\n#\n# The Initial Developer of the Original Code is\n# Netscape Communications Corporation.\n# Portions created by the Initial Developer are Copyright (C) 2001\n# the Initial Developer. All Rights Reserved.\n#\n# Contributor(s):\n# Mark Pilgrim - port to Python\n# Shy Shalom - original C code\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA\n# 02110-1301 USA\n######################### END LICENSE BLOCK #########################\n\nfrom .charsetgroupprober import CharSetGroupProber\nfrom .sbcharsetprober import SingleByteCharSetProber\nfrom .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,\n Latin5CyrillicModel, MacCyrillicModel,\n Ibm866Model, Ibm855Model)\nfrom .langgreekmodel import Latin7GreekModel, Win1253GreekModel\nfrom .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel\nfrom .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel\nfrom .langthaimodel import TIS620ThaiModel\nfrom .langhebrewmodel import Win1255HebrewModel\nfrom .hebrewprober import HebrewProber\n\n\nclass SBCSGroupProber(CharSetGroupProber):\n def __init__(self):\n CharSetGroupProber.__init__(self)\n self._mProbers = [\n SingleByteCharSetProber(Win1251CyrillicModel),\n SingleByteCharSetProber(Koi8rModel),\n SingleByteCharSetProber(Latin5CyrillicModel),\n SingleByteCharSetProber(MacCyrillicModel),\n SingleByteCharSetProber(Ibm866Model),\n SingleByteCharSetProber(Ibm855Model),\n SingleByteCharSetProber(Latin7GreekModel),\n SingleByteCharSetProber(Win1253GreekModel),\n SingleByteCharSetProber(Latin5BulgarianModel),\n SingleByteCharSetProber(Win1251BulgarianModel),\n SingleByteCharSetProber(Latin2HungarianModel),\n SingleByteCharSetProber(Win1250HungarianModel),\n SingleByteCharSetProber(TIS620ThaiModel),\n ]\n hebrewProber = HebrewProber()\n logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,\n False, hebrewProber)\n visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,\n hebrewProber)\n hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)\n self._mProbers.extend([hebrewProber, logicalHebrewProber,\n visualHebrewProber])\n\n self.reset()\n"},"license":{"kind":"string","value":"lgpl-2.1"}}},{"rowIdx":475256,"cells":{"repo_name":{"kind":"string","value":"ecosoft-odoo/odoo"},"path":{"kind":"string","value":"addons/account/report/account_print_overdue.py"},"copies":{"kind":"string","value":"380"},"size":{"kind":"string","value":"3907"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nimport time\n\nfrom openerp.report import report_sxw\nfrom openerp.osv import osv\n\n\nclass Overdue(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context):\n super(Overdue, self).__init__(cr, uid, name, context=context)\n ids = context.get('active_ids')\n partner_obj = self.pool['res.partner']\n docs = partner_obj.browse(cr, uid, ids, context)\n\n due = {}\n paid = {}\n mat = {}\n\n for partner in docs:\n due[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['debit'] or 0) or (y['account_id']['type'] == 'payable' and y['credit'] * -1 or 0)), self._lines_get(partner), 0)\n paid[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['credit'] or 0) or (y['account_id']['type'] == 'payable' and y['debit'] * -1 or 0)), self._lines_get(partner), 0)\n mat[partner.id] = reduce(lambda x, y: x + (y['debit'] - y['credit']), filter(lambda x: x['date_maturity'] < time.strftime('%Y-%m-%d'), self._lines_get(partner)), 0)\n\n addresses = self.pool['res.partner']._address_display(cr, uid, ids, None, None)\n self.localcontext.update({\n 'docs': docs,\n 'time': time,\n 'getLines': self._lines_get,\n 'tel_get': self._tel_get,\n 'message': self._message,\n 'due': due,\n 'paid': paid,\n 'mat': mat,\n 'addresses': addresses\n })\n self.context = context\n\n def _tel_get(self,partner):\n if not partner:\n return False\n res_partner = self.pool['res.partner']\n addresses = res_partner.address_get(self.cr, self.uid, [partner.id], ['invoice'])\n adr_id = addresses and addresses['invoice'] or False\n if adr_id:\n adr=res_partner.read(self.cr, self.uid, [adr_id])[0]\n return adr['phone']\n else:\n return partner.phone or False\n return False\n\n def _lines_get(self, partner):\n moveline_obj = self.pool['account.move.line']\n movelines = moveline_obj.search(self.cr, self.uid,\n [('partner_id', '=', partner.id),\n ('account_id.type', 'in', ['receivable', 'payable']),\n ('state', '<>', 'draft'), ('reconcile_id', '=', False)])\n movelines = moveline_obj.browse(self.cr, self.uid, movelines)\n return movelines\n\n def _message(self, obj, company):\n company_pool = self.pool['res.company']\n message = company_pool.browse(self.cr, self.uid, company.id, {'lang':obj.lang}).overdue_msg\n return message.split('\\n')\n\n\nclass report_overdue(osv.AbstractModel):\n _name = 'report.account.report_overdue'\n _inherit = 'report.abstract_report'\n _template = 'account.report_overdue'\n _wrapped_report_class = Overdue\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475257,"cells":{"repo_name":{"kind":"string","value":"datamade/jekyll-hook"},"path":{"kind":"string","value":"tasks.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1685"},"content":{"kind":"string","value":"import pickle\nfrom redis import Redis\nimport subprocess\nfrom uuid import uuid4\n\nfrom app_config import SENTRY_DSN, REDIS_QUEUE_KEY\n\nfrom raven import Client\n\nsentry = Client(SENTRY_DSN)\n\nredis = Redis()\n\nclass DelayedResult(object):\n def __init__(self, key):\n self.key = key\n self._rv = None\n\n @property\n def return_value(self):\n if self._rv is None:\n rv = redis.get(self.key)\n if rv is not None:\n self._rv = pickle.loads(rv)\n return self._rv\n\ndef queuefunc(f):\n def delay(*args, **kwargs):\n\n qkey = REDIS_QUEUE_KEY\n key = '%s:result:%s' % (qkey, 
str(uuid4()))\n s = pickle.dumps((f, key, args, kwargs))\n \n redis.rpush(REDIS_QUEUE_KEY, s)\n \n return DelayedResult(key)\n \n f.delay = delay\n return f\n\n@queuefunc\ndef run_scripts(scripts, args):\n build, publish = scripts\n\n try:\n subprocess.check_call([build] + args)\n except subprocess.CalledProcessError as e:\n print('EXCEPTION', e)\n sentry.captureException()\n\n try:\n subprocess.check_call([publish] + args)\n except subprocess.CalledProcessError as e:\n print('EXCEPTION', e)\n sentry.captureException()\n\ndef queue_daemon():\n print('Listening for work ... ')\n rv_ttl = 500 # TTL in seconds for stored results (assumed value; used by redis.expire below)\n while 1:\n msg = redis.blpop(REDIS_QUEUE_KEY)\n func, key, args, kwargs = pickle.loads(msg[1])\n \n try:\n rv = func(*args, **kwargs)\n except Exception as e:\n sentry.captureException()\n rv = e.message\n \n if rv is not None:\n redis.set(key, pickle.dumps(rv))\n redis.expire(key, rv_ttl)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475258,"cells":{"repo_name":{"kind":"string","value":"mhvk/astropy"},"path":{"kind":"string","value":"astropy/io/misc/asdf/tags/transform/basic.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"8303"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\nfrom asdf.versioning import AsdfVersion\n\nfrom astropy.modeling import mappings\nfrom astropy.modeling import functional_models\nfrom astropy.modeling.core import CompoundModel\nfrom astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType\nfrom . import _parameter_to_value\n\n\n__all__ = ['TransformType', 'IdentityType', 'ConstantType']\n\n\nclass TransformType(AstropyAsdfType):\n version = '1.2.0'\n requires = ['astropy']\n\n @classmethod\n def _from_tree_base_transform_members(cls, model, node, ctx):\n if 'name' in node:\n model.name = node['name']\n\n if 'bounding_box' in node:\n model.bounding_box = node['bounding_box']\n\n if \"inputs\" in node:\n if model.n_inputs == 1:\n model.inputs = (node[\"inputs\"],)\n else:\n model.inputs = tuple(node[\"inputs\"])\n\n if \"outputs\" in node:\n if model.n_outputs == 1:\n model.outputs = (node[\"outputs\"],)\n else:\n model.outputs = tuple(node[\"outputs\"])\n\n param_and_model_constraints = {}\n for constraint in ['fixed', 'bounds']:\n if constraint in node:\n param_and_model_constraints[constraint] = node[constraint]\n model._initialize_constraints(param_and_model_constraints)\n\n yield model\n\n if 'inverse' in node:\n model.inverse = node['inverse']\n\n @classmethod\n def from_tree_transform(cls, node, ctx):\n raise NotImplementedError(\n \"Must be implemented in TransformType subclasses\")\n\n @classmethod\n def from_tree(cls, node, ctx):\n model = cls.from_tree_transform(node, ctx)\n return cls._from_tree_base_transform_members(model, node, ctx)\n\n @classmethod\n def _to_tree_base_transform_members(cls, model, node, ctx):\n if getattr(model, '_user_inverse', None) is not None:\n node['inverse'] = model._user_inverse\n\n if model.name is not None:\n node['name'] = model.name\n\n try:\n bb = model.bounding_box\n except NotImplementedError:\n bb = None\n\n if bb is not None:\n if model.n_inputs == 1:\n bb = list(bb)\n else:\n bb = [list(item) for item in model.bounding_box]\n node['bounding_box'] = bb\n if type(model.__class__.inputs) != property:\n node['inputs'] = model.inputs\n node['outputs'] = model.outputs\n\n # model / parameter constraints\n if not isinstance(model, CompoundModel):\n fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}\n if 
fixed_nondefaults:\n node['fixed'] = fixed_nondefaults\n bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}\n if bounds_nondefaults:\n node['bounds'] = bounds_nondefaults\n\n return node\n\n @classmethod\n def to_tree_transform(cls, model, ctx):\n raise NotImplementedError(\"Must be implemented in TransformType subclasses\")\n\n @classmethod\n def to_tree(cls, model, ctx):\n node = cls.to_tree_transform(model, ctx)\n return cls._to_tree_base_transform_members(model, node, ctx)\n\n @classmethod\n def assert_equal(cls, a, b):\n # TODO: If models become comparable themselves, remove this.\n assert a.name == b.name\n # TODO: Assert inverses are the same\n\n\nclass IdentityType(TransformType):\n name = \"transform/identity\"\n types = ['astropy.modeling.mappings.Identity']\n\n @classmethod\n def from_tree_transform(cls, node, ctx):\n return mappings.Identity(node.get('n_dims', 1))\n\n @classmethod\n def to_tree_transform(cls, data, ctx):\n node = {}\n if data.n_inputs != 1:\n node['n_dims'] = data.n_inputs\n return node\n\n @classmethod\n def assert_equal(cls, a, b):\n # TODO: If models become comparable themselves, remove this.\n TransformType.assert_equal(a, b)\n assert (isinstance(a, mappings.Identity) and\n isinstance(b, mappings.Identity) and\n a.n_inputs == b.n_inputs)\n\n\nclass ConstantType(TransformType):\n name = \"transform/constant\"\n version = '1.4.0'\n supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']\n types = ['astropy.modeling.functional_models.Const1D',\n 'astropy.modeling.functional_models.Const2D']\n\n @classmethod\n def from_tree_transform(cls, node, ctx):\n if cls.version < AsdfVersion('1.4.0'):\n # The 'dimensions' property was added in 1.4.0,\n # previously all values were 1D.\n return functional_models.Const1D(node['value'])\n elif node['dimensions'] == 1:\n return functional_models.Const1D(node['value'])\n elif node['dimensions'] == 2:\n return functional_models.Const2D(node['value'])\n\n @classmethod\n def to_tree_transform(cls, data, ctx):\n if cls.version < AsdfVersion('1.4.0'):\n if not isinstance(data, functional_models.Const1D):\n raise ValueError(\n f'constant-{cls.version} does not support models with > 1 dimension')\n return {\n 'value': _parameter_to_value(data.amplitude)\n }\n else:\n if isinstance(data, functional_models.Const1D):\n dimension = 1\n elif isinstance(data, functional_models.Const2D):\n dimension = 2\n return {\n 'value': _parameter_to_value(data.amplitude),\n 'dimensions': dimension\n }\n\n\nclass GenericModel(mappings.Mapping):\n\n def __init__(self, n_inputs, n_outputs):\n mapping = tuple(range(n_inputs))\n super().__init__(mapping)\n self._n_outputs = n_outputs\n self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))\n\n @property\n def inverse(self):\n raise NotImplementedError()\n\n\nclass GenericType(TransformType):\n name = \"transform/generic\"\n types = [GenericModel]\n\n @classmethod\n def from_tree_transform(cls, node, ctx):\n return GenericModel(\n node['n_inputs'], node['n_outputs'])\n\n @classmethod\n def to_tree_transform(cls, data, ctx):\n return {\n 'n_inputs': data.n_inputs,\n 'n_outputs': data.n_outputs\n }\n\n\nclass UnitsMappingType(AstropyType):\n name = \"transform/units_mapping\"\n version = \"1.0.0\"\n types = [mappings.UnitsMapping]\n\n @classmethod\n def to_tree(cls, node, ctx):\n tree = {}\n\n if node.name is not None:\n tree[\"name\"] = node.name\n\n inputs = []\n outputs = []\n for i, o, m in zip(node.inputs, node.outputs, node.mapping):\n input = {\n \"name\": i,\n 
\"allow_dimensionless\": node.input_units_allow_dimensionless[i],\n }\n if m[0] is not None:\n input[\"unit\"] = m[0]\n if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:\n input[\"equivalencies\"] = node.input_units_equivalencies[i]\n inputs.append(input)\n\n output = {\n \"name\": o,\n }\n if m[-1] is not None:\n output[\"unit\"] = m[-1]\n outputs.append(output)\n\n tree[\"inputs\"] = inputs\n tree[\"outputs\"] = outputs\n\n return tree\n\n @classmethod\n def from_tree(cls, tree, ctx):\n mapping = tuple((i.get(\"unit\"), o.get(\"unit\"))\n for i, o in zip(tree[\"inputs\"], tree[\"outputs\"]))\n\n equivalencies = None\n for i in tree[\"inputs\"]:\n if \"equivalencies\" in i:\n if equivalencies is None:\n equivalencies = {}\n equivalencies[i[\"name\"]] = i[\"equivalencies\"]\n\n kwargs = {\n \"input_units_equivalencies\": equivalencies,\n \"input_units_allow_dimensionless\": {\n i[\"name\"]: i.get(\"allow_dimensionless\", False) for i in tree[\"inputs\"]},\n }\n\n if \"name\" in tree:\n kwargs[\"name\"] = tree[\"name\"]\n\n return mappings.UnitsMapping(mapping, **kwargs)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475259,"cells":{"repo_name":{"kind":"string","value":"collex100/odoo"},"path":{"kind":"string","value":"addons/account_anglo_saxon/invoice.py"},"copies":{"kind":"string","value":"50"},"size":{"kind":"string","value":"13086"},"content":{"kind":"string","value":"##############################################################################\n# \n# OpenERP, Open Source Management Solution\n# Copyright (C) \n# 2004-2010 Tiny SPRL (). \n# 2009-2010 Veritos (http://veritos.nl).\n# All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see . 
\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\nfrom openerp.tools.float_utils import float_round as round\n\nclass account_invoice_line(osv.osv):\n _inherit = \"account.invoice.line\"\n\n _columns = {\n 'move_id': fields.many2one('stock.move', string=\"Move line\", help=\"If the invoice was generated from a stock.picking, reference to the related move line.\"),\n }\n\n def move_line_get(self, cr, uid, invoice_id, context=None):\n res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)\n inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)\n if inv.type in ('out_invoice','out_refund'):\n for i_line in inv.invoice_line:\n res.extend(self._anglo_saxon_sale_move_lines(cr, uid, i_line, res, context=context))\n elif inv.type in ('in_invoice','in_refund'):\n for i_line in inv.invoice_line:\n res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))\n return res\n\n\n def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):\n fiscal_pool = self.pool.get('account.fiscal.position')\n res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id, context)\n if not product:\n return res\n if type in ('in_invoice','in_refund'):\n product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)\n oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id\n if not oa:\n oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id\n if oa:\n fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False\n a = fiscal_pool.map_account(cr, uid, fpos, oa)\n res['value'].update({'account_id':a})\n return res\n\n def _get_price(self, cr, uid, inv, company_currency, i_line, price_unit):\n cur_obj = self.pool.get('res.currency')\n decimal_precision = self.pool.get('decimal.precision')\n if inv.currency_id.id != company_currency:\n price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})\n else:\n price = price_unit * i_line.quantity\n return round(price, decimal_precision.precision_get(cr, uid, 'Account'))\n\n def _anglo_saxon_sale_move_lines(self, cr, uid, i_line, res, context=None):\n \"\"\"Return the additional move lines for sales invoices and refunds.\n\n i_line: An account.invoice.line object.\n res: The move line entries produced so far by the parent move_line_get.\n \"\"\"\n inv = i_line.invoice_id\n fiscal_pool = self.pool.get('account.fiscal.position')\n fpos = inv.fiscal_position or False\n company_currency = inv.company_id.currency_id.id\n\n if i_line.product_id.type != 'service' and i_line.product_id.valuation == 'real_time':\n # debit account dacc will be the output account\n # first check the product, if empty check the category\n dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id\n if not dacc:\n dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id\n # in both cases the credit account cacc will be the expense 
account\n # first check the product, if empty check the category\n cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id\n if not cacc:\n cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id\n if dacc and cacc:\n price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price\n return [\n {\n 'type':'src',\n 'name': i_line.name[:64],\n 'price_unit':price_unit,\n 'quantity':i_line.quantity,\n 'price':self._get_price(cr, uid, inv, company_currency, i_line, price_unit),\n 'account_id':dacc,\n 'product_id':i_line.product_id.id,\n 'uos_id':i_line.uos_id.id,\n 'account_analytic_id': False,\n 'taxes':i_line.invoice_line_tax_id,\n },\n\n {\n 'type':'src',\n 'name': i_line.name[:64],\n 'price_unit':price_unit,\n 'quantity':i_line.quantity,\n 'price': -1 * self._get_price(cr, uid, inv, company_currency, i_line, price_unit),\n 'account_id':fiscal_pool.map_account(cr, uid, fpos, cacc),\n 'product_id':i_line.product_id.id,\n 'uos_id':i_line.uos_id.id,\n 'account_analytic_id': False,\n 'taxes':i_line.invoice_line_tax_id,\n },\n ]\n return []\n\n\n def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):\n \"\"\"Return the additional move lines for purchase invoices and refunds.\n\n i_line: An account.invoice.line object.\n res: The move line entries produced so far by the parent move_line_get.\n \"\"\"\n inv = i_line.invoice_id\n company_currency = inv.company_id.currency_id.id\n if i_line.product_id and i_line.product_id.valuation == 'real_time':\n if i_line.product_id.type != 'service':\n # get the price difference account at the product\n acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id\n if not acc:\n # if not found on the product get the price difference account at the category\n acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id\n a = None\n\n # oa will be the stock input account\n # first check the product, if empty check the category\n oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id\n if not oa:\n oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id\n if oa:\n # get the fiscal position\n fpos = i_line.invoice_id.fiscal_position or False\n a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)\n diff_res = []\n decimal_precision = self.pool.get('decimal.precision')\n account_prec = decimal_precision.precision_get(cr, uid, 'Account')\n # calculate and write down the possible price difference between invoice price and product price\n for line in res:\n if line.get('invl_id', 0) == i_line.id and a == line['account_id']:\n uom = i_line.product_id.uos_id or i_line.product_id.uom_id\n valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)\n if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:\n #for average/fifo/lifo costing method, fetch real cost price from incomming moves\n stock_move_obj = self.pool.get('stock.move')\n valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)\n if 
valuation_stock_move:\n valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit\n if inv.currency_id.id != company_currency:\n valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})\n if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:\n # price with discount and without tax included\n price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'],\n i_line.price_unit * (1-(i_line.discount or 0.0)/100.0), line['quantity'])['total']\n price_line = round(valuation_price_unit * line['quantity'], account_prec)\n price_diff = round(price_unit - price_line, account_prec)\n line.update({'price': price_line})\n diff_res.append({\n 'type': 'src',\n 'name': i_line.name[:64],\n 'price_unit': round(price_diff / line['quantity'], account_prec),\n 'quantity': line['quantity'],\n 'price': price_diff,\n 'account_id': acc,\n 'product_id': line['product_id'],\n 'uos_id': line['uos_id'],\n 'account_analytic_id': line['account_analytic_id'],\n 'taxes': line.get('taxes', []),\n })\n return diff_res\n return []\n\n\nclass account_invoice(osv.osv):\n _inherit = \"account.invoice\"\n\n def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):\n invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,\n description, journal_id, context=context)\n if invoice.type == 'in_invoice':\n fiscal_position = self.pool.get('account.fiscal.position')\n for _, _, line_dict in invoice_data['invoice_line']:\n if line_dict.get('product_id'):\n product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)\n counterpart_acct_id = product.property_stock_account_output and \\\n product.property_stock_account_output.id\n if not counterpart_acct_id:\n counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \\\n product.categ_id.property_stock_account_output_categ.id\n if counterpart_acct_id:\n fpos = invoice.fiscal_position or False\n line_dict['account_id'] = fiscal_position.map_account(cr, uid,\n fpos,\n counterpart_acct_id)\n return invoice_data\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475260,"cells":{"repo_name":{"kind":"string","value":"orion1024/Sick-Beard"},"path":{"kind":"string","value":"lib/imdb/parser/mobile/__init__.py"},"copies":{"kind":"string","value":"50"},"size":{"kind":"string","value":"37466"},"content":{"kind":"string","value":"\"\"\"\nparser.mobile package (imdb package).\n\nThis package provides the IMDbMobileAccessSystem class used to access\nIMDb's data for mobile systems.\nthe imdb.IMDb function will return an instance of this class when\ncalled with the 'accessSystem' argument set to \"mobile\".\n\nCopyright 2005-2011 Davide Alberani \n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\"\"\"\n\nimport re\nimport logging\nfrom urllib import unquote\n\nfrom imdb.Movie import Movie\nfrom imdb.utils import analyze_title, analyze_name, canonicalName, \\\n date_and_notes\nfrom imdb._exceptions import IMDbDataAccessError\nfrom imdb.parser.http import IMDbHTTPAccessSystem\nfrom imdb.parser.http.utils import subXMLRefs, subSGMLRefs, build_person, \\\n build_movie, re_spaces\n\n# XXX NOTE: the first version of this module was heavily based on\n# regular expressions. This new version replace regexps with\n# find() strings' method calls; despite being less flexible, it\n# seems to be at least as fast and, hopefully, much more\n# lightweight. Yes: the regexp-based version was too heavyweight\n# for systems with very limited CPU power and memory footprint.\nre_spacessub = re_spaces.sub\n# Strip html.\nre_unhtml = re.compile(r'<.+?>')\nre_unhtmlsub = re_unhtml.sub\n# imdb person or movie ids.\nre_imdbID = re.compile(r'(?<=nm|tt|ch)([0-9]{7})\\b')\n\n# movie AKAs.\nre_makas = re.compile('(

.*?

)')\n\n# Remove episode numbers.\nre_filmo_episodes = re.compile('<div class=\"filmo-episodes\">.*?</div>
',\n re.M | re.I)\n\n\ndef _unHtml(s):\n \"\"\"Return a string without tags and no multiple spaces.\"\"\"\n return subSGMLRefs(re_spacessub(' ', re_unhtmlsub('', s)).strip())\n\n\n_inttype = type(0)\n\ndef _getTagsWith(s, cont, toClosure=False, maxRes=None):\n \"\"\"Return the html tags in the 's' string containing the 'cont'\n string; if toClosure is True, everything between the opening\n tag and the closing tag is returned.\"\"\"\n lres = []\n bi = s.find(cont)\n if bi != -1:\n btag = s[:bi].rfind('<')\n if btag != -1:\n if not toClosure:\n etag = s[bi+1:].find('>')\n if etag != -1:\n endidx = bi+2+etag\n lres.append(s[btag:endidx])\n if maxRes is not None and len(lres) >= maxRes: return lres\n lres += _getTagsWith(s[endidx:], cont,\n toClosure=toClosure)\n else:\n spaceidx = s[btag:].find(' ')\n if spaceidx != -1:\n ctag = '' % s[btag+1:btag+spaceidx]\n closeidx = s[bi:].find(ctag)\n if closeidx != -1:\n endidx = bi+closeidx+len(ctag)\n lres.append(s[btag:endidx])\n if maxRes is not None and len(lres) >= maxRes:\n return lres\n lres += _getTagsWith(s[endidx:], cont,\n toClosure=toClosure)\n return lres\n\n\ndef _findBetween(s, begins, ends, beginindx=0, maxRes=None, lres=None):\n \"\"\"Return the list of strings from the 's' string which are included\n between the 'begins' and 'ends' strings.\"\"\"\n if lres is None:\n lres = []\n bi = s.find(begins, beginindx)\n if bi != -1:\n lbegins = len(begins)\n if isinstance(ends, (list, tuple)):\n eset = [s.find(end, bi+lbegins) for end in ends]\n eset[:] = [x for x in eset if x != -1]\n if not eset: ei = -1\n else: ei = min(eset)\n else:\n ei = s.find(ends, bi+lbegins)\n if ei != -1:\n match = s[bi+lbegins:ei]\n lres.append(match)\n if maxRes is not None and len(lres) >= maxRes: return lres\n _findBetween(s, begins, ends, beginindx=ei, maxRes=maxRes,\n lres=lres)\n return lres\n\n\nclass IMDbMobileAccessSystem(IMDbHTTPAccessSystem):\n \"\"\"The class used to access IMDb's data through the web for\n mobile terminals.\"\"\"\n\n accessSystem = 'mobile'\n _mobile_logger = logging.getLogger('imdbpy.parser.mobile')\n\n def __init__(self, isThin=0, *arguments, **keywords):\n self.accessSystem = 'mobile'\n IMDbHTTPAccessSystem.__init__(self, isThin, *arguments, **keywords)\n\n def _clean_html(self, html):\n \"\"\"Normalize the retrieve html.\"\"\"\n html = re_spaces.sub(' ', html)\n # Remove silly &nbsp;&raquo; chars.\n html = html.replace('&nbsp;&raquo;', '')\n return subXMLRefs(html)\n\n def _mretrieve(self, url, size=-1):\n \"\"\"Retrieve an html page and normalize it.\"\"\"\n cont = self._retrieve(url, size=size)\n return self._clean_html(cont)\n\n def _getPersons(self, s, sep='
'):\n \"\"\"Return a list of Person objects, from the string s; items\n are assumed to be separated by the sep string.\"\"\"\n names = s.split(sep)\n pl = []\n plappend = pl.append\n counter = 1\n for name in names:\n pid = re_imdbID.findall(name)\n if not pid: continue\n characters = _getTagsWith(name, 'class=\"char\"',\n toClosure=True, maxRes=1)\n chpids = []\n if characters:\n for ch in characters[0].split(' / '):\n chid = re_imdbID.findall(ch)\n if not chid:\n chpids.append(None)\n else:\n chpids.append(chid[-1])\n if not chpids:\n chpids = None\n elif len(chpids) == 1:\n chpids = chpids[0]\n name = _unHtml(name)\n # Catch unclosed tags.\n gt_indx = name.find('>')\n if gt_indx != -1:\n name = name[gt_indx+1:].lstrip()\n if not name: continue\n if name.endswith('...'):\n name = name[:-3]\n p = build_person(name, personID=str(pid[0]), billingPos=counter,\n modFunct=self._defModFunct, roleID=chpids,\n accessSystem=self.accessSystem)\n plappend(p)\n counter += 1\n return pl\n\n def _search_movie(self, title, results):\n ##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})\n ##params = 'q=%s&tt=on&mx=%s' % (urllib.quote_plus(title), str(results))\n ##cont = self._mretrieve(imdbURL_search % params)\n cont = subXMLRefs(self._get_search_content('tt', title, results))\n title = _findBetween(cont, '', '', maxRes=1)\n res = []\n if not title:\n self._mobile_logger.error('no title tag searching for movie %s',\n title)\n return res\n tl = title[0].lower()\n if not tl.startswith('imdb title'):\n # a direct hit!\n title = _unHtml(title[0])\n mid = None\n midtag = _getTagsWith(cont, 'rel=\"canonical\"', maxRes=1)\n if midtag:\n mid = _findBetween(midtag[0], '/title/tt', '/', maxRes=1)\n if not (mid and title):\n self._mobile_logger.error('no direct hit title/movieID for' \\\n ' title %s', title)\n return res\n if cont.find('TV mini-series') != -1:\n title += ' (mini)'\n res[:] = [(str(mid[0]), analyze_title(title))]\n else:\n # XXX: this results*3 prevents some recursion errors, but...\n # it's not exactly understandable (i.e.: why 'results' is\n # not enough to get all the results?)\n lis = _findBetween(cont, 'td valign=\"top\">', '',\n maxRes=results*3)\n for li in lis:\n akas = re_makas.findall(li)\n for idx, aka in enumerate(akas):\n aka = aka.replace('\" - ', '::', 1)\n aka = _unHtml(aka)\n if aka.startswith('aka \"'):\n aka = aka[5:].strip()\n if aka[-1] == '\"':\n aka = aka[:-1]\n akas[idx] = aka\n imdbid = re_imdbID.findall(li)\n li = re_makas.sub('', li)\n mtitle = _unHtml(li)\n if not (imdbid and mtitle):\n self._mobile_logger.debug('no title/movieID parsing' \\\n ' %s searching for title %s', li,\n title)\n continue\n mtitle = mtitle.replace('(TV mini-series)', '(mini)')\n resd = analyze_title(mtitle)\n if akas:\n resd['akas'] = akas\n res.append((str(imdbid[0]), resd))\n return res\n\n def get_movie_main(self, movieID):\n cont = self._mretrieve(self.urls['movie_main'] % movieID + 'maindetails')\n title = _findBetween(cont, '', '', maxRes=1)\n if not title:\n raise IMDbDataAccessError('unable to get movieID \"%s\"' % movieID)\n title = _unHtml(title[0])\n if title.endswith(' - IMDb'):\n title = title[:-7]\n if cont.find('TV mini-series') != -1:\n title += ' (mini)'\n d = analyze_title(title)\n kind = d.get('kind')\n tv_series = _findBetween(cont, 'TV Series:', '', maxRes=1)\n if tv_series: mid = re_imdbID.findall(tv_series[0])\n else: mid = None\n if tv_series and mid:\n s_title = _unHtml(tv_series[0])\n s_data = analyze_title(s_title)\n m = Movie(movieID=str(mid[0]), 
data=s_data,\n accessSystem=self.accessSystem,\n modFunct=self._defModFunct)\n d['kind'] = kind = u'episode'\n d['episode of'] = m\n if kind in ('tv series', 'tv mini series'):\n years = _findBetween(cont, '

', '

', maxRes=1)\n if years:\n years[:] = _findBetween(years[0], 'TV series', '',\n maxRes=1)\n if years:\n d['series years'] = years[0].strip()\n air_date = _findBetween(cont, 'Original Air Date:', '
',\n maxRes=1)\n if air_date:\n air_date = air_date[0]\n vi = air_date.find('(')\n if vi != -1:\n date = _unHtml(air_date[:vi]).strip()\n if date != '????':\n d['original air date'] = date\n air_date = air_date[vi:]\n season = _findBetween(air_date, 'Season', ',', maxRes=1)\n if season:\n season = season[0].strip()\n try: season = int(season)\n except: pass\n if season or type(season) is _inttype:\n d['season'] = season\n episode = _findBetween(air_date, 'Episode', ')', maxRes=1)\n if episode:\n episode = episode[0].strip()\n try: episode = int(episode)\n except: pass\n if episode or type(season) is _inttype:\n d['episode'] = episode\n direct = _findBetween(cont, '
<h5>Director', ('</div>', '<br/> <br/>
'),\n maxRes=1)\n if direct:\n direct = direct[0]\n h5idx = direct.find('/h5>')\n if h5idx != -1:\n direct = direct[h5idx+4:]\n direct = self._getPersons(direct)\n if direct: d['director'] = direct\n if kind in ('tv series', 'tv mini series', 'episode'):\n if kind != 'episode':\n seasons = _findBetween(cont, 'Seasons:', '',\n maxRes=1)\n if seasons:\n d['number of seasons'] = seasons[0].count('|') + 1\n creator = _findBetween(cont, 'Created by', ('class=\"tn15more\"',\n '',\n '
<br/> <br/>
'),\n maxRes=1)\n if not creator:\n # They change 'Created by' to 'Creator' and viceversa\n # from time to time...\n # XXX: is 'Creators' also used?\n creator = _findBetween(cont, 'Creator:',\n ('class=\"tn15more\"', '',\n '
<br/> <br/>
'), maxRes=1)\n if creator:\n creator = creator[0]\n if creator.find('tn15more'): creator = '%s>' % creator\n creator = self._getPersons(creator)\n if creator: d['creator'] = creator\n writers = _findBetween(cont, '
<h5>Writer', ('</div>', '<br/> <br/>
'),\n maxRes=1)\n if writers:\n writers = writers[0]\n h5idx = writers.find('/h5>')\n if h5idx != -1:\n writers = writers[h5idx+4:]\n writers = self._getPersons(writers)\n if writers: d['writer'] = writers\n cvurl = _getTagsWith(cont, 'name=\"poster\"', toClosure=True, maxRes=1)\n if cvurl:\n cvurl = _findBetween(cvurl[0], 'src=\"', '\"', maxRes=1)\n if cvurl: d['cover url'] = cvurl[0]\n genres = _findBetween(cont, 'href=\"/genre/', '\"')\n if genres:\n d['genres'] = list(set(genres))\n ur = _findBetween(cont, 'id=\"star-bar-user-rate\">', '',\n maxRes=1)\n if ur:\n rat = _findBetween(ur[0], '', '', maxRes=1)\n if rat:\n if rat:\n d['rating'] = rat[0].strip()\n else:\n self._mobile_logger.warn('wrong rating: %s', rat)\n vi = ur[0].rfind('href=\"ratings\"')\n if vi != -1 and ur[0][vi+10:].find('await') == -1:\n try:\n votes = _findBetween(ur[0][vi:], \"title='\",\n \" IMDb\", maxRes=1)\n votes = int(votes[0].replace(',', ''))\n d['votes'] = votes\n except (ValueError, IndexError):\n self._mobile_logger.warn('wrong votes: %s', ur)\n top250 = _findBetween(cont, 'href=\"/chart/top?', '', maxRes=1)\n if top250:\n fn = top250[0].rfind('#')\n if fn != -1:\n try:\n td = int(top250[0][fn+1:])\n d['top 250 rank'] = td\n except ValueError:\n self._mobile_logger.warn('wrong top250: %s', top250)\n castdata = _findBetween(cont, 'Cast overview', '', maxRes=1)\n if not castdata:\n castdata = _findBetween(cont, 'Credited cast', '', maxRes=1)\n if not castdata:\n castdata = _findBetween(cont, 'Complete credited cast', '',\n maxRes=1)\n if not castdata:\n castdata = _findBetween(cont, 'Series Cast Summary', '',\n maxRes=1)\n if not castdata:\n castdata = _findBetween(cont, 'Episode Credited cast', '',\n maxRes=1)\n if castdata:\n castdata = castdata[0]\n # Reintegrate the fist tag.\n fl = castdata.find('href=')\n if fl != -1: castdata = '')\n if smib != -1:\n smie = castdata.rfind('')\n if smie != -1:\n castdata = castdata[:smib].strip() + \\\n castdata[smie+18:].strip()\n castdata = castdata.replace('/tr> ', '', maxRes=1)\n if akas:\n # For some reason, here
<br> is still used in place of <br/>.\n akas[:] = [x for x in akas[0].split('<br>') if x.strip()]\n akas = [_unHtml(x).replace('\" - ','::', 1).lstrip('\"').strip()\n for x in akas]\n if 'See more' in akas: akas.remove('See more')\n akas[:] = [x for x in akas if x]\n if akas:\n d['akas'] = akas\n mpaa = _findBetween(cont, 'MPAA
:', '', maxRes=1)\n if mpaa: d['mpaa'] = _unHtml(mpaa[0])\n runtimes = _findBetween(cont, 'Runtime:
', '', maxRes=1)\n if runtimes:\n runtimes = runtimes[0]\n runtimes = [x.strip().replace(' min', '').replace(' (', '::(', 1)\n for x in runtimes.split('|')]\n d['runtimes'] = [_unHtml(x).strip() for x in runtimes]\n if kind == 'episode':\n # number of episodes.\n epsn = _findBetween(cont, 'title=\"Full Episode List\">', '',\n maxRes=1)\n if epsn:\n epsn = epsn[0].replace(' Episodes', '').strip()\n if epsn:\n try:\n epsn = int(epsn)\n except:\n self._mobile_logger.warn('wrong episodes #: %s', epsn)\n d['number of episodes'] = epsn\n country = _findBetween(cont, 'Country:', '', maxRes=1)\n if country:\n country[:] = country[0].split(' | ')\n country[:] = ['', '::')) for x in country]\n if country: d['countries'] = country\n lang = _findBetween(cont, 'Language:', '', maxRes=1)\n if lang:\n lang[:] = lang[0].split(' | ')\n lang[:] = ['', '::')) for x in lang]\n if lang: d['languages'] = lang\n col = _findBetween(cont, '\"/search/title?colors=', '')\n if col:\n col[:] = col[0].split(' | ')\n col[:] = ['', '::')) for x in col]\n if col: d['color info'] = col\n sm = _findBetween(cont, '/search/title?sound_mixes=', '',\n maxRes=1)\n if sm:\n sm[:] = sm[0].split(' | ')\n sm[:] = ['', '::')) for x in sm]\n if sm: d['sound mix'] = sm\n cert = _findBetween(cont, 'Certification:', '', maxRes=1)\n if cert:\n cert[:] = cert[0].split(' | ')\n cert[:] = [_unHtml(x.replace(' ', '::')) for x in cert]\n if cert: d['certificates'] = cert\n plotoutline = _findBetween(cont, 'Plot:', [''],\n maxRes=1)\n if plotoutline:\n plotoutline = plotoutline[0].strip()\n plotoutline = plotoutline.rstrip('|').rstrip()\n if plotoutline: d['plot outline'] = _unHtml(plotoutline)\n aratio = _findBetween(cont, 'Aspect Ratio:', [''],\n maxRes=1)\n if aratio:\n aratio = aratio[0].strip().replace(' (', '::(', 1)\n if aratio:\n d['aspect ratio'] = _unHtml(aratio)\n return {'data': d}\n\n def get_movie_plot(self, movieID):\n cont = self._mretrieve(self.urls['movie_main'] % movieID + 'plotsummary')\n plot = _findBetween(cont, '

', '

')\n plot[:] = [_unHtml(x) for x in plot]\n for i in xrange(len(plot)):\n p = plot[i]\n wbyidx = p.rfind(' Written by ')\n if wbyidx != -1:\n plot[i] = '%s::%s' % \\\n (p[:wbyidx].rstrip(),\n p[wbyidx+12:].rstrip().replace('{','<').replace('}','>'))\n if plot: return {'data': {'plot': plot}}\n return {'data': {}}\n\n def _search_person(self, name, results):\n ##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})\n ##params = 'q=%s&nm=on&mx=%s' % (urllib.quote_plus(name), str(results))\n ##cont = self._mretrieve(imdbURL_search % params)\n cont = subXMLRefs(self._get_search_content('nm', name, results))\n name = _findBetween(cont, '', '', maxRes=1)\n res = []\n if not name:\n self._mobile_logger.warn('no title tag searching for name %s', name)\n return res\n nl = name[0].lower()\n if not nl.startswith('imdb name'):\n # a direct hit!\n name = _unHtml(name[0])\n name = name.replace('- Filmography by type' , '').strip()\n pid = None\n pidtag = _getTagsWith(cont, 'rel=\"canonical\"', maxRes=1)\n if pidtag:\n pid = _findBetween(pidtag[0], '/name/nm', '/', maxRes=1)\n if not (pid and name):\n self._mobile_logger.error('no direct hit name/personID for' \\\n ' name %s', name)\n return res\n res[:] = [(str(pid[0]), analyze_name(name, canonical=1))]\n else:\n lis = _findBetween(cont, 'td valign=\"top\">', '',\n maxRes=results*3)\n for li in lis:\n akas = _findBetween(li, '\"', '\"')\n for sep in [' aka', '
<br> birth name']:\n sepIdx = li.find(sep)\n if sepIdx != -1:\n li = li[:sepIdx]\n pid = re_imdbID.findall(li)\n pname = _unHtml(li)\n if not (pid and pname):\n self._mobile_logger.debug('no name/personID parsing' \\\n ' %s searching for name %s', li,\n name)\n continue\n resd = analyze_name(pname, canonical=1)\n if akas:\n resd['akas'] = akas\n res.append((str(pid[0]), resd))\n return res\n\n def get_person_main(self, personID, _parseChr=False):\n if not _parseChr:\n url = self.urls['person_main'] % personID + 'maindetails'\n else:\n url = self.urls['character_main'] % personID\n s = self._mretrieve(url)\n r = {}\n name = _findBetween(s, '<title>', '</title>', maxRes=1)\n if not name:\n if _parseChr: w = 'characterID'\n else: w = 'personID'\n raise IMDbDataAccessError('unable to get %s \"%s\"' % (w, personID))\n name = _unHtml(name[0].replace(' - IMDb', ''))\n if _parseChr:\n name = name.replace('(Character)', '').strip()\n name = name.replace('- Filmography by type', '').strip()\n else:\n name = name.replace('- Filmography by', '').strip()\n r = analyze_name(name, canonical=not _parseChr)\n for dKind in ('Born', 'Died'):\n date = _findBetween(s, '%s:' % dKind.capitalize(),\n ('</div>', '<br/> <br/>
'), maxRes=1)\n if date:\n date = _unHtml(date[0])\n if date:\n #date, notes = date_and_notes(date)\n # TODO: fix to handle real names.\n date_notes = date.split(' in ', 1)\n notes = u''\n date = date_notes[0]\n if len(date_notes) == 2:\n notes = date_notes[1]\n dtitle = 'birth'\n if dKind == 'Died':\n dtitle = 'death'\n if date:\n r['%s date' % dtitle] = date\n if notes:\n r['%s notes' % dtitle] = notes\n akas = _findBetween(s, 'Alternate Names:', ('
</div>',\n '<br/> <br/>
'), maxRes=1)\n if akas:\n akas = akas[0]\n if akas:\n akas = _unHtml(akas)\n if akas.find(' | ') != -1:\n akas = akas.split(' | ')\n else:\n akas = akas.split(' / ')\n if akas: r['akas'] = filter(None, [x.strip() for x in akas])\n hs = _findBetween(s, \"rel='image_src'\", '>', maxRes=1)\n if not hs:\n hs = _findBetween(s, 'rel=\"image_src\"', '>', maxRes=1)\n if not hs:\n hs = _findBetween(s, '
', maxRes=1)\n if hs:\n hsl = _findBetween(hs[0], \"href='\", \"'\", maxRes=1)\n if not hsl:\n hsl = _findBetween(hs[0], 'href=\"', '\"', maxRes=1)\n if hsl and 'imdb-share-logo' not in hsl[0]:\n r['headshot'] = hsl[0]\n # Build a list of tuples such [('hrefLink', 'section name')]\n workkind = _findBetween(s, 'id=\"jumpto_', '')\n ws = []\n for work in workkind:\n sep = '\" >'\n if '\">' in work:\n sep = '\">'\n wsplit = work.split(sep, 1)\n if len(wsplit) == 2:\n sect = wsplit[0]\n if '\"' in sect:\n sect = sect[:sect.find('\"')]\n ws.append((sect, wsplit[1].lower()))\n # XXX: I think \"guest appearances\" are gone.\n if s.find(' tag.\n if _parseChr and sect == 'filmography':\n inisect = s.find('
')\n else:\n inisect = s.find('',))\n for m in mlist:\n fCB = m.find('>')\n if fCB != -1:\n m = m[fCB+1:].lstrip()\n m = re_filmo_episodes.sub('', m)\n # For every movie in the current section.\n movieID = re_imdbID.findall(m)\n if not movieID:\n self._mobile_logger.debug('no movieID in %s', m)\n continue\n m = m.replace('
', ' .... ', 1)\n if not _parseChr:\n chrIndx = m.find(' .... ')\n else:\n chrIndx = m.find(' Played by ')\n chids = []\n if chrIndx != -1:\n chrtxt = m[chrIndx+6:]\n if _parseChr:\n chrtxt = chrtxt[5:]\n for ch in chrtxt.split(' / '):\n chid = re_imdbID.findall(ch)\n if not chid:\n chids.append(None)\n else:\n chids.append(chid[-1])\n if not chids:\n chids = None\n elif len(chids) == 1:\n chids = chids[0]\n movieID = str(movieID[0])\n # Search the status.\n stidx = m.find('')\n status = u''\n if stidx != -1:\n stendidx = m.rfind('')\n if stendidx != -1:\n status = _unHtml(m[stidx+3:stendidx])\n m = m.replace(m[stidx+3:stendidx], '')\n year = _findBetween(m, 'year_column\">', '', maxRes=1)\n if year:\n year = year[0]\n m = m.replace('%s' % year,\n '')\n else:\n year = None\n m = _unHtml(m)\n if not m:\n self._mobile_logger.warn('no title for movieID %s', movieID)\n continue\n movie = build_movie(m, movieID=movieID, status=status,\n roleID=chids, modFunct=self._defModFunct,\n accessSystem=self.accessSystem,\n _parsingCharacter=_parseChr, year=year)\n sectName = sectName.split(':')[0]\n r.setdefault(sectName, []).append(movie)\n # If available, take the always correct name from a form.\n itag = _getTagsWith(s, 'NAME=\"primary\"', maxRes=1)\n if not itag:\n itag = _getTagsWith(s, 'name=\"primary\"', maxRes=1)\n if itag:\n vtag = _findBetween(itag[0], 'VALUE=\"', ('\"', '>'), maxRes=1)\n if not vtag:\n vtag = _findBetween(itag[0], 'value=\"', ('\"', '>'), maxRes=1)\n if vtag:\n try:\n vtag = unquote(str(vtag[0]))\n vtag = unicode(vtag, 'latin_1')\n r.update(analyze_name(vtag))\n except UnicodeEncodeError:\n pass\n return {'data': r, 'info sets': ('main', 'filmography')}\n\n def get_person_biography(self, personID):\n cont = self._mretrieve(self.urls['person_main'] % personID + 'bio')\n d = {}\n spouses = _findBetween(cont, 'Spouse', ('', ''),\n maxRes=1)\n if spouses:\n sl = []\n for spouse in spouses[0].split(''):\n if spouse.count('') > 1:\n spouse = spouse.replace('', '::', 1)\n spouse = _unHtml(spouse)\n spouse = spouse.replace(':: ', '::').strip()\n if spouse: sl.append(spouse)\n if sl: d['spouse'] = sl\n nnames = _findBetween(cont, '
Nickname
', ('

','
'),\n maxRes=1)\n if nnames:\n nnames = nnames[0]\n if nnames:\n nnames = [x.strip().replace(' (', '::(', 1)\n for x in nnames.split('
')]\n if nnames:\n d['nick names'] = nnames\n misc_sects = _findBetween(cont, '
', '
')\n misc_sects[:] = [x.split('
') for x in misc_sects]\n misc_sects[:] = [x for x in misc_sects if len(x) == 2]\n for sect, data in misc_sects:\n sect = sect.lower().replace(':', '').strip()\n if d.has_key(sect) and sect != 'mini biography': continue\n elif sect in ('spouse', 'nickname'): continue\n if sect == 'salary': sect = 'salary history'\n elif sect == 'where are they now': sect = 'where now'\n elif sect == 'personal quotes': sect = 'quotes'\n data = data.replace('

', '::')\n data = data.replace('

', ' ') # for multi-paragraphs 'bio'\n data = data.replace(' ', '@@@@')\n data = data.replace(' ', '::')\n data = _unHtml(data)\n data = [x.strip() for x in data.split('::')]\n data[:] = [x.replace('@@@@', '::') for x in data if x]\n if sect == 'height' and data: data = data[0]\n elif sect == 'birth name': data = canonicalName(data[0])\n elif sect == 'date of birth':\n date, notes = date_and_notes(data[0])\n if date:\n d['birth date'] = date\n if notes:\n d['birth notes'] = notes\n continue\n elif sect == 'date of death':\n date, notes = date_and_notes(data[0])\n if date:\n d['death date'] = date\n if notes:\n d['death notes'] = notes\n continue\n elif sect == 'mini biography':\n ndata = []\n for bio in data:\n byidx = bio.rfind('IMDb Mini Biography By')\n if byidx != -1:\n bioAuth = bio[:byidx].rstrip()\n else:\n bioAuth = 'Anonymous'\n bio = u'%s::%s' % (bioAuth, bio[byidx+23:].lstrip())\n ndata.append(bio)\n data[:] = ndata\n if 'mini biography' in d:\n d['mini biography'].append(ndata[0])\n continue\n d[sect] = data\n return {'data': d}\n\n def _search_character(self, name, results):\n cont = subXMLRefs(self._get_search_content('char', name, results))\n name = _findBetween(cont, '', '', maxRes=1)\n res = []\n if not name:\n self._mobile_logger.error('no title tag searching character %s',\n name)\n return res\n nl = name[0].lower()\n if not (nl.startswith('imdb search') or nl.startswith('imdb search') \\\n or nl.startswith('imdb character')):\n # a direct hit!\n name = _unHtml(name[0]).replace('(Character)', '').strip()\n pid = None\n pidtag = _getTagsWith(cont, 'rel=\"canonical\"', maxRes=1)\n if pidtag:\n pid = _findBetween(pidtag[0], '/character/ch', '/', maxRes=1)\n if not (pid and name):\n self._mobile_logger.error('no direct hit name/characterID for' \\\n ' character %s', name)\n return res\n res[:] = [(str(pid[0]), analyze_name(name))]\n else:\n sects = _findBetween(cont, 'Popular Characters', '',\n maxRes=results*3)\n sects += _findBetween(cont, 'Characters', '',\n maxRes=results*3)\n for sect in sects:\n lis = _findBetween(sect, '
', '',\n ('', '

'), maxRes=1)\n if intro:\n intro = _unHtml(intro[0]).strip()\n if intro:\n d['introduction'] = intro\n tocidx = cont.find('', ('

', ''))\n if bios:\n for bio in bios:\n bio = bio.replace('

', '::')\n bio = bio.replace('\\n', ' ')\n bio = bio.replace('
', '\\n')\n bio = bio.replace('
', '\\n')\n bio = subSGMLRefs(re_unhtmlsub('', bio).strip())\n bio = bio.replace(' ::', '::').replace(':: ', '::')\n bio = bio.replace('::', ': ', 1)\n if bio:\n d.setdefault('biography', []).append(bio)\n return {'data': d}\n\n\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475261,"cells":{"repo_name":{"kind":"string","value":"sephii/django"},"path":{"kind":"string","value":"django/template/defaulttags.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"53552"},"content":{"kind":"string","value":"\"\"\"Default tags used by the template system, available to all templates.\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport re\nfrom datetime import datetime\nfrom itertools import groupby, cycle as itertools_cycle\nimport warnings\n\nfrom django.conf import settings\nfrom django.template.base import (Node, NodeList, Template, Context, Library,\n TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary,\n BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END,\n SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END,\n VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re,\n render_value_in_context)\nfrom django.template.smartif import IfParser, Literal\nfrom django.template.defaultfilters import date\nfrom django.utils.deprecation import RemovedInDjango20Warning\nfrom django.utils.encoding import force_text, smart_text\nfrom django.utils.lorem_ipsum import words, paragraphs\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import format_html\nfrom django.utils import six\nfrom django.utils import timezone\n\nregister = Library()\n\n\nclass AutoEscapeControlNode(Node):\n \"\"\"Implements the actions of the autoescape tag.\"\"\"\n def __init__(self, setting, nodelist):\n self.setting, self.nodelist = setting, nodelist\n\n def render(self, context):\n old_setting = context.autoescape\n context.autoescape = self.setting\n output = self.nodelist.render(context)\n context.autoescape = old_setting\n if self.setting:\n return mark_safe(output)\n else:\n return output\n\n\nclass CommentNode(Node):\n def render(self, context):\n return ''\n\n\nclass CsrfTokenNode(Node):\n def render(self, context):\n csrf_token = context.get('csrf_token', None)\n if csrf_token:\n if csrf_token == 'NOTPROVIDED':\n return format_html(\"\")\n else:\n return format_html(\"\", csrf_token)\n else:\n # It's very probable that the token is missing because of\n # misconfiguration, so we raise a warning\n if settings.DEBUG:\n warnings.warn(\n \"A {% csrf_token %} was used in a template, but the context \"\n \"did not provide the value. 
This is usually caused by not \"\n \"using RequestContext.\"\n )\n return ''\n\n\nclass CycleNode(Node):\n def __init__(self, cyclevars, variable_name=None, silent=False):\n self.cyclevars = cyclevars\n self.variable_name = variable_name\n self.silent = silent\n\n def render(self, context):\n if self not in context.render_context:\n # First time the node is rendered in template\n context.render_context[self] = itertools_cycle(self.cyclevars)\n cycle_iter = context.render_context[self]\n value = next(cycle_iter).resolve(context)\n if self.variable_name:\n context[self.variable_name] = value\n if self.silent:\n return ''\n return render_value_in_context(value, context)\n\n\nclass DebugNode(Node):\n def render(self, context):\n from pprint import pformat\n output = [force_text(pformat(val)) for val in context]\n output.append('\\n\\n')\n output.append(pformat(sys.modules))\n return ''.join(output)\n\n\nclass FilterNode(Node):\n def __init__(self, filter_expr, nodelist):\n self.filter_expr, self.nodelist = filter_expr, nodelist\n\n def render(self, context):\n output = self.nodelist.render(context)\n # Apply filters.\n with context.push(var=output):\n return self.filter_expr.resolve(context)\n\n\nclass FirstOfNode(Node):\n def __init__(self, variables):\n self.vars = variables\n\n def render(self, context):\n for var in self.vars:\n value = var.resolve(context, True)\n if value:\n return render_value_in_context(value, context)\n return ''\n\n\nclass ForNode(Node):\n child_nodelists = ('nodelist_loop', 'nodelist_empty')\n\n def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None):\n self.loopvars, self.sequence = loopvars, sequence\n self.is_reversed = is_reversed\n self.nodelist_loop = nodelist_loop\n if nodelist_empty is None:\n self.nodelist_empty = NodeList()\n else:\n self.nodelist_empty = nodelist_empty\n\n def __repr__(self):\n reversed_text = ' reversed' if self.is_reversed else ''\n return \"\" % \\\n (', '.join(self.loopvars), self.sequence, len(self.nodelist_loop),\n reversed_text)\n\n def __iter__(self):\n for node in self.nodelist_loop:\n yield node\n for node in self.nodelist_empty:\n yield node\n\n def render(self, context):\n if 'forloop' in context:\n parentloop = context['forloop']\n else:\n parentloop = {}\n with context.push():\n try:\n values = self.sequence.resolve(context, True)\n except VariableDoesNotExist:\n values = []\n if values is None:\n values = []\n if not hasattr(values, '__len__'):\n values = list(values)\n len_values = len(values)\n if len_values < 1:\n return self.nodelist_empty.render(context)\n nodelist = []\n if self.is_reversed:\n values = reversed(values)\n num_loopvars = len(self.loopvars)\n unpack = num_loopvars > 1\n # Create a forloop value in the context. 
We'll update counters on each\n # iteration just below.\n loop_dict = context['forloop'] = {'parentloop': parentloop}\n for i, item in enumerate(values):\n # Shortcuts for current loop iteration number.\n loop_dict['counter0'] = i\n loop_dict['counter'] = i + 1\n # Reverse counter iteration numbers.\n loop_dict['revcounter'] = len_values - i\n loop_dict['revcounter0'] = len_values - i - 1\n # Boolean values designating first and last times through loop.\n loop_dict['first'] = (i == 0)\n loop_dict['last'] = (i == len_values - 1)\n\n pop_context = False\n if unpack:\n # If there are multiple loop variables, unpack the item into\n # them.\n\n # To complete this deprecation, remove from here to the\n # try/except block as well as the try/except itself,\n # leaving `unpacked_vars = ...` and the \"else\" statements.\n if not isinstance(item, (list, tuple)):\n len_item = 1\n else:\n len_item = len(item)\n # Check loop variable count before unpacking\n if num_loopvars != len_item:\n warnings.warn(\n \"Need {} values to unpack in for loop; got {}. \"\n \"This will raise an exception in Django 2.0.\"\n .format(num_loopvars, len_item),\n RemovedInDjango20Warning)\n try:\n unpacked_vars = dict(zip(self.loopvars, item))\n except TypeError:\n pass\n else:\n pop_context = True\n context.update(unpacked_vars)\n else:\n context[self.loopvars[0]] = item\n # In TEMPLATE_DEBUG mode provide source of the node which\n # actually raised the exception\n if context.engine.debug:\n for node in self.nodelist_loop:\n try:\n nodelist.append(node.render(context))\n except Exception as e:\n if not hasattr(e, 'django_template_source'):\n e.django_template_source = node.source\n raise\n else:\n for node in self.nodelist_loop:\n nodelist.append(node.render(context))\n if pop_context:\n # The loop variables were pushed on to the context so pop them\n # off again. This is necessary because the tag lets the length\n # of loopvars differ to the length of each set of items and we\n # don't want to leave any vars from the previous loop on the\n # context.\n context.pop()\n return mark_safe(''.join(force_text(n) for n in nodelist))\n\n\nclass IfChangedNode(Node):\n child_nodelists = ('nodelist_true', 'nodelist_false')\n\n def __init__(self, nodelist_true, nodelist_false, *varlist):\n self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false\n self._varlist = varlist\n\n def render(self, context):\n # Init state storage\n state_frame = self._get_context_stack_frame(context)\n if self not in state_frame:\n state_frame[self] = None\n\n nodelist_true_output = None\n try:\n if self._varlist:\n # Consider multiple parameters. 
This automatically behaves\n # like an OR evaluation of the multiple variables.\n compare_to = [var.resolve(context, True) for var in self._varlist]\n else:\n # The \"{% ifchanged %}\" syntax (without any variables) compares the rendered output.\n compare_to = nodelist_true_output = self.nodelist_true.render(context)\n except VariableDoesNotExist:\n compare_to = None\n\n if compare_to != state_frame[self]:\n state_frame[self] = compare_to\n # render true block if not already rendered\n return nodelist_true_output or self.nodelist_true.render(context)\n elif self.nodelist_false:\n return self.nodelist_false.render(context)\n return ''\n\n def _get_context_stack_frame(self, context):\n # The Context object behaves like a stack where each template tag can create a new scope.\n # Find the place where to store the state to detect changes.\n if 'forloop' in context:\n # Ifchanged is bound to the local for loop.\n # When there is a loop-in-loop, the state is bound to the inner loop,\n # so it resets when the outer loop continues.\n return context['forloop']\n else:\n # Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'.\n return context.render_context\n\n\nclass IfEqualNode(Node):\n child_nodelists = ('nodelist_true', 'nodelist_false')\n\n def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):\n self.var1, self.var2 = var1, var2\n self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false\n self.negate = negate\n\n def __repr__(self):\n return \"\"\n\n def render(self, context):\n val1 = self.var1.resolve(context, True)\n val2 = self.var2.resolve(context, True)\n if (self.negate and val1 != val2) or (not self.negate and val1 == val2):\n return self.nodelist_true.render(context)\n return self.nodelist_false.render(context)\n\n\nclass IfNode(Node):\n\n def __init__(self, conditions_nodelists):\n self.conditions_nodelists = conditions_nodelists\n\n def __repr__(self):\n return \"\"\n\n def __iter__(self):\n for _, nodelist in self.conditions_nodelists:\n for node in nodelist:\n yield node\n\n @property\n def nodelist(self):\n return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist)\n\n def render(self, context):\n for condition, nodelist in self.conditions_nodelists:\n\n if condition is not None: # if / elif clause\n try:\n match = condition.eval(context)\n except VariableDoesNotExist:\n match = None\n else: # else clause\n match = True\n\n if match:\n return nodelist.render(context)\n\n return ''\n\n\nclass LoremNode(Node):\n def __init__(self, count, method, common):\n self.count, self.method, self.common = count, method, common\n\n def render(self, context):\n try:\n count = int(self.count.resolve(context))\n except (ValueError, TypeError):\n count = 1\n if self.method == 'w':\n return words(count, common=self.common)\n else:\n paras = paragraphs(count, common=self.common)\n if self.method == 'p':\n paras = ['

%s

' % p for p in paras]\n return '\\n\\n'.join(paras)\n\n\nclass RegroupNode(Node):\n def __init__(self, target, expression, var_name):\n self.target, self.expression = target, expression\n self.var_name = var_name\n\n def resolve_expression(self, obj, context):\n # This method is called for each object in self.target. See regroup()\n # for the reason why we temporarily put the object in the context.\n context[self.var_name] = obj\n return self.expression.resolve(context, True)\n\n def render(self, context):\n obj_list = self.target.resolve(context, True)\n if obj_list is None:\n # target variable wasn't found in context; fail silently.\n context[self.var_name] = []\n return ''\n # List of dictionaries in the format:\n # {'grouper': 'key', 'list': [list of contents]}.\n context[self.var_name] = [\n {'grouper': key, 'list': list(val)}\n for key, val in\n groupby(obj_list, lambda obj: self.resolve_expression(obj, context))\n ]\n return ''\n\n\ndef include_is_allowed(filepath, allowed_include_roots):\n filepath = os.path.abspath(filepath)\n for root in allowed_include_roots:\n if filepath.startswith(root):\n return True\n return False\n\n\nclass SsiNode(Node):\n def __init__(self, filepath, parsed):\n self.filepath = filepath\n self.parsed = parsed\n\n def render(self, context):\n filepath = self.filepath.resolve(context)\n\n if not include_is_allowed(filepath, context.engine.allowed_include_roots):\n if settings.DEBUG:\n return \"[Didn't have permission to include file]\"\n else:\n return '' # Fail silently for invalid includes.\n try:\n with open(filepath, 'r') as fp:\n output = fp.read()\n except IOError:\n output = ''\n if self.parsed:\n try:\n t = Template(output, name=filepath, engine=context.engine)\n return t.render(context)\n except TemplateSyntaxError as e:\n if settings.DEBUG:\n return \"[Included template had syntax error: %s]\" % e\n else:\n return '' # Fail silently for invalid included templates.\n return output\n\n\nclass LoadNode(Node):\n def render(self, context):\n return ''\n\n\nclass NowNode(Node):\n def __init__(self, format_string, asvar=None):\n self.format_string = format_string\n self.asvar = asvar\n\n def render(self, context):\n tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None\n formatted = date(datetime.now(tz=tzinfo), self.format_string)\n\n if self.asvar:\n context[self.asvar] = formatted\n return ''\n else:\n return formatted\n\n\nclass SpacelessNode(Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n from django.utils.html import strip_spaces_between_tags\n return strip_spaces_between_tags(self.nodelist.render(context).strip())\n\n\nclass TemplateTagNode(Node):\n mapping = {'openblock': BLOCK_TAG_START,\n 'closeblock': BLOCK_TAG_END,\n 'openvariable': VARIABLE_TAG_START,\n 'closevariable': VARIABLE_TAG_END,\n 'openbrace': SINGLE_BRACE_START,\n 'closebrace': SINGLE_BRACE_END,\n 'opencomment': COMMENT_TAG_START,\n 'closecomment': COMMENT_TAG_END,\n }\n\n def __init__(self, tagtype):\n self.tagtype = tagtype\n\n def render(self, context):\n return self.mapping.get(self.tagtype, '')\n\n\nclass URLNode(Node):\n def __init__(self, view_name, args, kwargs, asvar):\n self.view_name = view_name\n self.args = args\n self.kwargs = kwargs\n self.asvar = asvar\n\n def render(self, context):\n from django.core.urlresolvers import reverse, NoReverseMatch\n args = [arg.resolve(context) for arg in self.args]\n kwargs = dict((smart_text(k, 'ascii'), v.resolve(context))\n for k, v in self.kwargs.items())\n\n 
view_name = self.view_name.resolve(context)\n\n try:\n current_app = context.request.current_app\n except AttributeError:\n # Change the fallback value to None when the deprecation path for\n # Context.current_app completes in Django 2.0.\n current_app = context.current_app\n\n # Try to look up the URL twice: once given the view name, and again\n # relative to what we guess is the \"main\" app. If they both fail,\n # re-raise the NoReverseMatch unless we're using the\n # {% url ... as var %} construct in which case return nothing.\n url = ''\n try:\n url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app)\n except NoReverseMatch:\n exc_info = sys.exc_info()\n if settings.SETTINGS_MODULE:\n project_name = settings.SETTINGS_MODULE.split('.')[0]\n try:\n url = reverse(project_name + '.' + view_name,\n args=args, kwargs=kwargs,\n current_app=current_app)\n except NoReverseMatch:\n if self.asvar is None:\n # Re-raise the original exception, not the one with\n # the path relative to the project. This makes a\n # better error message.\n six.reraise(*exc_info)\n else:\n if self.asvar is None:\n raise\n\n if self.asvar:\n context[self.asvar] = url\n return ''\n else:\n return url\n\n\nclass VerbatimNode(Node):\n def __init__(self, content):\n self.content = content\n\n def render(self, context):\n return self.content\n\n\nclass WidthRatioNode(Node):\n def __init__(self, val_expr, max_expr, max_width, asvar=None):\n self.val_expr = val_expr\n self.max_expr = max_expr\n self.max_width = max_width\n self.asvar = asvar\n\n def render(self, context):\n try:\n value = self.val_expr.resolve(context)\n max_value = self.max_expr.resolve(context)\n max_width = int(self.max_width.resolve(context))\n except VariableDoesNotExist:\n return ''\n except (ValueError, TypeError):\n raise TemplateSyntaxError(\"widthratio final argument must be a number\")\n try:\n value = float(value)\n max_value = float(max_value)\n ratio = (value / max_value) * max_width\n result = str(int(round(ratio)))\n except ZeroDivisionError:\n return '0'\n except (ValueError, TypeError, OverflowError):\n return ''\n\n if self.asvar:\n context[self.asvar] = result\n return ''\n else:\n return result\n\n\nclass WithNode(Node):\n def __init__(self, var, name, nodelist, extra_context=None):\n self.nodelist = nodelist\n # var and name are legacy attributes, being left in case they are used\n # by third-party subclasses of this Node.\n self.extra_context = extra_context or {}\n if name:\n self.extra_context[name] = var\n\n def __repr__(self):\n return \"\"\n\n def render(self, context):\n values = {key: val.resolve(context) for key, val in\n six.iteritems(self.extra_context)}\n with context.push(**values):\n return self.nodelist.render(context)\n\n\n@register.tag\ndef autoescape(parser, token):\n \"\"\"\n Force autoescape behavior for this block.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n args = token.contents.split()\n if len(args) != 2:\n raise TemplateSyntaxError(\"'autoescape' tag requires exactly one argument.\")\n arg = args[1]\n if arg not in ('on', 'off'):\n raise TemplateSyntaxError(\"'autoescape' argument should be 'on' or 'off'\")\n nodelist = parser.parse(('endautoescape',))\n parser.delete_first_token()\n return AutoEscapeControlNode((arg == 'on'), nodelist)\n\n\n@register.tag\ndef comment(parser, token):\n \"\"\"\n Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.\n \"\"\"\n parser.skip_past('endcomment')\n return 
CommentNode()\n\n\n@register.tag\ndef cycle(parser, token):\n \"\"\"\n Cycles among the given strings each time this tag is encountered.\n\n Within a loop, cycles among the given strings each time through\n the loop::\n\n {% for o in some_list %}\n
\n ...\n \n {% endfor %}\n\n Outside of a loop, give the values a unique name the first time you call\n it, then use that name each successive time through::\n\n ...\n ...\n ...\n\n You can use any number of values, separated by spaces. Commas can also\n be used to separate values; if a comma is used, the cycle values are\n interpreted as literal strings.\n\n The optional flag \"silent\" can be used to prevent the cycle declaration\n from returning any value::\n\n {% for o in some_list %}\n {% cycle 'row1' 'row2' as rowcolors silent %}\n {% include \"subtemplate.html \" %}\n {% endfor %}\n\n \"\"\"\n # Note: This returns the exact same node on each {% cycle name %} call;\n # that is, the node object returned from {% cycle a b c as name %} and the\n # one returned from {% cycle name %} are the exact same object. This\n # shouldn't cause problems (heh), but if it does, now you know.\n #\n # Ugly hack warning: This stuffs the named template dict into parser so\n # that names are only unique within each template (as opposed to using\n # a global variable, which would make cycle names have to be unique across\n # *all* templates.\n\n args = token.split_contents()\n\n if len(args) < 2:\n raise TemplateSyntaxError(\"'cycle' tag requires at least two arguments\")\n\n if ',' in args[1]:\n # Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}\n # case.\n args[1:2] = ['\"%s\"' % arg for arg in args[1].split(\",\")]\n\n if len(args) == 2:\n # {% cycle foo %} case.\n name = args[1]\n if not hasattr(parser, '_namedCycleNodes'):\n raise TemplateSyntaxError(\"No named cycles in template. '%s' is not defined\" % name)\n if name not in parser._namedCycleNodes:\n raise TemplateSyntaxError(\"Named cycle '%s' does not exist\" % name)\n return parser._namedCycleNodes[name]\n\n as_form = False\n\n if len(args) > 4:\n # {% cycle ... as foo [silent] %} case.\n if args[-3] == \"as\":\n if args[-1] != \"silent\":\n raise TemplateSyntaxError(\"Only 'silent' flag is allowed after cycle's name, not '%s'.\" % args[-1])\n as_form = True\n silent = True\n args = args[:-1]\n elif args[-2] == \"as\":\n as_form = True\n silent = False\n\n if as_form:\n name = args[-1]\n values = [parser.compile_filter(arg) for arg in args[1:-2]]\n node = CycleNode(values, name, silent=silent)\n if not hasattr(parser, '_namedCycleNodes'):\n parser._namedCycleNodes = {}\n parser._namedCycleNodes[name] = node\n else:\n values = [parser.compile_filter(arg) for arg in args[1:]]\n node = CycleNode(values)\n return node\n\n\n@register.tag\ndef csrf_token(parser, token):\n return CsrfTokenNode()\n\n\n@register.tag\ndef debug(parser, token):\n \"\"\"\n Outputs a whole load of debugging information, including the current\n context and imported modules.\n\n Sample usage::\n\n
\n            {% debug %}\n        
\n \"\"\"\n return DebugNode()\n\n\n@register.tag('filter')\ndef do_filter(parser, token):\n \"\"\"\n Filters the contents of the block through variable filters.\n\n Filters can also be piped through each other, and they can have\n arguments -- just like in variable syntax.\n\n Sample usage::\n\n {% filter force_escape|lower %}\n This text will be HTML-escaped, and will appear in lowercase.\n {% endfilter %}\n\n Note that the ``escape`` and ``safe`` filters are not acceptable arguments.\n Instead, use the ``autoescape`` tag to manage autoescaping for blocks of\n template code.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n _, rest = token.contents.split(None, 1)\n filter_expr = parser.compile_filter(\"var|%s\" % (rest))\n for func, unused in filter_expr.filters:\n filter_name = getattr(func, '_filter_name', None)\n if filter_name in ('escape', 'safe'):\n raise TemplateSyntaxError('\"filter %s\" is not permitted. Use the \"autoescape\" tag instead.' % filter_name)\n nodelist = parser.parse(('endfilter',))\n parser.delete_first_token()\n return FilterNode(filter_expr, nodelist)\n\n\n@register.tag\ndef firstof(parser, token):\n \"\"\"\n Outputs the first variable passed that is not False, without escaping.\n\n Outputs nothing if all the passed variables are False.\n\n Sample usage::\n\n {% firstof var1 var2 var3 %}\n\n This is equivalent to::\n\n {% if var1 %}\n {{ var1|safe }}\n {% elif var2 %}\n {{ var2|safe }}\n {% elif var3 %}\n {{ var3|safe }}\n {% endif %}\n\n but obviously much cleaner!\n\n You can also use a literal string as a fallback value in case all\n passed variables are False::\n\n {% firstof var1 var2 var3 \"fallback value\" %}\n\n If you want to escape the output, use a filter tag::\n\n {% filter force_escape %}\n {% firstof var1 var2 var3 \"fallback value\" %}\n {% endfilter %}\n\n \"\"\"\n bits = token.split_contents()[1:]\n if len(bits) < 1:\n raise TemplateSyntaxError(\"'firstof' statement requires at least one argument\")\n return FirstOfNode([parser.compile_filter(bit) for bit in bits])\n\n\n@register.tag('for')\ndef do_for(parser, token):\n \"\"\"\n Loops over each item in an array.\n\n For example, to display a list of athletes given ``athlete_list``::\n\n
    \n {% for athlete in athlete_list %}\n
  • {{ athlete.name }}
  • \n {% endfor %}\n
\n\n You can loop over a list in reverse by using\n ``{% for obj in list reversed %}``.\n\n You can also unpack multiple values from a two-dimensional array::\n\n {% for key,value in dict.items %}\n {{ key }}: {{ value }}\n {% endfor %}\n\n The ``for`` tag can take an optional ``{% empty %}`` clause that will\n be displayed if the given array is empty or could not be found::\n\n
    \n {% for athlete in athlete_list %}\n
  • {{ athlete.name }}
  • \n {% empty %}\n
  • Sorry, no athletes in this list.
  • \n {% endfor %}\n
      \n\n The above is equivalent to -- but shorter, cleaner, and possibly faster\n than -- the following::\n\n
        \n {% if althete_list %}\n {% for athlete in athlete_list %}\n
      • {{ athlete.name }}
      • \n {% endfor %}\n {% else %}\n
      • Sorry, no athletes in this list.
      • \n {% endif %}\n
      \n\n The for loop sets a number of variables available within the loop:\n\n ========================== ================================================\n Variable Description\n ========================== ================================================\n ``forloop.counter`` The current iteration of the loop (1-indexed)\n ``forloop.counter0`` The current iteration of the loop (0-indexed)\n ``forloop.revcounter`` The number of iterations from the end of the\n loop (1-indexed)\n ``forloop.revcounter0`` The number of iterations from the end of the\n loop (0-indexed)\n ``forloop.first`` True if this is the first time through the loop\n ``forloop.last`` True if this is the last time through the loop\n ``forloop.parentloop`` For nested loops, this is the loop \"above\" the\n current one\n ========================== ================================================\n\n \"\"\"\n bits = token.split_contents()\n if len(bits) < 4:\n raise TemplateSyntaxError(\"'for' statements should have at least four\"\n \" words: %s\" % token.contents)\n\n is_reversed = bits[-1] == 'reversed'\n in_index = -3 if is_reversed else -2\n if bits[in_index] != 'in':\n raise TemplateSyntaxError(\"'for' statements should use the format\"\n \" 'for x in y': %s\" % token.contents)\n\n loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))\n for var in loopvars:\n if not var or ' ' in var:\n raise TemplateSyntaxError(\"'for' tag received an invalid argument:\"\n \" %s\" % token.contents)\n\n sequence = parser.compile_filter(bits[in_index + 1])\n nodelist_loop = parser.parse(('empty', 'endfor',))\n token = parser.next_token()\n if token.contents == 'empty':\n nodelist_empty = parser.parse(('endfor',))\n parser.delete_first_token()\n else:\n nodelist_empty = None\n return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)\n\n\ndef do_ifequal(parser, token, negate):\n bits = list(token.split_contents())\n if len(bits) != 3:\n raise TemplateSyntaxError(\"%r takes two arguments\" % bits[0])\n end_tag = 'end' + bits[0]\n nodelist_true = parser.parse(('else', end_tag))\n token = parser.next_token()\n if token.contents == 'else':\n nodelist_false = parser.parse((end_tag,))\n parser.delete_first_token()\n else:\n nodelist_false = NodeList()\n val1 = parser.compile_filter(bits[1])\n val2 = parser.compile_filter(bits[2])\n return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)\n\n\n@register.tag\ndef ifequal(parser, token):\n \"\"\"\n Outputs the contents of the block if the two arguments equal each other.\n\n Examples::\n\n {% ifequal user.id comment.user_id %}\n ...\n {% endifequal %}\n\n {% ifnotequal user.id comment.user_id %}\n ...\n {% else %}\n ...\n {% endifnotequal %}\n \"\"\"\n return do_ifequal(parser, token, False)\n\n\n@register.tag\ndef ifnotequal(parser, token):\n \"\"\"\n Outputs the contents of the block if the two arguments are not equal.\n See ifequal.\n \"\"\"\n return do_ifequal(parser, token, True)\n\n\nclass TemplateLiteral(Literal):\n def __init__(self, value, text):\n self.value = value\n self.text = text # for better error messages\n\n def display(self):\n return self.text\n\n def eval(self, context):\n return self.value.resolve(context, ignore_failures=True)\n\n\nclass TemplateIfParser(IfParser):\n error_class = TemplateSyntaxError\n\n def __init__(self, parser, *args, **kwargs):\n self.template_parser = parser\n super(TemplateIfParser, self).__init__(*args, **kwargs)\n\n def create_var(self, value):\n return TemplateLiteral(self.template_parser.compile_filter(value), 
value)\n\n\n@register.tag('if')\ndef do_if(parser, token):\n \"\"\"\n The ``{% if %}`` tag evaluates a variable, and if that variable is \"true\"\n (i.e., exists, is not empty, and is not a false boolean value), the\n contents of the block are output:\n\n ::\n\n {% if athlete_list %}\n Number of athletes: {{ athlete_list|count }}\n {% elif athlete_in_locker_room_list %}\n Athletes should be out of the locker room soon!\n {% else %}\n No athletes.\n {% endif %}\n\n In the above, if ``athlete_list`` is not empty, the number of athletes will\n be displayed by the ``{{ athlete_list|count }}`` variable.\n\n As you can see, the ``if`` tag may take one or several `` {% elif %}``\n clauses, as well as an ``{% else %}`` clause that will be displayed if all\n previous conditions fail. These clauses are optional.\n\n ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of\n variables or to negate a given variable::\n\n {% if not athlete_list %}\n There are no athletes.\n {% endif %}\n\n {% if athlete_list or coach_list %}\n There are some athletes or some coaches.\n {% endif %}\n\n {% if athlete_list and coach_list %}\n Both athletes and coaches are available.\n {% endif %}\n\n {% if not athlete_list or coach_list %}\n There are no athletes, or there are some coaches.\n {% endif %}\n\n {% if athlete_list and not coach_list %}\n There are some athletes and absolutely no coaches.\n {% endif %}\n\n Comparison operators are also available, and the use of filters is also\n allowed, for example::\n\n {% if articles|length >= 5 %}...{% endif %}\n\n Arguments and operators _must_ have a space between them, so\n ``{% if 1>2 %}`` is not a valid if tag.\n\n All supported operators are: ``or``, ``and``, ``in``, ``not in``\n ``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.\n\n Operator precedence follows Python.\n \"\"\"\n # {% if ... %}\n bits = token.split_contents()[1:]\n condition = TemplateIfParser(parser, bits).parse()\n nodelist = parser.parse(('elif', 'else', 'endif'))\n conditions_nodelists = [(condition, nodelist)]\n token = parser.next_token()\n\n # {% elif ... %} (repeatable)\n while token.contents.startswith('elif'):\n bits = token.split_contents()[1:]\n condition = TemplateIfParser(parser, bits).parse()\n nodelist = parser.parse(('elif', 'else', 'endif'))\n conditions_nodelists.append((condition, nodelist))\n token = parser.next_token()\n\n # {% else %} (optional)\n if token.contents == 'else':\n nodelist = parser.parse(('endif',))\n conditions_nodelists.append((None, nodelist))\n token = parser.next_token()\n\n # {% endif %}\n assert token.contents == 'endif'\n\n return IfNode(conditions_nodelists)\n\n\n@register.tag\ndef ifchanged(parser, token):\n \"\"\"\n Checks if a value has changed from the last iteration of a loop.\n\n The ``{% ifchanged %}`` block tag is used within a loop. It has two\n possible uses.\n\n 1. Checks its own rendered contents against its previous state and only\n displays the content if it has changed. For example, this displays a\n list of days, only displaying the month if it changes::\n\n

      Archive for {{ year }}

      \n\n {% for date in days %}\n {% ifchanged %}

      {{ date|date:\"F\" }}

      {% endifchanged %}\n {{ date|date:\"j\" }}\n {% endfor %}\n\n 2. If given one or more variables, check whether any variable has changed.\n For example, the following shows the date every time it changes, while\n showing the hour if either the hour or the date has changed::\n\n {% for date in days %}\n {% ifchanged date.date %} {{ date.date }} {% endifchanged %}\n {% ifchanged date.hour date.date %}\n {{ date.hour }}\n {% endifchanged %}\n {% endfor %}\n \"\"\"\n bits = token.split_contents()\n nodelist_true = parser.parse(('else', 'endifchanged'))\n token = parser.next_token()\n if token.contents == 'else':\n nodelist_false = parser.parse(('endifchanged',))\n parser.delete_first_token()\n else:\n nodelist_false = NodeList()\n values = [parser.compile_filter(bit) for bit in bits[1:]]\n return IfChangedNode(nodelist_true, nodelist_false, *values)\n\n\n@register.tag\ndef ssi(parser, token):\n \"\"\"\n Outputs the contents of a given file into the page.\n\n Like a simple \"include\" tag, the ``ssi`` tag includes the contents\n of another file -- which must be specified using an absolute path --\n in the current page::\n\n {% ssi \"/home/html/ljworld.com/includes/right_generic.html\" %}\n\n If the optional \"parsed\" parameter is given, the contents of the included\n file are evaluated as template code, with the current context::\n\n {% ssi \"/home/html/ljworld.com/includes/right_generic.html\" parsed %}\n \"\"\"\n warnings.warn(\n \"The {% ssi %} tag is deprecated. Use the {% include %} tag instead.\",\n RemovedInDjango20Warning,\n )\n\n bits = token.split_contents()\n parsed = False\n if len(bits) not in (2, 3):\n raise TemplateSyntaxError(\"'ssi' tag takes one argument: the path to\"\n \" the file to be included\")\n if len(bits) == 3:\n if bits[2] == 'parsed':\n parsed = True\n else:\n raise TemplateSyntaxError(\"Second (optional) argument to %s tag\"\n \" must be 'parsed'\" % bits[0])\n filepath = parser.compile_filter(bits[1])\n return SsiNode(filepath, parsed)\n\n\n@register.tag\ndef load(parser, token):\n \"\"\"\n Loads a custom template tag set.\n\n For example, to load the template tags in\n ``django/templatetags/news/photos.py``::\n\n {% load news.photos %}\n\n Can also be used to load an individual tag/filter from\n a library::\n\n {% load byline from news %}\n\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n bits = token.contents.split()\n if len(bits) >= 4 and bits[-2] == \"from\":\n try:\n taglib = bits[-1]\n lib = get_library(taglib)\n except InvalidTemplateLibrary as e:\n raise TemplateSyntaxError(\"'%s' is not a valid tag library: %s\" %\n (taglib, e))\n else:\n temp_lib = Library()\n for name in bits[1:-2]:\n if name in lib.tags:\n temp_lib.tags[name] = lib.tags[name]\n # a name could be a tag *and* a filter, so check for both\n if name in lib.filters:\n temp_lib.filters[name] = lib.filters[name]\n elif name in lib.filters:\n temp_lib.filters[name] = lib.filters[name]\n else:\n raise TemplateSyntaxError(\"'%s' is not a valid tag or filter in tag library '%s'\" %\n (name, taglib))\n parser.add_library(temp_lib)\n else:\n for taglib in bits[1:]:\n # add the library to the parser\n try:\n lib = get_library(taglib)\n parser.add_library(lib)\n except InvalidTemplateLibrary as e:\n raise TemplateSyntaxError(\"'%s' is not a valid tag library: %s\" %\n (taglib, e))\n return LoadNode()\n\n\n@register.tag\ndef lorem(parser, token):\n \"\"\"\n Creates random Latin text useful for providing test data in templates.\n\n 
Usage format::\n\n {% lorem [count] [method] [random] %}\n\n ``count`` is a number (or variable) containing the number of paragraphs or\n words to generate (default is 1).\n\n ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for\n plain-text paragraph blocks (default is ``b``).\n\n ``random`` is the word ``random``, which if given, does not use the common\n paragraph (starting \"Lorem ipsum dolor sit amet, consectetuer...\").\n\n Examples:\n\n * ``{% lorem %}`` will output the common \"lorem ipsum\" paragraph\n * ``{% lorem 3 p %}`` will output the common \"lorem ipsum\" paragraph\n and two random paragraphs each wrapped in HTML ``

      `` tags\n * ``{% lorem 2 w random %}`` will output two random latin words\n \"\"\"\n bits = list(token.split_contents())\n tagname = bits[0]\n # Random bit\n common = bits[-1] != 'random'\n if not common:\n bits.pop()\n # Method bit\n if bits[-1] in ('w', 'p', 'b'):\n method = bits.pop()\n else:\n method = 'b'\n # Count bit\n if len(bits) > 1:\n count = bits.pop()\n else:\n count = '1'\n count = parser.compile_filter(count)\n if len(bits) != 1:\n raise TemplateSyntaxError(\"Incorrect format for %r tag\" % tagname)\n return LoremNode(count, method, common)\n\n\n@register.tag\ndef now(parser, token):\n \"\"\"\n Displays the date, formatted according to the given string.\n\n Uses the same format as PHP's ``date()`` function; see http://php.net/date\n for all the possible values.\n\n Sample usage::\n\n It is {% now \"jS F Y H:i\" %}\n \"\"\"\n bits = token.split_contents()\n asvar = None\n if len(bits) == 4 and bits[-2] == 'as':\n asvar = bits[-1]\n bits = bits[:-2]\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'now' statement takes one argument\")\n format_string = bits[1][1:-1]\n return NowNode(format_string, asvar)\n\n\n@register.tag\ndef regroup(parser, token):\n \"\"\"\n Regroups a list of alike objects by a common attribute.\n\n This complex tag is best illustrated by use of an example: say that\n ``people`` is a list of ``Person`` objects that have ``first_name``,\n ``last_name``, and ``gender`` attributes, and you'd like to display a list\n that looks like:\n\n * Male:\n * George Bush\n * Bill Clinton\n * Female:\n * Margaret Thatcher\n * Colendeeza Rice\n * Unknown:\n * Pat Smith\n\n The following snippet of template code would accomplish this dubious task::\n\n {% regroup people by gender as grouped %}\n

        \n {% for group in grouped %}\n
      • {{ group.grouper }}\n
          \n {% for item in group.list %}\n
        • {{ item }}
        • \n {% endfor %}\n
        \n {% endfor %}\n
      \n\n As you can see, ``{% regroup %}`` populates a variable with a list of\n objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the\n item that was grouped by; ``list`` contains the list of objects that share\n that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``\n and ``Unknown``, and ``list`` is the list of people with those genders.\n\n Note that ``{% regroup %}`` does not work when the list to be grouped is not\n sorted by the key you are grouping by! This means that if your list of\n people was not sorted by gender, you'd need to make sure it is sorted\n before using it, i.e.::\n\n {% regroup people|dictsort:\"gender\" by gender as grouped %}\n\n \"\"\"\n bits = token.split_contents()\n if len(bits) != 6:\n raise TemplateSyntaxError(\"'regroup' tag takes five arguments\")\n target = parser.compile_filter(bits[1])\n if bits[2] != 'by':\n raise TemplateSyntaxError(\"second argument to 'regroup' tag must be 'by'\")\n if bits[4] != 'as':\n raise TemplateSyntaxError(\"next-to-last argument to 'regroup' tag must\"\n \" be 'as'\")\n var_name = bits[5]\n # RegroupNode will take each item in 'target', put it in the context under\n # 'var_name', evaluate 'var_name'.'expression' in the current context, and\n # group by the resulting value. After all items are processed, it will\n # save the final result in the context under 'var_name', thus clearing the\n # temporary values. This hack is necessary because the template engine\n # doesn't provide a context-aware equivalent of Python's getattr.\n expression = parser.compile_filter(var_name +\n VARIABLE_ATTRIBUTE_SEPARATOR +\n bits[3])\n return RegroupNode(target, expression, var_name)\n\n\n@register.tag\ndef spaceless(parser, token):\n \"\"\"\n Removes whitespace between HTML tags, including tab and newline characters.\n\n Example usage::\n\n {% spaceless %}\n

      \n Foo\n

      \n {% endspaceless %}\n\n This example would return this HTML::\n\n

      Foo

      \n\n Only space between *tags* is normalized -- not space between tags and text.\n In this example, the space around ``Hello`` won't be stripped::\n\n {% spaceless %}\n \n Hello\n \n {% endspaceless %}\n \"\"\"\n nodelist = parser.parse(('endspaceless',))\n parser.delete_first_token()\n return SpacelessNode(nodelist)\n\n\n@register.tag\ndef templatetag(parser, token):\n \"\"\"\n Outputs one of the bits used to compose template tags.\n\n Since the template system has no concept of \"escaping\", to display one of\n the bits used in template tags, you must use the ``{% templatetag %}`` tag.\n\n The argument tells which template bit to output:\n\n ================== =======\n Argument Outputs\n ================== =======\n ``openblock`` ``{%``\n ``closeblock`` ``%}``\n ``openvariable`` ``{{``\n ``closevariable`` ``}}``\n ``openbrace`` ``{``\n ``closebrace`` ``}``\n ``opencomment`` ``{#``\n ``closecomment`` ``#}``\n ================== =======\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n bits = token.contents.split()\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'templatetag' statement takes one argument\")\n tag = bits[1]\n if tag not in TemplateTagNode.mapping:\n raise TemplateSyntaxError(\"Invalid templatetag argument: '%s'.\"\n \" Must be one of: %s\" %\n (tag, list(TemplateTagNode.mapping)))\n return TemplateTagNode(tag)\n\n\n@register.tag\ndef url(parser, token):\n \"\"\"\n Returns an absolute URL matching given view with its parameters.\n\n This is a way to define links that aren't tied to a particular URL\n configuration::\n\n {% url \"path.to.some_view\" arg1 arg2 %}\n\n or\n\n {% url \"path.to.some_view\" name1=value1 name2=value2 %}\n\n The first argument is a path to a view. It can be an absolute Python path\n or just ``app_name.view_name`` without the project name if the view is\n located inside the project.\n\n Other arguments are space-separated values that will be filled in place of\n positional and keyword arguments in the URL. Don't mix positional and\n keyword arguments.\n\n All arguments for the URL should be present.\n\n For example if you have a view ``app_name.client`` taking client's id and\n the corresponding line in a URLconf looks like this::\n\n ('^client/(\\d+)/$', 'app_name.client')\n\n and this app's URLconf is included into the project's URLconf under some\n path::\n\n ('^clients/', include('project_name.app_name.urls'))\n\n then in a template you can create a link for a certain client like this::\n\n {% url \"app_name.client\" client.id %}\n\n The URL will look like ``/clients/client/123/``.\n\n The first argument can also be a named URL instead of the Python path to\n the view callable. For example if the URLconf entry looks like this::\n\n url('^client/(\\d+)/$', name='client-detail-view')\n\n then in the template you can use::\n\n {% url \"client-detail-view\" client.id %}\n\n There is even another possible value type for the first argument. 
It can be\n the name of a template variable that will be evaluated to obtain the view\n name or the URL name, e.g.::\n\n {% with view_path=\"app_name.client\" %}\n {% url view_path client.id %}\n {% endwith %}\n\n or,\n\n {% with url_name=\"client-detail-view\" %}\n {% url url_name client.id %}\n {% endwith %}\n\n \"\"\"\n bits = token.split_contents()\n if len(bits) < 2:\n raise TemplateSyntaxError(\"'%s' takes at least one argument\"\n \" (path to a view)\" % bits[0])\n viewname = parser.compile_filter(bits[1])\n args = []\n kwargs = {}\n asvar = None\n bits = bits[2:]\n if len(bits) >= 2 and bits[-2] == 'as':\n asvar = bits[-1]\n bits = bits[:-2]\n\n if len(bits):\n for bit in bits:\n match = kwarg_re.match(bit)\n if not match:\n raise TemplateSyntaxError(\"Malformed arguments to url tag\")\n name, value = match.groups()\n if name:\n kwargs[name] = parser.compile_filter(value)\n else:\n args.append(parser.compile_filter(value))\n\n return URLNode(viewname, args, kwargs, asvar)\n\n\n@register.tag\ndef verbatim(parser, token):\n \"\"\"\n Stops the template engine from rendering the contents of this block tag.\n\n Usage::\n\n {% verbatim %}\n {% don't process this %}\n {% endverbatim %}\n\n You can also designate a specific closing tag block (allowing the\n unrendered use of ``{% endverbatim %}``)::\n\n {% verbatim myblock %}\n ...\n {% endverbatim myblock %}\n \"\"\"\n nodelist = parser.parse(('endverbatim',))\n parser.delete_first_token()\n return VerbatimNode(nodelist.render(Context()))\n\n\n@register.tag\ndef widthratio(parser, token):\n \"\"\"\n For creating bar charts and such, this tag calculates the ratio of a given\n value to a maximum value, and then applies that ratio to a constant.\n\n For example::\n\n \"Bar\"\n\n\n If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,\n the image in the above example will be 88 pixels wide\n (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).\n\n In some cases you might want to capture the result of widthratio in a\n variable. It can be useful for instance in a blocktrans like this::\n\n {% widthratio this_value max_value max_width as width %}\n {% blocktrans %}The width is: {{ width }}{% endblocktrans %}\n \"\"\"\n bits = token.split_contents()\n if len(bits) == 4:\n tag, this_value_expr, max_value_expr, max_width = bits\n asvar = None\n elif len(bits) == 6:\n tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits\n if as_ != 'as':\n raise TemplateSyntaxError(\"Invalid syntax in widthratio tag. 
Expecting 'as' keyword\")\n else:\n raise TemplateSyntaxError(\"widthratio takes at least three arguments\")\n\n return WidthRatioNode(parser.compile_filter(this_value_expr),\n parser.compile_filter(max_value_expr),\n parser.compile_filter(max_width),\n asvar=asvar)\n\n\n@register.tag('with')\ndef do_with(parser, token):\n \"\"\"\n Adds one or more values to the context (inside of this block) for caching\n and easy access.\n\n For example::\n\n {% with total=person.some_sql_method %}\n {{ total }} object{{ total|pluralize }}\n {% endwith %}\n\n Multiple values can be added to the context::\n\n {% with foo=1 bar=2 %}\n ...\n {% endwith %}\n\n The legacy format of ``{% with person.some_sql_method as total %}`` is\n still accepted.\n \"\"\"\n bits = token.split_contents()\n remaining_bits = bits[1:]\n extra_context = token_kwargs(remaining_bits, parser, support_legacy=True)\n if not extra_context:\n raise TemplateSyntaxError(\"%r expected at least one variable \"\n \"assignment\" % bits[0])\n if remaining_bits:\n raise TemplateSyntaxError(\"%r received an invalid token: %r\" %\n (bits[0], remaining_bits[0]))\n nodelist = parser.parse(('endwith',))\n parser.delete_first_token()\n return WithNode(None, None, nodelist, extra_context=extra_context)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475262,"cells":{"repo_name":{"kind":"string","value":"GENIEMC/GENIE_2_9_0"},"path":{"kind":"string","value":"data/validation/hA/fsiv.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"22493"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\n Author: Tomasz Golan\n\n For help run one from the following commands:\n\t'./fsiv.py --help' or './fsiv.py -h'\n\t'python fsiv.py --help' or 'python fsiv.py -h'\n\"\"\"\n\nimport os, re, sys, getopt, time\nfrom subprocess import call\n\n#GLOBAL VARIABLES\n\nDAT_DIR = os.environ.get('FSIV_DAT_DIR') or 'data_files'\t#set a directory with data files (use 'pwd/data' if $FSIV_DAT_DIR is not defined)\nSIM_DIR = os.environ.get('FSIV_SIM_DIR') or 'sim_files'\t\t#set a directory for root/xsec files (use 'pwd/sim_files' if $FSIV_SIM_DIR is not defined)\nPNG_DIR = os.environ.get('FSIV_PNG_DIR') or 'png_files'\t\t#set a directory for plots (use 'pwd/png_files' if $FSIV_PNG_DIR is not defined)\nLOG_DIR = os.environ.get('FSIV_LOG_DIR') or 'log_files'\t\t#set a direcotry for log files (use 'pwd/log_files' if $FSIV_LOG_DIR is not defined) \nBSH_DIR = os.environ.get('FSIV_BSH_DIR') or 'bash_scripts'\t#set a direcotry for bash scripts (use 'pwd/bash_files' if $FSIV_BSH_DIR is not defined) \n\nauthors = []\t\t#storage for the list of available authors\ntot_proc_list = []\t#storage for the list of available processes for total cross section\n\ndate_ = time.strftime(\"%b\") + \"_\" + time.strftime(\"%d\") + \"_\" + time.strftime(\"%y\")\n\n#CLASSES\n\nclass Author:\n #store available processes for given author\n def __init__ (self, name): \n\tself.name = name\t\t\t\t\t\t\t\t#author's name\n\tself.pion = self.nucl = self.kaon = self.tot = self.nrg = self.ang = False\t#author's switchers for simluations/plots to be done\n\tself.tot_proc = []\t\t\t\t\t\t\t\t#storage for processes for total cross section data ([particle, tk_min, tk_max, target, fate])\n def __cmp__(self, name): return cmp(self.name, name)\t\t\t\t#compare authors by name\n def __repr__(self): return repr(self.name)\n def print_ (self, dest = None):\t\t\t\t\t\t\t#print switchers for given author\n\tprint >> dest, '-'*22\n\tprint >> dest, self.name,':'\n\tprint >> 
dest, '-'*22
	print >> dest, 'Pion mode:\t',self.pion
	print >> dest, 'Nucleon mode:\t',self.nucl
	print >> dest, 'Kaon mode:\t',self.kaon
	print >> dest, 'Xsec data:\t',self.tot
	print >> dest, 'Energy data:\t',self.nrg
	print >> dest, 'Angle data:\t',self.ang
    def check (self, particle):								#return particle switcher for given particle
	if todo_authors and self.name not in todo_authors: return False
	if (particle == 'pion'): return self.pion
	elif (particle == 'nucleon'): return self.nucl
	elif (particle == 'kaon'): return self.kaon

class TotalProcess:
    #store information about particle, target and energy range for total cross section
    def __init__ (self, proc):					#create a process from proc = [particle, [tk list], xsec_max, target, fate]
	self.particle = proc[0]
	if self.particle in ['p','n']: self.particle_kind = 'nucleon'
	elif self.particle in ['kp', 'km']: self.particle_kind = 'kaon'
	else: self.particle_kind = 'pion'
	self.tk_data = set(proc[1])
	self.tk_max = [max(proc[1])]
	self.xsec_max = [proc[2]]
	self.target = proc[3]
	self.fate = [proc[4]]
	self.name = self.particle + '_' + self.target
    def __cmp__(self, x): return cmp(self.name, x)		#compare processes by name = particle_target
    def __repr__(self): return repr(self.name)
    def update (self,x):					#update process information
	self.tk_data.update(x.tk_data)
	if x.fate[0] not in self.fate:				#add new fate (and new xsec max) if necessary
	    self.fate.append(x.fate[0])
	    self.xsec_max.append(x.xsec_max[0])
	    self.tk_max.append(x.tk_max[0])
	else:							#if fate exists -> update xsec max and energy max [for plot range]
	    index = self.fate.index(x.fate[0])
	    if x.xsec_max[0] > self.xsec_max[index]: self.xsec_max[index] = x.xsec_max[0]
	    if x.tk_max[0] > self.tk_max[index]: self.tk_max[index] = x.tk_max[0]
    def print_ (self, dest = None):				#print process summary (note: tk_sim is not set in __init__, it is attached to the process later)
	print >> dest, ' * ', self.particle, '+', self.target
	print >> dest, '\tTk found in data = ', str(sorted(self.tk_data))
	print >> dest, '\tTk prepared for simulation = ', str(sorted(self.tk_sim))
	print >> dest, '\tFates:'
	for f in self.fate:
	    index = self.fate.index(f)
	    print >> dest, '\t\t', f, '\t-> ', 'Tk max = ', self.tk_max[index], ', xsec max = ', self.xsec_max[index]

#FUNCTIONS

def help():
    print '\n','*'*110
    print """
fsiv.py runs 'runfast.pl' and 'intranukeplotter.pl' in a loop (assuming they are in the same directory)

the data are taken from the directory defined in the $FSIV_DAT_DIR environment variable
if $FSIV_DAT_DIR is not defined, 'pwd/data_files' is used instead

the output root/xsec files are saved into the directory defined in the $FSIV_SIM_DIR environment variable
if $FSIV_SIM_DIR is not defined, 'pwd/sim_files' is used

the output png files are saved into the directory defined in the $FSIV_PNG_DIR environment variable
if $FSIV_PNG_DIR is not defined, 'pwd/png_files' is used

the bash scripts are saved into the directory defined in the $FSIV_BSH_DIR environment variable
if $FSIV_BSH_DIR is not defined, 'pwd/bash_scripts' is used

the log files are saved into the directory defined in the $FSIV_LOG_DIR environment variable
if $FSIV_LOG_DIR is not defined, 'pwd/log_files' is used

by default all available data sets are done unless specified otherwise

available options are:

  -p, --pion \t -> turn on pion validation
  -n, --nucleon \t -> turn on nucleon validation
  -k, --kaon \t -> turn on kaon validation

  *** if none of the above is chosen, they are all set on ***

  -t, --total \t -> turn on total cross section validation
  -e, --energy \t -> turn on dsigma / denergy validation
  -a, --angle \t -> turn on dsigma / dangle validation

  *** if none of the above is chosen, they are all set on ***

  -s, --simulation \t -> turn on 'runfast.pl' to generate root files
  -f, --plot \t -> turn on 'intranukeplotter.pl' to generate plots

  *** if none of the above is chosen, they are all set on ***

  --nof_events_diff= -> number of events for differential cross section (default=1,000,000)
  --nof_events_total= -> number of events for total cross section (default=100,000)

  --command= \t -> command used to run the generated bash scripts
  --FNAL\t\t -> set command for FNAL grid (i.e. jobsub -e GENIE -e LD_LIBRARY_PATH -e PATH -e HOME)

  --authors= \t -> list of authors to be done (only for differential xsec)
  --isospins= \t -> list of particles to be done (only for total xsec): pip,pim,p,n,kp,km

Examples:

  1. run simulations and create plots for all available pion data sets for total cross section

	./fsiv.py --pion --total

  2. create only plots for author1 and author2

	./fsiv.py -n --plot --authors='author1 author2'

  3. run simulations and create plots for all particles for dsigma/denergy data sets with a custom command

	./fsiv.py --energy --command='my_command --option'

  4. run only simulations for total cross section for pip and kp with 1000 events

	./fsiv.py -t --isospins='pip kp' --nof_events_total=1000

"""
    print '*'*110,'\n'

def init ():
    #set global switchers with respect to the command line arguments (see help for details)
    global do_pion, do_nucl, do_kaon, do_tot, do_nrg, do_ang, do_sims, do_plot, nof_events_diff, nof_events_total, command, log_file, todo_authors, todo_isospins
    command = None				#command to run bash scripts (e.g. jobsub -q script_name.sh)
jobsub -q script_name.sh)\n\n nof_events_diff = '1000000'\t\t\t#number of events for runfast.pl for differential cross section\n nof_events_total = '100000'\t\t\t#number of events for runfast.pl for total cross section\n\n do_pion = do_nucl = do_kaon = False\t\t#switchers for particles\n do_tot = do_nrg = do_ang = False\t\t#switchers for types of plots\n do_sims = do_plot = False\t\t\t#switchers for simulations and plots\n\n todo_authors = None\t\t\t#the list of authors to be done for diff xsec (if not defined -> all are done)\n todo_isospins = None\t\t\t#the list of targets to be done (if not defined -> all are done)\n\n try:\n\topts, args = getopt.getopt(sys.argv[1:], \"hpnkteasf\", [\"help\", \"pion\", \"nucleon\",\"kaon\",\"total\",\"energy\",\"angle\",\"simulation\",\"plot\",\"nof_events_diff=\",\"nof_events_total=\",\"command=\",\"authors=\",\"isospins=\",\"FNAL\"])\n except getopt.GetoptError as err:\n\thelp()\n\tprint 'INPUT ERROR: ', str(err), '\\n'\n\tsys.exit(2)\n \n for o, a in opts:\n\tif o in (\"-h\", \"--help\"):\n\t help()\n\t sys.exit(0)\n\telif o in (\"-p\", \"--pion\"): do_pion = True\n\telif o in (\"-n\", \"--nucleon\"): do_nucl = True\n\telif o in (\"-k\", \"--kaon\"): do_kaon = True\n\telif o in (\"-t\", \"--total\"): do_tot = True\n\telif o in (\"-e\", \"--energy\"): do_nrg = True\n\telif o in (\"-a\", \"--angle\"): do_ang = True\n\telif o in (\"-s\", \"--simulation\"): do_sims = True\n\telif o in (\"-f\", \"--plot\"): do_plot = True\n\telif o in (\"--nof_events_diff\"): nof_events_diff = a\n\telif o in (\"--nof_events_total\"): nof_events_total = a\n\telif o in (\"--command\"): command = a\n\telif o in (\"--authors\"): todo_authors = a.split(' ')\n\telif o in (\"--isospins\"): todo_isospins = a.split(' ')\n\telif o in (\"--FNAL\"): command = \"jobsub -e GENIE -e LD_LIBRARY_PATH -e PATH -e HOME\"\n\t\n if not do_pion + do_nucl + do_kaon: do_pion = do_nucl = do_kaon = True\t#if no particle is chosen all are set on\n if not do_tot + do_nrg + do_ang: do_tot = do_nrg = do_ang = True\t\t#if no xsec type is chosen all are set on\n if not do_sims + do_plot: do_sims = do_plot = True\t\t\t\t#if no task is selected do both: run simulations and prepare plots\n\n print_options()\n if not opts: print \"\\nYou did not choose any option. Default settings will be used. Run ./fsiv.py -h to see available options.\"\n while True:\n\tc = raw_input(\"\\nDo you want to proceed ['Y' or 'N']? 
\")\n\tif c.lower() == 'n': sys.exit()\n\tif c.lower() == 'y': break;\n\n call([\"mkdir\", \"-p\", LOG_DIR])\n log_file = open(LOG_DIR + \"/fsiv_\" + date_ + \".log\", \"w\")\n\ndef print_options (dest=None):\n print >> dest, '\\nThe following options were chosen:\\n'\n print >> dest, 'Pion mode:\\t',do_pion\n print >> dest, 'Nucleon mode:\\t',do_nucl\n print >> dest, 'Kaon mode:\\t',do_kaon\n print >> dest, '\\nXsec data:\\t',do_tot\n print >> dest, 'Energy data:\\t',do_nrg\n print >> dest, 'Angle data:\\t', do_ang\n print >> dest, '\\nSimulations:\\t', do_sims\n print >> dest, 'Plots:\\t\\t', do_plot\n print >> dest, '\\nNof events:\\t', nof_events_diff, '(differential), ', nof_events_total, '(total)'\n print >> dest, 'Command:\\t', command\n print >> dest, 'Author cut:\\t', todo_authors\n print >> dest, 'Isospin cut:\\t', todo_isospins\n print >> dest, '\\nData directory:\\t', DAT_DIR\n print >> dest, 'ROOT/xsec dir:\\t', SIM_DIR\n print >> dest, 'PNG directory:\\t', PNG_DIR\n print >> dest, 'Logs directory:\\t', LOG_DIR\n print >> dest, 'Scripts dir:\\t', BSH_DIR\n \ndef read_authors(dir_):\n #read authors list from given directory\n global wrongfiles\n wrongfiles=[]\n fates = ['total', 'tot', 'reac', 'elas', 'inel', 'abs', 'cex']\n pions = ['pip', 'pim']\n nucls = ['p', 'n', 'pp', 'np']\n kaons = ['kp', 'km']\n for root, dirs, files in os.walk(dir_):\n\tif '.svn' in dirs:\n\t dirs.remove('.svn')\t\t\t\t\t\t\t#skip .svn directory\n\tfor file in files:\n\t if file.endswith(\".dat\"):\t\t\t\t\t\t#look only at files with 'dat' extension\n\t\tfilename = re.split('[- .]',file)\t\t\t\t#extract process info assuimng the following convention: author-particle-target-fate.dat (total) or author-particle-*.dat (differential)\n\t\tif len(filename) < 5: \t\t\t\t\t\t#skip file with less then 4 parts in the filename\n\t\t wrongfiles.append(file)\n\t\t continue\n\t\tauthor = filename[0]\n\t\tparticle = filename[1]\n\t\tif author not in authors: authors.append(Author(author))\t#add author to the list (if not exist)\n\t\tindex = authors.index(author)\t\t\t\t\t#set index for given author\n\t\tif particle in pions: authors[index].pion = True\t\t#add particle mode corresponding to filename\n\t\telif particle in nucls: authors[index].nucl = True\n\t\telif particle in kaons: authors[index].kaon = True\n\t\telse:\n\t\t wrongfiles.append(file)\n\t\t continue\n\t\tif filename[3] in fates:\t\t\t\t\t#is total cross section data\n\t\t authors[index].tot = True\t\t\t\t\t#add total xsec distribution for this author\n\t\t target = filename[2]\n\t\t fate = filename[3]\n\t\t tk = map(float,list(line.split()[0] for line in open(root+'/'+file, 'r') \\\n\t\t\t if line.strip() and not line.strip().startswith('#')))\t\t#take first column of any non-empty and non-commented line (kinetic energy)\n\t\t xs = map(float,list(line.split()[1] for line in open(root+'/'+file, 'r') \\\n\t\t\t if line.strip() and not line.strip().startswith('#')))\t\t#take second column of any non-empty and non-commented line (xsec)\n\t\t authors[index].tot_proc.append( \\\n\t\t\t [particle, \\\n\t\t\t tk,\n\t\t\t max(xs), \\\n\t\t\t target, \\\n\t\t\t fate])\t\t\t\t\t\t#add proccess information in the following format: [particle, [tk list], xsec_max, target, fate]\n\t\telif file.endswith(\"angdist.dat\"): authors[index].ang = True\t#add dsigma/dangle distribution for this author if the file ends with 'angdist.dat'\n\t\telse: authors[index].nrg = True;\t\t\t\t#add disgma/denergy distribution if not 'total' nor 'angdist'\n\ndef auto_set_tk ():\n #remove 
tk points where to dense and add where to rare\n for proc in tot_proc_list:\t\t\t\t\t#for each available process for total xsec\n\tres = []\t\t\t\t\t\t#storage for result\n\ttemp = set([int(x) for x in proc.tk_data])\t\t#take data tk points (and round them to int)\n\ttemp = filter(lambda x: x > 10, temp)\t\t\t#remove all points less than 10\n\tif len(temp) < 3: res = temp\t\t\t\t#do nothing if there are less than 3 points\n\telse:\n\t temp = sorted(temp, reverse=True)\t\t\t#sort points (so pop() takes the lowest value)\n\t res.append(temp.pop())\t\t\t\t#save first tk point\n\t first_tk = sum_tk = temp.pop()\t\t\t#take the lowest\n\t counter = 1\t\t\t\t\t\t#and set counter to 1\n\t while True:\t\t\t\t\t\t#start loop\n\t\tnext_tk = temp.pop()\t\t\t\t#take next one\n\t\tif not temp:\t\t\t\t\t#if it is the last one\n\t\t res.append(sum_tk/counter)\t\t\t#save the current average tk\n\t\t res.append(next_tk)\t\t\t\t#save the last tk value\n\t\t break\t\t\t\t\t#and stop the loop\n\t\tif float(next_tk)/float(first_tk) < 1.25:\t#if difference between first_tk and next_tk is smaller than 25%\n\t\t sum_tk += next_tk\t\t\t\t#add next_tk to the sum\n\t\t counter += 1\t\t\t\t#and increase counter\n\t\telse:\t\t\t\t\t\t#if not\n\t\t res.append(sum_tk/counter)\t\t\t#save current average tk\n\t\t counter = 1\t\t\t\t\t#set counter to 1\n\t\t first_tk = sum_tk = next_tk\t\t\t#next_tk is new first_tk\n\n\tres.sort()\n\ttodo = True\n\twhile todo:\t\t\t\t\t\t#start loop\n\t for x in res:\t\t\t\t\t#go through all tk points\n\t\tindex = res.index(x)\n\t\tif not index == len(res)-1:\t\t\t#if it is not last point\n\t\t if float(res[index+1]) / float(x) > 2.0:\t#if the difference between two adjacent points is greater than 200%\n\t\t\tres.append(int((x+res[index+1])/2))\t#add the average of them\n\t\t\tres.sort()\n\t\t\tbreak\t\t\t\t\t#and start for loop from begining\n\t\telse:\t\t\t\t\t\t#stop main loop if reach last point\n\t\t todo = False\n\t\t break\n\t \n\tres.sort()\n\tproc.tk_sim = set(res)\n\ndef prepare_total():\n #create a list of particle-nuclei processes to be simulated\n for author in filter(lambda a: a.tot, authors):\t\t\t\t#for all authors with total xsec data\n\tfor tp in author.tot_proc:\t\t\t\t\t\t#take each type of process\n\t temp = TotalProcess(tp)\t\t\t\t\t\t#add or update if already exists\n\t if temp not in tot_proc_list: tot_proc_list.append(temp)\n\t else: tot_proc_list[tot_proc_list.index(temp)].update(temp)\n auto_set_tk()\n\ndef log(text = None):\n #print 'text' into a log file\n if text: print >> log_file, text\n #if 'text' is not given, print the list of chosen options, found authors and list of processes to be simulated for total xsec\n else:\n\tprint_options(log_file)\n\tprint >> log_file, '\\nThe following non-data files were found in ' + DAT_DIR + ': ', wrongfiles\n\tprint >> log_file, '\\nThe following authors were found in ' + DAT_DIR + ':\\n'\n\tfor author in authors: author.print_(log_file)\n\tprint >> log_file, '-'*22, '\\n'\n\tprint >> log_file, 'The following processes/fates are available for total cross section:\\n'\n\tfor x in tot_proc_list: x.print_(log_file)\n log_file.flush()\n\ndef make_dirs(xsec,particle):\n #create folders for logs, bash scripts, root/xsec files and plots\n call([\"mkdir\", \"-p\", LOG_DIR+\"/runfast/\"+xsec+\"/\"+particle])\n call([\"mkdir\", \"-p\", LOG_DIR+\"/intranukeplotter/\"+xsec+\"/\"+particle])\n call([\"mkdir\", \"-p\", PNG_DIR+\"/\"+xsec+\"/\"+particle])\n call([\"mkdir\", \"-p\", BSH_DIR+\"/\"+xsec+\"/\"+particle])\n call([\"mkdir\", 
\"-p\", SIM_DIR+\"/\"+xsec+\"/\"+particle])\n\ndef create_dirs():\n #call make_dirs for turn on processes\n if do_tot:\t\n\tif do_pion: make_dirs(\"total\", \"pion\")\n\tif do_nucl: make_dirs(\"total\", \"nucleon\")\n\tif do_kaon: make_dirs(\"total\", \"kaon\")\n if do_nrg :\n\tif do_pion: make_dirs(\"dbldiff_dist\", \"pion\")\n\tif do_nucl: make_dirs(\"dbldiff_dist\", \"nucleon\")\n\tif do_kaon: make_dirs(\"dbldiff_dist\", \"kaon\")\n if do_ang:\n\tif do_pion: make_dirs(\"ang_dist\", \"pion\")\n\tif do_nucl: make_dirs(\"ang_dist\", \"nucleon\")\n\tif do_kaon: make_dirs(\"ang_dist\", \"kaon\")\n\ndef run_bs (bs):\n #run bash script\n print \"\\n\\trunning \" + bs + \"\\n\"\n log(\"\\trunning \" + bs)\n if command: call(command + \" 'bash \" + bs + \"'\", shell=True)\t#if custom command is defined add it in front of 'bash bash_script.sh'\n else: call(\"bash \" + bs, shell=True)\n\ndef run_total (particle):\n log(\"\\nTotal xsec validation for \" + particle + \":\\n\")\n for proc in filter(lambda p: p.particle_kind == particle,tot_proc_list):\t\t\t\t\t\t\t\t\t#go through all available processes for given particle\n\tif not todo_isospins or proc.particle in todo_isospins:\t\t\t\t\t\t\t\t\t\t\t#check isospin filter\n\t bash_scrt = BSH_DIR + \"/total/\" + particle + \"/\" + proc.particle + \"_\" + proc.target + \"_\" + date_ + \".sh\"\t\t\t\t#create bash script file\n\t bash_file = open(bash_scrt, \"w\")\n\t print >> bash_file, \"#!/bin/bash\"\n\t if do_sims:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#if simulations are on\n\t\trunfast = \"perl runfast.pl --type totxs --rm yes\" \\\n\t\t + \" --p \" + proc.particle \\\n\t\t + \" --t \" + proc.target \\\n\t\t + \" --el '\" + str(sorted(proc.tk_sim))[1:-1] + \"'\" \\\n\t\t + \" --n \" + nof_events_total \\\n\t\t + \" --rootdir \" + SIM_DIR + \"/total/\" + particle \\\n\t\t + \" 1>/dev/null 2>\" + LOG_DIR + \"/runfast/total/\" + particle + \"/\" + proc.particle + \"_\" + proc.target + \"_\" + date_ + \".log\"\t\t#prepare runfast.pl command\n\t\tprint >> bash_file, runfast\t\t\t\t\t\t\t\t\t\t\t\t\t#put the command to bash sctipt\n\t if do_plot:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#if plots are on\n\t\tprint >> bash_file, \"shopt -s nocaseglob\"\t\t\t\t\t\t\t\t\t\t\t#turn off case sensivity\n\t\ttemp = \"f=$(ls \" + SIM_DIR + \"/total/\" + particle + \"/*_\" + proc.particle + \"_\" + proc.target + \"_*.txt -Art | tail -n 1)\"\t#bash script will find the recent *_particle_target_*.txt file\n\t\tprint >> bash_file, temp\n\t\tprint >> bash_file, \"m=$(echo ${f##*/} | cut -d'_' -f 1)\"\t\t\t\t\t\t\t\t\t#and extract its date for intranukeplotter\n\t\tprint >> bash_file, \"d=$(echo ${f##*/} | cut -d'_' -f 2)\"\n\t\tprint >> bash_file, \"y=$(echo ${f##*/} | cut -d'_' -f 3)\"\n\t\tfor fate in proc.fate:\t\t\t\t\t\t\t\t\t\t\t\t\t\t#for each available fate for given process\n\t\t index = proc.fate.index(fate)\n\t\t plotter = \"perl intranukeplotter.pl --type totxs --rm yes\" \\\n\t\t\t+ \" --stype \" + fate \\\n\t\t\t+ \" --p \" + proc.particle \\\n\t\t\t+ \" --t \" + proc.target \\\n\t\t\t+ \" --vmax \" + str(1.3*proc.xsec_max[index]) \\\n\t\t\t+ \" --hmax \" + str(1.1*proc.tk_max[index]) \\\n\t\t\t+ \" --dorf $m'_'$d'_'$y\" \\\n\t\t\t+ \" --rootdir \" + SIM_DIR + \"/total/\" + particle \\\n\t\t\t+ \" --datadir \" + DAT_DIR + \"/total/\" + proc.particle_kind \\\n\t\t\t+ \" --pngdir \" + PNG_DIR + \"/total/\" + proc.particle_kind \\\n\t\t\t+ \" > \" + LOG_DIR + \"/intranukeplotter/total/\" + particle + \"/\" + proc.particle + \"_\" + proc.target + \"_\" + date_ + 
\".log\"\t#prepare intranukeplotter.pl command\n\t\t print >> bash_file, plotter\t\t\t#put the command to bash script\n\t bash_file.close()\n\t run_bs(bash_scrt)\t\t\t\t\t#run bash script\n\t \n\ndef run_diff (particle,ang=False):\n if ang: log(\"\\ndsigma/dtheta validation for \" + particle + \":\\n\")\n else: log(\"\\ndsigma/dE validation for \" + particle + \":\\n\")\n dir_ = \"dbldiff_dist\"\n type_ = \"nrg\"\n if ang:\n\tdir_ = \"ang_dist\"\n\ttype_ = \"ang\"\n for author in filter(lambda a: a.check(particle),authors): \t\t\t\t\t\t\t\t\t#go through all available authors for given particle\n\tif (not ang and author.nrg) or (ang and author.ang):\n\t bash_scrt = BSH_DIR + \"/\" + dir_ + \"/\" + particle + \"/\" + author.name + \"_\" + date_ + \".sh\"\t\t\t\t#create bash script file\n\t bash_file = open(bash_scrt, \"w\")\n\t print >> bash_file, \"#!/bin/bash\"\n\t if do_sims:\t\t\t\t\t\t\t\t\t\t\t\t\t\t#if simulations are on\n\t\trunfast = \"perl runfast.pl --type root --rm yes --name yes\" \\\n\t\t + \" --n \" + nof_events_diff \\\n\t\t + \" --a \" + author.name \\\n\t\t + \" --rootdir \" + SIM_DIR + \"/\" + dir_ + \"/\" + particle \\\n\t\t + \" 1>/dev/null 2>\" + LOG_DIR + \"/runfast/\" + dir_ + \"/\" + particle + \"/\" + author.name + \"_\" + date_ + \".log\"\t\t#prepare runfast.pl command\n\t\tprint >> bash_file, runfast\t\t\t\t\t\t\t\t\t\t\t#put the command to bash script\n\t if do_plot:\t\t\t\t\t\t\t\t\t\t\t\t\t\t#if plots are on\n\t\tprint >> bash_file, \"shopt -s nocaseglob\"\t\t\t\t\t\t\t\t\t#turn off case sensivity\n\t\ttemp = \"f=$(ls \" + SIM_DIR + \"/\" + dir_ + \"/\" + particle + \"/\" + author.name + \"_*.root -Art | tail -n 1)\"\t#bash script will find the recent author_*.root file\n\t\tprint >> bash_file, temp\n\t\tprint >> bash_file, \"m=$(echo ${f##*/} | cut -d'_' -f 2)\"\t\t\t\t\t\t\t#and extract its date for intranukeplotter\n\t\tprint >> bash_file, \"d=$(echo ${f##*/} | cut -d'_' -f 3)\"\n\t\tprint >> bash_file, \"y=$(echo ${f##*/} | cut -d'_' -f 4)\"\n\t\tplotter = \"perl intranukeplotter.pl --type \" + type_ + \" --name yes --rm yes\" \\\n\t\t\t+ \" --rootdir \" + SIM_DIR + \"/\" + dir_ + \"/\" + particle \\\n\t\t\t+ \" --datadir \" + DAT_DIR + \"/\" + dir_ + \"/\" + particle \\\n\t\t\t+ \" --pngdir \" + PNG_DIR + \"/\" + dir_ + \"/\" + particle \\\n\t\t\t+ \" --dorf $m'_'$d'_'$y\" \\\n\t\t\t+ \" --a \" + author.name \\\n\t\t\t+ \" >\" + LOG_DIR + \"/intranukeplotter/\" + dir_ + \"/\" + particle + \"/\" + author.name + \"_\" + date_ + \".log\"\t#prepare intrnukeplotter.pl command\n\t\tprint >> bash_file, plotter\t\t#put the command to bash script\n\t bash_file.close()\n\t run_bs(bash_scrt)\t\t\t\t#run bash script\n\n#MAIN PROGRAM\n \nif __name__ == \"__main__\":\n\n init()\n read_authors(DAT_DIR)\n prepare_total()\n log()\n create_dirs()\n\n if do_tot:\n\tif do_pion: run_total ('pion')\n\tif do_nucl: run_total ('nucleon')\n\tif do_kaon: run_total ('kaon')\n\t\n if do_nrg:\n\tif do_pion: run_diff ('pion')\n\tif do_nucl: run_diff ('nucleon')\n\tif do_kaon: run_diff ('kaon')\n\n if do_ang:\n\tif do_pion: run_diff ('pion',True)\n\tif do_nucl: run_diff ('nucleon',True)\n\tif do_kaon: run_diff ('kaon',True)\n\n log('\\nDONE')\n"},"license":{"kind":"string","value":"gpl-3.0"}}},{"rowIdx":475263,"cells":{"repo_name":{"kind":"string","value":"bottompawn/kbengine"},"path":{"kind":"string","value":"kbe/src/lib/python/Lib/encodings/iso8859_9.py"},"copies":{"kind":"string","value":"272"},"size":{"kind":"string","value":"13156"},"content":{"kind":"string","value":"\"\"\" Python 
Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.\n\n\"\"\"#\"\n\nimport codecs\n\n### Codec APIs\n\nclass Codec(codecs.Codec):\n\n def encode(self,input,errors='strict'):\n return codecs.charmap_encode(input,errors,encoding_table)\n\n def decode(self,input,errors='strict'):\n return codecs.charmap_decode(input,errors,decoding_table)\n\nclass IncrementalEncoder(codecs.IncrementalEncoder):\n def encode(self, input, final=False):\n return codecs.charmap_encode(input,self.errors,encoding_table)[0]\n\nclass IncrementalDecoder(codecs.IncrementalDecoder):\n def decode(self, input, final=False):\n return codecs.charmap_decode(input,self.errors,decoding_table)[0]\n\nclass StreamWriter(Codec,codecs.StreamWriter):\n pass\n\nclass StreamReader(Codec,codecs.StreamReader):\n pass\n\n### encodings module API\n\ndef getregentry():\n return codecs.CodecInfo(\n name='iso8859-9',\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamreader=StreamReader,\n streamwriter=StreamWriter,\n )\n\n\n### Decoding Table\n\ndecoding_table = (\n '\\x00' # 0x00 -> NULL\n '\\x01' # 0x01 -> START OF HEADING\n '\\x02' # 0x02 -> START OF TEXT\n '\\x03' # 0x03 -> END OF TEXT\n '\\x04' # 0x04 -> END OF TRANSMISSION\n '\\x05' # 0x05 -> ENQUIRY\n '\\x06' # 0x06 -> ACKNOWLEDGE\n '\\x07' # 0x07 -> BELL\n '\\x08' # 0x08 -> BACKSPACE\n '\\t' # 0x09 -> HORIZONTAL TABULATION\n '\\n' # 0x0A -> LINE FEED\n '\\x0b' # 0x0B -> VERTICAL TABULATION\n '\\x0c' # 0x0C -> FORM FEED\n '\\r' # 0x0D -> CARRIAGE RETURN\n '\\x0e' # 0x0E -> SHIFT OUT\n '\\x0f' # 0x0F -> SHIFT IN\n '\\x10' # 0x10 -> DATA LINK ESCAPE\n '\\x11' # 0x11 -> DEVICE CONTROL ONE\n '\\x12' # 0x12 -> DEVICE CONTROL TWO\n '\\x13' # 0x13 -> DEVICE CONTROL THREE\n '\\x14' # 0x14 -> DEVICE CONTROL FOUR\n '\\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE\n '\\x16' # 0x16 -> SYNCHRONOUS IDLE\n '\\x17' # 0x17 -> END OF TRANSMISSION BLOCK\n '\\x18' # 0x18 -> CANCEL\n '\\x19' # 0x19 -> END OF MEDIUM\n '\\x1a' # 0x1A -> SUBSTITUTE\n '\\x1b' # 0x1B -> ESCAPE\n '\\x1c' # 0x1C -> FILE SEPARATOR\n '\\x1d' # 0x1D -> GROUP SEPARATOR\n '\\x1e' # 0x1E -> RECORD SEPARATOR\n '\\x1f' # 0x1F -> UNIT SEPARATOR\n ' ' # 0x20 -> SPACE\n '!' # 0x21 -> EXCLAMATION MARK\n '\"' # 0x22 -> QUOTATION MARK\n '#' # 0x23 -> NUMBER SIGN\n '$' # 0x24 -> DOLLAR SIGN\n '%' # 0x25 -> PERCENT SIGN\n '&' # 0x26 -> AMPERSAND\n \"'\" # 0x27 -> APOSTROPHE\n '(' # 0x28 -> LEFT PARENTHESIS\n ')' # 0x29 -> RIGHT PARENTHESIS\n '*' # 0x2A -> ASTERISK\n '+' # 0x2B -> PLUS SIGN\n ',' # 0x2C -> COMMA\n '-' # 0x2D -> HYPHEN-MINUS\n '.' # 0x2E -> FULL STOP\n '/' # 0x2F -> SOLIDUS\n '0' # 0x30 -> DIGIT ZERO\n '1' # 0x31 -> DIGIT ONE\n '2' # 0x32 -> DIGIT TWO\n '3' # 0x33 -> DIGIT THREE\n '4' # 0x34 -> DIGIT FOUR\n '5' # 0x35 -> DIGIT FIVE\n '6' # 0x36 -> DIGIT SIX\n '7' # 0x37 -> DIGIT SEVEN\n '8' # 0x38 -> DIGIT EIGHT\n '9' # 0x39 -> DIGIT NINE\n ':' # 0x3A -> COLON\n ';' # 0x3B -> SEMICOLON\n '<' # 0x3C -> LESS-THAN SIGN\n '=' # 0x3D -> EQUALS SIGN\n '>' # 0x3E -> GREATER-THAN SIGN\n '?' 
# 0x3F -> QUESTION MARK\n '@' # 0x40 -> COMMERCIAL AT\n 'A' # 0x41 -> LATIN CAPITAL LETTER A\n 'B' # 0x42 -> LATIN CAPITAL LETTER B\n 'C' # 0x43 -> LATIN CAPITAL LETTER C\n 'D' # 0x44 -> LATIN CAPITAL LETTER D\n 'E' # 0x45 -> LATIN CAPITAL LETTER E\n 'F' # 0x46 -> LATIN CAPITAL LETTER F\n 'G' # 0x47 -> LATIN CAPITAL LETTER G\n 'H' # 0x48 -> LATIN CAPITAL LETTER H\n 'I' # 0x49 -> LATIN CAPITAL LETTER I\n 'J' # 0x4A -> LATIN CAPITAL LETTER J\n 'K' # 0x4B -> LATIN CAPITAL LETTER K\n 'L' # 0x4C -> LATIN CAPITAL LETTER L\n 'M' # 0x4D -> LATIN CAPITAL LETTER M\n 'N' # 0x4E -> LATIN CAPITAL LETTER N\n 'O' # 0x4F -> LATIN CAPITAL LETTER O\n 'P' # 0x50 -> LATIN CAPITAL LETTER P\n 'Q' # 0x51 -> LATIN CAPITAL LETTER Q\n 'R' # 0x52 -> LATIN CAPITAL LETTER R\n 'S' # 0x53 -> LATIN CAPITAL LETTER S\n 'T' # 0x54 -> LATIN CAPITAL LETTER T\n 'U' # 0x55 -> LATIN CAPITAL LETTER U\n 'V' # 0x56 -> LATIN CAPITAL LETTER V\n 'W' # 0x57 -> LATIN CAPITAL LETTER W\n 'X' # 0x58 -> LATIN CAPITAL LETTER X\n 'Y' # 0x59 -> LATIN CAPITAL LETTER Y\n 'Z' # 0x5A -> LATIN CAPITAL LETTER Z\n '[' # 0x5B -> LEFT SQUARE BRACKET\n '\\\\' # 0x5C -> REVERSE SOLIDUS\n ']' # 0x5D -> RIGHT SQUARE BRACKET\n '^' # 0x5E -> CIRCUMFLEX ACCENT\n '_' # 0x5F -> LOW LINE\n '`' # 0x60 -> GRAVE ACCENT\n 'a' # 0x61 -> LATIN SMALL LETTER A\n 'b' # 0x62 -> LATIN SMALL LETTER B\n 'c' # 0x63 -> LATIN SMALL LETTER C\n 'd' # 0x64 -> LATIN SMALL LETTER D\n 'e' # 0x65 -> LATIN SMALL LETTER E\n 'f' # 0x66 -> LATIN SMALL LETTER F\n 'g' # 0x67 -> LATIN SMALL LETTER G\n 'h' # 0x68 -> LATIN SMALL LETTER H\n 'i' # 0x69 -> LATIN SMALL LETTER I\n 'j' # 0x6A -> LATIN SMALL LETTER J\n 'k' # 0x6B -> LATIN SMALL LETTER K\n 'l' # 0x6C -> LATIN SMALL LETTER L\n 'm' # 0x6D -> LATIN SMALL LETTER M\n 'n' # 0x6E -> LATIN SMALL LETTER N\n 'o' # 0x6F -> LATIN SMALL LETTER O\n 'p' # 0x70 -> LATIN SMALL LETTER P\n 'q' # 0x71 -> LATIN SMALL LETTER Q\n 'r' # 0x72 -> LATIN SMALL LETTER R\n 's' # 0x73 -> LATIN SMALL LETTER S\n 't' # 0x74 -> LATIN SMALL LETTER T\n 'u' # 0x75 -> LATIN SMALL LETTER U\n 'v' # 0x76 -> LATIN SMALL LETTER V\n 'w' # 0x77 -> LATIN SMALL LETTER W\n 'x' # 0x78 -> LATIN SMALL LETTER X\n 'y' # 0x79 -> LATIN SMALL LETTER Y\n 'z' # 0x7A -> LATIN SMALL LETTER Z\n '{' # 0x7B -> LEFT CURLY BRACKET\n '|' # 0x7C -> VERTICAL LINE\n '}' # 0x7D -> RIGHT CURLY BRACKET\n '~' # 0x7E -> TILDE\n '\\x7f' # 0x7F -> DELETE\n '\\x80' # 0x80 -> \n '\\x81' # 0x81 -> \n '\\x82' # 0x82 -> \n '\\x83' # 0x83 -> \n '\\x84' # 0x84 -> \n '\\x85' # 0x85 -> \n '\\x86' # 0x86 -> \n '\\x87' # 0x87 -> \n '\\x88' # 0x88 -> \n '\\x89' # 0x89 -> \n '\\x8a' # 0x8A -> \n '\\x8b' # 0x8B -> \n '\\x8c' # 0x8C -> \n '\\x8d' # 0x8D -> \n '\\x8e' # 0x8E -> \n '\\x8f' # 0x8F -> \n '\\x90' # 0x90 -> \n '\\x91' # 0x91 -> \n '\\x92' # 0x92 -> \n '\\x93' # 0x93 -> \n '\\x94' # 0x94 -> \n '\\x95' # 0x95 -> \n '\\x96' # 0x96 -> \n '\\x97' # 0x97 -> \n '\\x98' # 0x98 -> \n '\\x99' # 0x99 -> \n '\\x9a' # 0x9A -> \n '\\x9b' # 0x9B -> \n '\\x9c' # 0x9C -> \n '\\x9d' # 0x9D -> \n '\\x9e' # 0x9E -> \n '\\x9f' # 0x9F -> \n '\\xa0' # 0xA0 -> NO-BREAK SPACE\n '\\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK\n '\\xa2' # 0xA2 -> CENT SIGN\n '\\xa3' # 0xA3 -> POUND SIGN\n '\\xa4' # 0xA4 -> CURRENCY SIGN\n '\\xa5' # 0xA5 -> YEN SIGN\n '\\xa6' # 0xA6 -> BROKEN BAR\n '\\xa7' # 0xA7 -> SECTION SIGN\n '\\xa8' # 0xA8 -> DIAERESIS\n '\\xa9' # 0xA9 -> COPYRIGHT SIGN\n '\\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR\n '\\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK\n '\\xac' # 0xAC -> NOT SIGN\n '\\xad' # 0xAD -> SOFT HYPHEN\n 
'\\xae' # 0xAE -> REGISTERED SIGN\n '\\xaf' # 0xAF -> MACRON\n '\\xb0' # 0xB0 -> DEGREE SIGN\n '\\xb1' # 0xB1 -> PLUS-MINUS SIGN\n '\\xb2' # 0xB2 -> SUPERSCRIPT TWO\n '\\xb3' # 0xB3 -> SUPERSCRIPT THREE\n '\\xb4' # 0xB4 -> ACUTE ACCENT\n '\\xb5' # 0xB5 -> MICRO SIGN\n '\\xb6' # 0xB6 -> PILCROW SIGN\n '\\xb7' # 0xB7 -> MIDDLE DOT\n '\\xb8' # 0xB8 -> CEDILLA\n '\\xb9' # 0xB9 -> SUPERSCRIPT ONE\n '\\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR\n '\\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK\n '\\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER\n '\\xbd' # 0xBD -> VULGAR FRACTION ONE HALF\n '\\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS\n '\\xbf' # 0xBF -> INVERTED QUESTION MARK\n '\\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE\n '\\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE\n '\\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX\n '\\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE\n '\\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS\n '\\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE\n '\\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE\n '\\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA\n '\\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE\n '\\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE\n '\\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX\n '\\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS\n '\\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE\n '\\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE\n '\\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX\n '\\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS\n '\\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE\n '\\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE\n '\\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE\n '\\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE\n '\\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX\n '\\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE\n '\\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS\n '\\xd7' # 0xD7 -> MULTIPLICATION SIGN\n '\\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE\n '\\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE\n '\\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE\n '\\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX\n '\\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS\n '\\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE\n '\\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA\n '\\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S\n '\\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE\n '\\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE\n '\\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX\n '\\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE\n '\\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS\n '\\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE\n '\\xe6' # 0xE6 -> LATIN SMALL LETTER AE\n '\\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA\n '\\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE\n '\\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE\n '\\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX\n '\\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS\n '\\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE\n '\\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE\n '\\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX\n '\\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS\n '\\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE\n '\\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE\n '\\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE\n '\\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE\n '\\xf4' # 0xF4 -> LATIN SMALL 
LETTER O WITH CIRCUMFLEX\n '\\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE\n '\\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS\n '\\xf7' # 0xF7 -> DIVISION SIGN\n '\\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE\n '\\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE\n '\\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE\n '\\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX\n '\\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS\n '\\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I\n '\\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA\n '\\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS\n)\n\n### Encoding table\nencoding_table=codecs.charmap_build(decoding_table)\n"},"license":{"kind":"string","value":"lgpl-3.0"}}},{"rowIdx":475264,"cells":{"repo_name":{"kind":"string","value":"xylsxyls/xueyelingshuang"},"path":{"kind":"string","value":"src/BigNumberBase/scripts/rebuild_BigNumberBase.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14102"},"content":{"kind":"string","value":"#!python3\n# -*- coding:utf-8 -*-\nimport os\nimport sys\nimport time\nimport ctypes\nimport shutil\nimport subprocess\nIsPy3 = sys.version_info[0] >= 3\nif IsPy3:\n import winreg\nelse:\n import codecs\n import _winreg as winreg\n\nBuildType = 'Release'\nIsRebuild = True\nBuild = 'Rebuild'\nUpdate = False\nCopy = False\nCleanAll = False\nBuildTimeout = 30*60\nBit = 'Win32'\nDlllib = 'dll'\nMSBuild = None\nIncrediBuild = None\nUseMSBuild = True #默认用MSBuild编译,如果为False则用IncrediBuild编译\n\n#不同项目只需修改下面5个变量\nSlnFile = '../BigNumberBase.sln' #相对于本py脚本路径的相对路径\nUpdateDir = [] #相对于本py脚本路径的相对路径,填空不更新\nExecBatList = [] #相对于本py脚本路径的相对路径,编译前调用的脚本,可填空,执行bat会先cd到bat目录再执行\nMSBuildFirstProjects = [r'BigNumberBase'] #使用MSBuild需要工程文件在解决方案sln中的路径\n # MSBuild首先编译的项目,填空不指定顺序\nIncrediBuildFirstProjects = ['BigNumberBase'] #使用IncrediBuild只需工程名字\n #IncrediBuild首先编译的项目,填空不指定顺序\n\nclass ConsoleColor():\n '''This class defines the values of color for printing on console window'''\n Black = 0\n DarkBlue = 1\n DarkGreen = 2\n DarkCyan = 3\n DarkRed = 4\n DarkMagenta = 5\n DarkYellow = 6\n Gray = 7\n DarkGray = 8\n Blue = 9\n Green = 10\n Cyan = 11\n Red = 12\n Magenta = 13\n Yellow = 14\n White = 15\n\nclass Coord(ctypes.Structure):\n _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]\n\nclass SmallRect(ctypes.Structure):\n _fields_ = [('Left', ctypes.c_short),\n ('Top', ctypes.c_short),\n ('Right', ctypes.c_short),\n ('Bottom', ctypes.c_short),\n ]\n\nclass ConsoleScreenBufferInfo(ctypes.Structure):\n _fields_ = [('dwSize', Coord),\n ('dwCursorPosition', Coord),\n ('wAttributes', ctypes.c_uint),\n ('srWindow', SmallRect),\n ('dwMaximumWindowSize', Coord),\n ]\n\nclass Win32API():\n '''Some native methods for python calling'''\n StdOutputHandle = -11\n ConsoleOutputHandle = None\n DefaultColor = None\n\n @staticmethod\n def SetConsoleColor(color):\n '''Change the text color on console window'''\n if not Win32API.DefaultColor:\n if not Win32API.ConsoleOutputHandle:\n Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)\n bufferInfo = ConsoleScreenBufferInfo()\n ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))\n Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)\n if IsPy3:\n sys.stdout.flush() # need flush stdout in python 3\n ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)\n\n @staticmethod\n def ResetConsoleColor():\n '''Reset the default text color on console 
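The record above is a charmap codec generated by gencodec.py from the Unicode mapping tables; the iso8859-9 (Latin-5, Turkish) codec it implements ships with CPython, so the table can be exercised directly with the standard library:

# Turkish text round-trips through the single-byte table shown above.
s = 'İstanbul, şeker, dağ'
b = s.encode('iso8859-9')
assert b[0] == 0xDD                 # 'İ' sits at 0xDD in the decoding table
assert b.decode('iso8859-9') == s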
window'''\n if IsPy3:\n sys.stdout.flush() # need flush stdout in python 3\n ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)\n\nclass Logger():\n LogFile = '@AutomationLog.txt'\n LineSep = '\\n'\n @staticmethod\n def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):\n '''\n consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\n if consoleColor == -1, use default color\n '''\n if printToStdout:\n isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)\n if isValidColor:\n Win32API.SetConsoleColor(consoleColor)\n try:\n sys.stdout.write(log)\n except UnicodeError as e:\n Win32API.SetConsoleColor(ConsoleColor.Red)\n isValidColor = True\n sys.stdout.write(str(type(e)) + ' can\\'t print the log!\\n')\n if isValidColor:\n Win32API.ResetConsoleColor()\n if not writeToFile:\n return\n if IsPy3:\n logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')\n else:\n logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')\n try:\n logFile.write(log)\n # logFile.flush() # need flush in python 3, otherwise log won't be saved\n except Exception as ex:\n logFile.close()\n sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))\n\n @staticmethod\n def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):\n '''\n consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\n if consoleColor == -1, use default color\n '''\n Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)\n\n @staticmethod\n def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):\n '''\n consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\n if consoleColor == -1, use default color\n '''\n t = time.localtime()\n log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,\n t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)\n Logger.Write(log, consoleColor, writeToFile, printToStdout)\n\n @staticmethod\n def DeleteLog():\n if os.path.exists(Logger.LogFile):\n os.remove(Logger.LogFile)\n\n\ndef GetMSBuildPath():\n if Bit == 'Win32':\n cmd = 'call \"%VS120COMNTOOLS%..\\\\..\\\\VC\\\\vcvarsall.bat\" x86\\nwhere msbuild'\n elif Bit == 'x64':\n cmd = 'call \"%VS120COMNTOOLS%..\\\\..\\\\VC\\\\vcvarsall.bat\" amd64\\nwhere msbuild'\n ftemp = open('GetMSBuildPath.bat', 'wt')\n ftemp.write(cmd)\n ftemp.close()\n p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)\n p.wait()\n lines = p.stdout.read().decode().splitlines()\n os.remove('GetMSBuildPath.bat')\n for line in lines:\n if 'MSBuild.exe' in line:\n return line\n\ndef GetIncrediBuildPath():\n try:\n key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\\Classes\\IncrediBuild.MonitorFile\\shell\\open\\command')\n value, typeId = winreg.QueryValueEx(key, '')\n if value:\n start = value.find('\"')\n end = value.find('\"', start + 1)\n path = value[start+1:end]\n buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')\n return buildConsole\n except FileNotFoundError as e:\n Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)\n\ndef UpdateCode():\n # put git to path first\n if not shutil.which('git.exe'):\n Logger.Log('找不到git.exe. 
请确认安装git时将git\bin目录路径加入到环境变量path中!!!\n, 跳过更新代码!!!', ConsoleColor.Yellow)\n return False\n oldDir = os.getcwd()\n for dir in UpdateDir:\n os.chdir(dir)\n ret = os.system('git pull')\n os.chdir(oldDir)\n if ret != 0:\n Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)\n return False\n return True\n\ndef BuildProject(cmd):\n for i in range(6):\n Logger.WriteLine(cmd, ConsoleColor.Cyan)\n buildFailed = True\n startTime = time.time()\n p = subprocess.Popen(cmd) #IncrediBuild不能使用stdout=subprocess.PIPE,否则会导致p.wait()不返回,可能是IncrediBuild的bug\n if IsPy3:\n try:\n buildFailed = p.wait(BuildTimeout)\n except subprocess.TimeoutExpired as e:\n Logger.Log('{0}'.format(e), ConsoleColor.Yellow)\n p.kill()\n else:\n buildFailed = p.wait()\n if not UseMSBuild:\n #IncrediBuild的返回值不能说明编译是否成功,需要提取输出判断\n fin = open('IncrediBuild.log')\n for line in fin:\n if line.startswith('=========='):\n Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)\n if IsPy3:\n start = line.find('失败') + 3 #========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 ==========\n else:#为了兼容py2做的特殊处理,很恶心\n start = 0\n n2 = 0\n while 1:\n if line[start].isdigit():\n n2 += 1\n if n2 == 2:\n break\n start = line.find(' ', start)\n start += 1\n end = line.find(' ', start)\n failCount = int(line[start:end])\n buildFailed = failCount > 0\n else:\n Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)\n fin.close()\n costTime = time.time() - startTime\n Logger.WriteLine('build cost time: {0:.1f}s\\n'.format(costTime), ConsoleColor.Green)\n if not buildFailed:\n return True\n return False\n\ndef BuildAllProjects():\n buildSuccess = False\n cmds = []\n if UseMSBuild:\n if IsRebuild:\n if CleanAll:\n cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))\n cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))\n else:\n cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))\n for project in MSBuildFirstProjects:\n cmds.append('{0} {1} /t:{2} /p:Configuration={3};platform={4} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType, Bit))\n cmds.append('{0} {1} /p:Configuration={2};platform={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType, Bit))\n else: #IncrediBuild\n if IsRebuild:\n if CleanAll:\n cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|{3}\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug', Bit))\n cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|{3}\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release', Bit))\n else:\n cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|{3}\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))\n for project in IncrediBuildFirstProjects:\n cmds.append('\"{0}\" {1} /build /prj={2} /cfg=\"{3}|{4}\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType, Bit))\n cmds.append('\"{0}\" {1} /build /cfg=\"{2}|{3}\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType, Bit))\n for cmd in cmds:\n buildSuccess = BuildProject(cmd)\n if not buildSuccess:\n break\n return buildSuccess\n\ndef main():\n if 
UseMSBuild:\n if not os.path.exists(MSBuild):\n Logger.Log('can not find msbuild.exe', ConsoleColor.Red)\n return 1\n else:\n if not os.path.exists(IncrediBuild):\n Logger.Log('can not find msbuild.exe', ConsoleColor.Red)\n return 1\n dir = os.path.dirname(__file__)\n if dir:\n oldDir = os.getcwd()\n os.chdir(dir)\n if Update:\n if not UpdateCode():\n return 1\n Logger.Log('git update succeed', ConsoleColor.Green)\n if Copy:\n for bat in ExecBatList:\n oldBatDir = os.getcwd()\n batDir = os.path.dirname(bat)\n batName = os.path.basename(bat)\n if batDir:\n os.chdir(batDir)\n start = time.clock()\n os.system(batName)\n Logger.Log('run \"{}\" cost {:.1f} seconds'.format(batName, time.clock() - start), ConsoleColor.Green)\n if batDir:\n os.chdir(oldBatDir)\n buildSuccess = BuildAllProjects()\n if buildSuccess:\n Logger.Log('build succeed', ConsoleColor.Green)\n else:\n Logger.Log('build failed', ConsoleColor.Red)\n if dir:\n os.chdir(oldDir)\n return 0 if buildSuccess else 1\n\nif __name__ == '__main__':\n Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)\n sys.argv = [x.lower() for x in sys.argv]\n start_time = time.time()\n if 'debug' in sys.argv:\n BuildType = 'Debug'\n if 'lib' in sys.argv:\n Dlllib = 'lib'\n SlnFile = '../BigNumberBase_lib.sln'\n MSBuildFirstProjects = [r'BigNumberBase_lib']\n IncrediBuildFirstProjects = ['BigNumberBase_lib']\n if '64' in sys.argv:\n Bit = 'x64'\n if 'build' in sys.argv:\n IsRebuild = False\n Build = 'Build'\n if 'update' in sys.argv:\n Update = True\n if 'copy' in sys.argv:\n Copy = True\n if 'clean' in sys.argv:\n CleanAll = True\n if 'incredibuild' in sys.argv:\n UseMSBuild = False\n if UseMSBuild:\n MSBuild = GetMSBuildPath()\n if not MSBuild:\n Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)\n exit(1)\n else:\n IncrediBuild = GetIncrediBuildPath()\n if not IncrediBuild:\n Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)\n exit(1)\n cwd = os.getcwd()\n Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))\n ret = main()\n end_time = time.time()\n cost_time = end_time-start_time\n Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)\n exit(ret)"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475265,"cells":{"repo_name":{"kind":"string","value":"ChanChiChoi/scikit-learn"},"path":{"kind":"string","value":"sklearn/cluster/tests/test_affinity_propagation.py"},"copies":{"kind":"string","value":"341"},"size":{"kind":"string","value":"2620"},"content":{"kind":"string","value":"\"\"\"\nTesting for Clustering methods\n\n\"\"\"\n\nimport numpy as np\n\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_raises\n\nfrom sklearn.cluster.affinity_propagation_ import AffinityPropagation\nfrom sklearn.cluster.affinity_propagation_ import affinity_propagation\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.metrics import euclidean_distances\n\nn_clusters = 3\ncenters = np.array([[1, 1], [-1, -1], [1, -1]]) + 10\nX, _ = make_blobs(n_samples=60, n_features=2, centers=centers,\n cluster_std=0.4, shuffle=True, random_state=0)\n\n\ndef test_affinity_propagation():\n # Affinity Propagation algorithm\n # Compute similarities\n S = -euclidean_distances(X, squared=True)\n preference = np.median(S) * 10\n # Compute Affinity Propagation\n cluster_centers_indices, labels = affinity_propagation(\n S, preference=preference)\n\n n_clusters_ = 
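The BuildProject routine in the rebuild script above wraps Popen with a timeout and then falls back to parsing IncrediBuild.log, because IncrediBuild's exit code does not reliably signal build failure. The timeout half of that pattern as a self-contained Python 3 sketch (run_with_timeout is an illustrative name, not from the script):

import subprocess

def run_with_timeout(cmd, timeout_s=30 * 60):
    # Popen + wait(timeout), killing the build on expiry, as BuildProject does.
    p = subprocess.Popen(cmd, shell=True)
    try:
        return p.wait(timeout=timeout_s) == 0   # True only on exit code 0
    except subprocess.TimeoutExpired:
        p.kill()
        p.wait()
        return False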
len(cluster_centers_indices)\n\n assert_equal(n_clusters, n_clusters_)\n\n af = AffinityPropagation(preference=preference, affinity=\"precomputed\")\n labels_precomputed = af.fit(S).labels_\n\n af = AffinityPropagation(preference=preference, verbose=True)\n labels = af.fit(X).labels_\n\n assert_array_equal(labels, labels_precomputed)\n\n cluster_centers_indices = af.cluster_centers_indices_\n\n n_clusters_ = len(cluster_centers_indices)\n assert_equal(np.unique(labels).size, n_clusters_)\n assert_equal(n_clusters, n_clusters_)\n\n # Test also with no copy\n _, labels_no_copy = affinity_propagation(S, preference=preference,\n copy=False)\n assert_array_equal(labels, labels_no_copy)\n\n # Test input validation\n assert_raises(ValueError, affinity_propagation, S[:, :-1])\n assert_raises(ValueError, affinity_propagation, S, damping=0)\n af = AffinityPropagation(affinity=\"unknown\")\n assert_raises(ValueError, af.fit, X)\n\n\ndef test_affinity_propagation_predict():\n # Test AffinityPropagation.predict\n af = AffinityPropagation(affinity=\"euclidean\")\n labels = af.fit_predict(X)\n labels2 = af.predict(X)\n assert_array_equal(labels, labels2)\n\n\ndef test_affinity_propagation_predict_error():\n # Test exception in AffinityPropagation.predict\n # Not fitted.\n af = AffinityPropagation(affinity=\"euclidean\")\n assert_raises(ValueError, af.predict, X)\n\n # Predict not supported when affinity=\"precomputed\".\n S = np.dot(X, X.T)\n af = AffinityPropagation(affinity=\"precomputed\")\n af.fit(S)\n assert_raises(ValueError, af.predict, X)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475266,"cells":{"repo_name":{"kind":"string","value":"hugs/django"},"path":{"kind":"string","value":"django/contrib/gis/geos/prototypes/__init__.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"1585"},"content":{"kind":"string","value":"\"\"\"\n This module contains all of the GEOS ctypes function prototypes. 
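The test above pins an old scikit-learn layout (sklearn.datasets.samples_generator was later folded into sklearn.datasets), but the estimator usage it checks is stable across versions. A hedged, self-contained version of the same fit/predict flow:

import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn.datasets import make_blobs

centers = np.array([[1, 1], [-1, -1], [1, -1]])
X, _ = make_blobs(n_samples=60, centers=centers, cluster_std=0.4, random_state=0)
af = AffinityPropagation(preference=-50).fit(X)
print(len(af.cluster_centers_indices_))   # should report 3 clusters on this data
labels = af.predict(X)                    # predict is supported for affinity='euclidean'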
Each\n prototype handles the interaction between the GEOS library and Python\n via ctypes.\n\"\"\"\n\n# Coordinate sequence routines.\nfrom django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \\\n cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \\\n cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims\n\n# Geometry routines.\nfrom django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \\\n create_point, create_linestring, create_linearring, create_polygon, create_collection, \\\n destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \\\n geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \\\n get_dims, get_num_coords, get_num_geoms, \\\n to_hex, to_wkb, to_wkt\n\n# Miscellaneous routines.\nfrom django.contrib.gis.geos.prototypes.misc import geos_area, geos_distance, geos_length\n\n# Predicates\nfrom django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \\\n geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \\\n geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \\\n geos_intersects, geos_overlaps, geos_relatepattern, geos_touches, geos_within\n\n# Topology routines\nfrom django.contrib.gis.geos.prototypes.topology import \\\n geos_boundary, geos_buffer, geos_centroid, geos_convexhull, geos_difference, \\\n geos_envelope, geos_intersection, geos_pointonsurface, geos_preservesimplify, \\\n geos_simplify, geos_symdifference, geos_union, geos_relate\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475267,"cells":{"repo_name":{"kind":"string","value":"Toshakins/wagtail"},"path":{"kind":"string","value":"wagtail/wagtailcore/tests/test_migrations.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"2212"},"content":{"kind":"string","value":"\"\"\"\nCheck that all changes to Wagtail models have had migrations created. 
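The GEOS prototype modules imported above all follow the same ctypes convention: load the shared library once, then pin argtypes and restype on each function so ctypes validates calls instead of guessing. The same pattern shown against libm, assuming a POSIX system (GEOS itself may not be installed):

import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library('m') or 'libm.so.6')
libm.pow.argtypes = (ctypes.c_double, ctypes.c_double)   # declare the C signature
libm.pow.restype = ctypes.c_double
assert libm.pow(2.0, 10.0) == 1024.0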
If there\nare outstanding model changes that need migrations, fail the tests.\n\"\"\"\n\nfrom __future__ import absolute_import, unicode_literals\n\nfrom django.apps import apps\nfrom django.db.migrations.autodetector import MigrationAutodetector\nfrom django.db.migrations.loader import MigrationLoader\nfrom django.db.migrations.questioner import MigrationQuestioner\nfrom django.db.migrations.state import ProjectState\nfrom django.test import TestCase\nfrom django.utils.six import iteritems\n\n\nclass TestForMigrations(TestCase):\n def test__migrations(self):\n app_labels = set(app.label for app in apps.get_app_configs()\n if app.name.startswith('wagtail.'))\n for app_label in app_labels:\n apps.get_app_config(app_label.split('.')[-1])\n loader = MigrationLoader(None, ignore_no_migrations=True)\n\n conflicts = dict(\n (app_label, conflict)\n for app_label, conflict in iteritems(loader.detect_conflicts())\n if app_label in app_labels\n )\n\n if conflicts:\n name_str = \"; \".join(\"%s in %s\" % (\", \".join(names), app)\n for app, names in conflicts.items())\n self.fail(\"Conflicting migrations detected (%s).\" % name_str)\n\n autodetector = MigrationAutodetector(\n loader.project_state(),\n ProjectState.from_apps(apps),\n MigrationQuestioner(specified_apps=app_labels, dry_run=True),\n )\n\n changes = autodetector.changes(\n graph=loader.graph,\n trim_to_apps=app_labels or None,\n convert_apps=app_labels or None,\n )\n\n if changes:\n migrations = '\\n'.join((\n ' {migration}\\n{changes}'.format(\n migration=migration,\n changes='\\n'.join(' {0}'.format(operation.describe())\n for operation in migration.operations))\n for (_, migrations) in changes.items()\n for migration in migrations))\n\n self.fail('Model changes with no migrations detected:\\n%s' % migrations)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475268,"cells":{"repo_name":{"kind":"string","value":"shubhdev/edx-platform"},"path":{"kind":"string","value":"common/djangoapps/embargo/admin.py"},"copies":{"kind":"string","value":"154"},"size":{"kind":"string","value":"1315"},"content":{"kind":"string","value":"\"\"\"\nDjango admin page for embargo models\n\"\"\"\nfrom django.contrib import admin\nimport textwrap\n\nfrom config_models.admin import ConfigurationModelAdmin\nfrom embargo.models import IPFilter, CountryAccessRule, RestrictedCourse\nfrom embargo.forms import IPFilterForm, RestrictedCourseForm\n\n\nclass IPFilterAdmin(ConfigurationModelAdmin):\n \"\"\"Admin for blacklisting/whitelisting specific IP addresses\"\"\"\n form = IPFilterForm\n fieldsets = (\n (None, {\n 'fields': ('enabled', 'whitelist', 'blacklist'),\n 'description': textwrap.dedent(\"\"\"Enter specific IP addresses to explicitly\n whitelist (not block) or blacklist (block) in the appropriate box below.\n Separate IP addresses with a comma. Do not surround with quotes.\n \"\"\")\n }),\n )\n\n\nclass CountryAccessRuleInline(admin.StackedInline):\n \"\"\"Inline editor for country access rules. \"\"\"\n model = CountryAccessRule\n extra = 1\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n\nclass RestrictedCourseAdmin(admin.ModelAdmin):\n \"\"\"Admin for configuring course restrictions. 
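The wagtail test above drives MigrationAutodetector by hand to fail when model changes lack migrations. On Django 1.10 and later the same guard can lean on the management command instead (a sketch, not the wagtail code; call_command raises SystemExit when the check fails):

from django.core.management import call_command
from django.test import TestCase

class MissingMigrationsTest(TestCase):
    def test_no_pending_migrations(self):
        # --check --dry-run exits non-zero if any app needs new migrations.
        call_command('makemigrations', '--check', '--dry-run', verbosity=0)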
\"\"\"\n inlines = [CountryAccessRuleInline]\n form = RestrictedCourseForm\n\n\nadmin.site.register(IPFilter, IPFilterAdmin)\nadmin.site.register(RestrictedCourse, RestrictedCourseAdmin)\n"},"license":{"kind":"string","value":"agpl-3.0"}}},{"rowIdx":475269,"cells":{"repo_name":{"kind":"string","value":"precedenceguo/mxnet"},"path":{"kind":"string","value":"tests/python/unittest/test_module.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"36954"},"content":{"kind":"string","value":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport mxnet as mx\nimport mxnet.ndarray as nd\nfrom mxnet.test_utils import *\nimport numpy as np\nfrom functools import reduce\nfrom mxnet.module.executor_group import DataParallelExecutorGroup\nfrom common import setup_module, with_seed, assertRaises, teardown\nfrom collections import namedtuple\n\n\n@with_seed()\ndef test_module_dtype():\n dtype = np.float16\n dshape = (3, 8, 7)\n\n sym = mx.sym.Variable('data')\n sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')\n\n mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])\n mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])\n mod.init_params()\n mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],\n label=None))\n mod.backward([mx.nd.ones(dshape, dtype=dtype)])\n\n for x in mod.get_outputs():\n assert x.dtype == dtype\n\n\n@with_seed()\ndef test_module_input_grads():\n a = mx.sym.Variable('a', __layout__='NC')\n b = mx.sym.Variable('b', __layout__='NC')\n c = mx.sym.Variable('c', __layout__='NC')\n\n c = a + 2 * b + 3 * c\n net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,\n context=[mx.cpu(0), mx.cpu(1)])\n net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],\n label_shapes=None, inputs_need_grad=True)\n net.init_params()\n\n net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),\n nd.ones((5, 5)),\n nd.ones((5, 5))]))\n net.backward(out_grads=[nd.ones((5, 5))])\n input_grads = net.get_input_grads()\n b_grad = input_grads[0].asnumpy()\n c_grad = input_grads[1].asnumpy()\n a_grad = input_grads[2].asnumpy()\n assert np.all(a_grad == 1), a_grad\n assert np.all(b_grad == 2), b_grad\n assert np.all(c_grad == 3), c_grad\n\n\n@with_seed()\ndef test_module_ctx_group():\n def check_module_ctx_group(ctxs, group2ctxs, grad_ctxs=None):\n with mx.AttrScope(ctx_group='dev1'):\n a = mx.symbol.Variable('a')\n a = a * 2\n with mx.AttrScope(ctx_group='dev2'):\n b = mx.symbol.Variable('b')\n c = a + b\n shape = (2, 5)\n mod1 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None,\n group2ctxs=group2ctxs)\n mod1.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)\n mod1.init_params()\n 
        mod1.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
        mod1.backward([mx.nd.ones(shape)])
        mod1_input_grads = mod1.get_input_grads()

        mod2 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None)
        mod2.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
        mod2.init_params()
        mod2.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
        mod2.backward([mx.nd.ones(shape)])
        mod2_input_grads = mod2.get_input_grads()

        if grad_ctxs is not None:
            assert(mod1_input_grads[0].context == grad_ctxs[0])
            assert(mod1_input_grads[1].context == grad_ctxs[1])
        assert(np.all(mod1_input_grads[0].asnumpy() == mod2_input_grads[0].asnumpy()))
        assert(np.all(mod1_input_grads[1].asnumpy() == mod2_input_grads[1].asnumpy()))

    check_module_ctx_group([mx.cpu(0)], {'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}, grad_ctxs=[mx.cpu(1), mx.cpu(2)])
    check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
                           [{'dev1': mx.cpu(2), 'dev2': mx.cpu(3)}, {'dev1': mx.cpu(4), 'dev2': mx.cpu(5)}])
    check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': mx.cpu(3)})
    check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': [mx.cpu(3)]})
    check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': [mx.cpu(3), mx.cpu(3)]})
    check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
                           {'dev1': [mx.cpu(2), mx.cpu(2)], 'dev2': [mx.cpu(3), mx.cpu(3)]})

@with_seed()
def test_bucket_module_ctx_group():
    num_hidden = 10
    batch_size = 5
    def sym_gen(seq_len):
        with mx.AttrScope(ctx_group='dev1'):
            data = mx.symbol.Variable('data')
            weight = mx.symbol.Variable('dev1_weight')
            bias = mx.symbol.Variable('dev1_bias')
            fc = data
            for i in range(seq_len):
                fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
                                              name='dev1_fc_%d' % i, num_hidden=num_hidden)
        with mx.AttrScope(ctx_group='dev2'):
            label = mx.symbol.Variable('label')
            weight = mx.symbol.Variable('dev2_weight')
            bias = mx.symbol.Variable('dev2_bias')
            for i in range(seq_len):
                fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
                                              name='dev2_fc_%d' % i, num_hidden=num_hidden)
            sym = mx.symbol.SoftmaxOutput(fc, label, name='softmax')

        return sym, ('data',), ('label',)

    mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10, context=[mx.cpu(0)],
                                 group2ctxs=[{'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}])
    mod.bind(data_shapes=[['data', (batch_size, num_hidden)]],
             label_shapes=[['label', (batch_size,)]],
             for_training=True, inputs_need_grad=True)
    assert(mod.binded)

@with_seed()
def test_module_layout():
    sym = mx.sym.Variable('data')
    sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')

    dshape = (3, 8, 7)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
    mod.init_params()
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    assert mod.get_outputs()[0].shape == dshape

    hdshape = (3, 4, 7)
    for x in mod.get_outputs(merge_multi_context=False)[0]:
        assert x.shape == hdshape


@with_seed()
def test_save_load():
    def dict_equ(a, b):
        assert set(a) == set(b)
        for k in a:
            assert (a[k].asnumpy() == b[k].asnumpy()).all()

    sym = mx.sym.Variable('data')
    sym = mx.sym.FullyConnected(sym, num_hidden=100)

    # single device
    mod = mx.mod.Module(sym, ('data',))
    mod.bind(data_shapes=[('data', (10, 10))])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 0.1, 'momentum': 0.9})
    mod.update()
    mod.save_checkpoint('test', 0, save_optimizer_states=True)

    mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
    mod2.bind(data_shapes=[('data', (10, 10))])
    mod2.init_optimizer(optimizer_params={'learning_rate': 0.1, 'momentum': 0.9})
    assert mod._symbol.tojson() == mod2._symbol.tojson()
    dict_equ(mod.get_params()[0], mod2.get_params()[0])
    dict_equ(mod._updater.states, mod2._updater.states)

    # multi device
    mod = mx.mod.Module(sym, ('data',), context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[('data', (10, 10))])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 0.1, 'momentum': 0.9})
    mod.update()
    mod.save_checkpoint('test', 0, save_optimizer_states=True)

    mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
    mod2.bind(data_shapes=[('data', (10, 10))])
    mod2.init_optimizer(optimizer_params={'learning_rate': 0.1, 'momentum': 0.9})
    assert mod._symbol.tojson() == mod2._symbol.tojson()
    dict_equ(mod.get_params()[0], mod2.get_params()[0])
    dict_equ(mod._kvstore._updater.states, mod2._updater.states)


@with_seed()
def test_module_reshape():
    data = mx.sym.Variable('data')
    sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')

    dshape = (7, 20)
    mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
    mod.bind(data_shapes=[('data', dshape)])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 1})

    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()

    dshape = (14, 20)
    mod.reshape(data_shapes=[('data', dshape)])
    mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
                                label=None))
    mod.backward([mx.nd.ones(dshape)])
    mod.update()
    assert mod.get_outputs()[0].shape == dshape
    assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all()


@with_seed()
def test_module_states():
    stack = mx.rnn.SequentialRNNCell()
    for i in range(2):
        stack.add(mx.rnn.LSTMCell(num_hidden=20, prefix='lstm_l%d_' % i))
    begin_state = stack.begin_state(func=mx.sym.Variable)
    _, states = stack.unroll(10, begin_state=begin_state, inputs=mx.sym.Variable('data'))

    state_names = [i.name for i in begin_state]
    mod = mx.mod.Module(mx.sym.Group(states), context=[mx.cpu(0), mx.cpu(1)],
                        label_names=None, state_names=state_names)
    mod.bind(data_shapes=[('data', (5, 10))], label_shapes=None, for_training=False)
    mod.init_params()
    batch = mx.io.DataBatch(data=[mx.nd.zeros((5, 10))], label=[])

    mod.set_states(value=1)
    mod.forward(batch)
    out = mod.get_outputs(merge_multi_context=False)
    out1 = mod.get_outputs(merge_multi_context=True)

    mod.set_states(states=out)
    mod.forward(batch)
    out2 = mod.get_outputs(merge_multi_context=True)

    for x1, x2 in zip(out1, out2):
        assert not mx.test_utils.almost_equal(x1.asnumpy(), x2.asnumpy(), rtol=1e-3)


@with_seed()
def test_module_switch_bucket():
    vocab_dim = 5000
    num_hidden = 100
    num_embedding = 100
    num_layer = 2
    default_key = 10
    test_key = 5
    batch_size = 32
    contexts = [mx.cpu(0)]
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)

    # generate symbols for an LSTM network
    def sym_gen(seq_len):
        data = mx.sym.Variable('data')
        label = mx.sym.Variable('softmax_label')
        embed = mx.sym.Embedding(data=data, input_dim=vocab_dim,
                                 output_dim=num_embedding)
        stack = mx.rnn.SequentialRNNCell()
        for i in range(num_layer):
            stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
        outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)

        pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
        pred = mx.sym.FullyConnected(data=pred, num_hidden=vocab_dim, name='pred')

        label = mx.sym.Reshape(label, shape=(-1,))
        pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')

        return pred, ('data',), ('softmax_label',)

    def create_bucketing_module(key):
        model = mx.mod.BucketingModule(
            sym_gen=sym_gen,
            default_bucket_key=key,
            context=contexts)
        model.bind([('data', (batch_size, key))],
                   [('softmax_label', (batch_size, key))], True, False)
        model.init_params(initializer=initializer)
        return model
    # initialize the bucketing module with the default bucket key
    bucketing_model = create_bucketing_module(default_key)
    # check name
    assert bucketing_model.symbol.list_arguments()[1] == "embedding0_weight",\
        "Error in assigning names for args in BucketingModule"

    # switch to test_key
    bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
                                  [('softmax_label', (batch_size, test_key))])
    total_bytes_before = bucketing_model._buckets[default_key]._total_exec_bytes

    # remove test_key and switch again
    del bucketing_model._buckets[test_key]
    bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
                                  [('softmax_label', (batch_size, test_key))])
    total_bytes_after = bucketing_model._buckets[default_key]._total_exec_bytes
    # the default bucket is expected to reuse the bytes allocated
    assert total_bytes_after == total_bytes_before


@with_seed(11)
def test_module_set_params():
    # data iter
    data = mx.nd.array([[0.05, .10]])
    label = mx.nd.array([[.01, 0.99]])
    train_data = mx.io.NDArrayIter(data, label, batch_size=1)

    # symbols
    x = mx.symbol.Variable('data')
    x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
    x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
    x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)

    # create module
    mod = mx.mod.Module(x, context=[mx.cpu()])
    mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
             for_training=True)

    arg_params_correct = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
                          'fc_0_bias'  : mx.nd.array([.35, .35]),
                          'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
                          'fc_1_bias'  : mx.nd.array([.60, .60])}

    arg_params_missing = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
                          'fc_0_bias'  : mx.nd.array([.35, .35]),
                          'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]])}

    arg_params_extra = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
                        'fc_0_bias'  : mx.nd.array([.35, .35]),
                        'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
                        'fc_1_bias'  : mx.nd.array([.60, .60]),
                        'fc_2_weight': mx.nd.array([.60, .60])}

    arg_params_missing_extra = {'fc_2_weight': mx.nd.array([.60, .60])}

    # test regular set_params
    mod.set_params(force_init=True, arg_params=arg_params_correct, aux_params={})

    # test allow missing
    mod.set_params(force_init=True, arg_params=arg_params_missing,
                   aux_params={}, allow_missing=True)
    assertRaises(RuntimeError, mod.set_params,
                 force_init=True, arg_params=arg_params_missing,
                 aux_params={}, allow_missing=False)

    # test allow extra
    mod.set_params(force_init=True, arg_params=arg_params_extra, aux_params={}, allow_missing=True, allow_extra=True)
    assertRaises(ValueError, mod.set_params,
                 force_init=True, arg_params=arg_params_extra,
                 aux_params={}, allow_missing=True, allow_extra=False)

    # test both missing and extra params, neither allowed: raises RuntimeError
    assertRaises(RuntimeError, mod.set_params,
                 force_init=True, arg_params=arg_params_missing_extra,
                 aux_params={}, allow_missing=False, allow_extra=False)

    # test both missing and extra params, extra not allowed: raises ValueError
    assertRaises(ValueError, mod.set_params,
                 force_init=True, arg_params=arg_params_missing_extra,
                 aux_params={}, allow_missing=True, allow_extra=False)


@with_seed(11)
def test_monitor():
    # data iter
    data = mx.nd.array([[0.05, .10]])
    label = mx.nd.array([[.01, 0.99]])
    train_data = mx.io.NDArrayIter(data, label, batch_size=1)

    # symbols
    x = mx.symbol.Variable('data')
    x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
    x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
    x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
    x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)

    # create monitor
    def mean_abs(x):
        sum_abs = mx.ndarray.sum(mx.ndarray.abs(x))
        return mx.ndarray.divide(sum_abs, reduce(lambda x, y: x * y, x.shape))
    mon = mx.mon.Monitor(1, stat_func=mean_abs, pattern='.*', sort=True)

    # create module
    mod = mx.mod.Module(x, context=[mx.cpu()])
    mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
             for_training=True)
    mod.install_monitor(mon)
    arg_params = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
                  'fc_0_bias'  : mx.nd.array([.35, .35]),
                  'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
                  'fc_1_bias'  : mx.nd.array([.60, .60])}
    mod.init_params(arg_params=arg_params)

    data_iter = iter(train_data)
    data_batch = next(data_iter)
    mon.tic()
    mod.forward_backward(data_batch)
    res = mon.toc()
    keys = ['act_0', 'act_1', 'data', 'fc_0', 'fc_1', 'softmax']
    mon_result_counts = [0, 0, 0, 0, 0, 0]
    assert(len(res) == 21)
    for n, k, v in res:
        for idx, key in enumerate(keys):
            if k.startswith(key):
                mon_result_counts[idx] += 1
                break
    assert(mon_result_counts == [2, 2, 1, 6, 6, 4])

@with_seed()
def test_executor_group():
    def get_rnn_sym(num_layers, num_words, num_hidden, num_embed, seq_len, sparse_embedding):
        stack = mx.rnn.SequentialRNNCell()
        for i in range(num_layers):
            stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
        data = mx.sym.Variable('data')
        label = mx.sym.Variable('softmax_label')
        if sparse_embedding:
            embed_weight = mx.sym.Variable('embed_weight', stype='row_sparse')
            embed = mx.sym.contrib.SparseEmbedding(data=data, input_dim=num_words,
                                                   weight=embed_weight, output_dim=num_embed,
                                                   name='embed')
        else:
            embed = mx.sym.Embedding(data=data, input_dim=num_words,
                                     output_dim=num_embed, name='embed')

        stack.reset()
        outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)

        pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
        pred = mx.sym.FullyConnected(data=pred, num_hidden=num_words, name='pred')

        label = mx.sym.Reshape(label, shape=(-1,))
        pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
        return pred

    def test_shared_exec_group(exec_grp_shared, exec_grp_created, shared_arg_names=None,
                               extra_args=None, check_shared_grad=True):
        # Test shared data arrays
        for i in range(len(exec_grp_shared.execs)):
            # test same shared_data_arrays for two exec groups
            shared_data_array1 = exec_grp_shared.shared_data_arrays[i]
            shared_data_array2 = exec_grp_created.shared_data_arrays[i]
            if extra_args is not None:
                assert len(shared_data_array1) == len(extra_args),\
                    "exec_grp_shared.shared_data_arrays[%d] should have same number of args as extra_args" % i
            assert len(shared_data_array1) == len(shared_data_array2),\
                "length of shared_data_array of the shared executor group not equal to the created executor group"
            for k, v in shared_data_array1.items():
                if extra_args is not None:
                    assert k in extra_args, "arg %s is not in extra_args" % k
                assert k in shared_data_array2,\
                    "arg %s of the shared executor group not in the shared_data_array of the created executor group" % k
                assert mx.test_utils.same_array(v, shared_data_array2[k])

            for data_name, array in exec_grp_shared.shared_data_arrays[i].items():
                assert data_name in exec_grp_created.shared_data_arrays[i], \
                    "Shared input data '%s' is not in " \
                    "shared_data_arrays of created executor group." % (data_name)
                assert mx.test_utils.same_array(array, exec_grp_created.shared_data_arrays[i][data_name]), \
                    "Shared input data '%s' does not share memory." % (data_name)

            # Test shared argument arrays and gradient arrays
            exec_shared = exec_grp_shared.execs[i]
            exec_created = exec_grp_created.execs[i]
            if shared_arg_names is not None:
                # test shared arguments
                for arg_name in shared_arg_names:
                    assert arg_name in exec_created.arg_dict, \
                        "Shared argument '%s' is not in arg_dict of created executor group." % (arg_name)
                    assert mx.test_utils.same_array(exec_shared.arg_dict[arg_name], exec_created.arg_dict[arg_name]), \
                        "Shared argument '%s' does not share memory." % (arg_name)
                # test shared argument gradients
                if check_shared_grad:
                    for arg_name in shared_arg_names:
                        assert arg_name in exec_created.grad_dict, \
                            "Shared argument gradient '%s' is not in " \
                            "grad_dict of created executor group." % (arg_name)
                        assert mx.test_utils.same_array(exec_shared.grad_dict[arg_name], \
                                                        exec_created.grad_dict[arg_name]), \
                            "Shared argument gradient '%s' does not share memory." % (arg_name)

            for arg_name, grad in exec_grp_shared.grad_req.items():
                assert grad == exec_grp_created.grad_req[arg_name], \
                    "Gradient requirements for shared argument '%s' are inconsistent. " \
                    "Shared executor group requires '%s' while created executor group requires '%s'" \
                    % (arg_name, grad, exec_grp_created.grad_req[arg_name])

    def check_shared_exec_group(sparse_embedding):
        # generate an rnn sym with #layers=3
        sym = get_rnn_sym(num_layers=3, num_words=num_words, num_hidden=num_hidden,
                          num_embed=num_embed, seq_len=max_bucket_size,
                          sparse_embedding=sparse_embedding)
        arg_names1 = sym.list_arguments()
        input_names = [name[0] for name in data_shapes] + [name[0] for name in label_shapes]
        shared_arg_names = [name for name in arg_names1 if name not in input_names]
        exec_group1 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
                                                workload=workload, data_shapes=data_shapes,
                                                label_shapes=label_shapes, param_names=shared_arg_names,
                                                for_training=True, inputs_need_grad=False)

        # shared_data_arrays should only have input "data" and "softmax_label" arrays
        for i in range(len(contexts)):
            assert len(exec_group1.shared_data_arrays[i]) == len(input_names),\
                "exec_group1.shared_data_arrays[%d] should have the same number of names as in input_names" % i
            for name in input_names:
                assert name in exec_group1.shared_data_arrays[i],\
                    "arg %s should be in exec_group1.shared_data_arrays[%d]" % (name, i)

        # generate an rnn sym with #layers=5
        sym = get_rnn_sym(num_layers=5, num_words=num_words, num_hidden=num_hidden,
                          num_embed=num_embed, seq_len=max_bucket_size,
                          sparse_embedding=sparse_embedding)
        arg_names2 = sym.list_arguments()
        exec_group2 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
                                                workload=workload, data_shapes=data_shapes,
                                                label_shapes=label_shapes, param_names=shared_arg_names,
                                                for_training=True, inputs_need_grad=False,
                                                shared_group=exec_group1)
        extra_args = [name for name in arg_names2 if name not in shared_arg_names]
        check_shared_grad = not sparse_embedding
        test_shared_exec_group(exec_grp_shared=exec_group1, exec_grp_created=exec_group2,
                               shared_arg_names=shared_arg_names, extra_args=extra_args,
                               check_shared_grad=check_shared_grad)

    contexts = [mx.cpu(0), mx.cpu(1)]
    workload = [1] * len(contexts)
    batch_size = 32
    max_bucket_size = 80
    num_words = 1000
    num_hidden = 100
    num_embed = 200
    data_shapes = [('data', (batch_size, max_bucket_size))]
    label_shapes = [('softmax_label', (batch_size, max_bucket_size))]
    sparse_embedding_opt = [True, False]
    for opt in sparse_embedding_opt:
        check_shared_exec_group(opt)

@with_seed(11)
def test_factorization_machine_module(verbose=False):
    """ Test factorization machine model with sparse operators """
    def check_factorization_machine_module(optimizer=None, num_epochs=None):
        print("check_factorization_machine_module( {} )".format(optimizer))

        def fm(factor_size, feature_dim, init):
            x = mx.symbol.Variable("data", stype='csr')
            v = mx.symbol.Variable("v", shape=(feature_dim, factor_size),
                                   init=init, stype='row_sparse')

            w1_weight = mx.symbol.var('w1_weight', shape=(feature_dim, 1),
                                      init=init, stype='row_sparse')
            w1_bias = mx.symbol.var('w1_bias', shape=(1))
            w1 = mx.symbol.broadcast_add(mx.symbol.dot(x, w1_weight), w1_bias)

            v_s = mx.symbol._internal._square_sum(data=v, axis=1, keepdims=True)
            x_s = mx.symbol.square(data=x)
            bd_sum = mx.sym.dot(x_s, v_s)

            w2 = mx.symbol.dot(x, v)
            w2_squared = 0.5 * mx.symbol.square(data=w2)

            w_all = mx.symbol.Concat(w1, w2_squared, dim=1)
            sum1 = mx.symbol.sum(data=w_all, axis=1, keepdims=True)
            sum2 = 0.5 * mx.symbol.negative(bd_sum)
            model = mx.sym.elemwise_add(sum1, sum2)

            y = mx.symbol.Variable("label")
            model = mx.symbol.LinearRegressionOutput(data=model, label=y)
            return model

        # model
        init = mx.initializer.Normal(sigma=0.01)
        factor_size = 4
        feature_dim = 10000
        model = fm(factor_size, feature_dim, init)

        # data iter
        num_batches = 5
        batch_size = 64
        num_samples = batch_size * num_batches
        # generate some random csr data
        csr_nd = rand_ndarray((num_samples, feature_dim), 'csr', 0.1)
        label = mx.nd.ones((num_samples, 1))
        # the alternative is to use LibSVMIter
        train_iter = mx.io.NDArrayIter(data=csr_nd,
                                       label={'label': label},
                                       batch_size=batch_size,
                                       last_batch_handle='discard')
        # create module
        mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
        # allocate memory given the input data and label shapes
        mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
        # initialize parameters by uniform random numbers
        mod.init_params(initializer=init)
        if optimizer == 'sgd':
            # use sparse SGD with learning rate 0.01 to train
            sgd = mx.optimizer.SGD(momentum=0.1, clip_gradient=5.0, learning_rate=0.01,
                                   rescale_grad=1.0/batch_size)
            mod.init_optimizer(optimizer=sgd)
            if num_epochs is None:
                num_epochs = 10
                expected_accuracy = 0.02
        elif optimizer == 'adam':
            # use sparse Adam to train
            adam = mx.optimizer.Adam(clip_gradient=5.0, learning_rate=0.0005,
                                     rescale_grad=1.0/batch_size)
            mod.init_optimizer(optimizer=adam)
            if num_epochs is None:
                num_epochs = 10
                expected_accuracy = 0.05
        elif optimizer == 'adagrad':
            # use sparse AdaGrad with learning rate 0.01 to train
            adagrad = mx.optimizer.AdaGrad(clip_gradient=5.0, learning_rate=0.01,
                                           rescale_grad=1.0/batch_size)
            mod.init_optimizer(optimizer=adagrad)
            if num_epochs is None:
                num_epochs = 20
                expected_accuracy = 0.09
        else:
            raise AssertionError("Unsupported optimizer type '" + optimizer + "' specified")
        # use MSE as the metric
        metric = mx.metric.create('MSE')
        # train 'num_epochs' epochs
        for epoch in range(num_epochs):
            train_iter.reset()
            metric.reset()
            for batch in train_iter:
                mod.forward(batch, is_train=True)       # compute predictions
                mod.update_metric(metric, batch.label)  # accumulate prediction error
                mod.backward()                          # compute gradients
                mod.update()                            # update parameters
            print('Epoch %d, Training %s' % (epoch, metric.get()))
        if num_epochs > 1:
            assert(metric.get()[1] < expected_accuracy)

    if verbose is True:
        print("============ SGD ==========================")
    start = time.clock()
    check_factorization_machine_module('sgd')
    if verbose is True:
        print("Duration: {}".format(time.clock() - start))
        print("============ ADAM ==========================")
    start = time.clock()
    check_factorization_machine_module('adam')
    if verbose is True:
        print("Duration: {}".format(time.clock() - start))
        print("============ ADAGRAD ==========================")
    start = time.clock()
    check_factorization_machine_module('adagrad')
    if verbose is True:
        print("Duration: {}".format(time.clock() - start))


@with_seed()
def test_module_initializer():
    def regression_model(m):
        x = mx.symbol.var("data", stype='csr')
        v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1),
                          stype='row_sparse')
        model = mx.symbol.dot(lhs=x, rhs=v)
        y = mx.symbol.Variable("label")
        model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out")
        return model

    n, m = 128, 100
    model = regression_model(m)

    data = mx.nd.zeros(shape=(n, m), stype='csr')
    label = mx.nd.zeros((n, 1))
    iterator = mx.io.NDArrayIter(data=data, label={'label': label},
                                 batch_size=n, last_batch_handle='discard')

    # create module
    mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
    mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
    mod.init_params()
    v = mod._arg_params['v']
    assert(v.stype == 'row_sparse')
    assert(np.sum(v.asnumpy()) != 0)

@with_seed()
def test_forward_reshape():
    num_class = 10
    data1 = mx.sym.Variable('data1')
    data2 = mx.sym.Variable('data2')
    conv1 = mx.sym.Convolution(data=data1, kernel=(2, 2), num_filter=2, stride=(2, 2))
    conv2 = mx.sym.Convolution(data=data2, kernel=(3, 3), num_filter=3, stride=(1, 1))
    pooling1 = mx.sym.Pooling(data=conv1, kernel=(2, 2), stride=(1, 1), pool_type="avg")
    pooling2 = mx.sym.Pooling(data=conv2, kernel=(2, 2), stride=(1, 1), pool_type="max")
    flatten1 = mx.sym.flatten(data=pooling1)
    flatten2 = mx.sym.flatten(data=pooling2)
    sum = mx.sym.sum(data=flatten1, axis=1) + mx.sym.sum(data=flatten2, axis=1)
    fc = mx.sym.FullyConnected(data=sum, num_hidden=num_class)
    sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')

    dshape1 = (10, 3, 64, 64)
    dshape2 = (10, 3, 32, 32)
    lshape = (10,)

    mod = mx.mod.Module(symbol=sym, data_names=['data1', 'data2'],
                        label_names=['softmax_label'])
    mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
             label_shapes=[('softmax_label', lshape)])
    mod.init_params()
    mod.init_optimizer(optimizer_params={'learning_rate': 0.01})

    # Train with original data shapes
    data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
                                       mx.nd.random.uniform(5, 15, dshape2)],
                                 label=[mx.nd.ones(lshape)])
    mod.forward(data_batch)
    assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
    mod.backward()
    mod.update()

    # Train with different batch size
    dshape1 = (3, 3, 64, 64)
    dshape2 = (3, 3, 32, 32)
    lshape = (3,)
    data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
                                       mx.nd.random.uniform(5, 15, dshape2)],
                                 label=[mx.nd.ones(lshape)])
    mod.forward(data_batch)
    assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
    mod.backward()
    mod.update()

    dshape1 = (20, 3, 64, 64)
    dshape2 = (20, 3, 32, 32)
    lshape = (20,)
    data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(3, 5, dshape1),
                                       mx.nd.random.uniform(10, 25, dshape2)],
                                 label=[mx.nd.ones(lshape)])
    mod.forward(data_batch)
    assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
    mod.backward()
    mod.update()

    # Train with both different batch size and data shapes
    dshape1 = (20, 3, 120, 120)
    dshape2 = (20, 3, 32, 64)
    lshape = (20,)
    data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
                                       mx.nd.random.uniform(5, 15, dshape2)],
                                 label=[mx.nd.ones(lshape)])
    mod.forward(data_batch)
    assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
    mod.backward()
    mod.update()

    dshape1 = (5, 3, 28, 40)
    dshape2 = (5, 3, 24, 16)
    lshape = (5,)
    data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
                                       mx.nd.random.uniform(15, 25, dshape2)],
                                 label=[mx.nd.ones(lshape)])
    mod.forward(data_batch)
    assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
    mod.backward()
    mod.update()

    # Test score
    dataset_shape1 = (30, 3, 30, 30)
    dataset_shape2 = (30, 3, 20, 40)
    labelset_shape = (30,)

    eval_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
                                            mx.nd.random.uniform(15, 25, dataset_shape2)],
                                      label=[mx.nd.ones(labelset_shape)],
                                      batch_size=5)
    assert len(mod.score(eval_data=eval_dataiter, eval_metric='acc')) == 1

    # Test prediction
    dshape1 = (1, 3, 30, 30)
    dshape2 = (1, 3, 20, 40)
    dataset_shape1 = (10, 3, 30, 30)
    dataset_shape2 = (10, 3, 20, 40)

    pred_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
                                            mx.nd.random.uniform(15, 25, dataset_shape2)])
    mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
             for_training=False, force_rebind=True)
    assert mod.predict(pred_dataiter).shape == tuple([10, num_class])

    # Test forward with other data batch API
    Batch = namedtuple('Batch', ['data'])
    data = mx.sym.Variable('data')
    out = data * 2
    mod = mx.mod.Module(symbol=out, label_names=None)
    mod.bind(data_shapes=[('data', (1, 10))])
    mod.init_params()
    data1 = [mx.nd.ones((1, 10))]
    mod.forward(Batch(data1))
    assert mod.get_outputs()[0].shape == (1, 10)
    data2 = [mx.nd.ones((3, 5))]
    mod.forward(Batch(data2))
    assert mod.get_outputs()[0].shape == (3, 5)


if __name__ == '__main__':
    import nose
    nose.runmodule()

# ---- end of row (license: apache-2.0) ----
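# The tests above all drive the same Module life cycle: bind, init_params,
# init_optimizer, forward, backward, update. A minimal self-contained sketch
# of that workflow follows; the layer size and batch shape are made up for
# illustration and do not come from any single test.

import mxnet as mx

sym = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=4)
mod = mx.mod.Module(sym, data_names=('data',), label_names=None, context=[mx.cpu()])
mod.bind(data_shapes=[('data', (2, 8))])            # allocate executors
mod.init_params()                                   # randomly initialize weights
mod.init_optimizer(optimizer_params={'learning_rate': 0.1})
mod.forward(mx.io.DataBatch(data=[mx.nd.ones((2, 8))]), is_train=True)
mod.backward([mx.nd.ones((2, 4))])                  # head gradient, as in the tests above
mod.update()                                        # apply one optimizer step
print(mod.get_outputs()[0].shape)                   # -> (2, 4)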
# ======================================================================
#  repo: bgxavier/neutron
#  path: neutron/plugins/brocade/db/models.py
#  copies: 63 | size: 4551 | license: apache-2.0
# ======================================================================

# Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Brocade specific database schema/model."""

import sqlalchemy as sa

from neutron.db import model_base
from neutron.db import models_v2


class BrocadeNetwork(model_base.BASEV2, models_v2.HasId):
    """Schema for brocade network."""

    vlan = sa.Column(sa.String(10))


class BrocadePort(model_base.BASEV2):
    """Schema for brocade port."""

    port_id = sa.Column(sa.String(36), primary_key=True, default="",
                        server_default='')
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("brocadenetworks.id"),
                           nullable=False)
    admin_state_up = sa.Column(sa.Boolean, nullable=False)
    physical_interface = sa.Column(sa.String(36))
    vlan_id = sa.Column(sa.String(36))
    tenant_id = sa.Column(sa.String(36))


def create_network(context, net_id, vlan):
    """Create a brocade specific network/port-profiles."""

    session = context.session
    with session.begin(subtransactions=True):
        net = BrocadeNetwork(id=net_id, vlan=vlan)
        session.add(net)

    return net


def delete_network(context, net_id):
    """Delete a brocade specific network/port-profiles."""

    session = context.session
    with session.begin(subtransactions=True):
        net = (session.query(BrocadeNetwork).filter_by(id=net_id).first())
        if net is not None:
            session.delete(net)


def get_network(context, net_id, fields=None):
    """Get brocade specific network, with vlan extension."""

    session = context.session
    return (session.query(BrocadeNetwork).filter_by(id=net_id).first())


def get_networks(context, filters=None, fields=None):
    """Get all brocade specific networks."""

    session = context.session
    try:
        nets = session.query(BrocadeNetwork).all()
        return nets
    except sa.exc.SQLAlchemyError:
        return None


def create_port(context, port_id, network_id, physical_interface,
                vlan_id, tenant_id, admin_state_up):
    """Create a brocade specific port, has policy like vlan."""

    # port_id is truncated: since the linux-bridge tap device names are
    # based on truncated port id, this enables port lookups using
    # tap devices
    port_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        port = BrocadePort(port_id=port_id,
                           network_id=network_id,
                           physical_interface=physical_interface,
                           vlan_id=vlan_id,
                           admin_state_up=admin_state_up,
                           tenant_id=tenant_id)
        session.add(port)
    return port


def get_port(context, port_id):
    """Get a brocade specific port."""

    port_id = port_id[0:11]
    session = context.session
    port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
    return port


def get_ports(context, network_id=None):
    """Get brocade specific ports for a network."""

    session = context.session
    ports = (session.query(BrocadePort).filter_by(network_id=network_id).all())
    return ports


def delete_port(context, port_id):
    """Delete a brocade specific port."""

    port_id = port_id[0:11]
    session = context.session
    with session.begin(subtransactions=True):
        port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
        if port is not None:
            session.delete(port)


def get_port_from_device(session, port_id):
    """Get port from the tap device."""

    # device is same as truncated port_id
    port = (session.query(BrocadePort).filter_by(port_id=port_id).first())
    return port


def update_port_state(context, port_id, admin_state_up):
    """Update port attributes."""

    port_id = port_id[0:11]
    session = context.session
    session.query(BrocadePort).filter_by(
        port_id=port_id).update({'admin_state_up': admin_state_up})

# ======================================================================
#  repo: snasoft/QtCreatorPluginsPack
#  path: Bin/3rdParty/vera/bin/lib/test/test_isinstance.py
#  copies: 198 | size: 9806 | license: lgpl-3.0
# ======================================================================

# Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.

import unittest
from test import test_support
import sys


class TestIsInstanceExceptions(unittest.TestCase):
    # Test to make sure that an AttributeError when accessing the instance's
    # class's bases is masked. This was actually a bug in Python 2.2 and
    # 2.2.1 where the exception wasn't caught but it also wasn't being cleared
    # (leading to an "undetected error" in the debug build). Set up is,
    # isinstance(inst, cls) where:
    #
    # - inst isn't an InstanceType
    # - cls isn't a ClassType, a TypeType, or a TupleType
    # - cls has a __bases__ attribute
    # - inst has a __class__ attribute
    # - inst.__class__ has no __bases__ attribute
    #
    # Sounds complicated, I know, but this mimics a situation where an
    # extension type raises an AttributeError when its __bases__ attribute is
    # gotten. In that case, isinstance() should return False.
    def test_class_has_no_bases(self):
        class I(object):
            def getclass(self):
                # This must return an object that has no __bases__ attribute
                return None
            __class__ = property(getclass)

        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)

        self.assertEqual(False, isinstance(I(), C()))

    # Like above except that inst.__class__.__bases__ raises an exception
    # other than AttributeError
    def test_bases_raises_other_than_attribute_error(self):
        class E(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        class I(object):
            def getclass(self):
                return E()
            __class__ = property(getclass)

        class C(object):
            def getbases(self):
                return ()
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, isinstance, I(), C())

    # Here's a situation where getattr(cls, '__bases__') raises an exception.
    # If that exception is not AttributeError, it should not get masked
    def test_dont_mask_non_attribute_error(self):
        class I: pass

        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, isinstance, I(), C())

    # Like above, except that getattr(cls, '__bases__') raises an
    # AttributeError, which /should/ get masked as a TypeError
    def test_mask_attribute_error(self):
        class I: pass

        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        self.assertRaises(TypeError, isinstance, I(), C())

# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
    def test_dont_mask_non_attribute_error(self):
        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        class S(C): pass

        self.assertRaises(RuntimeError, issubclass, C(), S())

    def test_mask_attribute_error(self):
        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        class S(C): pass

        self.assertRaises(TypeError, issubclass, C(), S())

    # Like above, but test the second branch, where the __bases__ of the
    # second arg (the cls arg) is tested. This means the first arg must
    # return a valid __bases__, and it's okay for it to be a normal --
    # unrelated by inheritance -- class.
    def test_dont_mask_non_attribute_error_in_cls_arg(self):
        class B: pass

        class C(object):
            def getbases(self):
                raise RuntimeError
            __bases__ = property(getbases)

        self.assertRaises(RuntimeError, issubclass, B, C())

    def test_mask_attribute_error_in_cls_arg(self):
        class B: pass

        class C(object):
            def getbases(self):
                raise AttributeError
            __bases__ = property(getbases)

        self.assertRaises(TypeError, issubclass, B, C())


# meta classes for creating abstract classes and instances
class AbstractClass(object):
    def __init__(self, bases):
        self.bases = bases

    def getbases(self):
        return self.bases
    __bases__ = property(getbases)

    def __call__(self):
        return AbstractInstance(self)

class AbstractInstance(object):
    def __init__(self, klass):
        self.klass = klass

    def getclass(self):
        return self.klass
    __class__ = property(getclass)

# abstract classes
AbstractSuper = AbstractClass(bases=())

AbstractChild = AbstractClass(bases=(AbstractSuper,))

# normal classes
class Super:
    pass

class Child(Super):
    pass

# new-style classes
class NewSuper(object):
    pass

class NewChild(NewSuper):
    pass


class TestIsInstanceIsSubclass(unittest.TestCase):
    # Tests to ensure that isinstance and issubclass work on abstract
    # classes and instances. Before the 2.2 release, TypeErrors were
    # raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances with
    # 'abstract' classes and instances. This case tries to test all
    # combinations.

    def test_isinstance_normal(self):
        # normal instances
        self.assertEqual(True, isinstance(Super(), Super))
        self.assertEqual(False, isinstance(Super(), Child))
        self.assertEqual(False, isinstance(Super(), AbstractSuper))
        self.assertEqual(False, isinstance(Super(), AbstractChild))

        self.assertEqual(True, isinstance(Child(), Super))
        self.assertEqual(False, isinstance(Child(), AbstractSuper))

    def test_isinstance_abstract(self):
        # abstract instances
        self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
        self.assertEqual(False, isinstance(AbstractSuper(), Super))
        self.assertEqual(False, isinstance(AbstractSuper(), Child))

        self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
        self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
        self.assertEqual(False, isinstance(AbstractChild(), Super))
        self.assertEqual(False, isinstance(AbstractChild(), Child))

    def test_subclass_normal(self):
        # normal classes
        self.assertEqual(True, issubclass(Super, Super))
        self.assertEqual(False, issubclass(Super, AbstractSuper))
        self.assertEqual(False, issubclass(Super, Child))

        self.assertEqual(True, issubclass(Child, Child))
        self.assertEqual(True, issubclass(Child, Super))
        self.assertEqual(False, issubclass(Child, AbstractSuper))

    def test_subclass_abstract(self):
        # abstract classes
        self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
        self.assertEqual(False, issubclass(AbstractSuper, Child))

        self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
        self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
        self.assertEqual(False, issubclass(AbstractChild, Super))
        self.assertEqual(False, issubclass(AbstractChild, Child))

    def test_subclass_tuple(self):
        # test with a tuple as the second argument classes
        self.assertEqual(True, issubclass(Child, (Child,)))
        self.assertEqual(True, issubclass(Child, (Super,)))
        self.assertEqual(False, issubclass(Super, (Child,)))
        self.assertEqual(True, issubclass(Super, (Child, Super)))
        self.assertEqual(False, issubclass(Child, ()))
        self.assertEqual(True, issubclass(Super, (Child, (Super,))))

        self.assertEqual(True, issubclass(NewChild, (NewChild,)))
        self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
        self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
        self.assertEqual(False, issubclass(NewChild, ()))
        self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))

        self.assertEqual(True, issubclass(int, (long, (float, int))))
        if test_support.have_unicode:
            self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))

    def test_subclass_recursion_limit(self):
        # make sure that issubclass raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, issubclass, str, str)

    def test_isinstance_recursion_limit(self):
        # make sure that isinstance raises RuntimeError before the C stack is
        # blown
        self.assertRaises(RuntimeError, blowstack, isinstance, '', str)

def blowstack(fxn, arg, compare_to):
    # Make sure that calling isinstance with a deeply nested tuple for its
    # argument will raise RuntimeError eventually.
    tuple_arg = (compare_to,)
    for cnt in xrange(sys.getrecursionlimit()+5):
        tuple_arg = (tuple_arg,)
    fxn(arg, tuple_arg)


def test_main():
    test_support.run_unittest(
        TestIsInstanceExceptions,
        TestIsSubclassExceptions,
        TestIsInstanceIsSubclass
    )


if __name__ == '__main__':
    test_main()
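# The masking behaviour pinned down by test_mask_attribute_error above can be
# reproduced directly; a quick standalone sketch (Python 2, matching the file):

class Evil(object):
    def getbases(self):
        raise AttributeError
    __bases__ = property(getbases)

class Plain: pass

try:
    isinstance(Plain(), Evil())
except TypeError:
    print('the AttributeError from __bases__ was masked as TypeError')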
# ======================================================================
#  repo: siliconsmiley/QGIS
#  path: python/plugins/processing/algs/lidar/lastools/lasthinPro.py
#  copies: 1 | size: 3972 | license: gpl-2.0
# ======================================================================

# -*- coding: utf-8 -*-

"""
***************************************************************************
    lasthinPro.py
    ---------------------
    Date                 : October 2014
    Copyright            : (C) 2014 by Martin Isenburg
    Email                : martin near rapidlasso point com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Martin Isenburg'
__date__ = 'October 2014'
__copyright__ = '(C) 2014, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm

from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection

class lasthinPro(LAStoolsAlgorithm):

    THIN_STEP = "THIN_STEP"
    OPERATION = "OPERATION"
    OPERATIONS = ["lowest", "random", "highest"]
    WITHHELD = "WITHHELD"
    CLASSIFY_AS = "CLASSIFY_AS"
    CLASSIFY_AS_CLASS = "CLASSIFY_AS_CLASS"

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('lasthinPro')
        self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
        self.addParametersPointInputFolderGUI()
        self.addParameter(ParameterNumber(lasthinPro.THIN_STEP,
                                          self.tr("size of grid used for thinning"), 0, None, 1.0))
        self.addParameter(ParameterSelection(lasthinPro.OPERATION,
                                             self.tr("keep particular point per cell"), lasthinPro.OPERATIONS, 0))
        self.addParameter(ParameterBoolean(lasthinPro.WITHHELD,
                                           self.tr("mark thinned-away points as withheld"), False))
        self.addParameter(ParameterBoolean(lasthinPro.CLASSIFY_AS,
                                           self.tr("classify surviving points as class"), False))
        self.addParameter(ParameterNumber(lasthinPro.CLASSIFY_AS_CLASS,
                                          self.tr("class"), 0, None, 8))
        self.addParametersOutputDirectoryGUI()
        self.addParametersOutputAppendixGUI()
        self.addParametersPointOutputFormatGUI()
        self.addParametersAdditionalGUI()
        self.addParametersCoresGUI()
        self.addParametersVerboseGUI()

    def processAlgorithm(self, progress):
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasthin")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputFolderCommands(commands)
        step = self.getParameterValue(lasthinPro.THIN_STEP)
        if step != 0.0:
            commands.append("-step")
            commands.append(unicode(step))
        operation = self.getParameterValue(lasthinPro.OPERATION)
        if operation != 0:
            commands.append("-" + self.OPERATIONS[operation])
        if self.getParameterValue(lasthinPro.WITHHELD):
            commands.append("-withheld")
        if self.getParameterValue(lasthinPro.CLASSIFY_AS):
            commands.append("-classify_as")
            commands.append(unicode(self.getParameterValue(lasthinPro.CLASSIFY_AS_CLASS)))
        self.addParametersOutputDirectoryCommands(commands)
        self.addParametersOutputAppendixCommands(commands)
        self.addParametersPointOutputFormatCommands(commands)
        self.addParametersAdditionalCommands(commands)
        self.addParametersCoresCommands(commands)

        LAStoolsUtils.runLAStools(commands, progress)
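# processAlgorithm above only assembles a LAStools command line; for reference,
# with step=2.0, operation=highest, withheld and classify-as-8 enabled it would
# serialize to something like the line below. The input/output/cores flags are
# an assumption here, since they are appended by the shared addParameters*Commands
# helpers rather than by this file:
#
#   lasthin -v -i tiles/*.laz -step 2.0 -highest -withheld -classify_as 8 -odir thinned -cores 4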
# ======================================================================
#  repo: alexryndin/ambari
#  path: ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
#  copies: 2 | size: 8157 | license: apache-2.0
# ======================================================================

#!/usr/bin/env python

'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

from stacks.utils.RMFTestCase import RMFTestCase, Template, InlineTemplate, StaticFile
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
from mock.mock import MagicMock, call, patch

@patch("os.listdir", new = MagicMock(return_value=['solr-8886.pid']))
@patch("os.path.isdir", new = MagicMock(return_value=True))
class TestInfraSolr(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "AMBARI_INFRA/0.1.0/package"
    STACK_VERSION = "2.4"

    def configureResourcesCalled(self):
        self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  cd_access = 'a',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/var/run/ambari-infra-solr',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  cd_access = 'a',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/opt/ambari_infra_solr/data',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  cd_access = 'a',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/opt/ambari_infra_solr/data/resources',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  cd_access = 'a',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  recursive_ownership = True,
                                  cd_access = 'a',
                                  mode = 0755
        )
        self.assertResourceCalled('Directory', '/etc/ambari-infra-solr/conf',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  create_parents = True,
                                  recursive_ownership = True,
                                  cd_access = 'a',
                                  mode = 0755
        )

        self.assertResourceCalled('File', '/var/log/ambari-infra-solr/solr-install.log',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  mode = 0644,
                                  content = ''
        )
        self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/infra-solr-env.sh',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  mode = 0755,
                                  content = InlineTemplate(self.getConfig()['configurations']['infra-solr-env']['content'])
        )
        self.assertResourceCalled('File', '/opt/ambari_infra_solr/data/solr.xml',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  content = InlineTemplate(self.getConfig()['configurations']['infra-solr-xml']['content'])
        )
        self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/log4j.properties',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  content = InlineTemplate(self.getConfig()['configurations']['infra-solr-log4j']['content'])
        )

        self.assertResourceCalled('File', '/etc/ambari-infra-solr/conf/custom-security.json',
                                  owner = 'solr',
                                  group = 'hadoop',
                                  content = InlineTemplate(self.getConfig()['configurations']['infra-solr-security-json']['content']),
                                  mode = 0640
        )

        self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
        self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
        self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')

    def test_configure_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
                           classname = "InfraSolr",
                           command = "configure",
                           config_file = "default.json",
                           stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )

        self.configureResourcesCalled()
        self.assertNoMoreResources()

    def test_start_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
                           classname = "InfraSolr",
                           command = "start",
                           config_file = "default.json",
                           stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )

        self.configureResourcesCalled()
        self.assertResourceCalled('Execute', "/usr/lib/ambari-infra-solr/bin/solr start -cloud -noprompt -s /opt/ambari_infra_solr/data >> /var/log/ambari-infra-solr/solr-install.log 2>&1",
                                  environment = {'SOLR_INCLUDE': '/etc/ambari-infra-solr/conf/infra-solr-env.sh'},
                                  user = "solr"
        )

    def test_stop_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/infra_solr.py",
                           classname = "InfraSolr",
                           command = "stop",
                           config_file = "default.json",
                           stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES
        )

        self.assertResourceCalled('Execute', '/usr/lib/ambari-infra-solr/bin/solr stop -all >> /var/log/ambari-infra-solr/solr-install.log',
                                  environment = {'SOLR_INCLUDE': '/etc/ambari-infra-solr/conf/infra-solr-env.sh'},
                                  user = "solr",
                                  only_if = "test -f /var/run/ambari-infra-solr/solr-8886.pid"
        )
        self.assertResourceCalled('File', '/var/run/ambari-infra-solr/solr-8886.pid',
                                  action = ['delete']
        )
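# The test class above stubs filesystem probes with class-level mock.patch
# decorators before any test method runs. The same pattern in miniature,
# standalone rather than through the Ambari RMFTestCase harness:

import os
import unittest
from mock import MagicMock, patch

@patch("os.listdir", new=MagicMock(return_value=['solr-8886.pid']))
class PatchDemo(unittest.TestCase):
    def test_pid_listing_is_stubbed(self):
        # os.listdir is replaced for every test method in the class
        self.assertEqual(os.listdir('/var/run/ambari-infra-solr'),
                         ['solr-8886.pid'])

if __name__ == '__main__':
    unittest.main()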
# ======================================================================
#  repo: vshtanko/scikit-learn
#  path: sklearn/metrics/setup.py
#  copies: 299 | size: 1024 | license: bsd-3-clause
# ======================================================================

import os
import os.path

import numpy
from numpy.distutils.misc_util import Configuration

from sklearn._build_utils import get_blas_info


def configuration(parent_package="", top_path=None):
    config = Configuration("metrics", parent_package, top_path)

    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())

# ======================================================================
#  repo: kanagasabapathi/python-for-android
#  path: python3-alpha/python3-src/Tools/scripts/linktree.py
#  copies: 49 | size: 2443 | license: apache-2.0
# ======================================================================

#! /usr/bin/env python3

# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree

import sys, os

LINK = '.LINK' # Name of special symlink at the top.

debug = 0

def main():
    if not 3 <= len(sys.argv) <= 4:
        print('usage:', sys.argv[0], 'oldtree newtree [linkto]')
        return 2
    oldtree, newtree = sys.argv[1], sys.argv[2]
    if len(sys.argv) > 3:
        link = sys.argv[3]
        link_may_fail = 1
    else:
        link = LINK
        link_may_fail = 0
    if not os.path.isdir(oldtree):
        print(oldtree + ': not a directory')
        return 1
    try:
        os.mkdir(newtree, 0o777)
    except os.error as msg:
        print(newtree + ': cannot mkdir:', msg)
        return 1
    linkname = os.path.join(newtree, link)
    try:
        os.symlink(os.path.join(os.pardir, oldtree), linkname)
    except os.error as msg:
        if not link_may_fail:
            print(linkname + ': cannot symlink:', msg)
            return 1
        else:
            print(linkname + ': warning: cannot symlink:', msg)
    linknames(oldtree, newtree, link)
    return 0

def linknames(old, new, link):
    if debug: print('linknames', (old, new, link))
    try:
        names = os.listdir(old)
    except os.error as msg:
        print(old + ': warning: cannot listdir:', msg)
        return
    for name in names:
        if name not in (os.curdir, os.pardir):
            oldname = os.path.join(old, name)
            linkname = os.path.join(link, name)
            newname = os.path.join(new, name)
            if debug > 1: print(oldname, newname, linkname)
            if os.path.isdir(oldname) and \
               not os.path.islink(oldname):
                try:
                    os.mkdir(newname, 0o777)
                    ok = 1
                except os.error as msg:
                    # was a bare `except:` that printed a stale `msg`
                    print(newname + \
                          ': warning: cannot mkdir:', msg)
                    ok = 0
                if ok:
                    linkname = os.path.join(os.pardir,
                                            linkname)
                    linknames(oldname, newname, linkname)
            else:
                os.symlink(linkname, newname)

if __name__ == '__main__':
    sys.exit(main())
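# Usage note for linktree.py: every created symlink routes through the .LINK
# indirection at the top of the new tree, so relocating the original tree only
# requires repointing that one link. An illustrative session (paths made up):
#
#   $ python linktree.py Python-src Python-links
#   $ ls -l Python-links/.LINK      # -> ../Python-src
#   $ ls -l Python-links/README     # -> .LINK/README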
# ======================================================================
#  repo: amohanta/yalih
#  path: mechanize/_http.py
#  copies: 133 | size: 14354 | license: apache-2.0
# ======================================================================

"""HTTP related handlers.

Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.


Copyright 2002-2006 John J Lee <jjl@pobox.com>

This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).

"""

import HTMLParser
from cStringIO import StringIO
import htmlentitydefs
import logging
import robotparser
import socket
import time

import _sgmllib_copy as sgmllib
from _urllib2_fork import HTTPError, BaseHandler

from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import response_seek_wrapper
import _rfc3986
import _sockettimeout

debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug

# monkeypatch urllib2.HTTPError to show URL
## import urllib2
## def urllib2_str(self):
##     return 'HTTP Error %s: %s (%s)' % (
##         self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str


CHUNK = 1024  # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'

# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass

class AbstractHeadParser:
    # only these elements are allowed in or before HEAD of document
    head_elems = ("html", "head",
                  "title", "base",
                  "script", "style", "meta", "link", "object")
    _entitydefs = htmlentitydefs.name2codepoint
    _encoding = DEFAULT_ENCODING

    def __init__(self):
        self.http_equiv = []

    def start_meta(self, attrs):
        http_equiv = content = None
        for key, value in attrs:
            if key == "http-equiv":
                http_equiv = self.unescape_attr_if_required(value)
            elif key == "content":
                content = self.unescape_attr_if_required(value)
        if http_equiv is not None and content is not None:
            self.http_equiv.append((http_equiv, content))

    def end_head(self):
        raise EndOfHeadError()

    def handle_entityref(self, name):
        #debug("%s", name)
        self.handle_data(unescape(
            '&%s;' % name, self._entitydefs, self._encoding))

    def handle_charref(self, name):
        #debug("%s", name)
        self.handle_data(unescape_charref(name, self._encoding))

    def unescape_attr(self, name):
        #debug("%s", name)
        return unescape(name, self._entitydefs, self._encoding)

    def unescape_attrs(self, attrs):
        #debug("%s", attrs)
        escaped_attrs = {}
        for key, val in attrs.items():
            escaped_attrs[key] = self.unescape_attr(val)
        return escaped_attrs

    def unknown_entityref(self, ref):
        self.handle_data("&%s;" % ref)

    def unknown_charref(self, ref):
        self.handle_data("&#%s;" % ref)


class XHTMLCompatibleHeadParser(AbstractHeadParser,
                                HTMLParser.HTMLParser):
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        AbstractHeadParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        if tag not in self.head_elems:
            raise EndOfHeadError()
        try:
            method = getattr(self, 'start_' + tag)
        except AttributeError:
            try:
                method = getattr(self, 'do_' + tag)
            except AttributeError:
                pass # unknown tag
            else:
                method(attrs)
        else:
            method(attrs)

    def handle_endtag(self, tag):
        if tag not in self.head_elems:
            raise EndOfHeadError()
        try:
            method = getattr(self, 'end_' + tag)
        except AttributeError:
            pass # unknown tag
        else:
            method()

    def unescape(self, name):
        # Use the entitydefs passed into constructor, not
        # HTMLParser.HTMLParser's entitydefs.
        return self.unescape_attr(name)

    def unescape_attr_if_required(self, name):
        return name # HTMLParser.HTMLParser already did it

class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):

    def _not_called(self):
        assert False

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
        AbstractHeadParser.__init__(self)

    def handle_starttag(self, tag, method, attrs):
        if tag not in self.head_elems:
            raise EndOfHeadError()
        if tag == "meta":
            method(attrs)

    def unknown_starttag(self, tag, attrs):
        self.handle_starttag(tag, self._not_called, attrs)

    def handle_endtag(self, tag, method):
        if tag in self.head_elems:
            method()
        else:
            raise EndOfHeadError()

    def unescape_attr_if_required(self, name):
        return self.unescape_attr(name)

def parse_head(fileobj, parser):
    """Return a list of key, value pairs."""
    while 1:
        data = fileobj.read(CHUNK)
        try:
            parser.feed(data)
        except EndOfHeadError:
            break
        if len(data) != CHUNK:
            # this should only happen if there is no HTML body, or if
            # CHUNK is big
            break
    return parser.http_equiv

class HTTPEquivProcessor(BaseHandler):
    """Append META HTTP-EQUIV headers to regular HTTP headers."""

    handler_order = 300  # before handlers that look at HTTP headers

    def __init__(self, head_parser_class=HeadParser,
                 i_want_broken_xhtml_support=False,
                 ):
        self.head_parser_class = head_parser_class
        self._allow_xhtml = i_want_broken_xhtml_support

    def http_response(self, request, response):
        if not hasattr(response, "seek"):
            response = response_seek_wrapper(response)
        http_message = response.info()
        url = response.geturl()
        ct_hdrs = http_message.getheaders("content-type")
        if is_html(ct_hdrs, url, self._allow_xhtml):
            try:
                try:
                    html_headers = parse_head(response,
                                              self.head_parser_class())
                finally:
                    response.seek(0)
            except (HTMLParser.HTMLParseError,
                    sgmllib.SGMLParseError):
                pass
            else:
                for hdr, val in html_headers:
                    # add a header
                    http_message.dict[hdr.lower()] = val
                    text = hdr + ": " + val
                    for line in text.split("\n"):
                        http_message.headers.append(line + "\n")
        return response

    https_response = http_response


class MechanizeRobotFileParser(robotparser.RobotFileParser):

    def __init__(self, url='', opener=None):
        robotparser.RobotFileParser.__init__(self, url)
        self._opener = opener
        self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT

    def set_opener(self, opener=None):
        import _opener
        if opener is None:
            opener = _opener.OpenerDirector()
        self._opener = opener

    def set_timeout(self, timeout):
        self._timeout = timeout

    def read(self):
        """Reads the robots.txt URL and feeds it to the parser."""
        if self._opener is None:
            self.set_opener()
        req = Request(self.url, unverifiable=True, visit=False,
                      timeout=self._timeout)
        try:
            f = self._opener.open(req)
        except HTTPError, f:
            pass
        except (IOError, socket.error, OSError), exc:
            debug_robots("ignoring error opening %r: %s" %
                         (self.url, exc))
            return
        lines = []
        line = f.readline()
        while line:
            lines.append(line.strip())
            line = f.readline()
        status = f.code
        if status == 401 or status == 403:
            self.disallow_all = True
            debug_robots("disallow all")
        elif status >= 400:
            self.allow_all = True
            debug_robots("allow all")
        elif status == 200 and lines:
            debug_robots("parse lines")
            self.parse(lines)

class RobotExclusionError(HTTPError):
    def __init__(self, request, *args):
        apply(HTTPError.__init__, (self,)+args)
        self.request = request

class HTTPRobotRulesProcessor(BaseHandler):
    # before redirections, after everything else
    handler_order = 800

    try:
        from httplib import HTTPMessage
    except:
        from mimetools import Message
        http_response_class = Message
    else:
        http_response_class = HTTPMessage

    def __init__(self, rfp_class=MechanizeRobotFileParser):
        self.rfp_class = rfp_class
        self.rfp = None
        self._host = None

    def http_request(self, request):
        scheme = request.get_type()
        if scheme not in ["http", "https"]:
            # robots exclusion only applies to HTTP
            return request

        if request.get_selector() == "/robots.txt":
            # /robots.txt is always OK to fetch
            return request

        host = request.get_host()

        # robots.txt requests don't need to be allowed by robots.txt :-)
        origin_req = getattr(request, "_origin_req", None)
        if (origin_req is not None and
            origin_req.get_selector() == "/robots.txt" and
            origin_req.get_host() == host
            ):
            return request

        if host != self._host:
            self.rfp = self.rfp_class()
            try:
                self.rfp.set_opener(self.parent)
            except AttributeError:
                debug("%r instance does not support set_opener" %
                      self.rfp.__class__)
            self.rfp.set_url(scheme+"://"+host+"/robots.txt")
            self.rfp.set_timeout(request.timeout)
            self.rfp.read()
            self._host = host

        ua = request.get_header("User-agent", "")
        if self.rfp.can_fetch(ua, request.get_full_url()):
            return request
        else:
            # XXX This should really have raised URLError.  Too late now...
            msg = "request disallowed by robots.txt"
            raise RobotExclusionError(
                request,
                request.get_full_url(),
                403, msg,
                self.http_response_class(StringIO()), StringIO(msg))

    https_request = http_request

class HTTPRefererProcessor(BaseHandler):
    """Add Referer header to requests.

    This only makes sense if you use each RefererProcessor for a single
    chain of requests only (so, for example, if you use a single
    HTTPRefererProcessor to fetch a series of URLs extracted from a single
    page, this will break).

    There's a proper implementation of this in mechanize.Browser.

    """
    def __init__(self):
        self.referer = None

    def http_request(self, request):
        if ((self.referer is not None) and
            not request.has_header("Referer")):
            request.add_unredirected_header("Referer", self.referer)
        return request

    def http_response(self, request, response):
        self.referer = response.geturl()
        return response

    https_request = http_request
    https_response = http_response


def clean_refresh_url(url):
    # e.g. Firefox 1.5 does (something like) this
    if ((url.startswith('"') and url.endswith('"')) or
        (url.startswith("'") and url.endswith("'"))):
        url = url[1:-1]
    return _rfc3986.clean_url(url, "latin-1")  # XXX encoding

def parse_refresh_header(refresh):
    """
    >>> parse_refresh_header("1; url=http://example.com/")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1; url='http://example.com/'")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1")
    (1.0, None)
    >>> parse_refresh_header("blah")  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: invalid literal for float(): blah

    """

    ii = refresh.find(";")
    if ii != -1:
        pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
        jj = newurl_spec.find("=")
        key = None
        if jj != -1:
            key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
            newurl = clean_refresh_url(newurl)
        if key is None or key.strip().lower() != "url":
            raise ValueError()
    else:
        pause, newurl = float(refresh), None
    return pause, newurl

class HTTPRefreshProcessor(BaseHandler):
    """Perform HTTP Refresh redirections.

    Note that if a non-200 HTTP code has occurred (for example, a 30x
    redirect), this processor will do nothing.

    By default, only zero-time Refresh headers are redirected.  Use the
    max_time attribute / constructor argument to allow Refresh with longer
    pauses.  Use the honor_time attribute / constructor argument to control
    whether the requested pause is honoured (with a time.sleep()) or
    skipped in favour of immediate redirection.

    Public attributes:

    max_time: see above
    honor_time: see above

    """
    handler_order = 1000

    def __init__(self, max_time=0, honor_time=True):
        self.max_time = max_time
        self.honor_time = honor_time
        self._sleep = time.sleep

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        if code == 200 and hdrs.has_key("refresh"):
            refresh = hdrs.getheaders("refresh")[0]
            try:
                pause, newurl = parse_refresh_header(refresh)
            except ValueError:
                debug("bad Refresh header: %r" % refresh)
                return response

            if newurl is None:
                newurl = response.geturl()
            if (self.max_time is None) or (pause <= self.max_time):
                if pause > 1E-3 and self.honor_time:
                    self._sleep(pause)
                hdrs["location"] = newurl
                # hardcoded http is NOT a bug
                response = self.parent.error(
                    "http", request, response,
                    "refresh", msg, hdrs)
            else:
                debug("Refresh header ignored: %r" % refresh)

        return response

    https_response = http_response
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains common code shared by all inception models.\n\nUsage of arg scope:\n  with slim.arg_scope(inception_arg_scope()):\n    logits, end_points = inception.inception_v3(images, num_classes,\n                                                is_training=is_training)\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef inception_arg_scope(weight_decay=0.00004,\n                        use_batch_norm=True,\n                        batch_norm_decay=0.9997,\n                        batch_norm_epsilon=0.001):\n  \"\"\"Defines the default arg scope for inception models.\n\n  Args:\n    weight_decay: The weight decay to use for regularizing the model.\n    use_batch_norm: If `True`, batch_norm is applied after each convolution.\n    batch_norm_decay: Decay for batch norm moving average.\n    batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n      in batch norm.\n\n  Returns:\n    An `arg_scope` to use for the inception models.\n  \"\"\"\n  batch_norm_params = {\n      # Decay for the moving averages.\n      'decay': batch_norm_decay,\n      # epsilon to prevent 0s in variance.\n      'epsilon': batch_norm_epsilon,\n      # collection containing update_ops.\n      'updates_collections': tf.GraphKeys.UPDATE_OPS,\n  }\n  if use_batch_norm:\n    normalizer_fn = slim.batch_norm\n    normalizer_params = batch_norm_params\n  else:\n    normalizer_fn = None\n    normalizer_params = {}\n  # Set weight_decay for weights in Conv and FC layers.\n  with slim.arg_scope([slim.conv2d, slim.fully_connected],\n                      weights_regularizer=slim.l2_regularizer(weight_decay)):\n    with slim.arg_scope(\n        [slim.conv2d],\n        weights_initializer=slim.variance_scaling_initializer(),\n        activation_fn=tf.nn.relu,\n        normalizer_fn=normalizer_fn,\n        normalizer_params=normalizer_params) as sc:\n      return sc\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475278,"cells":{"repo_name":{"kind":"string","value":"psawaya/Mental-Ginger"},"path":{"kind":"string","value":"django/conf/locale/ka/formats.py"},"copies":{"kind":"string","value":"329"},"size":{"kind":"string","value":"1888"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n# This file is distributed under the same license as the Django package.\n#\n\n# The *_FORMAT strings use the Django date format syntax,\n# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date\nDATE_FORMAT = 'l, j F, Y'\nTIME_FORMAT = 'h:i:s a'\nDATETIME_FORMAT = 'j F, Y h:i:s a'\nYEAR_MONTH_FORMAT = 'F, Y'\nMONTH_DAY_FORMAT = 'j F'\nSHORT_DATE_FORMAT = 'j.M.Y'\nSHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'\nFIRST_DAY_OF_WEEK = 1 # (Monday)\n\n# The *_INPUT_FORMATS strings use the Python strftime format syntax,\n# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior\nDATE_INPUT_FORMATS = (\n    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'\n    # '%d %b %Y', '%d %b, %Y', '%d %b. 
%Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'\n # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'\n # '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'\n)\nTIME_INPUT_FORMATS = (\n '%H:%M:%S', # '14:30:59'\n '%H:%M', # '14:30'\n)\nDATETIME_INPUT_FORMATS = (\n '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'\n '%Y-%m-%d %H:%M', # '2006-10-25 14:30'\n '%Y-%m-%d', # '2006-10-25'\n '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'\n '%d.%m.%Y %H:%M', # '25.10.2006 14:30'\n '%d.%m.%Y', # '25.10.2006'\n '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'\n '%d.%m.%y %H:%M', # '25.10.06 14:30'\n '%d.%m.%y', # '25.10.06'\n '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'\n '%m/%d/%Y %H:%M', # '10/25/2006 14:30'\n '%m/%d/%Y', # '10/25/2006'\n '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'\n '%m/%d/%y %H:%M', # '10/25/06 14:30'\n '%m/%d/%y', # '10/25/06'\n)\nDECIMAL_SEPARATOR = '.'\nTHOUSAND_SEPARATOR = \" \"\nNUMBER_GROUPING = 3\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475279,"cells":{"repo_name":{"kind":"string","value":"wzbozon/scikit-learn"},"path":{"kind":"string","value":"sklearn/preprocessing/data.py"},"copies":{"kind":"string","value":"68"},"size":{"kind":"string","value":"57385"},"content":{"kind":"string","value":"# Authors: Alexandre Gramfort \n# Mathieu Blondel \n# Olivier Grisel \n# Andreas Mueller \n# Eric Martin \n# License: BSD 3 clause\n\nfrom itertools import chain, combinations\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..externals import six\nfrom ..utils import check_array\nfrom ..utils.extmath import row_norms\nfrom ..utils.fixes import combinations_with_replacement as combinations_w_r\nfrom ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,\n inplace_csr_row_normalize_l2)\nfrom ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,\n min_max_axis, inplace_row_scale)\nfrom ..utils.validation import check_is_fitted, FLOAT_DTYPES\n\n\nzip = six.moves.zip\nmap = six.moves.map\nrange = six.moves.range\n\n__all__ = [\n 'Binarizer',\n 'KernelCenterer',\n 'MinMaxScaler',\n 'MaxAbsScaler',\n 'Normalizer',\n 'OneHotEncoder',\n 'RobustScaler',\n 'StandardScaler',\n 'add_dummy_feature',\n 'binarize',\n 'normalize',\n 'scale',\n 'robust_scale',\n 'maxabs_scale',\n 'minmax_scale',\n]\n\nDEPRECATION_MSG_1D = (\n \"Passing 1d arrays as data is deprecated in 0.17 and will \"\n \"raise ValueError in 0.19. 
Reshape your data either using \"\n \"X.reshape(-1, 1) if your data has a single feature or \"\n \"X.reshape(1, -1) if it contains a single sample.\"\n)\n\n\ndef _mean_and_std(X, axis=0, with_mean=True, with_std=True):\n \"\"\"Compute mean and std deviation for centering, scaling.\n\n Zero valued std components are reset to 1.0 to avoid NaNs when scaling.\n \"\"\"\n X = np.asarray(X)\n Xr = np.rollaxis(X, axis)\n\n if with_mean:\n mean_ = Xr.mean(axis=0)\n else:\n mean_ = None\n\n if with_std:\n std_ = Xr.std(axis=0)\n std_ = _handle_zeros_in_scale(std_)\n else:\n std_ = None\n\n return mean_, std_\n\n\ndef _handle_zeros_in_scale(scale):\n ''' Makes sure that whenever scale is zero, we handle it correctly.\n\n This happens in most scalers when we have constant features.'''\n\n # if we are fitting on 1D arrays, scale might be a scalar\n if np.isscalar(scale):\n if scale == 0:\n scale = 1.\n elif isinstance(scale, np.ndarray):\n scale[scale == 0.0] = 1.0\n scale[~np.isfinite(scale)] = 1.0\n return scale\n\n\ndef scale(X, axis=0, with_mean=True, with_std=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the mean and component wise scale to unit variance.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the means and standard deviations along. If 0,\n independently standardize each feature, otherwise (if 1) standardize\n each sample.\n\n with_mean : boolean, True by default\n If True, center the data before scaling.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_mean=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.StandardScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,\n warn_on_dtype=True, estimator='the scale function',\n dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` instead\"\n \" See docstring for motivation and alternatives.\")\n if axis != 0:\n raise ValueError(\"Can only scale sparse matrix on axis=0, \"\n \" got axis=%d\" % axis)\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n _, var = mean_variance_axis(X, axis=0)\n var = _handle_zeros_in_scale(var)\n inplace_column_scale(X, 1 / np.sqrt(var))\n else:\n X = np.asarray(X)\n mean_, std_ = _mean_and_std(\n X, axis, with_mean=with_mean, with_std=with_std)\n if copy:\n X = X.copy()\n # Xr is a view on the original array that enables easy use of\n # broadcasting on the axis in which we are interested in\n Xr = np.rollaxis(X, axis)\n if with_mean:\n Xr -= mean_\n mean_1 = Xr.mean(axis=0)\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n if not np.allclose(mean_1, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n if with_std:\n Xr /= std_\n if with_mean:\n mean_2 = Xr.mean(axis=0)\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again:\n if not np.allclose(mean_2, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0. \")\n Xr -= mean_2\n return X\n\n\nclass MinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. 
between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n copy : boolean, optional, default True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array).\n\n Attributes\n ----------\n min_ : ndarray, shape (n_features,)\n Per feature adjustment for minimum.\n\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, feature_range=(0, 1), copy=True):\n self.feature_range = feature_range\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\"Minimum of desired feature range must be smaller\"\n \" than maximum. Got %s.\" % str(feature_range))\n data_min = np.min(X, axis=0)\n data_range = np.max(X, axis=0) - data_min\n data_range = _handle_zeros_in_scale(data_range)\n self.scale_ = (feature_range[1] - feature_range[0]) / data_range\n self.min_ = feature_range[0] - data_min * self.scale_\n self.data_range = data_range\n self.data_min = data_min\n return self\n\n def transform(self, X):\n \"\"\"Scaling features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n\n if X.ndim == 1:\n warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)\n\n X *= self.scale_\n X += self.min_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n Input data that will be transformed.\n \"\"\"\n check_is_fitted(self, 'scale_')\n\n X = check_array(X, copy=self.copy, ensure_2d=False)\n X -= self.min_\n X /= self.scale_\n return X\n\n\ndef minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):\n \"\"\"Transforms features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n feature_range: tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n axis : int (0 by default)\n axis used to scale along. 
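(An illustrative aside added by the editor: with axis=0 and the default\n        feature_range=(0, 1), a feature column [1, 2, 3] is mapped to\n        [0, 0.5, 1] via X_std = (X - X.min(axis=0)) / (X.max(axis=0) -\n        X.min(axis=0)).)\n        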
If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass StandardScaler(BaseEstimator, TransformerMixin):\n \"\"\"Standardize features by removing the mean and scaling to unit variance\n\n Centering and scaling happen independently on each feature by computing\n the relevant statistics on the samples in the training set. Mean and\n standard deviation are then stored to be used on later data using the\n `transform` method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators: they might behave badly if the\n individual feature do not more or less look like standard normally\n distributed data (e.g. Gaussian with 0 mean and unit variance).\n\n For instance many elements used in the objective function of\n a learning algorithm (such as the RBF kernel of Support Vector\n Machines or the L1 and L2 regularizers of linear models) assume that\n all features are centered around 0 and have variance in the same\n order. If a feature has a variance that is orders of magnitude larger\n that others, it might dominate the objective function and make the\n estimator unable to learn from other features correctly as expected.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_mean : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. 
if the data is\n not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n returned.\n\n Attributes\n ----------\n mean_ : array of floats with shape [n_features]\n The mean value for each feature in the training set.\n\n std_ : array of floats with shape [n_features]\n The standard deviation for each feature in the training set.\n Set to one if the standard deviation is zero for a given feature.\n\n See also\n --------\n :func:`sklearn.preprocessing.scale` to perform centering and\n scaling without using the ``Transformer`` object oriented API\n\n :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n to further remove the linear correlation across features.\n \"\"\"\n\n def __init__(self, copy=True, with_mean=True, with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : array-like or CSR matrix with shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse='csr', copy=self.copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\")\n self.mean_ = None\n\n if self.with_std:\n var = mean_variance_axis(X, axis=0)[1]\n self.std_ = np.sqrt(var)\n self.std_ = _handle_zeros_in_scale(self.std_)\n else:\n self.std_ = None\n return self\n else:\n self.mean_, self.std_ = _mean_and_std(\n X, axis=0, with_mean=self.with_mean, with_std=self.with_std)\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Perform standardization by centering and scaling\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr', copy=copy,\n ensure_2d=False, warn_on_dtype=True,\n estimator=self, dtype=FLOAT_DTYPES)\n\n if X.ndim == 1:\n warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. 
See docstring for motivation and alternatives.\")\n if self.std_ is not None:\n inplace_column_scale(X, 1 / self.std_)\n else:\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X /= self.std_\n return X\n\n def inverse_transform(self, X, copy=None):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like with shape [n_samples, n_features]\n The data used to scale along the features axis.\n \"\"\"\n check_is_fitted(self, 'std_')\n\n copy = copy if copy is not None else self.copy\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot uncenter sparse matrices: pass `with_mean=False` \"\n \"instead See docstring for motivation and alternatives.\")\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n if self.std_ is not None:\n inplace_column_scale(X, self.std_)\n else:\n X = np.asarray(X)\n if copy:\n X = X.copy()\n if self.with_std:\n X *= self.std_\n if self.with_mean:\n X += self.mean_\n return X\n\n\nclass MaxAbsScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale each feature by its maximum absolute value.\n\n This estimator scales and translates each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0. It does not shift/center the data, and\n thus does not destroy any sparsity.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n\n Attributes\n ----------\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n \"\"\"\n\n def __init__(self, copy=True):\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0)\n scales = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n scales = np.abs(X).max(axis=0)\n scales = np.array(scales)\n scales = scales.reshape(-1)\n self.scale_ = _handle_zeros_in_scale(scales)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Scale the data\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be scaled.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n\n if X.ndim == 1:\n warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)\n\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, 1.0 / self.scale_)\n else:\n inplace_column_scale(X, 1.0 / self.scale_)\n else:\n X /= self.scale_\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : array-like or CSR matrix.\n The data that should be transformed back.\n \"\"\"\n check_is_fitted(self, 'scale_')\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n if sparse.issparse(X):\n if X.shape[0] == 1:\n inplace_row_scale(X, self.scale_)\n else:\n inplace_column_scale(X, 
self.scale_)\n else:\n X *= self.scale_\n return X\n\n\ndef maxabs_scale(X, axis=0, copy=True):\n \"\"\"Scale each feature to the [-1, 1] range without breaking the sparsity.\n\n This estimator scales each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Set to False to perform inplace scaling and avoid a copy (if the input\n is already a numpy array).\n \"\"\"\n s = MaxAbsScaler(copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass RobustScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scale features using statistics that are robust to outliers.\n\n This Scaler removes the median and scales the data according to\n the Interquartile Range (IQR). The IQR is the range between the 1st\n quartile (25th quantile) and the 3rd quartile (75th quantile).\n\n Centering and scaling happen independently on each feature (or each\n sample, depending on the `axis` argument) by computing the relevant\n statistics on the samples in the training set. Median and interquartile\n range are then stored to be used on later data using the `transform`\n method.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators. Typically this is done by removing the mean\n and scaling to unit variance. However, outliers can often influence the\n sample mean / variance in a negative way. In such cases, the median and\n the interquartile range often give better results.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n with_centering : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_scaling : boolean, True by default\n If True, scale the data to interquartile range.\n\n copy : boolean, optional, default is True\n If False, try to avoid a copy and do inplace scaling instead.\n This is not guaranteed to always work inplace; e.g. 
if the data is\n        not a NumPy array or scipy.sparse CSR matrix, a copy may still be\n        returned.\n\n    Attributes\n    ----------\n    center_ : array of floats\n        The median value for each feature in the training set.\n\n    scale_ : array of floats\n        The (scaled) interquartile range for each feature in the training set.\n\n    See also\n    --------\n    :class:`sklearn.preprocessing.StandardScaler` to perform centering\n    and scaling using mean and variance.\n\n    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`\n    to further remove the linear correlation across features.\n\n    Notes\n    -----\n    See examples/preprocessing/plot_robust_scaling.py for an example.\n\n    http://en.wikipedia.org/wiki/Median_(statistics)\n    http://en.wikipedia.org/wiki/Interquartile_range\n    \"\"\"\n\n    def __init__(self, with_centering=True, with_scaling=True, copy=True):\n        self.with_centering = with_centering\n        self.with_scaling = with_scaling\n        self.copy = copy\n\n    def _check_array(self, X, copy):\n        \"\"\"Makes sure centering is not enabled for sparse matrices.\"\"\"\n        X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,\n                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)\n\n        if X.ndim == 1:\n            warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)\n\n        if sparse.issparse(X):\n            if self.with_centering:\n                raise ValueError(\n                    \"Cannot center sparse matrices: use `with_centering=False`\"\n                    \" instead. See docstring for motivation and alternatives.\")\n        return X\n\n    def fit(self, X, y=None):\n        \"\"\"Compute the median and quantiles to be used for scaling.\n\n        Parameters\n        ----------\n        X : array-like with shape [n_samples, n_features]\n            The data used to compute the median and quantiles\n            used for later scaling along the features axis.\n        \"\"\"\n        if sparse.issparse(X):\n            raise TypeError(\"RobustScaler cannot be fitted on sparse inputs\")\n\n        X = self._check_array(X, self.copy)\n        if self.with_centering:\n            self.center_ = np.median(X, axis=0)\n\n        if self.with_scaling:\n            q = np.percentile(X, (25, 75), axis=0)\n            self.scale_ = (q[1] - q[0])\n            self.scale_ = _handle_zeros_in_scale(self.scale_)\n        return self\n\n    def transform(self, X, y=None):\n        \"\"\"Center and scale the data\n\n        Parameters\n        ----------\n        X : array-like or CSR matrix.\n            The data used to scale along the specified axis.\n        \"\"\"\n        if self.with_centering:\n            check_is_fitted(self, 'center_')\n        if self.with_scaling:\n            check_is_fitted(self, 'scale_')\n        X = self._check_array(X, self.copy)\n        if sparse.issparse(X):\n            if self.with_scaling:\n                if X.shape[0] == 1:\n                    inplace_row_scale(X, 1.0 / self.scale_)\n                else:\n                    inplace_column_scale(X, 1.0 / self.scale_)\n        else:\n            if self.with_centering:\n                X -= self.center_\n            if self.with_scaling:\n                X /= self.scale_\n        return X\n\n    def inverse_transform(self, X):\n        \"\"\"Scale back the data to the original representation\n\n        Parameters\n        ----------\n        X : array-like or CSR matrix.\n            The data used to scale along the specified axis.\n        \"\"\"\n        if self.with_centering:\n            check_is_fitted(self, 'center_')\n        if self.with_scaling:\n            check_is_fitted(self, 'scale_')\n        X = self._check_array(X, self.copy)\n        if sparse.issparse(X):\n            if self.with_scaling:\n                if X.shape[0] == 1:\n                    inplace_row_scale(X, self.scale_)\n                else:\n                    inplace_column_scale(X, self.scale_)\n        else:\n            if self.with_scaling:\n                X *= self.scale_\n            if self.with_centering:\n                X += self.center_\n        return X\n\n\ndef robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):\n    \"\"\"Standardize a dataset along any axis\n\n    Center to the median and component wise scale\n    
according to the interquartile range.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like.\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the medians and IQR along. If 0,\n independently scale each feature, otherwise (if 1) scale\n each sample.\n\n with_centering : boolean, True by default\n If True, center the data before scaling.\n\n with_scaling : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default is True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n Notes\n -----\n This implementation will refuse to center scipy.sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_centering=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to call `X.toarray()`\n if he/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n :class:`sklearn.preprocessing.RobustScaler` to perform centering and\n scaling using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,\n copy=copy)\n if axis == 0:\n return s.fit_transform(X)\n else:\n return s.fit_transform(X.T).T\n\n\nclass PolynomialFeatures(BaseEstimator, TransformerMixin):\n \"\"\"Generate polynomial and interaction features.\n\n Generate a new feature matrix consisting of all polynomial combinations\n of the features with degree less than or equal to the specified degree.\n For example, if an input sample is two dimensional and of the form\n [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].\n\n Parameters\n ----------\n degree : integer\n The degree of the polynomial features. Default = 2.\n\n interaction_only : boolean, default = False\n If true, only interaction features are produced: features that are\n products of at most ``degree`` *distinct* input features (so not\n ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).\n\n include_bias : boolean\n If True (default), then include a bias column, the feature in which\n all polynomial powers are zero (i.e. a column of ones - acts as an\n intercept term in a linear model).\n\n Examples\n --------\n >>> X = np.arange(6).reshape(3, 2)\n >>> X\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> poly = PolynomialFeatures(2)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0, 0, 1],\n [ 1, 2, 3, 4, 6, 9],\n [ 1, 4, 5, 16, 20, 25]])\n >>> poly = PolynomialFeatures(interaction_only=True)\n >>> poly.fit_transform(X)\n array([[ 1, 0, 1, 0],\n [ 1, 2, 3, 6],\n [ 1, 4, 5, 20]])\n\n Attributes\n ----------\n powers_ : array, shape (n_input_features, n_output_features)\n powers_[i, j] is the exponent of the jth input in the ith output.\n\n n_input_features_ : int\n The total number of input features.\n\n n_output_features_ : int\n The total number of polynomial output features. 
The number of output\n features is computed by iterating over all suitably sized combinations\n of input features.\n\n Notes\n -----\n Be aware that the number of features in the output array scales\n polynomially in the number of features of the input array, and\n exponentially in the degree. High degrees can cause overfitting.\n\n See :ref:`examples/linear_model/plot_polynomial_interpolation.py\n `\n \"\"\"\n def __init__(self, degree=2, interaction_only=False, include_bias=True):\n self.degree = degree\n self.interaction_only = interaction_only\n self.include_bias = include_bias\n\n @staticmethod\n def _combinations(n_features, degree, interaction_only, include_bias):\n comb = (combinations if interaction_only else combinations_w_r)\n start = int(not include_bias)\n return chain.from_iterable(comb(range(n_features), i)\n for i in range(start, degree + 1))\n\n @property\n def powers_(self):\n check_is_fitted(self, 'n_input_features_')\n\n combinations = self._combinations(self.n_input_features_, self.degree,\n self.interaction_only,\n self.include_bias)\n return np.vstack(np.bincount(c, minlength=self.n_input_features_)\n for c in combinations)\n\n def fit(self, X, y=None):\n \"\"\"\n Compute number of output features.\n \"\"\"\n n_samples, n_features = check_array(X).shape\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n self.n_input_features_ = n_features\n self.n_output_features_ = sum(1 for _ in combinations)\n return self\n\n def transform(self, X, y=None):\n \"\"\"Transform data to polynomial features\n\n Parameters\n ----------\n X : array with shape [n_samples, n_features]\n The data to transform, row by row.\n\n Returns\n -------\n XP : np.ndarray shape [n_samples, NP]\n The matrix of features, where NP is the number of polynomial\n features generated from the combination of inputs.\n \"\"\"\n check_is_fitted(self, ['n_input_features_', 'n_output_features_'])\n\n X = check_array(X)\n n_samples, n_features = X.shape\n\n if n_features != self.n_input_features_:\n raise ValueError(\"X shape does not match training shape\")\n\n # allocate output data\n XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)\n\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n for i, c in enumerate(combinations):\n XP[:, i] = X[:, c].prod(1)\n\n return XP\n\n\ndef normalize(X, norm='l2', axis=1, copy=True):\n \"\"\"Scale input vectors individually to unit norm (vector length).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : 0 or 1, optional (1 by default)\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Normalizer` to perform normalization\n using the ``Transformer`` API (e.g. 
as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n if norm not in ('l1', 'l2', 'max'):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = 'csc'\n elif axis == 1:\n sparse_format = 'csr'\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,\n estimator='the normalize function', dtype=FLOAT_DTYPES)\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if norm == 'l1':\n inplace_csr_row_normalize_l1(X)\n elif norm == 'l2':\n inplace_csr_row_normalize_l2(X)\n elif norm == 'max':\n _, norms = min_max_axis(X, 1)\n norms = norms.repeat(np.diff(X.indptr))\n mask = norms != 0\n X.data[mask] /= norms[mask]\n else:\n if norm == 'l1':\n norms = np.abs(X).sum(axis=1)\n elif norm == 'l2':\n norms = row_norms(X)\n elif norm == 'max':\n norms = np.max(X, axis=1)\n norms = _handle_zeros_in_scale(norms)\n X /= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n return X\n\n\nclass Normalizer(BaseEstimator, TransformerMixin):\n \"\"\"Normalize samples individually to unit norm.\n\n Each sample (i.e. each row of the data matrix) with at least one\n non zero component is rescaled independently of other samples so\n that its norm (l1 or l2) equals one.\n\n This transformer is able to work both with dense numpy arrays and\n scipy.sparse matrix (use CSR format if you want to avoid the burden of\n a copy / conversion).\n\n Scaling inputs to unit norms is a common operation for text\n classification or clustering for instance. For instance the dot\n product of two l2-normalized TF-IDF vectors is the cosine similarity\n of the vectors and is the base similarity metric for the Vector\n Space Model commonly used by the Information Retrieval community.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample.\n\n copy : boolean, optional, default True\n set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix).\n\n Notes\n -----\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n See also\n --------\n :func:`sklearn.preprocessing.normalize` equivalent function\n without the object oriented API\n \"\"\"\n\n def __init__(self, norm='l2', copy=True):\n self.norm = norm\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n X = check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Scale each non zero row of X to unit norm\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to normalize, row by row. 
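(Editor's illustration: with the default norm='l2', a row [3., 4.] is\n            rescaled to [0.6, 0.8], since its Euclidean length is 5.)\n            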
scipy.sparse matrices should be\n in CSR format to avoid an un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr')\n return normalize(X, norm=self.norm, axis=1, copy=copy)\n\n\ndef binarize(X, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or scipy.sparse matrix\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR or CSC format to avoid an\n un-necessary copy.\n\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy\n (if the input is already a numpy array or a scipy.sparse CSR / CSC\n matrix and if axis is 1).\n\n See also\n --------\n :class:`sklearn.preprocessing.Binarizer` to perform binarization\n using the ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`)\n \"\"\"\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError('Cannot binarize a sparse matrix with threshold '\n '< 0')\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return X\n\n\nclass Binarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize data (set feature values to 0 or 1) according to a threshold\n\n Values greater than the threshold map to 1, while values less than\n or equal to the threshold map to 0. With the default threshold of 0,\n only positive values map to 1.\n\n Binarization is a common operation on text count data where the\n analyst can decide to only consider the presence or absence of a\n feature rather than a quantified number of occurrences for instance.\n\n It can also be used as a pre-processing step for estimators that\n consider boolean random variables (e.g. 
modelled using the Bernoulli\n distribution in a Bayesian setting).\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n set to False to perform inplace binarization and avoid a copy (if\n the input is already a numpy array or a scipy.sparse CSR matrix).\n\n Notes\n -----\n If the input is a sparse matrix, only the non-zero values are subject\n to update by the Binarizer class.\n\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n \"\"\"\n\n def __init__(self, threshold=0.0, copy=True):\n self.threshold = threshold\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n \"\"\"\n check_array(X, accept_sparse='csr')\n return self\n\n def transform(self, X, y=None, copy=None):\n \"\"\"Binarize each element of X\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n The data to binarize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n \"\"\"\n copy = copy if copy is not None else self.copy\n return binarize(X, threshold=self.threshold, copy=copy)\n\n\nclass KernelCenterer(BaseEstimator, TransformerMixin):\n \"\"\"Center a kernel matrix\n\n Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a\n function mapping x to a Hilbert space. KernelCenterer centers (i.e.,\n normalize to have zero mean) the data without explicitly computing phi(x).\n It is equivalent to centering phi(x) with\n sklearn.preprocessing.StandardScaler(with_std=False).\n\n Read more in the :ref:`User Guide `.\n \"\"\"\n\n def fit(self, K, y=None):\n \"\"\"Fit KernelCenterer\n\n Parameters\n ----------\n K : numpy array of shape [n_samples, n_samples]\n Kernel matrix.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n K = check_array(K)\n n_samples = K.shape[0]\n self.K_fit_rows_ = np.sum(K, axis=0) / n_samples\n self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples\n return self\n\n def transform(self, K, y=None, copy=True):\n \"\"\"Center kernel matrix.\n\n Parameters\n ----------\n K : numpy array of shape [n_samples1, n_samples2]\n Kernel matrix.\n\n copy : boolean, optional, default True\n Set to False to perform inplace computation.\n\n Returns\n -------\n K_new : numpy array of shape [n_samples1, n_samples2]\n \"\"\"\n check_is_fitted(self, 'K_fit_all_')\n\n K = check_array(K)\n if copy:\n K = K.copy()\n\n K_pred_cols = (np.sum(K, axis=1) /\n self.K_fit_rows_.shape[0])[:, np.newaxis]\n\n K -= self.K_fit_rows_\n K -= K_pred_cols\n K += self.K_fit_all_\n\n return K\n\n\ndef add_dummy_feature(X, value=1.0):\n \"\"\"Augment dataset with an additional dummy feature.\n\n This is useful for fitting an intercept term with implementations which\n cannot otherwise fit it directly.\n\n Parameters\n ----------\n X : array or scipy.sparse matrix with shape [n_samples, n_features]\n Data.\n\n value : float\n Value to use for the dummy feature.\n\n Returns\n -------\n\n X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]\n Same data with dummy feature added as first column.\n\n Examples\n --------\n\n >>> from 
sklearn.preprocessing import add_dummy_feature\n >>> add_dummy_feature([[0, 1], [1, 0]])\n array([[ 1., 0., 1.],\n [ 1., 1., 0.]])\n \"\"\"\n X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])\n n_samples, n_features = X.shape\n shape = (n_samples, n_features + 1)\n if sparse.issparse(X):\n if sparse.isspmatrix_coo(X):\n # Shift columns to the right.\n col = X.col + 1\n # Column indices of dummy feature are 0 everywhere.\n col = np.concatenate((np.zeros(n_samples), col))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n row = np.concatenate((np.arange(n_samples), X.row))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.coo_matrix((data, (row, col)), shape)\n elif sparse.isspmatrix_csc(X):\n # Shift index pointers since we need to add n_samples elements.\n indptr = X.indptr + n_samples\n # indptr[0] must be 0.\n indptr = np.concatenate((np.array([0]), indptr))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n indices = np.concatenate((np.arange(n_samples), X.indices))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.ones(n_samples) * value, X.data))\n return sparse.csc_matrix((data, indices, indptr), shape)\n else:\n klass = X.__class__\n return klass(add_dummy_feature(X.tocoo(), value))\n else:\n return np.hstack((np.ones((n_samples, 1)) * value, X))\n\n\ndef _transform_selected(X, transform, selected=\"all\", copy=True):\n \"\"\"Apply a transform function to portion of selected features\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n Dense array or sparse matrix.\n\n transform : callable\n A callable transform(X) -> X_transformed\n\n copy : boolean, optional\n Copy X even if it could be avoided.\n\n selected: \"all\" or array of indices or mask\n Specify which features to apply the transform to.\n\n Returns\n -------\n X : array or sparse matrix, shape=(n_samples, n_features_new)\n \"\"\"\n if selected == \"all\":\n return transform(X)\n\n X = check_array(X, accept_sparse='csc', copy=copy)\n\n if len(selected) == 0:\n return X\n\n n_features = X.shape[1]\n ind = np.arange(n_features)\n sel = np.zeros(n_features, dtype=bool)\n sel[np.asarray(selected)] = True\n not_sel = np.logical_not(sel)\n n_selected = np.sum(sel)\n\n if n_selected == 0:\n # No features selected.\n return X\n elif n_selected == n_features:\n # All features selected.\n return transform(X)\n else:\n X_sel = transform(X[:, ind[sel]])\n X_not_sel = X[:, ind[not_sel]]\n\n if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):\n return sparse.hstack((X_sel, X_not_sel))\n else:\n return np.hstack((X_sel, X_not_sel))\n\n\nclass OneHotEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode categorical integer features using a one-hot aka one-of-K scheme.\n\n The input to this transformer should be a matrix of integers, denoting\n the values taken on by categorical (discrete) features. The output will be\n a sparse matrix where each column corresponds to one possible value of one\n feature. 
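(A hedged editor's aside: a single feature taking the three values\n    {0, 1, 2} expands into three indicator columns, so the value 2 is encoded\n    as the row [ 0.,  0.,  1.].)\n    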
It is assumed that input features take on values in the range\n [0, n_values).\n\n This encoding is needed for feeding categorical data to many scikit-learn\n estimators, notably linear models and SVMs with the standard kernels.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_values : 'auto', int or array of ints\n Number of values per feature.\n\n - 'auto' : determine value range from training data.\n - int : maximum value for all features.\n - array : maximum value per feature.\n\n categorical_features: \"all\" or array of indices or mask\n Specify what features are treated as categorical.\n\n - 'all' (default): All features are treated as categorical.\n - array of indices: Array of categorical feature indices.\n - mask: Array of length n_features and with dtype=bool.\n\n Non-categorical features are always stacked to the right of the matrix.\n\n dtype : number type, default=np.float\n Desired dtype of output.\n\n sparse : boolean, default=True\n Will return sparse matrix if set True else will return an array.\n\n handle_unknown : str, 'error' or 'ignore'\n Whether to raise an error or ignore if a unknown categorical feature is\n present during transform.\n\n Attributes\n ----------\n active_features_ : array\n Indices for active features, meaning values that actually occur\n in the training set. Only available when n_values is ``'auto'``.\n\n feature_indices_ : array of shape (n_features,)\n Indices to feature ranges.\n Feature ``i`` in the original data is mapped to features\n from ``feature_indices_[i]`` to ``feature_indices_[i+1]``\n (and then potentially masked by `active_features_` afterwards)\n\n n_values_ : array of shape (n_features,)\n Maximum number of values per feature.\n\n Examples\n --------\n Given a dataset with three features and two samples, we let the encoder\n find the maximum value per feature and transform the data to a binary\n one-hot encoding.\n\n >>> from sklearn.preprocessing import OneHotEncoder\n >>> enc = OneHotEncoder()\n >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \\\n[1, 0, 2]]) # doctest: +ELLIPSIS\n OneHotEncoder(categorical_features='all', dtype=<... 
'float'>,\n handle_unknown='error', n_values='auto', sparse=True)\n >>> enc.n_values_\n array([2, 3, 4])\n >>> enc.feature_indices_\n array([0, 2, 5, 9])\n >>> enc.transform([[0, 1, 1]]).toarray()\n array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])\n\n See also\n --------\n sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of\n dictionary items (also handles string-valued features).\n sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot\n encoding of dictionary items or strings.\n \"\"\"\n def __init__(self, n_values=\"auto\", categorical_features=\"all\",\n dtype=np.float, sparse=True, handle_unknown='error'):\n self.n_values = n_values\n self.categorical_features = categorical_features\n self.dtype = dtype\n self.sparse = sparse\n self.handle_unknown = handle_unknown\n\n def fit(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X.\n\n Parameters\n ----------\n X : array-like, shape=(n_samples, n_feature)\n Input array of type int.\n\n Returns\n -------\n self\n \"\"\"\n self.fit_transform(X)\n return self\n\n def _fit_transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"X needs to contain only non-negative integers.\")\n n_samples, n_features = X.shape\n if self.n_values == 'auto':\n n_values = np.max(X, axis=0) + 1\n elif isinstance(self.n_values, numbers.Integral):\n if (np.max(X, axis=0) >= self.n_values).any():\n raise ValueError(\"Feature out of bounds for n_values=%d\"\n % self.n_values)\n n_values = np.empty(n_features, dtype=np.int)\n n_values.fill(self.n_values)\n else:\n try:\n n_values = np.asarray(self.n_values, dtype=int)\n except (ValueError, TypeError):\n raise TypeError(\"Wrong type for parameter `n_values`. Expected\"\n \" 'auto', int or array of ints, got %r\"\n % type(X))\n if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:\n raise ValueError(\"Shape mismatch: if n_values is an array,\"\n \" it has to be of shape (n_features,).\")\n\n self.n_values_ = n_values\n n_values = np.hstack([[0], n_values])\n indices = np.cumsum(n_values)\n self.feature_indices_ = indices\n\n column_indices = (X + indices[:-1]).ravel()\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)\n data = np.ones(n_samples * n_features)\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n\n if self.n_values == 'auto':\n mask = np.array(out.sum(axis=0)).ravel() != 0\n active_features = np.where(mask)[0]\n out = out[:, active_features]\n self.active_features_ = active_features\n\n return out if self.sparse else out.toarray()\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X, then transform X.\n\n Equivalent to self.fit(X).transform(X), but more convenient and more\n efficient. 
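A small editor's sketch on toy data (hypothetical example; exact output\n        formatting varies across NumPy versions)::\n\n            >>> enc = OneHotEncoder(sparse=False)\n            >>> enc.fit_transform([[0], [1], [2]])  # doctest: +SKIP\n            array([[ 1.,  0.,  0.],\n                   [ 0.,  1.,  0.],\n                   [ 0.,  0.,  1.]])\n\n        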
See fit for the parameters, transform for the return value.\n        \"\"\"\n        return _transform_selected(X, self._fit_transform,\n                                   self.categorical_features, copy=True)\n\n    def _transform(self, X):\n        \"\"\"Assumes X contains only categorical features.\"\"\"\n        X = check_array(X, dtype=np.int)\n        if np.any(X < 0):\n            raise ValueError(\"X needs to contain only non-negative integers.\")\n        n_samples, n_features = X.shape\n\n        indices = self.feature_indices_\n        if n_features != indices.shape[0] - 1:\n            raise ValueError(\"X has different shape than during fitting.\"\n                             \" Expected %d, got %d.\"\n                             % (indices.shape[0] - 1, n_features))\n\n        # We use only those categorical features of X that are known from fit,\n        # i.e. those less than n_values_, via mask.\n        # This means, if self.handle_unknown is \"ignore\", the row_indices and\n        # col_indices corresponding to the unknown categorical feature are\n        # ignored.\n        mask = (X < self.n_values_).ravel()\n        if np.any(~mask):\n            if self.handle_unknown not in ['error', 'ignore']:\n                raise ValueError(\"handle_unknown should be either 'error' or \"\n                                 \"'ignore', got %s\" % self.handle_unknown)\n            if self.handle_unknown == 'error':\n                raise ValueError(\"unknown categorical feature present %s \"\n                                 \"during transform.\" % X[~mask])\n\n        column_indices = (X + indices[:-1]).ravel()[mask]\n        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n                                n_features)[mask]\n        data = np.ones(np.sum(mask))\n        out = sparse.coo_matrix((data, (row_indices, column_indices)),\n                                shape=(n_samples, indices[-1]),\n                                dtype=self.dtype).tocsr()\n        if self.n_values == 'auto':\n            out = out[:, self.active_features_]\n\n        return out if self.sparse else out.toarray()\n\n    def transform(self, X):\n        \"\"\"Transform X using one-hot encoding.\n\n        Parameters\n        ----------\n        X : array-like, shape=(n_samples, n_features)\n            Input array of type int.\n\n        Returns\n        -------\n        X_out : sparse matrix if sparse=True else a 2-d array, dtype=int\n            Transformed input.\n        \"\"\"\n        return _transform_selected(X, self._transform,\n                                   self.categorical_features, copy=True)\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475280,"cells":{"repo_name":{"kind":"string","value":"griffincalme/LineweaverBurkKinematics"},"path":{"kind":"string","value":"CompareTwoEnzymes.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2335"},"content":{"kind":"string","value":"#Griffin Calme\r\n#Python 3.5.0\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\n#Raw Data\r\ns = np.array([1., 3., 5., 10., 20., 50., 100.]) #substrate concentration (micromoles)\r\nvA = np.array([0.67, 1.20, 1.43, 1.67, 1.82, 1.92, 1.96]) #velocity of enzyme A at corresponding concentration (micromoles/sec)\r\nvB = np.array([0.40, 0.86, 1.11, 1.43, 1.67, 1.85, 1.92]) #velocity of enzyme B at corresponding concentration (micromoles/sec)\r\n\r\n#take reciprocal of Michaelis-Menten to plot in Lineweaver-Burk\r\nRecip_s = np.reciprocal(s)\r\nRecip_vA = np.reciprocal(vA)\r\nRecip_vB = np.reciprocal(vB)\r\n\r\n#Calculate linear regression\r\nslopeA, interceptA, r_valueA, p_valueA, std_errA = stats.linregress(Recip_s, Recip_vA)\r\nslopeB, interceptB, r_valueB, p_valueB, std_errB = stats.linregress(Recip_s, Recip_vB)\r\n\r\n#Draw linear regression A (vectorized, no loop needed)\r\nxA = np.linspace(-1., 2., 1000)\r\nyA = (slopeA * xA) + interceptA\r\n\r\n#Draw linear regression B\r\nxB = np.linspace(-1., 2., 1000)\r\nyB = (slopeB * xB) + interceptB\r\n\r\n\r\n#Plot 1/Vmax A\r\nplt.scatter(0, interceptA, 
color='red')\r\nprint(\"\\n----Values for vA----\")\r\nprint(\"1/Vmax A = \", interceptA)\r\nprint(\"Vmax A = \", 1/interceptA)\r\n\r\n#Plot -1/Km A\r\nxinterceptA = ((0 - interceptA)/slopeA)\r\nplt.scatter(xinterceptA, 0, color='red')\r\nprint(\"\\n-1/Km A = \", xinterceptA)\r\nKmA = (-1 / xinterceptA)\r\nprint(\"Km A = \", KmA)\r\nprint(\"\\nKm/Vmax A (slope A): \", slopeA)\r\n\r\n\r\n#Plot 1/Vmax B\r\nplt.scatter(0, interceptB, color='green')\r\nprint(\"\\n\\n----Values for vB----\")\r\nprint(\"1/Vmax B = \", interceptB)\r\nprint(\"Vmax B = \", 1/interceptB)\r\n\r\n#Plot -1/Km B\r\nxinterceptB = ((0 - interceptB)/slopeB)\r\nplt.scatter(xinterceptB, 0, color='green')\r\nprint(\"\\n-1/Km B = \", xinterceptB)\r\nKmB = (-1 / xinterceptB)\r\nprint(\"Km B = \", KmB)\r\nprint(\"\\nKm/Vmax B (slope B): \", slopeB)\r\n\r\n\r\n#Draw x & y origins\r\nplt.axhline(0, color='black')\r\nplt.axvline(0, color='black')\r\n\r\n#Graph scatter points\r\nplt.scatter(Recip_s, Recip_vA, color='blue')\r\nplt.scatter(Recip_s, Recip_vB, color='purple')\r\n#Graph linear regression\r\nplt.plot(xA, yA, label='A', color='blue', linestyle='--')\r\nplt.plot(xB, yB, label='B', color='purple', linestyle=':')\r\n\r\n#Titles and labels (both axes are reciprocals, so the units are reciprocals too)\r\nplt.xlabel('1/[S] (1/$\\mu$M)')\r\nplt.ylabel('1/v (s/$\\mu$M)')\r\nplt.title('Lineweaver-Burk')\r\nplt.legend()\r\nplt.show()\r\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475281,"cells":{"repo_name":{"kind":"string","value":"redebian/documentation"},"path":{"kind":"string","value":"django/contrib/comments/views/utils.py"},"copies":{"kind":"string","value":"192"},"size":{"kind":"string","value":"1947"},"content":{"kind":"string","value":"\"\"\"\nA few bits of helper functions for comment views.\n\"\"\"\n\nimport urllib\nimport textwrap\nfrom django.http import HttpResponseRedirect\nfrom django.core import urlresolvers\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib import comments\n\ndef next_redirect(data, default, default_view, **get_kwargs):\n \"\"\"\n Handle the \"where should I go next?\" part of comment views.\n\n The next value could be a kwarg to the function (``default``), or a\n ``?next=...`` GET arg, or the URL of a given view (``default_view``). See\n the view modules for examples.\n\n Returns an ``HttpResponseRedirect``.\n \"\"\"\n next = data.get(\"next\", default)\n if next is None:\n next = urlresolvers.reverse(default_view)\n if get_kwargs:\n if '#' in next:\n tmp = next.rsplit('#', 1)\n next = tmp[0]\n anchor = '#' + tmp[1]\n else:\n anchor = ''\n\n joiner = ('?' 
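# pre-Python-2.5 and/or conditional idiom: '&' when the URL already has a query string, else '?'\n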
in next) and '&' or '?'\n next += joiner + urllib.urlencode(get_kwargs) + anchor\n return HttpResponseRedirect(next)\n\ndef confirmation_view(template, doc=\"Display a confirmation view.\"):\n \"\"\"\n Confirmation view generator for the \"comment was\n posted/flagged/deleted/approved\" views.\n \"\"\"\n def confirmed(request):\n comment = None\n if 'c' in request.GET:\n try:\n comment = comments.get_model().objects.get(pk=request.GET['c'])\n except (ObjectDoesNotExist, ValueError):\n pass\n return render_to_response(template,\n {'comment': comment},\n context_instance=RequestContext(request)\n )\n\n confirmed.__doc__ = textwrap.dedent(\"\"\"\\\n %s\n\n Templates: `%s`\n Context:\n comment\n The posted comment\n \"\"\" % (doc, template)\n )\n return confirmed\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475282,"cells":{"repo_name":{"kind":"string","value":"google-research/dice_rl"},"path":{"kind":"string","value":"google/scripts/xm_run_tabular_teq_dice.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2698"},"content":{"kind":"string","value":"# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport os\n\nfrom google3.learning.deepmind.xmanager import hyper\nimport google3.learning.deepmind.xmanager2.client.google as xm\n\nflags.DEFINE_string('exp_name', 'teqdice', 'Name of experiment.')\nflags.DEFINE_string('cell', None, 'The cell to run jobs in.')\nflags.DEFINE_integer('priority', 25, 'Priority to run job as.')\n\nflags.DEFINE_string('env_name', 'grid', 'Environment name.')\nflags.DEFINE_integer('num_seeds', 20, 'Number of random seeds.')\nflags.DEFINE_string('load_dir', '/cns/is-d/home/sherryy/teqdice/data/',\n 'Directory to load dataset from.')\nflags.DEFINE_string('save_dir', '/cns/is-d/home/sherryy/teqdice/results/r=2/',\n 'Directory to save the results to.')\n\nFLAGS = flags.FLAGS\n\n\ndef build_experiment():\n runtime_worker = xm.Borg(\n cell=FLAGS.cell,\n priority=FLAGS.priority,\n )\n executable = xm.BuildTarget(\n '//third_party/py/dice_rl/google/scripts:run_tabular_teq_dice',\n build_flags=['-c', 'opt', '--copt=-mavx'],\n args=[\n ('env_name', FLAGS.env_name),\n ('load_dir', FLAGS.load_dir),\n ('save_dir', os.path.join(FLAGS.save_dir, FLAGS.exp_name)),\n ('max_trajectory_length_train', 50),\n ('num_trajectory', 1000),\n ],\n platform=xm.Platform.CPU,\n runtime=runtime_worker)\n\n parameters = hyper.product([\n hyper.sweep('seed', hyper.discrete(list(range(FLAGS.num_seeds)))),\n hyper.sweep('step_encoding', hyper.categorical([None, 'one_hot'])),\n hyper.sweep('max_trajectory_length', hyper.discrete([5, 10, 20, 50])),\n ])\n experiment = xm.ParameterSweep(executable, parameters)\n return experiment\n\n\ndef main(_):\n \"\"\"Launch the experiment using the arguments from the command line.\"\"\"\n description = xm.ExperimentDescription(\n 
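# the experiment is named from the --exp_name flag and tagged with the environment name\n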
FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)\n\n\nif __name__ == '__main__':\n app.run(main)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475283,"cells":{"repo_name":{"kind":"string","value":"tuestudy/ipsc"},"path":{"kind":"string","value":"2014/B/gsong-easy.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2748"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport unittest\nimport sys\n\nclass RandomGenerator:\n def __init__(self, arr):\n self.randoms = arr\n self.curr = 43\n self.c = [0 for x in xrange(43)]\n\n def random(self):\n r = 43\n s = 22\n m = 2 ** 32\n val = self.randoms[self.curr - s] - self.randoms[self.curr - r] - self.c[self.curr - 1]\n self.randoms.append(val % m)\n self.c.append(1 if val < 0 else 0)\n self.curr += 1\n return self.randoms[self.curr - 1]\n\ndef applymove(diagram, move, rg):\n if move not in ['l', 'r']:\n return\n lastval = -1\n newdiagram = []\n if move == 'l':\n for val in diagram:\n if val == 0:\n continue\n if val == lastval:\n newdiagram[-1] = val + lastval\n lastval = -1\n else:\n newdiagram.append(val)\n lastval = val\n else:\n for val in reversed(diagram):\n if val == 0:\n continue\n if val == lastval:\n newdiagram[0] = val + lastval\n lastval = -1\n else:\n newdiagram.insert(0, val)\n lastval = val\n\n num_empty = len(diagram) - len(newdiagram)\n if num_empty == 0:\n return newdiagram\n\n emptycells = [0 for _ in xrange(num_empty)]\n offset = 0\n\n if move == 'l':\n offset = len(newdiagram)\n newdiagram.extend(emptycells)\n else:\n emptycells.extend(newdiagram)\n newdiagram = emptycells\n\n if diagram == newdiagram:\n return newdiagram\n\n (new_value, pos) = getRandomNumber(rg, num_empty)\n newdiagram[offset + pos] = new_value\n return newdiagram\n\ndef getRandomNumber(rg, num_empty):\n pos = rg.random() % num_empty\n new_value = 0\n if rg.random() % 10 == 0:\n new_value = 4\n else:\n new_value = 2\n return (new_value, pos)\n\n\nclass Test(unittest.TestCase):\n def test_def(self):\n pass\n\ndef main(stdin, stdout):\n casecount = int(stdin.readline())\n for case in range(casecount):\n stdin.readline()\n strip_size = int(stdin.readline().strip())\n diagram = [int(x) for x in stdin.readline().strip().split()]\n\n arr = [int(x) for x in stdin.readline().strip().split()]\n rg = RandomGenerator(arr)\n\n mode = 'b1'\n if mode == 'b1':\n moves_count = stdin.readline()\n moves = stdin.readline().strip()\n # b1 solution\n for move in moves:\n diagram = applymove(diagram, move, rg)\n print ' '.join(str(x) for x in diagram)\n else:\n # b2 solution\n pass\n\nif __name__ == '__main__':\n main(sys.stdin, sys.stdout)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475284,"cells":{"repo_name":{"kind":"string","value":"talhajaved/nyuadmarket"},"path":{"kind":"string","value":"flask/lib/python2.7/site-packages/mako/pygen.py"},"copies":{"kind":"string","value":"15"},"size":{"kind":"string","value":"9447"},"content":{"kind":"string","value":"# mako/pygen.py\n# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>\n#\n# This module is part of Mako and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\"\"\"utilities for generating and formatting literal Python code.\"\"\"\n\nimport re\nfrom mako import exceptions\n\nclass PythonPrinter(object):\n def __init__(self, stream):\n # indentation counter\n self.indent = 0\n\n # a stack storing information about why we incremented\n 
# the indentation counter, to help us determine if we\n # should decrement it\n self.indent_detail = []\n\n # the string of whitespace multiplied by the indent\n # counter to produce a line\n self.indentstring = \" \"\n\n # the stream we are writing to\n self.stream = stream\n\n # a list of lines that represents a buffered \"block\" of code,\n # which can be later printed relative to an indent level\n self.line_buffer = []\n\n self.in_indent_lines = False\n\n self._reset_multi_line_flags()\n\n def write(self, text):\n self.stream.write(text)\n\n def write_indented_block(self, block):\n \"\"\"print a line or lines of python which already contain indentation.\n\n The indentation of the total block of lines will be adjusted to that of\n the current indent level.\"\"\"\n self.in_indent_lines = False\n for l in re.split(r'\\r?\\n', block):\n self.line_buffer.append(l)\n\n def writelines(self, *lines):\n \"\"\"print a series of lines of python.\"\"\"\n for line in lines:\n self.writeline(line)\n\n def writeline(self, line):\n \"\"\"print a line of python, indenting it according to the current\n indent level.\n\n this also adjusts the indentation counter according to the\n content of the line.\n\n \"\"\"\n\n if not self.in_indent_lines:\n self._flush_adjusted_lines()\n self.in_indent_lines = True\n\n if (line is None or\n re.match(r\"^\\s*#\", line) or\n re.match(r\"^\\s*$\", line)\n ):\n hastext = False\n else:\n hastext = True\n\n is_comment = line and len(line) and line[0] == '#'\n\n # see if this line should decrease the indentation level\n if (not is_comment and\n (not hastext or self._is_unindentor(line))\n ):\n\n if self.indent > 0:\n self.indent -= 1\n # if the indent_detail stack is empty, the user\n # probably put extra closures - the resulting\n # module won't compile.\n if len(self.indent_detail) == 0:\n raise exceptions.SyntaxException(\n \"Too many whitespace closures\")\n self.indent_detail.pop()\n\n if line is None:\n return\n\n # write the line\n self.stream.write(self._indent_line(line) + \"\\n\")\n\n # see if this line should increase the indentation level.\n # note that a line can both decrease (before printing) and\n # then increase (after printing) the indentation level.\n\n if re.search(r\":[ \\t]*(?:#.*)?$\", line):\n # increment indentation count, and also\n # keep track of what the keyword was that indented us,\n # if it is a python compound statement keyword\n # where we might have to look for an \"unindent\" keyword\n match = re.match(r\"^\\s*(if|try|elif|while|for|with)\", line)\n if match:\n # it's a \"compound\" keyword, so we will check for \"unindentors\"\n indentor = match.group(1)\n self.indent += 1\n self.indent_detail.append(indentor)\n else:\n indentor = None\n # it's not a \"compound\" keyword. but let's also\n # test for valid Python keywords that might be indenting us,\n # else assume it's a non-indenting line\n m2 = re.match(r\"^\\s*(def|class|else|elif|except|finally)\",\n line)\n if m2:\n self.indent += 1\n self.indent_detail.append(indentor)\n\n def close(self):\n \"\"\"close this printer, flushing any remaining lines.\"\"\"\n self._flush_adjusted_lines()\n\n def _is_unindentor(self, line):\n \"\"\"return true if the given line is an 'unindentor',\n relative to the last 'indent' event received.\n\n \"\"\"\n\n # no indentation detail has been pushed on; return False\n if len(self.indent_detail) == 0:\n return False\n\n indentor = self.indent_detail[-1]\n\n # the last indent keyword we grabbed is not a\n # compound statement keyword; return False\n if indentor is None:\n return False\n\n # if the current line doesn't have one of the \"unindentor\" keywords,\n # return False\n match = re.match(r\"^\\s*(else|elif|except|finally).*\\:\", line)\n if not match:\n return False\n\n # whitespace matches up, we have a compound indentor,\n # and this line has an unindentor, this\n # is probably good enough\n return True\n\n # should we decide that it's not good enough, here's\n # more stuff to check.\n #keyword = match.group(1)\n\n # match the original indent keyword\n #for crit in [\n # (r'if|elif', r'else|elif'),\n # (r'try', r'except|finally|else'),\n # (r'while|for', r'else'),\n #]:\n # if re.match(crit[0], indentor) and re.match(crit[1], keyword):\n # return True\n\n #return False\n\n def _indent_line(self, line, stripspace=''):\n \"\"\"indent the given line according to the current indent level.\n\n stripspace is a string of space that will be truncated from the\n start of the line before indenting.\"\"\"\n\n return re.sub(r\"^%s\" % stripspace, self.indentstring\n * self.indent, line)\n\n def _reset_multi_line_flags(self):\n \"\"\"reset the flags which would indicate we are in a backslashed\n or triple-quoted section.\"\"\"\n\n self.backslashed, self.triplequoted = False, False\n\n def _in_multi_line(self, line):\n \"\"\"return true if the given line is part of a multi-line block,\n via backslash or triple-quote.\"\"\"\n\n # we are only looking for explicitly joined lines here, not\n # implicit ones (i.e. brackets, braces etc.). 
this is just to\n # guard against the possibility of modifying the space inside of\n # a literal multiline string with unfortunately placed\n # whitespace\n\n current_state = (self.backslashed or self.triplequoted)\n\n if re.search(r\"\\\\$\", line):\n self.backslashed = True\n else:\n self.backslashed = False\n\n triples = len(re.findall(r\"\\\"\\\"\\\"|\\'\\'\\'\", line))\n if triples == 1 or triples % 2 != 0:\n self.triplequoted = not self.triplequoted\n\n return current_state\n\n def _flush_adjusted_lines(self):\n stripspace = None\n self._reset_multi_line_flags()\n\n for entry in self.line_buffer:\n if self._in_multi_line(entry):\n self.stream.write(entry + \"\\n\")\n else:\n entry = entry.expandtabs()\n if stripspace is None and re.search(r\"^[ \\t]*[^# \\t]\", entry):\n stripspace = re.match(r\"^([ \\t]*)\", entry).group(1)\n self.stream.write(self._indent_line(entry, stripspace) + \"\\n\")\n\n self.line_buffer = []\n self._reset_multi_line_flags()\n\n\ndef adjust_whitespace(text):\n \"\"\"remove the left-whitespace margin of a block of Python code.\"\"\"\n\n state = [False, False]\n (backslashed, triplequoted) = (0, 1)\n\n def in_multi_line(line):\n start_state = (state[backslashed] or state[triplequoted])\n\n if re.search(r\"\\\\$\", line):\n state[backslashed] = True\n else:\n state[backslashed] = False\n\n def match(reg, t):\n m = re.match(reg, t)\n if m:\n return m, t[len(m.group(0)):]\n else:\n return None, t\n\n while line:\n if state[triplequoted]:\n m, line = match(r\"%s\" % state[triplequoted], line)\n if m:\n state[triplequoted] = False\n else:\n m, line = match(r\".*?(?=%s|$)\" % state[triplequoted], line)\n else:\n m, line = match(r'#', line)\n if m:\n return start_state\n\n m, line = match(r\"\\\"\\\"\\\"|\\'\\'\\'\", line)\n if m:\n state[triplequoted] = m.group(0)\n continue\n\n m, line = match(r\".*?(?=\\\"\\\"\\\"|\\'\\'\\'|#|$)\", line)\n\n return start_state\n\n def _indent_line(line, stripspace = ''):\n return re.sub(r\"^%s\" % stripspace, '', line)\n\n lines = []\n stripspace = None\n\n for line in re.split(r'\\r?\\n', text):\n if in_multi_line(line):\n lines.append(line)\n else:\n line = line.expandtabs()\n if stripspace is None and re.search(r\"^[ \\t]*[^# \\t]\", line):\n stripspace = re.match(r\"^([ \\t]*)\", line).group(1)\n lines.append(_indent_line(line, stripspace))\n return \"\\n\".join(lines)\n"},"license":{"kind":"string","value":"mit"}}},{"rowIdx":475285,"cells":{"repo_name":{"kind":"string","value":"ClusterLabs/pacemaker-1.0"},"path":{"kind":"string","value":"cts/CTSscenarios.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"16516"},"content":{"kind":"string","value":"from CTS import *\nfrom CTStests import CTSTest\nfrom CTSaudits import ClusterAudit\nclass ScenarioComponent:\n\n def __init__(self, Env):\n self.Env = Env\n\n def IsApplicable(self):\n '''Return TRUE if the current ScenarioComponent is applicable\n in the LabEnvironment given to the constructor.\n '''\n\n raise ValueError(\"Abstract Class member (IsApplicable)\")\n\n def SetUp(self, CM):\n '''Set up the given ScenarioComponent'''\n raise ValueError(\"Abstract Class member (SetUp)\")\n\n def TearDown(self, CM):\n '''Tear down (undo) the given ScenarioComponent'''\n raise ValueError(\"Abstract Class member (TearDown)\")\n \nclass Scenario:\n (\n'''The basic idea of a scenario is that of an ordered list of\nScenarioComponent objects. 
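(InitClusterManager and PingFest, defined below, are examples.)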
Each ScenarioComponent is SetUp() in turn,\nand then after the tests have been run, they are torn down using TearDown()\n(in reverse order).\n\nA Scenario is applicable to a particular cluster manager iff each\nScenarioComponent is applicable.\n\nA partially set up scenario is torn down if it fails during setup.\n''')\n\n def __init__(self, ClusterManager, Components, Audits, Tests):\n\n \"Initialize the Scenario from the list of ScenarioComponents\"\n self.ClusterManager = ClusterManager\n self.Components = Components\n self.Audits = Audits\n self.Tests = Tests\n\n self.BadNews = None\n self.TestSets = []\n self.Stats = {\"success\":0, \"failure\":0, \"BadNews\":0, \"skipped\":0}\n self.Sets = []\n\n #self.ns=CTS.NodeStatus(self.Env)\n\n for comp in Components:\n if not issubclass(comp.__class__, ScenarioComponent):\n raise ValueError(\"Init value must be subclass of ScenarioComponent\")\n\n for audit in Audits:\n if not issubclass(audit.__class__, ClusterAudit):\n raise ValueError(\"Init value must be subclass of ClusterAudit\")\n\n for test in Tests:\n if not issubclass(test.__class__, CTSTest):\n raise ValueError(\"Init value must be a subclass of CTSTest\")\n\n def IsApplicable(self):\n (\n'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()\n'''\n )\n\n for comp in self.Components:\n if not comp.IsApplicable():\n return None\n return 1\n\n def SetUp(self):\n '''Set up the Scenario. Return TRUE on success.'''\n\n self.ClusterManager.prepare()\n self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env[\"nodes\"])\n\n self.audit()\n if self.ClusterManager.Env[\"valgrind-tests\"]:\n self.ClusterManager.install_helper(\"cts.supp\")\n\n self.BadNews = LogWatcher(self.ClusterManager.Env, \n self.ClusterManager[\"LogFileName\"], \n self.ClusterManager[\"BadRegexes\"], \"BadNews\", 0)\n self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit\n\n j=0\n while j < len(self.Components):\n if not self.Components[j].SetUp(self.ClusterManager):\n # OOPS! We failed. 
Tear partial setups down.\n self.audit()\n self.ClusterManager.log(\"Tearing down partial setup\")\n self.TearDown(j)\n return None\n j=j+1\n\n self.audit()\n return 1\n\n def TearDown(self, max=None):\n\n '''Tear Down the Scenario - in reverse order.'''\n\n if max is None:\n max = len(self.Components)-1\n j=max\n while j >= 0:\n self.Components[j].TearDown(self.ClusterManager)\n j=j-1\n\n self.audit()\n\n def incr(self, name):\n '''Increment (or initialize) the value associated with the given name'''\n if not self.Stats.has_key(name):\n self.Stats[name]=0\n self.Stats[name] = self.Stats[name]+1\n\n def run(self, Iterations):\n self.ClusterManager.oprofileStart() \n try:\n self.run_loop(Iterations)\n self.ClusterManager.oprofileStop()\n except:\n self.ClusterManager.oprofileStop()\n raise\n\n def run_loop(self, Iterations):\n raise ValueError(\"Abstract Class member (run_loop)\")\n\n def run_test(self, test, testcount):\n nodechoice = self.ClusterManager.Env.RandomNode()\n \n ret = 1\n where = \"\"\n did_run = 0\n \n self.ClusterManager.log((\"Running test %s\" % test.name).ljust(35) + (\" (%s) \" % nodechoice).ljust(15) +\"[\"+ (\"%d\" % testcount).rjust(3) +\"]\")\n\n starttime = test.set_timer()\n if not test.setup(nodechoice):\n self.ClusterManager.log(\"Setup failed\")\n ret = 0\n \n elif not test.canrunnow(nodechoice):\n self.ClusterManager.log(\"Skipped\")\n test.skipped()\n\n else:\n did_run = 1\n ret = test(nodechoice)\n\n if not test.teardown(nodechoice):\n self.ClusterManager.log(\"Teardown failed\")\n answer = raw_input('Continue? [nY] ')\n if answer and answer == \"n\":\n raise ValueError(\"Teardown of %s on %s failed\" % (test.name, nodechoice))\n ret = 0\n\n stoptime=time.time()\n self.ClusterManager.oprofileSave(testcount)\n\n elapsed_time = stoptime - starttime\n test_time = stoptime - test.get_timer()\n if not test.has_key(\"min_time\"):\n test[\"elapsed_time\"] = elapsed_time\n test[\"min_time\"] = test_time\n test[\"max_time\"] = test_time\n else:\n test[\"elapsed_time\"] = test[\"elapsed_time\"] + elapsed_time\n if test_time < test[\"min_time\"]:\n test[\"min_time\"] = test_time\n if test_time > test[\"max_time\"]:\n test[\"max_time\"] = test_time\n \n if ret:\n self.incr(\"success\")\n test.log_timer()\n else:\n self.incr(\"failure\")\n self.ClusterManager.statall()\n did_run = 1 # Force the test count to be incremented anyway so test extraction works\n\n self.audit(test.errorstoignore())\n return did_run\n\n def summarize(self):\n self.ClusterManager.log(\"****************\")\n self.ClusterManager.log(\"Overall Results:\" + repr(self.Stats))\n self.ClusterManager.log(\"****************\")\n\n stat_filter = { \n \"calls\":0,\n \"failure\":0,\n \"skipped\":0,\n \"auditfail\":0,\n }\n self.ClusterManager.log(\"Test Summary\")\n for test in self.Tests:\n for key in stat_filter.keys():\n stat_filter[key] = test.Stats[key]\n self.ClusterManager.log((\"Test %s: \"%test.name).ljust(25) + \" %s\"%repr(stat_filter))\n\n self.ClusterManager.debug(\"Detailed Results\")\n for test in self.Tests:\n self.ClusterManager.debug((\"Test %s: \"%test.name).ljust(25) + \" %s\"%repr(test.Stats))\n\n self.ClusterManager.log(\"<<<<<<<<<<<<<<<< TESTS COMPLETED\")\n\n def audit(self, LocalIgnore=[]):\n errcount=0\n ignorelist = []\n ignorelist.append(\"CTS:\")\n ignorelist.extend(LocalIgnore)\n ignorelist.extend(self.ClusterManager.errorstoignore())\n\n # This makes sure everything is stabilized before starting...\n failed = 0\n for audit in self.Audits:\n if not audit():\n 
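# an audit failed: log and count it, but keep running the remaining audits\n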
self.ClusterManager.log(\"Audit \" + audit.name() + \" FAILED.\")\n failed += 1\n else:\n self.ClusterManager.debug(\"Audit \" + audit.name() + \" passed.\")\n\n while errcount < 1000:\n match = None\n if self.BadNews:\n match=self.BadNews.look(0)\n\n if match:\n add_err = 1\n for ignore in ignorelist:\n if add_err == 1 and re.search(ignore, match):\n add_err = 0\n if add_err == 1:\n self.ClusterManager.log(\"BadNews: \" + match)\n self.incr(\"BadNews\")\n errcount=errcount+1\n else:\n break\n else:\n answer = raw_input('Big problems. Continue? [nY]')\n if answer and answer == \"n\":\n self.ClusterManager.log(\"Shutting down.\")\n self.summarize()\n self.TearDown()\n raise ValueError(\"Looks like we hit a BadNews jackpot!\")\n\n return failed\n\nclass AllOnce(Scenario):\n '''Every Test Once''' # Accessable as __doc__\n def run_loop(self, Iterations):\n testcount=1\n for test in self.Tests:\n self.run_test(test, testcount)\n testcount += 1\n\nclass RandomTests(Scenario):\n '''Random Test Execution'''\n def run_loop(self, Iterations):\n testcount=1\n while testcount <= Iterations:\n test = self.ClusterManager.Env.RandomGen.choice(self.Tests)\n self.run_test(test, testcount)\n testcount += 1\n\nclass BasicSanity(Scenario):\n '''Basic Cluster Sanity'''\n def run_loop(self, Iterations):\n testcount=1\n while testcount <= Iterations:\n test = self.Environment.RandomGen.choice(self.Tests)\n self.run_test(test, testcount)\n testcount += 1\n\nclass Sequence(Scenario):\n '''Named Tests in Sequence'''\n def run_loop(self, Iterations):\n testcount=1\n while testcount <= Iterations:\n for test in self.Tests:\n self.run_test(test, testcount)\n testcount += 1\n\nclass InitClusterManager(ScenarioComponent):\n (\n'''InitClusterManager is the most basic of ScenarioComponents.\nThis ScenarioComponent simply starts the cluster manager on all the nodes.\nIt is fairly robust as it waits for all nodes to come up before starting\nas they might have been rebooted or crashed for some reason beforehand.\n''')\n def __init__(self, Env):\n pass\n\n def IsApplicable(self):\n '''InitClusterManager is so generic it is always Applicable'''\n return 1\n\n def SetUp(self, CM):\n '''Basic Cluster Manager startup. Start everything'''\n\n CM.prepare()\n\n # Clear out the cobwebs ;-)\n self.TearDown(CM)\n\n # Now start the Cluster Manager on all the nodes.\n CM.log(\"Starting Cluster Manager on all nodes.\")\n return CM.startall(verbose=True)\n\n def TearDown(self, CM):\n '''Set up the given ScenarioComponent'''\n\n # Stop the cluster manager everywhere\n\n CM.log(\"Stopping Cluster Manager on all nodes\")\n return CM.stopall(verbose=True)\n\nclass PingFest(ScenarioComponent):\n (\n'''PingFest does a flood ping to each node in the cluster from the test machine.\n\nIf the LabEnvironment Parameter PingSize is set, it will be used as the size\nof ping packet requested (via the -s option). If it is not set, it defaults\nto 1024 bytes.\n\nAccording to the manual page for ping:\n Outputs packets as fast as they come back or one hundred times per\n second, whichever is more. For every ECHO_REQUEST sent a period ``.''\n is printed, while for every ECHO_REPLY received a backspace is printed.\n This provides a rapid display of how many packets are being dropped.\n Only the super-user may use this option. 
This can be very hard on a net-\n work and should be used with caution.\n''' )\n\n def __init__(self, Env):\n self.Env = Env\n\n def IsApplicable(self):\n '''PingFests are always applicable ;-)\n '''\n\n return 1\n\n def SetUp(self, CM):\n '''Start the PingFest!'''\n\n self.PingSize=1024\n if CM.Env.has_key(\"PingSize\"):\n self.PingSize=CM.Env[\"PingSize\"]\n\n CM.log(\"Starting %d byte flood pings\" % self.PingSize)\n\n self.PingPids=[]\n for node in CM.Env[\"nodes\"]:\n self.PingPids.append(self._pingchild(node))\n\n CM.log(\"Ping PIDs: \" + repr(self.PingPids))\n return 1\n\n def TearDown(self, CM):\n '''Stop it right now! My ears are pinging!!'''\n\n for pid in self.PingPids:\n if pid != None:\n CM.log(\"Stopping ping process %d\" % pid)\n os.kill(pid, signal.SIGKILL)\n\n def _pingchild(self, node):\n\n Args = [\"ping\", \"-qfn\", \"-s\", str(self.PingSize), node]\n\n\n sys.stdin.flush()\n sys.stdout.flush()\n sys.stderr.flush()\n pid = os.fork()\n\n if pid < 0:\n self.Env.log(\"Cannot fork ping child\")\n return None\n if pid > 0:\n return pid\n\n\n # Otherwise, we're the child process.\n\n \n os.execvp(\"ping\", Args)\n self.Env.log(\"Cannot execvp ping: \" + repr(Args))\n sys.exit(1)\n\nclass PacketLoss(ScenarioComponent):\n (\n'''\nIt would be useful to do some testing of CTS with a modest amount of packet loss\nenabled - so we could see that everything runs like it should with a certain\namount of packet loss present. \n''')\n\n def IsApplicable(self):\n '''always Applicable'''\n return 1\n\n def SetUp(self, CM):\n '''Reduce the reliability of communications'''\n if float(CM.Env[\"XmitLoss\"]) == 0 and float(CM.Env[\"RecvLoss\"]) == 0 :\n return 1\n\n for node in CM.Env[\"nodes\"]:\n CM.reducecomm_node(node)\n \n CM.log(\"Reduce the reliability of communications\")\n\n return 1\n\n\n def TearDown(self, CM):\n '''Fix the reliability of communications'''\n\n if float(CM.Env[\"XmitLoss\"]) == 0 and float(CM.Env[\"RecvLoss\"]) == 0 :\n return 1\n \n for node in CM.Env[\"nodes\"]:\n CM.unisolate_node(node)\n\n CM.log(\"Fix the reliability of communications\")\n\n\nclass BasicSanityCheck(ScenarioComponent):\n (\n'''\n''')\n\n def IsApplicable(self):\n return self.Env[\"DoBSC\"]\n\n def SetUp(self, CM):\n\n CM.prepare()\n\n # Clear out the cobwebs\n self.TearDown(CM)\n\n # Now start the Cluster Manager on all the nodes.\n CM.log(\"Starting Cluster Manager on BSC node(s).\")\n return CM.startall()\n\n def TearDown(self, CM):\n CM.log(\"Stopping Cluster Manager on BSC node(s).\")\n return CM.stopall()\n\nclass Benchmark(ScenarioComponent):\n (\n'''\n''')\n\n def IsApplicable(self):\n return self.Env[\"benchmark\"]\n\n def SetUp(self, CM):\n\n CM.prepare()\n\n # Clear out the cobwebs\n self.TearDown(CM)\n\n # Now start the Cluster Manager on all the nodes.\n CM.log(\"Starting Cluster Manager on all node(s).\")\n return CM.startall()\n\n def TearDown(self, CM):\n CM.log(\"Stopping Cluster Manager on all node(s).\")\n return CM.stopall()\n\nclass RollingUpgrade(ScenarioComponent):\n (\n'''\nTest a rolling upgrade between two versions of the stack\n''')\n\n def __init__(self, Env):\n self.Env = Env\n\n def IsApplicable(self):\n if not self.Env[\"rpm-dir\"]:\n return None\n if not self.Env[\"current-version\"]:\n return None\n if not self.Env[\"previous-version\"]:\n return None\n\n return 1\n\n def install(self, node, version):\n\n target_dir = \"/tmp/rpm-%s\" % version\n src_dir = \"%s/%s\" % (self.CM.Env[\"rpm-dir\"], version)\n\n rc = self.CM.rsh(node, \"mkdir -p %s\" % target_dir)\n rc = 
self.CM.cp(\"%s/*.rpm %s:%s\" % (src_dir, node, target_dir))\n rc = self.CM.rsh(node, \"rpm -Uvh --force %s/*.rpm\" % (target_dir))\n\n return self.success()\n\n def upgrade(self, node):\n return self.install(node, self.CM.Env[\"current-version\"])\n\n def downgrade(self, node):\n return self.install(node, self.CM.Env[\"previous-version\"])\n\n def SetUp(self, CM):\n CM.prepare()\n\n # Clear out the cobwebs\n CM.stopall()\n\n CM.log(\"Downgrading all nodes to %s.\" % self.Env[\"previous-version\"])\n\n for node in self.Env[\"nodes\"]:\n if not self.downgrade(node):\n CM.log(\"Couldn't downgrade %s\" % node)\n return None\n\n return 1\n\n def TearDown(self, CM):\n # Stop everything\n CM.log(\"Stopping Cluster Manager on Upgrade nodes.\")\n CM.stopall()\n\n CM.log(\"Upgrading all nodes to %s.\" % self.Env[\"current-version\"])\n for node in self.Env[\"nodes\"]:\n if not self.upgrade(node):\n CM.log(\"Couldn't upgrade %s\" % node)\n return None\n\n return 1\n\n"},"license":{"kind":"string","value":"gpl-2.0"}}},{"rowIdx":475286,"cells":{"repo_name":{"kind":"string","value":"roshannaik/storm"},"path":{"kind":"string","value":"dev-tools/test-ns.py"},"copies":{"kind":"string","value":"23"},"size":{"kind":"string","value":"1027"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom subprocess import Popen, PIPE\nimport sys\nimport os\n\nos.chdir(\"storm-core\")\n\nns = sys.argv[1]\npipe = Popen([\"mvn\", \"test\", \"-DfailIfNoTests=false\", \"-Dtest=%s\"%ns])\npipe.wait()\n\nos.chdir(\"..\")\n\nsys.exit(pipe.returncode)\n"},"license":{"kind":"string","value":"apache-2.0"}}},{"rowIdx":475287,"cells":{"repo_name":{"kind":"string","value":"fbsder/openthread"},"path":{"kind":"string","value":"tools/harness-automation/cases/sed_9_2_10.py"},"copies":{"kind":"string","value":"16"},"size":{"kind":"string","value":"1871"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright (c) 2016, The OpenThread Authors.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
Neither the name of the copyright holder nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n\n\nimport unittest\n\nfrom autothreadharness.harness_case import HarnessCase\n\nclass SED_9_2_10(HarnessCase):\n role = HarnessCase.ROLE_SED\n case = '9 2 10'\n golden_devices_required = 4\n def on_dialog(self, dialog, title):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"bsd-3-clause"}}},{"rowIdx":475288,"cells":{"repo_name":{"kind":"string","value":"amiguez/youtube-dl"},"path":{"kind":"string","value":"test/test_swfinterp.py"},"copies":{"kind":"string","value":"158"},"size":{"kind":"string","value":"2224"},"content":{"kind":"string","value":"#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\n# Allow direct execution\nimport os\nimport sys\nimport unittest\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n\nimport errno\nimport io\nimport json\nimport re\nimport subprocess\n\nfrom youtube_dl.swfinterp import SWFInterpreter\n\n\nTEST_DIR = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'swftests')\n\n\nclass TestSWFInterpreter(unittest.TestCase):\n pass\n\n\ndef _make_testfunc(testfile):\n m = re.match(r'^(.*)\\.(as)$', testfile)\n if not m:\n return\n test_id = m.group(1)\n\n def test_func(self):\n as_file = os.path.join(TEST_DIR, testfile)\n swf_file = os.path.join(TEST_DIR, test_id + '.swf')\n if ((not os.path.exists(swf_file)) or\n os.path.getmtime(swf_file) < os.path.getmtime(as_file)):\n # Recompile\n try:\n subprocess.check_call([\n 'mxmlc', '-output', swf_file,\n '-static-link-runtime-shared-libraries', as_file])\n except OSError as ose:\n if ose.errno == errno.ENOENT:\n print('mxmlc not found! 
Skipping test.')\n return\n raise\n\n with open(swf_file, 'rb') as swf_f:\n swf_content = swf_f.read()\n swfi = SWFInterpreter(swf_content)\n\n with io.open(as_file, 'r', encoding='utf-8') as as_f:\n as_content = as_f.read()\n\n def _find_spec(key):\n m = re.search(\n r'(?m)^//\\s*%s:\\s*(.*?)\\n' % re.escape(key), as_content)\n if not m:\n raise ValueError('Cannot find %s in %s' % (key, testfile))\n return json.loads(m.group(1))\n\n input_args = _find_spec('input')\n output = _find_spec('output')\n\n swf_class = swfi.extract_class(test_id)\n func = swfi.extract_function(swf_class, 'main')\n res = func(input_args)\n self.assertEqual(res, output)\n\n test_func.__name__ = str('test_swf_' + test_id)\n setattr(TestSWFInterpreter, test_func.__name__, test_func)\n\n\nfor testfile in os.listdir(TEST_DIR):\n _make_testfunc(testfile)\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"unlicense"}}},{"rowIdx":475289,"cells":{"repo_name":{"kind":"string","value":"gauravbose/digital-menu"},"path":{"kind":"string","value":"digimenu2/django/contrib/admindocs/urls.py"},"copies":{"kind":"string","value":"574"},"size":{"kind":"string","value":"1183"},"content":{"kind":"string","value":"from django.conf.urls import url\nfrom django.contrib.admindocs import views\n\nurlpatterns = [\n url('^$',\n views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),\n name='django-admindocs-docroot'),\n url('^bookmarklets/$',\n views.BookmarkletsView.as_view(),\n name='django-admindocs-bookmarklets'),\n url('^tags/$',\n views.TemplateTagIndexView.as_view(),\n name='django-admindocs-tags'),\n url('^filters/$',\n views.TemplateFilterIndexView.as_view(),\n name='django-admindocs-filters'),\n url('^views/$',\n views.ViewIndexView.as_view(),\n name='django-admindocs-views-index'),\n url('^views/(?P[^/]+)/$',\n views.ViewDetailView.as_view(),\n name='django-admindocs-views-detail'),\n url('^models/$',\n views.ModelIndexView.as_view(),\n name='django-admindocs-models-index'),\n url('^models/(?P[^\\.]+)\\.(?P[^/]+)/$',\n views.ModelDetailView.as_view(),\n name='django-admindocs-models-detail'),\n url('^templates/(?P