{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); '\n\ndef back_button(back_page = 'back_to_login.py'):\n return \"\"\"
\n \n
\"\"\" % back_page\n\ndef link(url):\n print content_type()\n print \"\"\"\n \n \n \n \n \n \n \n \"\"\" % url\n\ndef output_error(message, back_page):\n print content_type()\n print html_header('Error')\n print '

An error occured


'\n print '

the error message is : %s


' % message\n print back_button(back_page)\n print html_tail()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":612,"cells":{"__id__":{"kind":"number","value":1958505089897,"string":"1,958,505,089,897"},"blob_id":{"kind":"string","value":"e24f90ad40ee46ae80b0a36c5bebb049d5db53c3"},"directory_id":{"kind":"string","value":"25f36d92ed3bfb3b341848360f13caff8661ea3c"},"path":{"kind":"string","value":"/traitlets/test/test_text.py"},"content_id":{"kind":"string","value":"c0d758c65f2b09901b2e213b6be2ea1a9933d6db"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"uservidya/traitlets"},"repo_url":{"kind":"string","value":"https://github.com/uservidya/traitlets"},"snapshot_id":{"kind":"string","value":"aff1c3550e0cd9aed6aafde64fbc6a414d637335"},"revision_id":{"kind":"string","value":"171e400914f8d3ca5584ca27fafafda1c4c1fe3a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T17:03:21.123486","string":"2021-01-22T17:03:21.123486"},"revision_date":{"kind":"timestamp","value":"2013-12-11T07:49:37","string":"2013-12-11T07:49:37"},"committer_date":{"kind":"timestamp","value":"2013-12-11T07:49:37","string":"2013-12-11T07:49:37"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_c
ount":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# encoding: utf-8\n\"\"\"Tests for traitlets.text\"\"\"\nfrom __future__ import print_function\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport math\nimport random\n\nimport nose.tools as nt\n\nfrom traitlets import text\n\n#-----------------------------------------------------------------------------\n# Globals\n#-----------------------------------------------------------------------------\n\ndef eval_formatter_check(f):\n ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u\"café\", b=\"café\")\n s = f.format(\"{n} {n//4} {stuff.split()[0]}\", **ns)\n nt.assert_equal(s, \"12 3 hello\")\n s = f.format(' '.join(['{n//%i}'%i for i in range(1,8)]), **ns)\n nt.assert_equal(s, \"12 6 4 3 2 2 1\")\n s = f.format('{[n//i for i in range(1,8)]}', **ns)\n nt.assert_equal(s, \"[12, 6, 4, 3, 2, 2, 1]\")\n s = f.format(\"{stuff!s}\", **ns)\n nt.assert_equal(s, ns['stuff'])\n s = f.format(\"{stuff!r}\", **ns)\n nt.assert_equal(s, repr(ns['stuff']))\n \n # Check with unicode:\n s = f.format(\"{u}\", **ns)\n nt.assert_equal(s, ns['u'])\n # This decodes in a platform dependent manner, but it shouldn't error out\n s = f.format(\"{b}\", **ns)\n \n nt.assert_raises(NameError, f.format, '{dne}', **ns)\n\ndef eval_formatter_slicing_check(f):\n ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)\n s = 
f.format(\" {stuff.split()[:]} \", **ns)\n nt.assert_equal(s, \" ['hello', 'there'] \")\n s = f.format(\" {stuff.split()[::-1]} \", **ns)\n nt.assert_equal(s, \" ['there', 'hello'] \")\n s = f.format(\"{stuff[::2]}\", **ns)\n nt.assert_equal(s, ns['stuff'][::2])\n \n nt.assert_raises(SyntaxError, f.format, \"{n:x}\", **ns)\n\ndef eval_formatter_no_slicing_check(f):\n ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)\n \n s = f.format('{n:x} {pi**2:+f}', **ns)\n nt.assert_equal(s, \"c +9.869604\")\n \n s = f.format('{stuff[slice(1,4)]}', **ns)\n nt.assert_equal(s, 'ell')\n \n nt.assert_raises(SyntaxError, f.format, \"{a[:]}\")\n\ndef test_long_substr():\n data = ['hi']\n nt.assert_equal(text.long_substr(data), 'hi')\n\n\ndef test_long_substr2():\n data = ['abc', 'abd', 'abf', 'ab']\n nt.assert_equal(text.long_substr(data), 'ab')\n\ndef test_long_substr_empty():\n data = []\n nt.assert_equal(text.long_substr(data), '')\n\ndef test_strip_email():\n src = \"\"\"\\\n >> >>> def f(x):\n >> ... return x+1\n >> ... \n >> >>> zz = f(2.5)\"\"\"\n cln = \"\"\"\\\n>>> def f(x):\n... return x+1\n... 
\n>>> zz = f(2.5)\"\"\"\n nt.assert_equal(text.strip_email_quotes(src), cln)\n\n\ndef test_strip_email2():\n src = '> > > list()'\n cln = 'list()'\n nt.assert_equal(text.strip_email_quotes(src), cln)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":613,"cells":{"__id__":{"kind":"number","value":17927193525913,"string":"17,927,193,525,913"},"blob_id":{"kind":"string","value":"d3a72d0161d2b8ca9212da7526d14eb8311bec8c"},"directory_id":{"kind":"string","value":"a117d85fbc7de4d1416b6c5d903c1ca7d1850f07"},"path":{"kind":"string","value":"/python/generator/generator.py"},"content_id":{"kind":"string","value":"76ca62b722b4f99fc4b78af819f660b8c0a05bef"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"moutard/learn"},"repo_url":{"kind":"string","value":"https://github.com/moutard/learn"},"snapshot_id":{"kind":"string","value":"2a10d88a4d7a61af96915d75d4790858eb405c6c"},"revision_id":{"kind":"string","value":"5ee94b09a2ca2b30eacedd51a2885c29b454668c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-01T00:20:03.925763","string":"2021-05-01T00:20:03.925763"},"revision_date":{"kind":"timestamp","value":"2014-06-01T15:25:56","string":"2014-06-01T15:25:56"},"committer_date":{"kind":"timestamp","value":"2014-06-01T15:25:56","string":"2014-06-01T15:25:56"},"github_id":{"kind":"number","value":9283361,"string":"9,283,361"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updat
ed_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\ndef g():\n \"\"\"This is a generator, it has 3 methods :\n - next\n - send\n - throw\n \"\"\"\n yield 1\n yield 2\n yield 3\n\nfor x in g():\n print x\n\ngi = g()\nprint next(gi)\n\nfor x in g():\n print next(gi)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":614,"cells":{"__id__":{"kind":"number","value":19146964210208,"string":"19,146,964,210,208"},"blob_id":{"kind":"string","value":"b39b8513f71ac6a46716e401faeb69495d25761a"},"directory_id":{"kind":"string","value":"6973bab2b792498cfff04b313144c3a2738af270"},"path":{"kind":"string","value":"/pavement.py"},"content_id":{"kind":"string","value":"be043a038c77804edb5c6600a623d472d12a97e0"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"xdissent/skel"},"repo_url":{"kind":"string","value":"https://github.com/xdissent/skel"},"snapshot_id":{"kind":"string","value":"6db2e21c616efa2fb5c7c048574483e23e45de4f"},"revision_id":{"kind":"string","value":"c5501946ab5c542cabf054cf69492e5d5c0e828c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T12:14:12.098539","string":"2020-05-18T12:14:12.098539"},"revision_date":{"kind":"timestamp","value":"2009-07-19T23:04:51","string":"2009-07-19T23:04:51"},"committer_date":{"kind":"timestamp","value":"2009-07-19T23:04:51","string":"2009-07-19T23:04:51"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from paver.easy import *\nfrom paver.tasks import consume_args\nimport paver.doctools\nimport paver.setuputils\nimport paver.misctasks\n\npaver.setuputils.install_distutils_tasks()\n\nimport os\nfrom setuptools import find_packages\n\nPROJECT_FILES = [\n '__init__.py',\n 'local_settings.py',\n 'manage.py',\n 'settings_dev.py',\n 'settings.py',\n 'static',\n 'templates',\n# 'initial_data.json',\n 'urls.py',\n]\n\nSKEL_SVN_URL = 'https://code.hartzogcreative.com/svn/hartzog_skel/trunk'\n\n# Local Settings\nPROJECTS_DIR = os.environ.get('SKEL_PROJECTS_DIR', '~/Sites/')\nSVN_URL_ROOT = os.environ.get('SKEL_SVN_URL_ROOT', 
'https://code.hartzogcreative.com/svn')\nSVN_SSH_HOST = os.environ.get('SKEL_SVN_SSH_HOST', 'code.hartzogcreative.com')\nSVN_SSH_ROOT = os.environ.get('SKEL_SVN_SSH_ROOT', '/var/svn/code.hartzogcreative.com')\nENVIRONMENTS_DIR = os.environ.get('SKEL_ENVIRONMENTS_DIR', False)\nif not ENVIRONMENTS_DIR:\n ENVIRONMENTS_DIR = os.environ.get('WORKON_HOME', False)\n \nPACKAGE_DATA = paver.setuputils.find_package_data()\n# print PACKAGE_DATA\nPACKAGES = sorted(PACKAGE_DATA.keys())\nPACKAGES = find_packages()\n# print PACKAGES\n\nDATA_FILES = [\n ('skel/core/management', ['pavement.py']),\n]\n\noptions(\n setup=Bunch(\n name='Skel',\n version='0.1',\n description='Hartzog Creative Skel Framework for Django',\n author='Greg Thornton',\n author_email='xdissent@gmail.com',\n packages=PACKAGES,\n package_data={\n 'skel.core.management': ['pavement.py'],\n },\n zip_safe=False,\n entry_points = {\n 'console_scripts': [\n 'skel-admin.py = skel.core.management:launch_paver',\n ],\n },\n include_package_data=True,\n data_files=DATA_FILES,\n ),\n minilib=Bunch( \n extra_files=['doctools', 'setuputils']\n ), \n startproject=Bunch(\n projects_dir=PROJECTS_DIR,\n svn_url_root=SVN_URL_ROOT,\n environments_dir=ENVIRONMENTS_DIR,\n environment=False,\n svn_ssh_host=SVN_SSH_HOST,\n svn_ssh_root=SVN_SSH_ROOT,\n svn_dev_branch='xdissent',\n \n no_coda=False,\n no_svn=False,\n no_requirements=False,\n \n ),\n deploy=Bunch(\n targets=Bunch(\n dev=Bunch(\n ),\n ),\n ),\n)\n\n\n@task\n@cmdopts([\n ('no-coda', None, 'Disable Site creation in Coda'),\n ('no-svn', None, 'Disable Subversion repository creation'),\n ('environment=', 'E', 'Use a virtualenv for the environment'),\n ('no-upgrade', None, 'Prevent updating Skel in the virtualenv'),\n ('no-requirements', None, 'Prevent Virtualenv from being populated with requirements')\n])\n@consume_args\ndef startproject(options):\n \"\"\"Starts a new Skel project\"\"\"\n info('pavement %s' % environment.pavement_file)\n \n project_path = 
path(options.args[0])\n project_name = project_path.name\n \n # Ensure either absolute path or use project_dir\n if project_name == project_path:\n projects_dir = path(options.projects_dir).expand()\n project_path = path.joinpath(projects_dir, project_name)\n \n if project_path.exists():\n raise BuildFailure('Project already exists at %s' % project_path)\n \n try:\n __import__(project_name)\n except ImportError:\n pass\n else:\n raise BuildFailure('Python module named %s already exists on your path' % project_path)\n \n svn_url_root = path(options.svn_url_root)\n svn_url = path.joinpath(svn_url_root, project_name)\n \n if not options.no_svn:\n try:\n info('Checking for project name collision at %s' % svn_url)\n sh('svn ls %s' % svn_url)\n except BuildFailure:\n pass\n else:\n raise BuildFailure('Project already in subversion at %s' % svn_url)\n \n dry('Creating project directory at %s' % project_path, project_path.mkdir)\n \n environments_dir = path(options.environments_dir)\n venv_path = environments_dir.joinpath(project_name)\n if options.environment:\n venv_path = path(options.environment)\n venv_path = venv_path.expand()\n \n if venv_path.exists():\n if not venv_path.joinpath('bin/activate').exists():\n raise BuildFailure('Folder at %s does not contain a Virtualenv' % venv_path)\n info('Using existing virtualenv at %s' % venv_path)\n else:\n info('Creating virtualenv for %s at %s' % (venv_path, project_name))\n sh('virtualenv %s' % venv_path)\n \n easy_install_path = venv_path.joinpath('bin/easy_install')\n if not easy_install_path.exists():\n raise BuildFailure('Virtualenv at %s does not contain easy_install' % venv_path)\n\n skel_path = venv_path.joinpath('src/skel/skel')\n \n if not options.no_requirements:\n \n info('Easy installing PIP to process requirements.txt')\n sh('%s pip' % easy_install_path)\n \n pip_path = venv_path.joinpath('bin/pip')\n \n info('Installing latest copy of Skel')\n sh('%s install -e svn+%s#egg=Skel' % (pip_path, SKEL_SVN_URL))\n\n 
\n # TODO: handle skel upgrade option and set skel_path appropriately\n # if no upgrade, run paver install task into virtualenv\n # if upgrade run pip install skel\n # import skel\n \n # TODO: get skel pkg_resources path as skel_path\n # TODO: get requirements_path from skel pkg_resources:\n # requirements_path = skel_path.joinpath('requirements.txt')\n \n requirements_path = venv_path.joinpath('src/skel/requirements.txt')\n \n info('Installing requirements with PIP')\n sh('%s install -r %s' % (pip_path, requirements_path))\n \n \n if not options.no_svn:\n svn_ssh_root = path(options.svn_ssh_root)\n svn_ssh_path = svn_ssh_root.joinpath(project_name)\n \n svnadmin_command = 'svnadmin create %s' % svn_ssh_path\n ssh_command = \"ssh %s '%s'\" % (options.svn_ssh_host, svnadmin_command)\n info('Creating repository using \"%s\"' % ssh_command)\n sh(ssh_command)\n sh('svn mkdir %s -m \"creating %s\"' % (svn_url.joinpath('trunk'), 'trunk'))\n sh('svn mkdir %s -m \"creating %s\"' % (svn_url.joinpath('tags'), 'tags'))\n sh('svn mkdir %s -m \"creating %s\"' % (svn_url.joinpath('branches'), 'branches'))\n \n svn_dev_branch_url = svn_url.joinpath('branches', options.svn_dev_branch)\n sh('svn copy %s %s -m \"creating development branch (%s)\"' % (svn_url.joinpath('trunk'), svn_dev_branch_url, svn_dev_branch_url))\n \n sh('svn co %s %s' % (svn_dev_branch_url, project_path))\n # TODO: set svnignores\n \n # TODO: fix into pkg_resources instead of skel_path\n info('Copying default files from %s to %s' % (skel_path, project_path))\n for file_path in PROJECT_FILES:\n src_path = skel_path.joinpath(file_path)\n dest_path = project_path.joinpath(file_path)\n \n if src_path.isdir():\n src_path.copytree(dest_path)\n else:\n src_path.copy(dest_path)\n \n # from pprint import pprint\n # 
pprint(options.startproject)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":615,"cells":{"__id__":{"kind":"number","value":9861244938768,"string":"9,861,244,938,768"},"blob_id":{"kind":"string","value":"2bf0716b58f1946ceffd362ba53015e10e7d9cf5"},"directory_id":{"kind":"string","value":"b9f13232d92c5ee36fcca10ae74b2591f38a2674"},"path":{"kind":"string","value":"/tests/test_pmd.py"},"content_id":{"kind":"string","value":"714b51ee9d7780a19a9271b54f52c7ea80a1d6c6"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jjst/reviewbot-pmd"},"repo_url":{"kind":"string","value":"https://github.com/jjst/reviewbot-pmd"},"snapshot_id":{"kind":"string","value":"ad2562b12d06f1126e1b86db856185e285b721e5"},"revision_id":{"kind":"string","value":"477ea74a9556d069dd8bdaf115fd8c8c651207d1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T18:12:05.526662","string":"2021-01-23T18:12:05.526662"},"revision_date":{"kind":"timestamp","value":"2014-08-13T18:25:31","string":"2014-08-13T18:25:31"},"committer_date":{"kind":"timestamp","value":"2014-08-13T18:25:31","string":"2014-08-13T18:25:31"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"ki
nd":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os\nimport subprocess\nimport shutil\nimport tempfile\nfrom collections import namedtuple\nfrom nose import SkipTest\nfrom nose.tools import *\nfrom nose.plugins.attrib import attr\nfrom reviewbotpmd.pmd import *\nimport xml.etree.ElementTree as ElementTree\n\n\ndef setup_module():\n global pmd_install_path, pmd_script_path\n pmd_install_path = os.environ.get('PMD_INSTALL_PATH', '/opt/pmd/')\n pmd_script_path = os.path.join(pmd_install_path, 'bin/run.sh')\n if not os.path.exists(pmd_install_path):\n raise SkipTest(\"Cannot run run tests as no valid \"\n \"$PMD_INSTALL_PATH was provided\")\n\njava_source_path = os.path.join(os.path.dirname(__file__),\n 'testdata/HelloWorld.java')\njs_source_path = os.path.join(os.path.dirname(__file__),\n 'testdata/hello-http.js')\ninvalid_source_path = os.path.join(os.path.dirname(__file__),\n 'testdata/IDontExist.java')\n\n\ndef test_violation_num_lines():\n one_line_violation = Violation(rule='', priority=1, text='', url='',\n first_line=1, last_line=1)\n assert one_line_violation.num_lines == 1\n\n\ndef test_violation_is_consecutive():\n violation_text = \"Text\"\n v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1)\n v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2)\n assert v1.is_consecutive(v2)\n assert v2.is_consecutive(v1)\n\n\ndef test_violation_is_consecutive_text_different():\n v1 = Violation('', 1, \"Text\", '', first_line=1, last_line=1)\n v2 = Violation('', 1, \"Different text\", '', first_line=2, last_line=2)\n assert not v1.is_consecutive(v2)\n assert not v2.is_consecutive(v1)\n\n\ndef test_violation_combine():\n violation_text = \"Text\"\n v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1)\n v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2)\n combined = v1.combine(v2)\n assert_equals(combined.first_line, 1)\n 
assert_equals(combined.last_line, 2)\n assert_equals(combined.text, violation_text)\n\n\ndef test_violation_combine_not_consecutive():\n v1 = Violation('', 1, \"Banana\", '', first_line=1, last_line=1)\n v2 = Violation('', 1, \"Strawberry\", '', first_line=2, last_line=2)\n assert_raises(ValueError, v1.combine, v2)\n\n\ndef test_violation_group_consecutive():\n violation_text = \"Text\"\n v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1)\n v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2)\n v1_v2_combined = v1.combine(v2)\n assert_equals(Violation.group_consecutive([v1, v2]), [v1_v2_combined])\n\n\ndef test_violation_group_consecutive_empty():\n assert_equals(Violation.group_consecutive([]), [])\n\n\ndef test_violation_group_consecutive_nothing_consecutive():\n violation_text = \"Text\"\n v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1)\n v2 = Violation('', 1, violation_text, '', first_line=3, last_line=3)\n v3 = Violation('', 1, violation_text, '', first_line=5, last_line=10)\n assert_equals(Violation.group_consecutive([v1, v2, v3]), [v1, v2, v3])\n\n\ndef test_violation_group_consecutive_2():\n violation_text = \"Text\"\n v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1)\n v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2)\n v3 = Violation('', 1, violation_text, '', first_line=5, last_line=10)\n v1_v2_combined = v1.combine(v2)\n assert_equals(\n Violation.group_consecutive([v1, v2, v3]), [v1_v2_combined, v3])\n\n\nclass TestResult(object):\n\n @classmethod\n def setup_class(cls):\n cls.testdir = tempfile.mkdtemp()\n cls.pmd_result_path = os.path.join(\n cls.testdir, 'HelloWorld_result.xml')\n with open(os.devnull, 'w') as devnull:\n subprocess.check_call(\n [pmd_script_path,\n 'pmd',\n '-d', java_source_path,\n '-R', 'rulesets/internal/all-java.xml',\n '-f', 'xml',\n '-r', cls.pmd_result_path],\n stdout=devnull,\n stderr=devnull)\n assert 
os.path.exists(cls.pmd_result_path)\n\n @classmethod\n def teardown_class(cls):\n shutil.rmtree(cls.testdir)\n\n def test_result_from_xml(self):\n result = Result.from_xml(self.pmd_result_path, java_source_path)\n assert len(result.violations) == 6\n\n\nclass TestPMDTool(object):\n\n def setup(self):\n self.pmd = PMDTool()\n default_settings = {\n 'markdown': False,\n 'pmd_install_path': pmd_install_path,\n 'rulesets': 'java-comments',\n 'max_priority_for_issue': 5,\n }\n self.num_violations = 2\n self.pmd.settings = default_settings\n self.pmd._setup(default_settings)\n self.pmd.processed_files = set()\n self.pmd.ignored_files = set()\n\n def is_valid_ruleset_file(self, filepath):\n if not os.path.exists(filepath):\n return False\n try:\n tree = ElementTree.parse(filepath)\n except ElementTree.ParseError:\n return False\n root = tree.getroot()\n if root.tag != 'pmd':\n return False\n file_elems = root.findall('file')\n return len(file_elems) == 1\n\n @attr('slow')\n def test_run_pmd_creates_file(self):\n results_file_path = self.pmd.run_pmd(java_source_path,\n rulesets=['java-basic'])\n assert os.path.exists(results_file_path)\n\n @attr('slow')\n def test_run_pmd_invalid_ruleset(self):\n assert_raises(PMDError,\n self.pmd.run_pmd,\n java_source_path,\n ['invalid-ruleset-path'])\n\n @attr('slow')\n def test_run_pmd_absolute_path_to_ruleset(self):\n ruleset_full_path = os.path.join(\n os.path.dirname(__file__),\n 'testdata/test_ruleset.xml')\n results_file_path = self.pmd.run_pmd(\n java_source_path, rulesets=[ruleset_full_path])\n assert self.is_valid_ruleset_file(results_file_path)\n\n @attr('slow')\n def test_run_pmd_relative_path_to_ruleset_in_classpath(self):\n ruleset_path = 'rulesets/java/comments.xml'\n results_file_path = self.pmd.run_pmd(\n java_source_path, rulesets=[ruleset_path])\n assert self.is_valid_ruleset_file(results_file_path)\n\n @attr('slow')\n def test_run_pmd_creates_valid_pmd_result(self):\n results_file_path = self.pmd.run_pmd(\n 
java_source_path, rulesets=self.pmd.rulesets)\n assert self.is_valid_ruleset_file(results_file_path)\n\n def test_run_pmd_with_invalid_source_file(self):\n assert not os.path.exists(invalid_source_path)\n results_file_path = self.pmd.run_pmd(\n invalid_source_path, rulesets=['java-basic'])\n assert_raises(\n ElementTree.ParseError, ElementTree.parse, results_file_path)\n\n @attr('slow')\n def test_handle_file(self):\n reviewed_file = FileMock(java_source_path, java_source_path)\n assert_true(self.pmd.handle_file(reviewed_file))\n assert_equal(len(reviewed_file.comments), self.num_violations)\n\n def test_handle_file_unsupported_file_type(self):\n reviewed_file = FileMock(dest_file='test.php')\n assert_false(self.pmd.handle_file(reviewed_file))\n\n def test_handle_file_invalid_file(self):\n reviewed_file = FileMock(dest_file=invalid_source_path)\n assert_false(self.pmd.handle_file(reviewed_file))\n\n def test_handle_files(self):\n reviewed_file = FileMock(java_source_path, java_source_path)\n self.pmd.handle_files([reviewed_file])\n assert self.pmd.processed_files == set([reviewed_file.dest_file])\n assert self.pmd.ignored_files == set()\n assert len(reviewed_file.comments) == self.num_violations\n\n def test_handle_files_opens_issues(self):\n reviewed_file = FileMock(\n java_source_path, java_source_path, open_issues=True)\n self.pmd.settings['max_priority_for_issue'] = Priority.MAX\n self.pmd.handle_files([reviewed_file])\n assert self.pmd.processed_files == set([reviewed_file.dest_file])\n assert self.pmd.ignored_files == set()\n assert all(c.issue == True for c in reviewed_file.comments)\n\n def test_handle_files_invalid_pmd_install(self):\n self.pmd.settings['pmd_install_path'] = 'invalid_path'\n reviewed_file = FileMock(java_source_path, java_source_path)\n self.pmd.handle_files([reviewed_file])\n assert self.pmd.processed_files == set()\n assert self.pmd.ignored_files == set([reviewed_file.dest_file])\n\n def test_handle_files_invalid_ruleset(self):\n 
self.pmd.settings['rulesets'] = 'invalid-ruleset-path'\n reviewed_file = FileMock(java_source_path, java_source_path)\n self.pmd.handle_files([reviewed_file])\n assert self.pmd.processed_files == set()\n assert self.pmd.ignored_files == set([reviewed_file.dest_file])\n\n def test_post_comments(self):\n result = mock_result()\n reviewed_file = FileMock(java_source_path)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == 2\n\n def test_post_comments_opens_issues(self):\n self.pmd.max_priority_for_issue = Priority.MAX\n result = mock_result()\n reviewed_file = FileMock(java_source_path, open_issues=True)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == 2\n assert all(c.issue == True for c in reviewed_file.comments)\n\n def test_post_comments_custom_priority(self):\n self.pmd.max_priority_for_issue = 3\n result = mock_result()\n result.violations = [\n mock_violation(rule=str(i), priority=i) for i in Priority.values]\n reviewed_file = FileMock(java_source_path, open_issues=True)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == len(Priority.values)\n comments_with_issues = [c for c in reviewed_file.comments if c.issue]\n assert len(comments_with_issues) == 3\n\n def test_post_comments_open_issues_disabled(self):\n self.pmd.max_priority_for_issue = Priority.MAX\n result = mock_result()\n reviewed_file = FileMock(java_source_path, open_issues=False)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == 2\n assert all(c.issue == False for c in reviewed_file.comments)\n\n def test_post_comments_open_issues_consecutive_violation(self):\n self.pmd.max_priority_for_issue = Priority.MAX\n result = mock_result()\n v = result.violations[-1]\n consecutive_violation = Violation(\n v.rule, v.priority, v.text,\n v.url, v.last_line + 1, v.last_line + 5)\n result.violations.append(consecutive_violation)\n reviewed_file = 
FileMock(java_source_path, open_issues=True)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == 2\n combined_violation_comment = next(c for c in reviewed_file.comments\n if v.text in c.text)\n assert_equals(combined_violation_comment.first_line, v.first_line)\n expected_num_lines = consecutive_violation.last_line - v.first_line + 1\n assert_equals(combined_violation_comment.num_lines, expected_num_lines)\n\n def test_post_comments_consecutive_violations(self):\n result = mock_result()\n result.violations = [Violation('', Priority.MAX, '', '', 1, 1,)]\n reviewed_file = FileMock(java_source_path, open_issues=True)\n self.pmd.post_comments(result, reviewed_file)\n assert len(reviewed_file.comments) == 1\n assert all(c.issue == True for c in reviewed_file.comments)\n\n def test_post_comments_comment_plain_text(self):\n result = mock_result()\n reviewed_file = FileMock(java_source_path)\n self.pmd.post_comments(result, reviewed_file, use_markdown=False)\n violation = result.violations[0]\n assert_equals(\n reviewed_file.comments[0].text,\n \"%s: %s\\n\\nMore info: %s\" % (violation.rule,\n violation.text,\n violation.url))\n\n def test_post_comments_comment_markdown(self):\n result = mock_result()\n reviewed_file = FileMock(java_source_path)\n self.pmd.post_comments(result, reviewed_file, use_markdown=True)\n violation = result.violations[0]\n assert_equals(\n reviewed_file.comments[0].text,\n \"[%s](%s): %s\" % (violation.rule, violation.url, violation.text))\n\n\ndef mock_result():\n v1 = Violation('TestRule1', 1, 'A test rule', 'dummy_url', 1, 10)\n v2 = Violation('TestRule2', 4, 'Another test rule', 'dummy_url', 14, 14)\n return Result('', [v1, v2])\n\n\ndef mock_violation(**kwargs):\n return Violation(\n kwargs.get('rule', 'RuleMock'),\n kwargs.get('priority', 1),\n kwargs.get('text', 'A test rule'),\n kwargs.get('url', 'http://dummy.url/'),\n kwargs.get('first_line', 1),\n kwargs.get('last_line', 1))\n\n\nComment = 
namedtuple('Comment', ['text', 'first_line', 'num_lines', 'issue'])\n\n\nclass FileMock(object):\n\n class Object:\n pass\n\n def __init__(self, patched_file_path=None, dest_file=None,\n open_issues=False):\n self.comments = []\n self.patched_file_path = patched_file_path\n self.dest_file = dest_file\n self.review = FileMock.Object()\n self.review.settings = {'open_issues': open_issues}\n\n def get_patched_file_path(self):\n return self.patched_file_path\n\n def comment(self, text, first_line, num_lines=1, issue=None,\n original=False):\n self.comments.append(Comment(text, first_line, num_lines, issue))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":616,"cells":{"__id__":{"kind":"number","value":8486855414411,"string":"8,486,855,414,411"},"blob_id":{"kind":"string","value":"0d3cc3aa8f99782a77258a690d8355e3df0ddf22"},"directory_id":{"kind":"string","value":"4fc8b8769b3dbca488f0211a1f249e9b5258530c"},"path":{"kind":"string","value":"/examples/advanalysis.py"},"content_id":{"kind":"string","value":"8e76a307db892b46f57de1d80cd18e4f2eaec0aa"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"majidaldo/psmt"},"repo_url":{"kind":"string","value":"https://github.com/majidaldo/psmt"},"snapshot_id":{"kind":"string","value":"b977cffe0215d9174cf072b9e1290051698d78f6"},"revision_id":{"kind":"string","value":"aeeab95f40f8b25620fc9e501054d384a4728921"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T07:51:20.267942","string":"2021-01-10T07:51:20.267942"},"revision_date":{"kind":"timestamp","value":"2014-01-31T20:28:18","string":"2014-01-31T20:28:18"},"committer_date":{"kind":"timestamp","value":"2014-01-31T20:28:18","string":"2014-01-31T20:28:18"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nfrom analysisbase import analysis\nfrom scriptmgt import batchfoldermgt\n\n\nimport scipy as sp\nimport numpy as np\nfrom scipy import *\nfrom scipy import optimize\nfrom scipy import stats\nfrom matplotlib import mlab\nimport itertools\n\nclass gkanalysis(analysis):\n \n def gethc2(self,params,resetts=True,takeoutfirstone=True\n ,sumup='all'):\n fns=self.batchobj.user_getoutputvsinput(params,['*cumdE.avetime'])\n d={}\n for afp in fns['*cumdE.avetime']:\n try: #b/c it maybe blank\n ld=sp.loadtxt(afp\n ,unpack=True\n ,usecols=(0,1,2,3))\n for ats,ajal,ajo, ajy in zip(*ld):\n d.update({int(ats):(ajal,ajo, ajy)})\n except: 
continue\n tss=sp.reshape(d.keys(),(len(d.keys()),1))\n sa=sp.hstack([tss,d.values()])\n detype=[('ts',int),('jal',float),('jo',float),('jy',float)]\n sa=sp.array(map(tuple,sa.tolist()),dtype=detype) #i just had to make it tuples?!\n #..instead of nested brackets. idk why\n sa.sort(order='ts') #probably unnecessary here\n #..so i don't need ts if i'm taking data every ts\n if takeoutfirstone==True:\n sa=sa[1:]\n if resetts==True:\n sa['ts']=sa['ts']-sa['ts'][0]\n #if sumup==True: return self.sumhc(sa,sumup=sumup)\n return self.sumhc(sa,sumup=sumup)[:1001000]\n #else: return sa\n def sumhc(self,hc,sumup='all'):\n if sumup=='all':\n return sp.sum([hc['jal'],hc['jo'],hc['jy']],axis=0)\n elif sumup==False: return hc #w/ components intact\n summed=sp.zeros(len(hc))\n for ahc in sumup: summed+=hc[ahc]\n return summed\n \n def gethc(self,*args,**kwargs):\n try: return self.gethc2(*args,**kwargs)\n except :multipleparamsets=args[0]\n def returnhcs(multipleparamsets,**kwargs):\n for aparamset in multipleparamsets:\n yield self.gethc2(aparamset,**kwargs)\n return returnhcs(multipleparamsets,**kwargs)\n \n def shortcutac(self,hc,trimend=True):\n #ac= real(ifft(real(fft(hc))**2))\n ffthc=(sp.fft(hc))\n ac= sp.real(sp.ifft((ffthc)*sp.conjugate(ffthc)))# i believe this\n if trimend==True:\n first90pc=int(len(hc)*.9)\n return self.divac(ac[:first90pc])\n else: return self.divac(ac)\n #last 10% of the series is ALWAYS meaningless\n\n #..isn't the normalized def.\n #supposely im part should be small\n \n #avgac=sp.average(ac)\n #avgac=sp.average(ac)\n #return ac\n \n def divac(self,ac):\n n=len(ac) #or hc\n ##div=[((n-k)*avgac**2) for k in xrange(n)]\n #divby=[(n-k) for k in xrange(n)]\n ##div=n\n #return ac/divby #ac[:-100000]/divby[:-100000]#(ac[0])#divby\n #chk end of corr for extremes\n return array(ac)/n\n \n def hcacintseries(self,hcac):\n #todo cache\n try: return self.cache['hcint'][hcac[-10:]]\n except:\n self.cache.update({'hcint':{}})\n hcint=sp.cumsum( 
[sp.trapz(hcac[i:i+2]) \\\n for i in xrange(len(hcac-1))] )\n self.cache['hcint'].update({ tuple(hcac[-10:]) : hcint })\n return hcint\n \n def convertconductivity(self,params,dt=.001e-12):\n \"\"\"multiply the integral by this\"\"\"\n #dt in ps; xyz in Ao; T in K\n #\"18592486.74*integral*dt/x/y/z/T^2\"\n params=dict(params)\n a=(params['Tk']*9.67114349e-05)+12.3918 #lattice const\n convert=(1/1e-12)*18592486.74*\\\n dt/params['dx']/params['ly']/params['lz']/params['Tk']**2/a**3\n return convert\n \n def getkseries(self,params,ac,dt=.001e-12):\n \"\"\"hcac integral in W/mK\"\"\"\n #dt in ps; xyz in Ao; T in K\n #\"18592486.74*integral*dt/x/y/z/T^2\"\n params=dict(params)\n #hc=(self.sumhc(self.gethc(params)))\n #ac=self.shortcutac(hc)\n #schcac=self.divac(schcac) #just a div by N for \"stochastic\" process\n hcintegral=self.hcacintseries(ac)\n convert=self.convertconductivity(params)\n return convert*hcintegral\n \n# def findpeaks(self, spectra,win,peaktol=.025): #401 in my case\n# #need to input below nyquist\n# \"\"\"input noisy spectra\n# 1st filters it b/c too many \"needles\" on a spike\n# \"\"\"\n# fs=(savitzky_golay(spectra,win,5)) #play w/ these params\n# pt=peaktol*max(fs)\n# return dict(peakdet(fs,pt)[0]) #dict it?\n\n\n\n#max freq N/(2T) T is sampling time\n#T=N*dt\n#=>maxfreq= 1/(2*dt) = 1/(2*.001*1e-12) . /1e12 for THz todo: is there a 2pi factor here?\n#around 500THz. vibes are around 4thz factor of 2\n\n #note psd chokes at nfft lower than n/2\n #works faster for even but few times the even no. takes a while. idkw\n #eg. 
18000 vs 18002 for a len(tsd)=1e6\n def psd(self,tsd,seg=1024*16,dt=.001e-12,sides='onesided',scalebyfreq=True\n ,divby2=True\n ,takeeven=True,nsigfigs=3 #performance params\n ,chopoff=True): \n if takeeven==True: seg=seg- ( seg % 2) #make even\n if nsigfigs!=None:\n bigno=seg\n numofdigits=int(math.log(bigno,10))+1\n divby=10**(numofdigits-nsigfigs)\n sigfigs=bigno/divby\n seg=int(sigfigs*divby)\n #return seg\n #when nfft is big, the psd calc becomes slow so it's good enough just to\n #take the nearest (lower?) even\n #if seg is odd, it will go down by 1 to make it even\n \n \"\"\"a shortcut to get psd at expense of freq fidelity\"\"\"\n #seg default good for length ts ~1e6\n ps= mlab.psd(tsd,NFFT=seg\n ,Fs=1.0/(dt)#/2 #i guess this Fs is what i want\n ,scale_by_freq=scalebyfreq\n ,sides=sides#'twosided'\n #,detrend=mlab.detrend_mean\n #,noverlap=0\n #,pad_to=None\n ) \n #return ps[1]*((2*ts)**-1) \\\n #, ps[0] #freq vs power\n #return ps[1], ps[0]#/(pi)**.5 #freq in hz vs power\n ps=list(ps)\n if divby2==True:ps[0]=ps[0]/2\n if chopoff==True: return chopoffspectra([ps[1], ps[0]])\n else: return [ps[1], ps[0]]\n \n def smoothspectra(self,spectra,winfrac=.001):#useless\n #todo winfrac per f\n #useless?\n \"\"\"Input spectra w/o mirror image\"\"\" #it has to do w/ width of peak\n #that is rel to size of features\n win=int(winfrac*len(spectra))\n if win%2==0: win+=1\n return (savitzky_golay(spectra,11,0)) #201 gets the peaks\n def findpeaks(self,spectra,peaktol=.025):\n \"\"\"peaktol: fraction of highest spike\n returns {freq:amp}. 
spectra should be smoothed\n spectra has x index\n \"\"\"\n #spectra can be a bit noisy but not so much\n # that you have 'needles' on the order of the spike\n pt=max(spectra[1])*peaktol\n return dict( peakdet(spectra[1],pt,x=spectra[0])[0] ) #zero idexes peaks\n def returnpeaklocs(self,*args,**kwargs):#just need to put in .psd args\n pt=kwargs.pop('peaktol') \n sg=self.psd(*args,**kwargs)\n peaks=self.findpeaks(sg,peaktol=pt) #cool prog'ing!\n return sorted(peaks.keys())\n \n def findbestnfft(self,tsd,dt=.001e-12\n ,peaktol=.03,minpeakfreqdiff=.5e12 #physics params reject anything w/ dfreq smaller than ftol\n #..so smaller values of these give more peaks..but too low and you get \"false\" peaks \n #,guessdf=None,cutfreq=None\n ,atatime=2,ntol=.98,minn=1024 #optimization params\n ,returnpeaks=False\n ,statmsgs=True): \n #maxfreq=1.0/(dt*2)\n maxn=len(tsd) \n #mindf=maxfreq/(maxn/2.0) #eqn. (maxfreq is const)\n \"\"\"\n physical params\n peaktol: fraction of max peak to be id.ed as a peak\n ftol: min freq b/w peaks. used to eliminate noise adjacent to peaks\n optimization params\n ntol: frac of possible optimal numbers\n atatime: random no.s to assess at a time (not important. keep it 2)\n \"\"\"\n \n# #a guess\n# if guessdf==None: guessdf=mindf*2 #ohh let's start somewhere in the middle\n# #but if you gave a startdf it has to be bigger than min df\n# if guessdfmaxfreq:\n# fl=list(freqs)\n# imf=fl.index(afreq)\n# break\n #div spectra to a more manageable no. ..\n #magscale=(max(spectra)) #..to help the solver\n #already div by 2 in the psd\n #spectra=spectra/2 #!??!?! 
#b/c this is what the peakfit expects\n #*2.566972197*10**(-34)#/magscale #later x by magscale\n #can't do this..messes up time scale\n# maxfreq = freqs[imf] #Hz\n \n #pps=self.solveforpeaksparams(spectra[:imf],maxfreq=maxfreq*2*pi) #rads \n \n #convvert peaks\n radpeaks={} \n for ahzpeakloc,itspeakval in hzpeaks.iteritems():\n radpeaks.update({ahzpeakloc*2*sp.pi:itspeakval})\n del hzpeaks\n pps=self.solveforpeaksparams(spectra,peaks=radpeaks,maxfreq=freqs[-1]*2*pi) #rads\n #return pps\n #dfreq=maxfreq/(imf+1) #how imf cancels?\n #dfreq=freqs[-1]/(len(spectra)-1)\n T=dict(params)['Tk']\n a=(T*9.67114349e-05)+12.3918\n V=dict(params)['dx']*dict(params)['ly']*dict(params)['lz']*(a**3)\n kinfo={}\n for apeakloc,pparams in pps['peaksparams'].iteritems():\n #, i0 is height, i1 is halfwidh at half max\n peakfreq=apeakloc#rads #*maxfreq\n tau=(pparams[1]) #idk why\n a=pparams[0]#todo magscale factor here prob has dt or ttime\n #print tau, a, apeakloc/2/pi/1e12\n pps['peaksparams'].update({apeakloc: (a\n ,pparams[1]#*magscale why did i do this?\n #, (maxfreq*pparams[1])**-1\n )}) #use pparams[1] to draw\n #conductivity=\"h*.9296243367e43/V/T^2/(1e24+tau^2*wo^2)\"\n #conductivity=a*9.296243367e18/V/(T**2)/(1+(tau**2)*((peakfreq*2*pi)**2))\\\n #conductivity=a*9.296243367e18/V/(T**2)/(1+(tau**2)*((peakfreq*2*pi)**2))\n #conductivity=(7.242963817e52/V/(T**2)) *(2.566972197e-34)*a/(1+(tau**2)*peakfreq**2) \\\n #/(peakfreq/2/pi)\n #factor has conversions and \n conductivity=2*pi*(1.859248674*10**19)*a*tau/V/T**2/(1+(tau**2)*peakfreq**2)\n if conductivity<0: print 'WARNING: negative conductivity component calculated.'\n kinfo.update({apeakloc:{'k':conductivity,'tau':tau,'f':peakfreq/2/pi}})\n pps.update({'kinfo':kinfo})\n fit=pps['fit']\n pps['fit']=[fit[0]/2.0/pi,fit[1]] #backto hz\n return pps\n \n\n def solveforavggkintparams2(self,*args,**kwargs):\n \"\"\"a fx to reduce variability in the conductivity calc due to nfft\n sensitivity\"\"\"\n #only do if nfft not spec\n 
ncalcs=kwargs.setdefault('ncalcs',5) #no. of procedures\n ncalcs=kwargs.pop('ncalcs')\n calcs={}\n #gather the data\n for procn in xrange(ncalcs):\n acalc=self.solveforgkintparams(*args,**kwargs)\n calcs.update({procn:acalc})\n #group same freqs by least diff\n #use the one w/ the max no. of peaks\n #stats\n #eh not gonna worrk. just sum and put a std dev \n ks=[]\n for acalc in calcs.iteritems():\n ks.append(sumks(acalc[1]['kinfo']))\n calcs.update( {'k': ( average(ks),sp.std(ks) )} )\n return calcs\n def solveforavggkintparams(self,*args,**kwargs):\n \"\"\"this func is for processing lists of inputs\"\"\"\n params=args[0]\n #hc=args[1]\n #\"scalar\" case: one HC. one paramset\n try: params[0] #if just one paramset (not in a list) input this should fail\n except:\n # if len(shape(hc))==1:\n return self.solveforavggkintparams2(*args,**kwargs)\n #else expect a set of set of params\n paramslist=params; del params #name change\n #hclist=hc; del hc\n #a hc list for one paramset makes sense\n #..but not the other way around\n #assert len(paramslist)==len(hclist)\n #todo=dict(zip(paramslist,hclist))\n \n def returncalcgen(*args,**kwargs):\n for aparams in paramslist:\n acalc=self.solveforavggkintparams2(aparams,**kwargs)\n yield acalc\n# calcs={}\n# def returncalcgen(*args,**kwargs):\n# solverlooper=itertools.izip(paramslist),hclist)\n# for aparams,ahc in solverlooper:\n# #hcs=todo[aparams]\n# if len( shape(ahc) )==1: #ie just a vector\n# acalc=self.solveforavggkintparams2(aparams,**kwargs)\n# yield acalc\n# else: raise Exception, 'input HC not a vector'\n #calcs.update({aparams:acalc})\n# else: #list of HCs\n #subcalcs=[]\n# for ahc in hcs:\n# acalc=self.solveforavggkintparams2(ahc,**kwargs)\n# subcalcs.append(acalc)\n #calcs.update({aparams:subcalcs})\n return returncalcgen(*args,**kwargs)#calcs\n \n def savegkcalcs(self,paramslist,gkcalcs,solntype):\n #solnlooper=itertools.izip(paramslist,gkcalcs)\n savedataconstsolntype=lambda params,data: 
self.savedata(params,solntype,data)\n map(savedataconstsolntype,paramslist,gkcalcs)\n return\n# for aparams,acalc in solnlooper:\n# i=self.batchobj.runsi[aparams]\n# self.data[solntype][i]=acalc;del i\n# return\n \n def solvegktimeint2(self,params,hc=None,sumhcparts='all'\n ,zerotol=.05,stablepart=.02,taumultiplecutoff=30#or 15 #hc\n ,dt=.001e-12,minfreq=.01e12#,win=None #100k win (.01thz) seems best\n #detrending is futile\n ,nbins=100):\n if hc==None:hc=self.gethc(params,sumup=sumhcparts)\n #1#nstable=int(stablepart*len(hc))#it was found that 1st 2% of ts is stable\n #by 5% \n #hc=detrendhc(hc,dt=dt,minfreq=minfreq)#win=nstable)#linear detrend\n #1#ac=self.shortcutac(hc,trimend=False)[:nstable]\n ac=self.shortcutac(hc,trimend=True)\n #cutac=chopoffactail(ac,zerotol=zerotol)\n #if len(cutac)/float(len(hc)) < stablepart: #if it's too short\n # nstable=int(stablepart*len(hc))\n # ac=self.shortcutac(hc,trimend=False)[:nstable]\n #else: ac=cutac;del cutac\n pxx,pys=returnpeaksofabsac(ac,peaktol=zerotol)\n a,tau=fitexpdecay(pxx,pys)\n #now cutoff at (5 to 10)tau\n itau=int(tau/dt)#;print itau\n ac=ac[:taumultiplecutoff*itau] #need 30 for the ones w/ doping, less for others\n ks=self.getkseries(params,ac,dt=dt)\n #plot(ks)\n h,lowest,binsize,useless=sp.stats.histogram(ks#[4*tau:]#doesn't matter\n #..for my procedure\n ,numbins=nbins,defaultlimits=(0,max(ks)),printextras=False)\n del useless;del lowest #b/c i set lowest to 0\n h=list(h)\n maxbini=h.index(max(h))\n lowerbound=maxbini*binsize;upperbound=lowerbound+binsize\n return {'ks':ks,'k':(average([lowerbound,upperbound]),binsize/2.0)\\\n ,'tau':tau,'a':a}\n def solvegktimeint(self,*args,**kwargs):\n paramsorlistofthem=args[0]\n try: return self.solvegktimeint2(paramsorlistofthem,**kwargs)\n except:#list of params\n def returncalcgen(*args,**kwargs):\n for aparams in paramsorlistofthem:\n print 'processing ', self.params[aparams]\n acalc=self.solvegktimeint2(aparams,**kwargs)\n yield acalc\n return 
returncalcgen(*args,**kwargs)\n\ndef gkproctest1(gkao,params,hc):\n tauxs=range(5,60);ks=[]\n for ataux in tauxs:\n print 'processing tau x', ataux\n k=gkao.solvegktimeint(params,hc=hc,taumultiplecutoff=ataux)['k'][0]\n ks.append(k)\n return tauxs, ks\ndef gkproctest(gkao,paramslist,hcs=None):\n if hcs==None: hcs=gkao.gethc(paramslist)\n results={}\n for aparams in paramslist:\n ahc=hcs.next()\n r=gkproctest1(gkao,aparams,ahc)\n results.update({aparams:r})\n return results\n\ndef plotgkproctest(gkao):\n results=gkao.data['gkproctest']\n ys=[]\n for aparams,xy in results.iteritems():\n ys.append(xy[1])\n xs=xy[0]\n #xl=random.choice(xs);yl=dict(zip(*xy))[xl]\n #print xl,yl\n #matplotlib.pyplot.text( xl,yl,yl)\n plot(*xy,label=str(gkao.params[aparams]))\n plot(xs,np.sum(ys,axis=0))\n return\n\ndef plotcondtrends(gkao,kcalctype,trends='all'\n ,subset={'ly':[2],'dx':[4,8,16,32,64],'Tk':[300]}): #or 'int'..egral\n if trends=='all': #else fronzenset(dict)\n r=gkao.batchobj.user_groupbyconsts(['dx','dseed','vseed']\n ,subsetdictofiters=subset)#,'ly','lz']#i1 is for the const throughout\n trends,consts=r[0],r[1];del r\n# trendsforseeds=gkao.batchobj.user_groupbyconsts(['dseed','vseed']\n# ,subsetdictofiters=subset)[0]\n# knoseeds={}\n# for ast, paramlist in trendsforseeds.iteritems():\n# ks=[] \n# for aps in paramlist:\n# ti=gka.params[aps]\n# if ti in gkao.data[kcalctype].keys():\n# k=gkao.data[kcalctype][ti]['k']\n# ks.append(k)\n# knoseeds.update({ frozenset(ast):(average(ks),sp.std(ks)) })\n #keys have dx and dp\n for atrend, paramlist in trends.iteritems():\n kt={}#;xls=[];ks=[];kerrs=[]\n #atd=dict(atrend)\n for aparamset in paramlist:\n psd=dict(aparamset)\n taskid=gkao.batchobj.runsi[aparamset]\n T=psd['Tk']\n a=(T*9.67114349e-05)+12.3918\n xl=a*psd['dx']\n k=gkao.data[kcalctype][taskid]['k'];print taskid,k\n if k=={}: continue\n #else: k=k,0 for plotting tau\n kt.setdefault(xl,[])\n kt[xl].append(k[0])\n #xls.append(xl);ks.append(k[0]);kerrs.append(k[1])\n #print 
xls,ks,kerrs\n ktreduce={}\n for alength, ks in kt.iteritems():\n ktreduce[alength]=(average(kt[alength]),sp.std(kt[alength]))\n del kt\n xls,[ks,kerrs]=ktreduce.keys(), array(ktreduce.values()).transpose() \n #^coool code! \n kdata=array( zip( array(xls)**-1 , tuple(ks), tuple(kerrs) )\n ,dtype=[('x^-1',float),('k',float),('kstdev',float)] )\n kdata.sort(order='x^-1')\n #print consts\n lbl=frozenset.union(atrend,frozenset([(k,v[0]) for k,v in consts.iteritems()]))\n #print lbl \n matplotlib.pyplot.errorbar(kdata['x^-1'],kdata['k'],yerr=kdata['kstdev']\n ,marker='o',label= str(dict(lbl)['dp']*100) )\n legend(mode='expand',ncol=len(trends))\n plt.xlabel(r'$length^{-1}$ ($\\AA^{-1}$)')\n plt.ylabel(r'Conductivity ($W\\cdot m^{-1}K^{-1}$)')\n return\n#def markcalcedhc(calcedk,tau):\n# atau=tau/.63\n# mpl.scatter(atau,calcedcond)\n# return\n\n#should be given data\ndef plotkintandtau(gkao,params,hc,**kwargs):\n dt=kwargs.setdefault('dt',.001e-12)\n kwargs.pop('dt')\n gks=gkao.solveforavggkintparams(params,hc,**kwargs)\n calcedcond=gks['k']\n gks.pop('k')\n maxtaus=[]\n for acalck,info in gks.iteritems():\n taus=[]\n for afreq,infos in info['kinfo'].iteritems():\n taus.append((infos['tau']))\n maxtaus.append(max(taus))\n atau=average(maxtaus)*4 #the pt where conductivity is at the asymptotic value\n # a tuple\n hcacint=gkao.getkseries(params,hc,dt=dt)\n #todo: it's a waste to gen the xs. 
find units in the plot\n matplotlib.pyplot.plot(sp.linspace(0,len(hcacint)*dt,len(hcacint),endpoint=False),hcacint)\n matplotlib.pyplot.errorbar(atau,calcedcond[0],yerr=calcedcond[1]\n ,marker='o',c='r')\n return\n\nimport matplotlib\nfrom matplotlib import rc\nrc('text',usetex=False) # r'string'\nmatplotlib.rcParams['mathtext.default']='regular'\ndef plotniceac(gkao,hc,dt=.001e-12,endt=20e-12):\n dtps=dt/1e-12\n plt.xlabel(r'time (ps)');\n plt.ylabel(r'$\\left\\langle S_{x}(t)\\cdot S_{x}(0) \\right\\rangle/\\left\\langle S_{x}(0)\\cdot S_{x}(0) \\right\\rangle$')\n ac=gkao.shortcutac(hc)[:int(endt/dt)]\n xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False)\n p=plot(xs,array(ac)/ac[0])\n return (xs, ac),p\ndef plotniceabsac(gkao,hc,dt=.001e-12,endt=20e-12):\n dtps=dt/1e-12\n plt.xlabel(r'time (ps)');\n plt.ylabel(r'$|\\left\\langle S_{x}(t)\\cdot S_{x}(0) \\right\\rangle/\\left\\langle S_{x}(0)\\cdot S_{x}(0) \\right\\rangle|$')\n acr=gkao.shortcutac(hc)[:int(endt/dt)]\n ac=abs(array(acr))/acr[0]\n xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False)\n p=plot(xs,ac)\n return (xs, acr),p\ndef plotniceint(gkao,params,ac,dt=.001e-12,endt=20e-12):\n dtps=dt/1e-12\n plt.xlabel(r'time (ps)');\n plt.ylabel(r'Conductivity ($W\\cdot m^{-1}K^{-1}$)')\n plt.ylim(0,15)\n ac=ac[:int(endt/dt)]\n ks=gkao.getkseries(params,ac,dt=dt)\n xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False)\n p=plot(xs,ks)\n return (xs,ks),p\ndef plotniceacproc(gkao,params,hc,dt=.001e-12):\n gks=gkao.solvegktimeint2(params,hc=hc)\n n=len(gks['ks']);tau=gks['tau'];a=gks['a']\n ac=plotniceabsac(gkao,hc,dt=dt,endt=30*tau)[0]\n ED=genexpdecay(a/ac[1][0],tau,0,dt=dt,n=n)\n pxx,pys=returnpeaksofabsac(ac[1]/ac[1][0],peaktol=.05,dt=dt)\n scatter(array(pxx)/1e-12,pys,color='red')\n plot(ED[0]/1e-12,ED[1],color='black')\n return\ndef plotnicepsd(gkao,hc,**kwargs):\n plt.xlabel(r'frequency (THz)');\n plt.ylabel(r'a.u./frequency')\n psdd=gkao.psd(hc,**kwargs)\n plot(psdd[0]/1e12,psdd[1])\n return\ndef 
plotnicepsds(gkao,hcs,**kwargs):\n plt.yscale('log');plt.ylim(1e-9,None)\n plt.xlim(0,30)\n for ahc in hcs:\n plotnicepsd(gkao,ahc)\n plt.legend(['x=0%','x=50%'],ncol=2,loc=4)\n return\n\n#moving avg not as general\n#useless\ndef runningstd(ac,mu=0):\n sumsq=0\n ninvals=0\n stds=[]\n for anum in ac:\n if anum>0:\n ninvals+=1\n sumsq+=anum**2\n stds.append((sumsq*ninvals**-1)**.5)\n else: stds.append(stds[:-1])\n return stds #exactly as runningstd2\n \ndef runningstd2(ac): return [sp.stats.tstd(ac[0:i+2],limits=(0,None))\\\n for i in xrange(0,len(ac))]\n\n#better than using avgs b/c dists are less sensitive to extremes\n\n#useless?\ndef findstabilizedi(ac,conf=.95,xstd=.1,skip=0):#,noise=.05,chi2tol=8):nbins=30,\n sumsq=0\n ninvals=0\n ninstds=0\n stds=[]\n i=0\n for anum in ac:\n if anum>0:\n ninvals+=1\n sumsq+=anum**2\n curstd=xstd*(sumsq*ninvals**-1)**.5\n if anum conf:return i\n #print i,float(ninstds)/ninvals\n i+=1\n \n\n #for i in xrange(ii+1,len(ac)-1):\n \n# #thisstd=sp.stats.tstd(ac[:i+2],\n #assert 0than last bin in array #and i will ignore it\n\n#95pc in the middle idea\n# def returnbinrange(binranges,binno):return binranges\n# def returnbinranges(binranges):\n# binsd=[]\n# for i in binranges[:-1:2]:\n#nmostbins=\n #def myhist(*args,**kwargs): sp.stats.histogram2(ac,\n #bins=sp.zeros( len(binranges)-1 ,dtype='int')\n #bins+=sp.stats.histogram2(ac[i],binranges)[0][:-1]#don't want the last bin\n #which is for vals from the last range to inf\n #chi2=sp.stats.normaltest(ac[i],binranges)[0] #i0 is chi2\n #if chi20:invals.append(i)\n# ninvals=len(invals)\n# inavg=average(invals);del invals\n# \n# for i in xrange(ii+1,len(ac)-1):\n# #thisstd=sp.stats.tstd(ac[:i+2],\n# val=ac[i]\n# if val > 0:\n# inavg=(inavg)*ninvals/(ninvals+1)+val/(ninvals+1) #previous sum + new no.\n# ninvals+=1\n# tstdd=\n\n\n\n\nfrom scipy import signal\ndef detrendhc(hc,dt=.001e-12,minfreq=.01e12,win=None):\n if win==None:win=int(minfreq**-1/dt)#\n bp= xrange(0,len(hc),win) #pts in b/w 
detrending\n dts=sp.signal.detrend(hc,type='linear',bp=bp) #or type 'constant'\n return dts\n\n\ndef genexpdecay(a,tau,w0hz,dt=.001e-12,n=1e6):\n ts=sp.linspace(0,dt*n,n,endpoint=False) #time series\n return [ts,a*cos(w0hz*2*pi*ts)*exp(-ts/tau)]\ndef fftofexpdecay(*args,**kwargs):\n kwargs.setdefault('ang','rads')\n args=list(args)\n args[0]=args[0]**.5 #to take it 'back' to x(t)\n expd=( (genexpdecay(*args)[1]) )\n #expd=expd/range(1,len(expd)+1)\n ##return expd\n #p=gkad( expd , ts=.001e-12)\n \n #ifftexpd=(sp.fftpack.irfft(expd))\n #fftexpd=(sp.fftpack.rfft(expd))\n #return ifftexpd\n p=gka.psd( expd , ts=.001e-12 )\n if kwargs['ang']=='rads': return 2*sp.pi*p[0],p[1]#,(p[1]**.5)\n else: return p[0],p[1]\n\ndef genhc(a,tau,w0hz,dt=.001e-12,n=1e6,seed=None):\n \"\"\"params correspond to a*exp(-1/tau)*cos(w) in the A.C.\"\"\"\n #s/n up w/ n\n tau=float(tau)\n ts=sp.linspace(0,dt*n,n,endpoint=False) #time series\n aa=(sp.e**(1.0/(tau/dt)))**-1.0 #tau just from here\n sigma=(a-a*aa**2)**.5#a from here\n \n #an approx\n if w0hz==0: sinusoid=1\n else: sinusoid=cos(2*pi*w0hz*ts)*(2**.5)##by observation! 
i think it works\n #..for any fnc\n drunk=array( ar(n,[aa],sigma=sigma,seed=seed) )\n return sp.array([ts, drunk*sinusoid ])\n \n\n\ndef ar(n,alpha,sigma=1,seed=None): #magic \n#put no.s in even places in the input vec\n#vector input is linear\n#\"cleaner\" spectogram w/ longer vec\n#neg no.s for oscilations\n#peaks widths change w/ \n if seed!=None:sp.random.seed(seed) #then it will assign a seed\n #sig=1\n mu=0\n errors = np.random.normal(mu, sigma, n)\n #sp.random.seed(123)\n #alpha = np.random.uniform(0,1,p) #sationary <1\n values = np.zeros(n)\n \n for i in xrange(len(values)): # i changed range to xrange\n value = 0\n n = len(alpha) if len(alpha)<=i else i\n for j in xrange(n):\n value = value + alpha[j] * values[i-j-1]\n values[i] = value + errors[i]\n return values\n \ndef rawpsd(d): return (abs(sp.fft(d))**2)/len(d)\n\ndef sumks(peaksparams):\n k=0\n for apeakloc,pparams in peaksparams.iteritems():\n k+=pparams['k']\n return k\n\ndef plotpeaks(peaksparams,maxfreq=1,n=300):\n for apeakloc,pparams in peaksparams.iteritems():\n plot(sp.linspace(0,maxfreq,n)\n ,returnsumofpeaksfx([apeakloc],n,maxfreq=maxfreq)([pparams[0]],[pparams[1]]))\n return\n\n#from scipy.stats import cauchy\ndef returnsumofpeaksfx(peaklocs,specsize,maxfreq=1):\n \"\"\"returns a fnc that takes in dist. 
params (as an array)\n \"\"\"\n #kwargs.setdefault('specrange',specsize)\n #input params[] as vec\n# def dist(xs,x0,g,h):\n# pdf= lambda x: h/(1+((x-x0)/g)**2)\n# return sp.array(map(pdf,xs))\n\n xs=sp.linspace(0\n ,maxfreq #?!?!?!\n ,specsize) #so this shouldn't be regened\n #w/ each soln iteration\n xs2=xs**2\n \n #for cauchy height=1/(pi*gamma)\n \n #def peakgen(h,f,freqscale):\n# def peakgen(h,f,hwhm):\n# #halfwidth at half max is frac relative to freq scale\n# return h/( ((xs-f)/hwhm)**2 + 1 ) #graphical\n #return h*g**2/((xs-f)**2+g**2)\n# return cauchy.pdf(xs*freqscale\n# ,scale=((pi*h)**-1)\n# ,loc=f*freqscale)\n\n\n\n def peakgen(A,w0,tau): #radians\n #\"2*A*tau*(1+w^2*tau^2+w0^2*tau^2)\n #/(1+w^2*tau^2+2*tau^2*w*w0+w0^2*tau^2)\n #/(1+w^2*tau^2-2*tau^2*w*w0+w0^2*tau^2)\"\n #tau=abs(tau)#don't want any neg. taus\n #..or As\n #A=abs(A)\n w02=(w0)**2; tau2=(tau)**2\n return 2*A*tau*(1+xs2*tau2+w02*tau2) \\\n /(1+xs2*tau2+2*tau2*xs*w0+w02*tau2) \\\n /(1+xs2*tau2-2*tau2*xs*w0+w02*tau2)\n #return 2*A*tau/(1+((xs-w0)**2)*tau**2)\n\n# #goes w/ below\n# xs=xs*2*pi\n# xs2=xs**2\n# def peakgen(A,zeta,tau): #Hz\n# #\"8*A*tau*Pi^2*(4*Pi^2+4*tau^2*w^2*Pi^2+tau^2*zeta^2)\n# #/(4*Pi^2+tau^2*zeta^2-4*tau^2*zeta*w*Pi+4*tau^2*w^2*Pi^2)\n# #/(4*Pi^2+4*tau^2*w^2*Pi^2+4*tau^2*zeta*w*Pi+tau^2*zeta^2)\"\n# zeta2=(zeta)**2; tau2=(tau)**2; pi2=pi**2\n#\n# return 8*A*tau*pi2*(4*pi2+4*tau2*xs2*pi2+tau2*zeta2) \\\n# /(4*pi2+tau2*zeta2-4*tau2*zeta*xs*pi+4*tau2*xs2*pi2) \\\n# /(4*pi2+tau2*zeta2+4*tau2*zeta*xs*pi+4*tau2*xs2*pi2)\n \n def peaksgen(hs,peaklocs,hwhms):\n #return sum(map(peakgen,hs,peaklocs,[freqscale]*len(hs)),axis=0)\n #return sum(map(peakgen,hs,peaklocs),axis=0)\n return sum(map(peakgen,hs,peaklocs,hwhms),axis=0)\n return lambda heights,hwhms: peaksgen(heights,peaklocs,hwhms)#vec\n #return lambda heights: peaksgen(heights)#vec\n#(returnsumofpeaksfx([4k,5k,7k,15k,20k....],~1e6/2,~1e6/2)\n\n\ndef returnsumofexpdecays(w0hzs,n,dt=1):\n \"\"\"returns a fnc that takes in AC 
params\n \"\"\"\n xs=sp.linspace(0,dt*n,n,endpoint=False)\n \n def acgen(A,w0,tau):\n return A*sp.e**(-xs/tau)*cos(xs*2*pi*w0)\n \n def acsgen(aas,w0s,tawz):\n #return sum(map(peakgen,hs,peaklocs,[freqscale]*len(hs)),axis=0)\n #return sum(map(peakgen,hs,peaklocs),axis=0)\n return sum(map(acgen,aas,w0s,tawz),axis=0)\n return lambda az,taus: acsgen(az,w0hzs,taus)#vec\n #return lambda heights: peaksgen(heights)#vec\n#(returnsumofpeaksfx([4k,5k,7k,15k,20k....],~1e6/2,~1e6/2)\n\n \ndef savitzky_golay(y, window_size, order, deriv=0):\n r\"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techhniques.\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less then `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or it's n-th derivative).\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. 
The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over a odd-sized window centered at\n the point.\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n References\n ----------\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery\n Cambridge University Press ISBN-13: 9780521880688\n \"\"\"\n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError, msg:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n # precompute coefficients\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv]\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m, y, mode='valid')\n\n\n#\n#def peaksinsegs(data, step): #breaks it into blocks. 
useless\n# data = data.ravel()\n# length = len(data)\n# if length % step == 0:\n# data.shape = (length/step, step)\n# else:\n# data.resize((length/step, step))\n# max_data = np.maximum.reduce(data,1)\n# min_data = np.minimum.reduce(data,1)\n# return np.concatenate((max_data[:,np.NewAxis], min_data[:,np.NewAxis]), 1)\n\n#higher n should find more (false) peaks\nimport itertools\nimport random\nclass winadvisor(object):\n #store bins collection\n def __init__(self,delta,n,**kwargs):#,data=None):#,stufftobin=None):\n self.delta=delta\n self.stop=False\n self.n=n\n self.kwargs=kwargs\n self.kwargs.setdefault('sort',True)\n self.evaled={} #possibly derive to keep only last few\n #self.lastresult=self.acceptorreject(data)\n #bin\n return\n\n# def bin(self,*stufftobin):\n# for tobin in stufftobin:\n \n def acceptorreject(self,data,**kwargs):\n \"\"\"better to input ordered data\"\"\"\n if len(data)==1:return True#assert len(data)>=2\n sort=kwargs.setdefault('sort',self.kwargs['sort']) #so the sort is just for this fnc call\n if sort==True: data=sorted(data)\n combogen=itertools.combinations(data,2)\n for acombo in combogen:\n if abs(acombo[0]-acombo[1])=1\n twoindices=tuple(twoindices)\n assert len(twoindices) and len(twodatas)==2\n di=twoindices[1]-twoindices[0]\n loi=twoindices[0];hii=twoindices[1]\n assert di>0\n \n# idd=dict( zip(indices,datas) )\n# if (twoindices[0]) in self.lastpaidevaled.keys() \\\n# or (twoindices[1]) in self.lastpaidevaled.keys():\n \n #see if it was already evaluated and get val from there\n try: lo=self.evaled[twoindices[0]]\n except:\n lo=self.acceptorreject(twodatas[0])\n self.evaled.update({twoindices[0]:lo})\n try: hi=self.evaled[twoindices[1]]\n except:\n hi=self.acceptorreject(twodatas[1])\n self.evaled.update({twoindices[1]:hi})\n \n if lo == True and hi==True:\n #try going higher\n #True anything below i lo\n for i in xrange(1,hii+1): self.evaled.setdefault(i,True)\n return 1\n if lo == True and hi==False:\n if di>1:\n #optimum could be 
b/w indices\n for i in xrange(1,loi+1): self.evaled.setdefault(i,True)\n for i in xrange(hii,self.n+1): self.evaled.setdefault(i,False)\n return None\n else:\n self.stop=True \n return 0 # ie bingo\n if lo == False and hi==False:\n for i in xrange(loi,self.n+1): self.evaled.setdefault(i,False)\n return -1 #try going lower\n if lo == False and hi==True:\n #print loi, hii\n raise Exception, 'unexpected situation'\n \n \n \n def trytofindhifi(self,indices,datas):\n \"\"\"..but w/o false peaks\"\"\"\n assert len(indices)>=2\n assert len(indices)==len(datas)\n assert sorted(indices)==indices\n \n indices=list(indices) \n mini=indices[0]#min(indices)\n maxi=indices[-1]#max(indices)\n ii=iter(indices)\n idd=dict( zip(indices,datas) )\n i2nexti=1\n for curi in ii:\n nexti=indices[i2nexti]\n #print curi,nexti\n ude=self.upordownorend( [curi,nexti], [idd[curi],idd[nexti]] )\n #print ude \n if ude==1:\n if nexti==maxi:\n if maxi==self.n:self.stop=True\n return maxi\n #else keep searching\n #\n if ude==None: return curi\n if ude==0: return curi\n if ude==-1 and curi==mini: return None #it's all crap\n i2nexti+=1\n \n def getrandints(self,no,minn=1):#no. 
can get big\n# maxn=self.n\n# got=[]\n# if len(self.evaled.keys())==(self.n-minn+1): return None\n# for i in xrange(no):\n# rn=random.randint(minn,maxn)\n# #this loop becomes slow when the no.s are close to being exhausted\n# while (rn in got) or (rn in self.evaled.keys()):#conditions to keep looking\n# rn=random.randint(minn,maxn)\n# got.append(rn)\n# if len(self.evaled.keys())==(self.n-minn+1): return got #if exhausted\n# return sorted(got)\n maxn=self.n\n got=[]\n if len(self.evaled.keys())==(self.n-minn+1): return None\n choices=xrange(minn,maxn+1)\n choices=list(frozenset(choices)-frozenset(self.evaled.keys()))\n #much faster!!\n for i in xrange(no):\n rn=random.choice(choices)\n got.append(rn)\n choices.remove(rn)\n if len(self.evaled.keys())==(self.n-minn+1): return got #if exhausted\n return sorted(got)\n\ndef returnpeaksofabsac(ac,dt=.001e-12,peaktol=.05):\n #not that sensitive to peaktol, peak tol .1 gives same ans\n #can input just 25% of full ac\n ac=sp.absolute(ac)\n n=len(ac)\n xs=sp.linspace(0,dt*n,n,endpoint=False)\n pt=max(ac)*peaktol\n pd= dict( peakdet(ac,pt,x=xs)[0] );del xs\n xf=pd.keys();yf=pd.values()\n return array(xf),array(yf)\n\ndef fitexpdecay(xs,ys):\n xs=array(xs);ys=array(ys);\n def expdecay(t,a,tau): return a*sp.exp(-t/float(abs(tau)))\n def mined(a_n_tau): return ( ys - expdecay(xs,a_n_tau[0],a_n_tau[1]) )**2\n #def minedjusttau(tau): return mined([max(ys),tau])\n #s=sp.optimize.leastsq(minedjusttau, (xs[1]-xs[0])*3000 ) \n s=sp.optimize.leastsq(mined, [max(ys),(xs[1]-xs[0])*3000] ) #this way\n #just b/c i like the resulting tau better\n #print s\n return abs(s[0][0]),abs(s[0][1]) #a, tau\n \n\ndef chopoffspectra(spectra,zerotol=.001):\n \"\"\" if expecting nothing beyond a certain freq\"\"\"\n maxval=zerotol*max(spectra[1]) #is the vals i\n #start from highest side and go low\n maxi=len(spectra[1])-1\n for ani in xrange(len(spectra[1])):\n decreasingi=maxi-ani\n if spectra[1][decreasingi] mx:\n mx = this\n mxpos = x[i]\n if this < 
mn:\n mn = this\n mnpos = x[i]\n \n if lookformax:\n if this < mx-delta:\n maxtab.append((mxpos, mx))\n mn = this\n mnpos = x[i]\n lookformax = False\n else:\n if this > mn+delta:\n mintab.append((mnpos, mn))\n mx = this\n mxpos = x[i]\n lookformax = True\n\n return maxtab, mintab\n\n\n\ntry:\n gkb=batchfoldermgt('\\\\\\\\129.59.197.166\\\\aldosams\\\\research\\\\yag\\\\runtypes\\\\gk')\nexcept:\n gkb=batchfoldermgt('/home/aldosams/research/yag/runtypes/gk')\n \ngka=gkanalysis(gkb)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":617,"cells":{"__id__":{"kind":"number","value":19112604497175,"string":"19,112,604,497,175"},"blob_id":{"kind":"string","value":"b9a61f5d229274e0161e3465e4cc91602c524f1c"},"directory_id":{"kind":"string","value":"2f89875c097a4aaddd4dca052676f0df887abb46"},"path":{"kind":"string","value":"/tests/simple_demo.py"},"content_id":{"kind":"string","value":"939e1d0a14b26f8a798139d6b6dcc2534b7afd39"},"detected_licenses":{"kind":"list like","value":["LGPL-3.0-only"],"string":"[\n 
\"LGPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"pmundkur/libcrm114"},"repo_url":{"kind":"string","value":"https://github.com/pmundkur/libcrm114"},"snapshot_id":{"kind":"string","value":"71d31be2d38f58e0da191e98f381764b3a09e0e1"},"revision_id":{"kind":"string","value":"fe1580274d01fc2dc9e667d38239f7272b49f0cf"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-24T14:02:11.078316","string":"2020-05-24T14:02:11.078316"},"revision_date":{"kind":"timestamp","value":"2013-04-07T20:35:10","string":"2013-04-07T20:35:10"},"committer_date":{"kind":"timestamp","value":"2013-04-07T20:35:10","string":"2013-04-07T20:35:10"},"github_id":{"kind":"number","value":2824772,"string":"2,824,772"},"star_events_count":{"kind":"number","value":6,"string":"6"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2013-04-07T20:35:11","string":"2013-04-07T20:35:11"},"gha_created_at":{"kind":"timestamp","value":"2011-11-22T02:16:36","string":"2011-11-22T02:16:36"},"gha_updated_at":{"kind":"timestamp","value":"2013-04-07T20:35:10","string":"2013-04-07T20:35:10"},"gha_pushed_at":{"kind":"timestamp","value":"2013-04-07T20:35:10","string":"2013-04-07T20:35:10"},"gha_size":{"kind":"number","value":116,"string":"116"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"number","value":5,"string":"5"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"C"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import string, pprint\nimport pycrm114 as p\n\nimport texts\nAlice_frag = \\\n \"So she was considering in her own mind (as well as she could, for the\\n\" \\\n \"hot day made her feel very sleepy and 
stupid), whether the pleasure\\n\" \\\n \"of making a daisy-chain would be worth the trouble of getting up and\\n\" \\\n \"picking the daisies, when suddenly a White Rabbit with pink eyes ran\\n\" \\\n \"close by her.\\n\"\nHound_frag = \\\n \"\\\"Well, Watson, what do you make of it?\\\"\\n\" \\\n \"Holmes was sitting with his back to me, and I had given him no\\n\" \\\n \"sign of my occupation.\\n\" \\\n \"\\\"How did you know what I was doing? I believe you have eyes in\\n\" \\\n \"the back of your head.\\\"\\n\"\nMacbeth_frag = \\\n\" Double, double, toil and trouble;\\n\" \\\n\" Fire, burn; and cauldron, bubble.\\n\" \\\n\" \\n\" \\\n\" SECOND WITCH.\\n\" \\\n\" Fillet of a fenny snake,\\n\" \\\n\" In the caldron boil and bake;\\n\" \\\n\" Eye of newt, and toe of frog,\\n\" \\\n\" Wool of bat, and tongue of dog,\\n\" \\\n\" Adder's fork, and blind-worm's sting,\\n\" \\\n\" Lizard's leg, and howlet's wing,--\\n\" \\\n\" For a charm of powerful trouble,\\n\" \\\n\" Like a hell-broth boil and bubble.\\n\" \\\n\nWillows_frag = \\\n \"'This is fine!' he said to himself. 
'This is better than whitewashing!'\\n\" \\\n \"The sunshine struck hot on his fur, soft breezes caressed his heated\\n\" \\\n \"brow, and after the seclusion of the cellarage he had lived in so long\\n\" \\\n \"the carol of happy birds fell on his dulled hearing almost like a shout.\"\n\n\ncb = p.ControlBlock(flags=(p.CRM114_SVM | p.CRM114_STRING),\n classes=[(\"Alice\", True), (\"Macbeth\", False)],\n start_mem = 8000000)\n\ncb.dump(file(\"test_cb_dump.txt\", 'w'))\ncb = p.ControlBlock.load(file(\"test_cb_dump.txt\", 'r'))\n\ndb = p.DataBlock(cb)\n\nprint \" Starting to learn the 'Alice in Wonderland' text\"\ndb.learn_text(0, texts.Alice)\n\nprint \" Starting to learn the 'MacBeth' text\"\ndb.learn_text(1, texts.Macbeth)\n\nprint \" Writing our datablock as 'simple_demo_datablock.txt'.\"\ndb.dump(file(\"simple_demo_datablock.txt\", 'w'))\n\nprint \" Reading text form back in.\"\ndb = p.DataBlock.load(file(\"simple_demo_datablock.txt\", 'r'))\n\nprint \" Classifying the 'Alice' text.\"\ns = db.classify_text(Alice_frag)\nprint (\"Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d\"\n % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features()))\nfor sc in s.scores():\n print (\"documents: %d features: %d hits: %d prob: %f pR: %f\" %\n (sc[\"documents\"], sc[\"features\"], sc[\"hits\"], sc[\"prob\"], sc[\"pR\"]))\n\nprint \" Classifying the 'Macbeth' text.\"\ns = db.classify_text(Macbeth_frag)\nprint (\"Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d\"\n % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features()))\nfor sc in s.scores():\n print (\"documents: %d features: %d hits: %d prob: %f pR: %f\" %\n (sc[\"documents\"], sc[\"features\"], sc[\"hits\"], sc[\"prob\"], sc[\"pR\"]))\n\nprint \" Classifying the 'Hound' text.\"\ns = db.classify_text(Hound_frag)\nprint (\"Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d\"\n % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features()))\nfor sc in s.scores():\n 
print (\"documents: %d features: %d hits: %d prob: %f pR: %f\" %\n (sc[\"documents\"], sc[\"features\"], sc[\"hits\"], sc[\"prob\"], sc[\"pR\"]))\n\nprint \" Classifying the 'Wind in the Willows' text.\"\ns = db.classify_text(Willows_frag)\nprint (\"Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d\"\n % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features()))\nfor sc in s.scores():\n print (\"documents: %d features: %d hits: %d prob: %f pR: %f\" %\n (sc[\"documents\"], sc[\"features\"], sc[\"hits\"], sc[\"prob\"], sc[\"pR\"]))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":618,"cells":{"__id__":{"kind":"number","value":13228499323168,"string":"13,228,499,323,168"},"blob_id":{"kind":"string","value":"4f13ecfb5c2883b890629b36336467ba0ee8ab37"},"directory_id":{"kind":"string","value":"8a6cdc50c434eecd30f6ec964518d299901dccfb"},"path":{"kind":"string","value":"/uamobile/factory/softbank.py"},"content_id":{"kind":"string","value":"b2610f963f17c1c14b24cf6f64f09252d0f02230"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"csakatoku/uamobile"},"repo_url":{"kind":"string","value":"https://github.com/csakatoku/uamobile"},"snapshot_id":{"kind":"string","value":"ad8fa2663d45386298b14ff8895b160dcdb429c8"},"revision_id":{"kind":"string","value":"7be1f739369bb00b0ca099593d0d9dfaf52fb3b8"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T09:49:13.364754","string":"2021-01-18T09:49:13.364754"},"revision_date":{"kind":"timestamp","value":"2010-06-18T07:30:38","string":"2010-06-18T07:30:38"},"committer_date":{"kind":"timestamp","value":"2010-06-18T07:30:38","string":"2010-06-18T07:30:38"},"github_id":{"kind":"number","value":687654,"string":"687,654"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2014-10-29T10:43:59","string":"2014-10-29T10:43:59"},"gha_created_at":{"kind":"timestamp","value":"2010-05-26T17:18:08","string":"2010-05-26T17:18:08"},"gha_updated_at":{"kind":"timestamp","value":"2013-11-14T09:59:47","string":"2013-11-14T09:59:47"},"gha_pushed_at":{"kind":"timestamp","value":"2010-06-18T07:30:53","string":"2010-06-18T07:30:53"},"gha_size":{"kind":"number","value":300,"string":"300"},"gha_stargazers_count":{"kind":"number","value":8,"string":"8"},"gha_forks_count":{"kind":"number","value":3,"string":"3"},"gha_open_issues_count":{"kind":"number","value":2,"string":"2"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom uamobile.factory.base import AbstractUserAgentFactory\nfrom uamobile.softbank import SoftBankUserAgent\nfrom uamobile.parser import SoftBankUserAgentParser\n\nclass 
SoftBankUserAgentFactory(AbstractUserAgentFactory):\n device_class = SoftBankUserAgent\n parser = SoftBankUserAgentParser()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":619,"cells":{"__id__":{"kind":"number","value":14207751852955,"string":"14,207,751,852,955"},"blob_id":{"kind":"string","value":"848f5cf125ec7d4d75324f9fce32b380ab8075c7"},"directory_id":{"kind":"string","value":"ce8f4fa31e5682b33672d1c348dd5d958da06fbb"},"path":{"kind":"string","value":"/problem10.py"},"content_id":{"kind":"string","value":"6230d8e3005627bd7c265ffd7119797eb8a2af6f"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"djmittens/euler"},"repo_url":{"kind":"string","value":"https://github.com/djmittens/euler"},"snapshot_id":{"kind":"string","value":"0b5d7e04b07715880ef85ae7bd3426f12199eddb"},"revision_id":{"kind":"string","value":"5b0fa070c147b52c5874e0bad018e4c3d9f5dd06"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-03-29T15:29:02.338032","string":"2020-03-29T15:29:02.338032"},"revision_date":{"kind":"timestamp","value":"2014-01-06T06:09:04","string":"2014-01-06T06:09:04"},"committer_date":{"kind":"timestamp","value":"2014-01-06T06:09:04","string":"2014-01-06T06:09:04"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_
forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from common import *\n\nprint sum(filter(prime, range(2, 2000000)))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":620,"cells":{"__id__":{"kind":"number","value":14508399556037,"string":"14,508,399,556,037"},"blob_id":{"kind":"string","value":"a77a1c38889ec1b7c6c216190e22713dfc7e8c73"},"directory_id":{"kind":"string","value":"40f2c9b0659ed981c2039189ff54466731160fdd"},"path":{"kind":"string","value":"/test/bpl_doctest.py"},"content_id":{"kind":"string","value":"568d09b739391ccd72dcc90f9dcd7c9ae62310c0"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"straszheim/boost-python"},"repo_url":{"kind":"string","value":"https://github.com/straszheim/boost-python"},"snapshot_id":{"kind":"string","value":"cfe538c088c864d3044859b464a985efad2c73f6"},"revision_id":{"kind":"string","value":"119cb5a08939a6ea412d5892db9ae01296e1d083"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2017-10-30T21:03:17.513168","string":"2017-10-30T21:03:17.513168"},"revision_date":{"kind":"timestamp","value":"2012-08-21T08:58:38","string":"2012-08-21T08:58:38"},"committer_date":{"kind":"timestamp","value":"2012-08-21T08:58:38","string":"2012-08-21T08:58:38"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_c
reated_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Copyright Troy D. Straszheim 2009. Distributed under the Boost\n# Software License, Version 1.0. (See accompanying\n# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n#\n# A script that runs doctests that works on python 2.2 and up.\n# The catch: only do portable stuff in the doctests.\n#\nimport sys\n\npython_version = sys.version_info[0] * 100 \\\n + sys.version_info[1] * 10 \\\n + sys.version_info[2]\n\nif python_version < 300:\n import py2_helpers as helpers\nelse:\n import py3_helpers as helpers\n\npythonpath = sys.argv[1]\ntestmodulename = sys.argv[2]\nsys.argv = sys.argv[2:]\nsys.path.append(pythonpath)\n\ntestmodule = __import__(testmodulename)\n\n\nshouldthrow = helpers.shouldthrow\n\nglobs = {'shouldthrow' : shouldthrow,\n 'python_version' : python_version }\n\ndef main(docstring):\n if python_version < 300:\n #\n # exception types are printed fully qualified in py3k\n #\n docstring = docstring.replace('Boost.Python.ArgumentError', 'ArgumentError')\n docstring = docstring.replace('Boost.Python.ArgumentError', 'ArgumentError')\n from doctest import Tester\n t = Tester(globs=globs)\n (failures, tries) = t.runstring(docstring, sys.argv[0])\n t.summarize(verbose=1)\n sys.exit(failures > 0)\n else:\n import doctest\n parser = doctest.DocTestParser()\n dt = parser.get_doctest(docstring,\n globs=globs,\n name=sys.argv[0],\n filename=None,\n lineno=None)\n print(dt)\n runner = doctest.DocTestRunner(verbose=1)\n runner.run(dt)\n (failed, attempted) = runner.summarize(verbose=True)\n sys.exit(failed > 0)\n # doctest.testfile(scriptfile, module_relative=False)\n\n\nif s in testmodule:\n 
if isinstance(testmodule.s, string):\n main(string)\nelif main in testmodule:\n if isinstance(testmodule.main, function):\n testmodule.main(sys.argv)\nelse:\n raise RuntimeError(\"neither doctest string nor main found in module\")\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":621,"cells":{"__id__":{"kind":"number","value":9526237464446,"string":"9,526,237,464,446"},"blob_id":{"kind":"string","value":"2512b80318f39da2086012cbdbfbc3d16014a962"},"directory_id":{"kind":"string","value":"39ccf37ee3a51763441cf0cbb494908cf05eb4cb"},"path":{"kind":"string","value":"/lexer_fo.py"},"content_id":{"kind":"string","value":"29c8ef4543f33af3cc3e088a73dae33919368b8d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"cburschka/python-logic"},"repo_url":{"kind":"string","value":"https://github.com/cburschka/python-logic"},"snapshot_id":{"kind":"string","value":"3c041f455ad24342f30f16f522aa46863a03c2ed"},"revision_id":{"kind":"string","value":"c1da12a0583086316b03b3a61a071ee1338e36ce"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T19:51:20.777936","string":"2021-01-10T19:51:20.777936"},"revision_date":{"kind":"timestamp","value":"2014-02-21T08:10:18","string":"2014-02-21T08:10:18"},"committer_date":{"kind":"timestamp","value":"2014-02-21T08:10:18","string":"2014-02-21T08:10:18"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at"
:{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import symbols as SYM\nimport parser.symbol\nimport parser.lexer\n\nclass Symbol(parser.symbol.Term):\n pass\n\nclass Equality(Symbol):\n pass\n\nclass Relation(Symbol):\n pass\n\nclass Function(Symbol):\n pass\n\nclass Constant(Symbol):\n pass\n\nclass Operator(parser.symbol.Term):\n pass\n\nclass Quantor(Operator):\n pass\n\nclass Junctor(Operator):\n pass\n\nclass Not(Operator):\n pass\n\nclass LeftParen(Operator):\n pass\n\nclass RightParen(Operator):\n pass\n\nclass Variable(parser.symbol.Term):\n pass\n\nclass Comma(Operator):\n pass\n\nlexer = lambda signature: parser.lexer.lexer(\n meta = {\n SYM.EXISTS: Quantor, SYM.FORALL: Quantor,\n SYM.NOT: Not,\n SYM.AND: Junctor, SYM.OR: Junctor, SYM.IMP: Junctor, SYM.EQ: Junctor,\n '(': LeftParen, ')': RightParen, '=': Equality, ',': Comma\n },\n names = dict(\n [(name, sym) for name, (sym, d) in signature['relations'].items()] +\n [(name, sym) for name, (sym, d) in signature['functions'].items()] +\n [(name, sym) for name, sym in signature['constants'].items()]\n ),\n variable = Variable,\n char_filter = lambda c: ('1' <= c <= '9' or 'a' <= c <= 'z' or 'A' <= c <= 'Z')\n 
)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":622,"cells":{"__id__":{"kind":"number","value":18751827231650,"string":"18,751,827,231,650"},"blob_id":{"kind":"string","value":"30d7ec270cdf58ce34ab16d5ef14c6954845a4a1"},"directory_id":{"kind":"string","value":"2a2c0e9e0aef33c43a65b8b0d703c28d86ed8831"},"path":{"kind":"string","value":"/bakery_cli/ttfont.py"},"content_id":{"kind":"string","value":"8a6d2c531bf6520192ea82fd42a5d0c537e23cc1"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"anthrotype/fontbakery"},"repo_url":{"kind":"string","value":"https://github.com/anthrotype/fontbakery"},"snapshot_id":{"kind":"string","value":"31dbf6fc9804a3d046a06a2db3b964d95682d49a"},"revision_id":{"kind":"string","value":"5717c39e9c999b62cacbdfde1fc1ee96ae049e5a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T09:08:45.361703","string":"2021-01-15T09:08:45.361703"},"revision_date":{"kind":"timestamp","value":"2014-12-04T14:20:41","string":"2014-12-04T14:20:41"},"committer_date":{"kind":"timestamp","value":"2014-12-04T14:20:41","string":"2014-12-04T14:20:41"},"github_id":{"kind":"number","value":27259674,"string":"27,259,674"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gh
a_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":" # coding: utf-8\n# Copyright 2013 The Font Bakery Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.\nimport re\nimport os.path as op\n\nfrom fontTools import ttLib\n\n\nclass BaseFont(object):\n\n @staticmethod\n def get_ttfont(path):\n return Font(path)\n\n @staticmethod\n def get_ttfont_from_metadata(path, font_metadata, is_menu=False):\n path = op.join(op.dirname(path), font_metadata.filename)\n if is_menu:\n path = path.replace('.ttf', '.menu')\n return Font.get_ttfont(path)\n\n\nclass Font(BaseFont):\n\n def __init__(self, fontpath):\n if fontpath[-4:] == '.ttx':\n self.ttfont = ttLib.TTFont(None)\n self.ttfont.importXML(fontpath, quiet=True)\n else:\n self.ttfont = ttLib.TTFont(fontpath)\n\n self.ascents = AscentGroup(self.ttfont)\n self.descents = DescentGroup(self.ttfont)\n self.linegaps = LineGapGroup(self.ttfont)\n\n def __getitem__(self, key):\n \"\"\" Returns TTFont table with key name\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Bold.ttf\")\n >>> font['name'].tableTag\n 'name'\n \"\"\"\n return self.ttfont[key]\n\n def get_program_bytecode(self):\n \"\"\" Return binary program code from \"prep\" table.\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Bold.ttf\")\n >>> 
font.get_program_bytecode()\n '\\\\xb8\\\\x01\\\\xff\\\\x85\\\\xb0\\\\x04\\\\x8d'\n \"\"\"\n try:\n return self['prep'].program.getBytecode()\n except KeyError:\n return \"\"\n\n def get_bounding(self):\n \"\"\" Returns max and min bbox font\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.get_bounding()\n (-384, 1178)\n \"\"\"\n if self.ttfont.sfntVersion == 'OTTO':\n return self['head'].yMin, self['head'].yMax\n\n ymax = 0\n for g in self['glyf'].glyphs:\n char = self['glyf'][g]\n if hasattr(char, 'yMax') and ymax < char.yMax:\n ymax = char.yMax\n\n ymin = 0\n for g in self['glyf'].glyphs:\n char = self['glyf'][g]\n if hasattr(char, 'yMin') and ymin > char.yMin:\n ymin = char.yMin\n\n return ymin, ymax\n\n @property\n def license_url(self):\n \"\"\" Return LicenseURL from \"name\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.license_url\n u'http://scripts.sil.org/OFL'\n \"\"\"\n for name in self.names:\n if name.nameID == 14:\n return Font.bin2unistring(name)\n\n @property\n def macStyle(self):\n return self['head'].macStyle\n\n @property\n def italicAngle(self):\n return self['post'].italicAngle\n\n @property\n def names(self):\n return self['name'].names\n\n @property\n def glyphs(self):\n \"\"\" Returns list of glyphs names in fonts\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(len(font.glyphs))\n 502\n \"\"\"\n return self.ttfont.getGlyphOrder()\n\n @property\n def OS2_usWeightClass(self):\n \"\"\" OS/2.usWeightClass property value\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.OS2_usWeightClass)\n 400\n \"\"\"\n return self['OS/2'].usWeightClass\n\n @property\n def OS2_usWidthClass(self):\n \"\"\" Returns OS/2.usWidthClass property value\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.OS2_usWidthClass\n 5\n \"\"\"\n return self['OS/2'].usWidthClass\n\n @property\n def OS2_fsType(self):\n \"\"\" OS/2.fsType property value\n\n 
>>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.OS2_fsType)\n 8\n \"\"\"\n return self['OS/2'].fsType\n\n def platform_entry(self, entry):\n if entry.platformID == 1 and entry.langID == 0:\n return Font.bin2unistring(entry)\n elif entry.platformID == 3 and entry.langID == 0x409:\n return Font.bin2unistring(entry)\n\n\n @property\n def fullname(self):\n \"\"\" Returns fullname of fonts\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.fullname\n u'Monda Regular'\n \"\"\"\n for entry in self.names:\n if entry.nameID != 4:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def _style_name(self):\n for entry in self.names:\n if entry.nameID != 2:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def stylename(self):\n \"\"\" Returns OpenType specific style name\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Bold.ttf\")\n >>> font.stylename\n u'Bold'\n \"\"\"\n return self._style_name\n\n @property\n def _family_name(self):\n\n for entry in self.names:\n if entry.nameID != 1:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def familyname(self):\n \"\"\" Returns fullname of fonts\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Bold.ttf\")\n >>> font.familyname\n u'Font'\n \"\"\"\n return self._family_name\n\n @property\n def ot_family_name(self):\n \"\"\" Returns Windows-only Opentype-specific FamilyName \"\"\"\n for entry in self.names:\n # This value must be only for windows platform as in\n # mac it addresses some problems with installing fonts with\n # that ids\n if entry.nameID != 16 or entry.platformID != 3:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def ot_style_name(self):\n \"\"\" Returns Windows-only Opentype-specific StyleName \"\"\"\n for entry in self.names:\n # This value must be only for windows 
platform as in\n # mac it addresses some problems with installing fonts with\n # that ids\n if entry.nameID != 17 or entry.platformID != 3:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def ot_full_name(self):\n \"\"\" Returns Windows-only Opentype-specific FullName \"\"\"\n for entry in self.names:\n # This value must be only for windows platform as in\n # mac it addresses some problems with installing fonts with\n # that ids\n if entry.nameID != 18 or entry.platformID != 3:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n @property\n def post_script_name(self):\n \"\"\" Returns fullname of fonts\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Bold.ttf\")\n >>> font.post_script_name\n u'Font-Bold'\n \"\"\"\n for entry in self.names:\n if entry.nameID != 6:\n continue\n value = self.platform_entry(entry)\n if value:\n return value\n return ''\n\n def retrieve_cmap_format_4(self):\n \"\"\" Returns cmap table format 4\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.retrieve_cmap_format_4().platEncID)\n 3\n \"\"\"\n for cmap in self['cmap'].tables:\n if cmap.format == 4:\n return cmap\n\n def advance_width(self, glyph_id=None):\n \"\"\" AdvanceWidth of glyph from \"hmtx\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.advance_width(\"a\"))\n 572\n \"\"\"\n if not glyph_id:\n return self['hhea'].advanceWidthMax\n try:\n return self['hmtx'].metrics[glyph_id][0]\n except KeyError:\n return None\n\n @staticmethod\n def bin2unistring(record):\n if b'\\000' in record.string:\n return record.string.decode('utf-16-be')\n elif not isinstance(record.string, unicode):\n return unicode(record.string, 'unicode_escape')\n return record.string\n\n def get_glyf_length(self):\n \"\"\" Length of \"glyf\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.get_glyf_length())\n 21804\n \"\"\"\n return 
self.ttfont.reader.tables['glyf'].length\n\n def get_loca_length(self):\n \"\"\" Length of \"loca\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.get_loca_length())\n 1006\n \"\"\"\n return self.ttfont.reader.tables['loca'].length\n\n def get_loca_glyph_offset(self, num):\n \"\"\" Retrieve offset of glyph in font tables\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.get_loca_glyph_offset(15))\n 836\n >>> int(font.get_loca_glyph_offset(16))\n 904\n \"\"\"\n return self['loca'].locations[num]\n\n def get_loca_glyph_length(self, num):\n \"\"\" Retrieve length of glyph in font loca table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> int(font.get_loca_glyph_length(15))\n 68\n \"\"\"\n return self.get_loca_glyph_offset(num + 1) - self.get_loca_glyph_offset(num)\n\n def get_loca_num_glyphs(self):\n \"\"\" Retrieve number of glyph in font loca table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.get_loca_num_glyphs()\n 503\n \"\"\"\n return len(self['loca'].locations)\n\n def get_hmtx_max_advanced_width(self):\n \"\"\" AdvanceWidthMax from \"hmtx\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.get_hmtx_max_advanced_width()\n 1409\n \"\"\"\n advance_width_max = 0\n for g in self['hmtx'].metrics.values():\n advance_width_max = max(g[0], advance_width_max)\n return advance_width_max\n\n @property\n def advance_width_max(self):\n \"\"\" AdvanceWidthMax from \"hhea\" table\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.advance_width_max\n 1409\n \"\"\"\n return self.advance_width()\n\n def get_upm_height(self):\n return self['head'].unitsPerEm\n\n def get_highest_and_lowest(self):\n high = []\n low = []\n if self.ttfont.sfntVersion == 'OTTO':\n return high, low\n maxval = self.ascents.get_max()\n minval = self.descents.get_min()\n for glyph, params in self['glyf'].glyphs.items():\n if hasattr(params, 'yMax') and 
params.yMax > maxval:\n high.append(glyph)\n if hasattr(params, 'yMin') and params.yMin < minval:\n low.append(glyph)\n return high, low\n\n def save(self, fontpath):\n self.ttfont.save(fontpath)\n\n\ndef is_none_protected(func):\n\n def f(self, value):\n if value is None:\n return\n func(self, value)\n\n return f\n\n\nclass AscentGroup(object):\n\n def __init__(self, ttfont):\n self.ttfont = ttfont\n\n def set(self, value):\n self.hhea = value\n self.os2typo = value\n self.os2win = value\n\n def get_max(self):\n \"\"\" Returns largest value of ascents\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.ascents.get_max()\n 1178\n \"\"\"\n return max(self.hhea, self.os2typo, self.os2win)\n\n def hhea():\n doc = \"\"\"Ascent value in 'Horizontal Header' (hhea.ascent)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.ascents.hhea\n 1178\n \"\"\"\n\n def fget(self):\n return self.ttfont['hhea'].ascent\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['hhea'].ascent = value\n\n return locals()\n hhea = property(**hhea())\n\n def os2typo():\n doc = \"\"\"Ascent value in 'Horizontal Header' (OS/2.sTypoAscender)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.ascents.os2typo\n 1178\n \"\"\"\n\n def fget(self):\n return self.ttfont['OS/2'].sTypoAscender\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['OS/2'].sTypoAscender = value\n\n return locals()\n os2typo = property(**os2typo())\n\n def os2win():\n doc = \"\"\"Ascent value in 'Horizontal Header' (OS/2.usWinAscent)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.ascents.os2win\n 1178\n \"\"\"\n\n def fget(self):\n return self.ttfont['OS/2'].usWinAscent\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['OS/2'].usWinAscent = value\n\n return locals()\n os2win = property(**os2win())\n\n\nclass DescentGroup(object):\n\n def __init__(self, ttfont):\n self.ttfont = ttfont\n\n def set(self, value):\n 
self.hhea = value\n self.os2typo = value\n self.os2win = value\n\n def get_min(self):\n \"\"\" Returns least value of descents.\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.descents.get_min()\n -384\n \"\"\"\n return min(self.hhea, self.os2typo, self.os2win)\n\n def hhea():\n doc = \"\"\" Descent value in 'Horizontal Header' (hhea.descent)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.descents.hhea\n -384\n \"\"\"\n\n def fget(self):\n return self.ttfont['hhea'].descent\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['hhea'].descent = value\n\n return locals()\n hhea = property(**hhea())\n\n def os2typo():\n doc = \"\"\"Descent value in 'Horizontal Header' (OS/2.sTypoDescender)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.descents.os2typo\n -384\n \"\"\"\n\n def fget(self):\n return self.ttfont['OS/2'].sTypoDescender\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['OS/2'].sTypoDescender = value\n\n return locals()\n os2typo = property(**os2typo())\n\n def os2win():\n doc = \"\"\"Descent value in 'Horizontal Header' (OS/2.usWinDescent)\n\n >>> font = Font(\"tests/fixtures/ttf/Font-Regular.ttf\")\n >>> font.descents.os2win\n 384\n \"\"\"\n\n def fget(self):\n return self.ttfont['OS/2'].usWinDescent\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['OS/2'].usWinDescent = abs(value)\n\n return locals()\n os2win = property(**os2win())\n\n\nclass LineGapGroup(object):\n\n def __init__(self, ttfont):\n self.ttfont = ttfont\n\n def set(self, value):\n self.hhea = value\n self.os2typo = value\n\n def hhea():\n doc = \"The hhea.lineGap property\"\n\n def fget(self):\n return self.ttfont['hhea'].lineGap\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['hhea'].lineGap = value\n\n return locals()\n hhea = property(**hhea())\n\n def os2typo():\n doc = \"The OS/2.sTypoLineGap property\"\n\n def fget(self):\n return 
self.ttfont['OS/2'].sTypoLineGap\n\n @is_none_protected\n def fset(self, value):\n self.ttfont['OS/2'].sTypoLineGap = value\n\n return locals()\n os2typo = property(**os2typo())\n\n\nclass FontTool:\n\n @staticmethod\n def get_tables(path):\n \"\"\" Retrieves tables names existing in font\n\n >>> FontTool.get_tables(\"tests/fixtures/ttf/Font-Regular.ttf\")\n ['GDEF', 'gasp', 'loca', 'name', 'post', 'OS/2', 'maxp', 'head', \\\n'kern', 'FFTM', 'GSUB', 'glyf', 'GPOS', 'cmap', 'hhea', 'hmtx', 'DSIG']\n \"\"\"\n font = ttLib.TTFont(path)\n return font.reader.tables.keys()\n\n\ndef getName(font, pairs):\n value = None\n for pair in pairs:\n value = font['name'].getName(*pair)\n if value:\n break\n\n if value.isUnicode():\n value = value.string.decode('utf-16-be')\n else:\n value = value.string\n\n assert value, u'{} seems to be missed in NAME table'.format(pairs)\n return value\n\n\ndef getSuggestedFontNameValues(font):\n family_name = getName(font, [[1, 3, 1],\n [1, 1, 0]])\n\n subfamily_name = getName(font, [[2, 3, 1],\n [2, 1, 0]])\n\n full_name = getName(font, [[4, 3, 1],\n [4, 1, 0]])\n\n subfamilies = ['Regular',\n 'Bold',\n 'Italic',\n 'Semi Bold Italic',\n 'Semi Bold',\n 'Heavy',\n 'Heavy Italic',\n 'Extra Light Italic',\n 'Extra Light',\n 'Medium',\n 'Extra Bold',\n 'Medium Italic',\n 'Extra Bold Italic',\n 'Bold Italic',\n 'Thin Italic',\n 'Thin',\n 'Light Italic',\n 'Light',\n 'Black',\n 'Black Italic']\n\n if full_name == family_name:\n try:\n family_name, subfamily_name = full_name.split(' ', 1)[:]\n except ValueError:\n pass\n\n if subfamily_name == 'Normal' or subfamily_name == 'Roman':\n subfamily_name = 'Regular'\n elif subfamily_name == 'Heavy':\n subfamily_name = 'Black'\n elif subfamily_name == 'Heavy Italic':\n subfamily_name = 'Black Italic'\n return {'family': family_name, 'subfamily': 
subfamily_name}\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":623,"cells":{"__id__":{"kind":"number","value":15427522573947,"string":"15,427,522,573,947"},"blob_id":{"kind":"string","value":"cfe3d1d9d3659ef969a464715b7757d72f8a7fa5"},"directory_id":{"kind":"string","value":"9b03417874df98ca57ff593649a1ee06056ea8ad"},"path":{"kind":"string","value":"/urls.py"},"content_id":{"kind":"string","value":"64df2b7b3877df44579970b1bdeb9e5461339646"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"mzupan/intake"},"repo_url":{"kind":"string","value":"https://github.com/mzupan/intake"},"snapshot_id":{"kind":"string","value":"2c115510c0461b09db310ddc6955943161c783b7"},"revision_id":{"kind":"string","value":"a4b5e0e1e5d441ad9624a9eb44bc1ab98ccf3e22"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-22T01:46:06.212654","string":"2020-04-22T01:46:06.212654"},"revision_date":{"kind":"timestamp","value":"2010-11-23T17:01:50","string":"2010-11-23T17:01:50"},"committer_date":{"kind":"timestamp","value":"2010-11-23T17:01:50","string":"2010-11-23T17:01:50"},"github_id":{"kind":"number","value":1010081,"string":"1,010,081"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_langua
ge":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls.defaults import *\n\nurlpatterns = patterns('',\n (r'^api/', include('api.urls')),\n (r'^server/', include('server.urls')),\n (r'^group/', include('server.urls_group')),\n (r'^admin/', include('admin.urls')),\n \n \n (r'^/?$', 'index.views.show_index'),\n \n #\n # login/logout\n #\n (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'index/login.html'}),\n (r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/login/?logout=1'}),\n)\n\n\n#\n# handling static pages for development only\n#\nfrom django.conf import settings\nif settings.LOCAL_DEVELOPMENT:\n urlpatterns += patterns(\"django.views\",\n url(r\"%s(?P.*)/$\" % settings.MEDIA_URL[1:], \"static.serve\", {\n \"document_root\": settings.MEDIA_ROOT,\n })\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":624,"cells":{"__id__":{"kind":"number","value":13606456403294,"string":"13,606,456,403,294"},"blob_id":{"kind":"string","value":"b57fab85dc87f07c1f8e8862cc7687be9165853c"},"directory_id":{"kind":"string","value":"73318ea0fd00b76af332f74b22dd9a8f9f86a29a"},"path":{"kind":"string","value":"/src/create_test_users.py"},"content_id":{"kind":"string","value":"37d1d0fff21dc0fbd32eb865a670143050ba0038"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ambv/vanityfair"},"repo_url":{"kind":"string","value":"https://github.com/ambv/vanityfair"},"snapshot_id":{"kind":"string","value":"6edc4695cdf27a69f1b3823456b95e03f1e72ff6"},"revision_id":{"kind":"string","value":"c4106aa7e995873dd8338a8b04dfc8d03d7714c3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-04-01T20:47:39.114388","string":"2016-04-01T20:47:39.114388"},"revision_date":{"kind":"timestamp","value":"2013-06-11T09:51:35","string":"2013-06-11T09:51:35"},"committer_date":{"kind":"timestamp","value":"2013-06-11T09:51:35","string":"2013-06-11T09:51:35"},"github_id":{"kind":"number","value":4831987,"string":"4,831,987"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2012 by Łukasz Langa\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission 
notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom random import choice\n\nfrom django.contrib.auth.models import User\n\n\nfirst_names = ['Jan', 'Mateusz', 'Krzysztof', 'Stefan', 'Ambroży',\n 'Marian', 'Eugeniusz', 'Bonifacy', 'Bożydar', 'Amadeusz',\n 'Ludwik', 'Kubuś', 'Roman', 'Zygzak']\n\nlast_names = ['Pipka', 'Buraczek', 'Nieodpowiedni', 'Idiom',\n 'Zabawny', 'Pełnopoważny', 'Niemoga', 'Mariola', 'Dung',\n 'Fubar', 'Osiemnasty', 'Siedemnasty', 'Szesnasty', 'Kwaśny']\n\nnicks = ['lightning', 'thunder', 'storm', 'hard disk', 'ender',\n 'aragorn', 'mordor', 'gandalf', 'smeagol', 'gollum', 'bilbo',\n 'frodo', 'legolas', 'hobbit', 'elf', 'krasnolud', 'wiedźmin',\n 'przyczepa', 'telefon', 'długi', 'mocny', 'szary', 'cichy',\n 'słabizna', 'wycharowany', 'czarodziej', 'potter', 'voldemort',\n 'wróżka', 'sieciowiec', 'informatyk', 'poeta', 'pantograf',\n 'maszynista', 'kurier', 'bohater', 'niemoralny', 'casanova',\n 'śmiszek', 'clint', 'kenny', 'cartman', 'kucharz', 'klucznik',\n 'kwazimodo', 'toudi', 'iktorn', 'smerf', 'gargamel', 'klakier',\n 'puszek', 'żelazko', 'king', 'poland', 'kazik', 'olo']\n\n\nfor i in xrange(100):\n username = 'gienek{}'.format(i)\n u = User(username=username, is_active=True,\n email='{}@allegro.pl'.format(username), first_name=choice(first_names),\n 
last_name=choice(last_names))\n u.set_password(username)\n u.save()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":625,"cells":{"__id__":{"kind":"number","value":15771119921793,"string":"15,771,119,921,793"},"blob_id":{"kind":"string","value":"3de983d60d77aaebea5a875918fc85ad18bd730b"},"directory_id":{"kind":"string","value":"7596eb26e37621eb7e42988cd75fe651c711b88a"},"path":{"kind":"string","value":"/samba/bin/python/samba/tests/samba3sam.py"},"content_id":{"kind":"string","value":"fc3348fc44c24ca6080fb5a94bb5d43ff8072b6d"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only","GPL-3.0-only","LicenseRef-scancode-free-unknown","LGPL-2.1-or-later","GPL-1.0-or-later","LGPL-3.0-only"],"string":"[\n \"GPL-2.0-only\",\n \"GPL-3.0-only\",\n \"LicenseRef-scancode-free-unknown\",\n \"LGPL-2.1-or-later\",\n \"GPL-1.0-or-later\",\n 
\"LGPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"ebrainte/Samba"},"repo_url":{"kind":"string","value":"https://github.com/ebrainte/Samba"},"snapshot_id":{"kind":"string","value":"7aba47d32e582f235ffd06f8c19ee5746d1c4cf2"},"revision_id":{"kind":"string","value":"8c023bcbee5fa5d071c14ab0cbf38dcab30e4094"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T20:22:09.461215","string":"2021-01-23T20:22:09.461215"},"revision_date":{"kind":"timestamp","value":"2013-11-22T16:48:50","string":"2013-11-22T16:48:50"},"committer_date":{"kind":"timestamp","value":"2013-11-22T16:48:50","string":"2013-11-22T16:48:50"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"/root/samba-4.0.10/python/samba/tests/samba3sam.py"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":626,"cells":{"__id__":{"kind":"number","value":13726715520691,"string":"13,726,715,520,691"},"blob_id":{"kind":"string","value":"1a6612015a5a69b2fe0daf68e643c69f9a33a5e9"},"directory_id":{"kind":"string","value":"445beaa2c50d62a837047b3dbd5132ef5470f8db"},"path":{"kind":"string","value":"/Lista-1 
Zumbis/exercicio02_masanori_01.py"},"content_id":{"kind":"string","value":"ff59d1cef36feca1ff761697a1f43095ae4aff4b"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"thayannevls/pythonZumbi"},"repo_url":{"kind":"string","value":"https://github.com/thayannevls/pythonZumbi"},"snapshot_id":{"kind":"string","value":"ecee77d640e6ad7e487348eae1ec2eba5e833f22"},"revision_id":{"kind":"string","value":"9ffd39aea2f2927bdb5d58828dfbc75756d3c197"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-09T07:24:28.655236","string":"2020-04-09T07:24:28.655236"},"revision_date":{"kind":"timestamp","value":"2014-03-09T20:50:02","string":"2014-03-09T20:50:02"},"committer_date":{"kind":"timestamp","value":"2014-03-09T20:50:02","string":"2014-03-09T20:50:02"},"github_id":{"kind":"number","value":17462490,"string":"17,462,490"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"N= int(input('Digite o número que você quer que seja convertido:')) \nprint (N*1000, 
'milimetro')\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":627,"cells":{"__id__":{"kind":"number","value":12859132087384,"string":"12,859,132,087,384"},"blob_id":{"kind":"string","value":"d5b3d629daf1f0f2609bfa72d012ad70bd8722dd"},"directory_id":{"kind":"string","value":"1e66682e3fc5d64a5446409aacd887b9b79b9795"},"path":{"kind":"string","value":"/gccutils.py"},"content_id":{"kind":"string","value":"6b49c1655569c4fc7af4073516358e32c17e6149"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-or-later","GPL-3.0-only"],"string":"[\n \"GPL-3.0-or-later\",\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"jasonxmueller/gcc-python-plugin"},"repo_url":{"kind":"string","value":"https://github.com/jasonxmueller/gcc-python-plugin"},"snapshot_id":{"kind":"string","value":"b61e7ba666a76e2d330e1a15486ceb1963644b67"},"revision_id":{"kind":"string","value":"f77982e513cb04617c8c848dca83751105333890"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-02T09:27:17.531410","string":"2021-01-02T09:27:17.531410"},"revision_date":{"kind":"timestamp","value":"2012-03-16T18:48:53","string":"2012-03-16T18:48:53"},"committer_date":{"kind":"timestamp","value":"2012-03-16T18:48:53","string":"2012-03-16T18:48:53"},"github_id":{"kind":"number","value":3734215,"string":"3,734,215"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"
gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Copyright 2011 David Malcolm \n# Copyright 2011 Red Hat, Inc.\n#\n# This is free software: you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see\n# .\n\nimport gcc\n\nfrom six.moves import xrange\n\ndef get_src_for_loc(loc):\n # Given a gcc.Location, get the source line as a string\n import linecache\n return linecache.getline(loc.file, loc.line).rstrip()\n\ndef get_field_by_name(typeobj, name):\n check_isinstance(typeobj,\n (gcc.RecordType, gcc.UnionType, gcc.QualUnionType))\n for field in typeobj.fields:\n if field.name == name:\n return field\n\ndef get_global_typedef(name):\n # Look up a typedef in global scope by name, returning a gcc.TypeDecl,\n # or None if not found\n for u in gcc.get_translation_units():\n if u.language == 'GNU C++':\n gns = gcc.get_global_namespace()\n return gns.lookup(name)\n if u.block:\n for v in u.block.vars:\n if isinstance(v, gcc.TypeDecl):\n if v.name == name:\n return v\n\ndef get_variables_as_dict():\n result = {}\n for var in gcc.get_variables():\n result[var.decl.name] = var\n return result\n\ndef get_global_vardecl_by_name(name):\n # Look up a variable in global scope by name, returning a gcc.VarDecl,\n # or None if not found\n for u in gcc.get_translation_units():\n if 
u.language == 'GNU C++':\n gns = gcc.get_global_namespace()\n return gns.lookup(name)\n for v in u.block.vars:\n if isinstance(v, gcc.VarDecl):\n if v.name == name:\n return v\n\ndef get_nonnull_arguments(funtype):\n \"\"\"\n 'nonnull' is an attribute on the fun.decl.type\n\n http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html\n\n It can either have no arguments (all pointer args are non-NULL), or\n be a list of integers. These integers are 1-based.\n\n Return a frozenset of 0-based integers, giving the arguments for which we\n can assume the \"nonnull\" property.\n\n (Note the 0-based vs 1-based differences)\n\n Compare with gcc/tree-vrp.c: nonnull_arg_p\n \"\"\"\n check_isinstance(funtype, gcc.FunctionType)\n if 'nonnull' in funtype.attributes:\n result = []\n nonnull = funtype.attributes['nonnull']\n if nonnull == []:\n # All pointer args are nonnull:\n for idx, parm in enumerate(funtype.argument_types):\n if isinstance(parm, gcc.PointerType):\n result.append(idx)\n else:\n # Only the listed args are nonnull:\n for val in nonnull:\n check_isinstance(val, gcc.IntegerCst)\n result.append(val.constant - 1)\n return frozenset(result)\n else:\n # No \"nonnull\" attribute was given:\n return frozenset()\n\ndef invoke_dot(dot):\n from subprocess import Popen, PIPE\n\n if 1:\n fmt = 'png'\n else:\n # SVG generation seems to work, but am seeing some text-width issues\n # with rendering of the SVG by eog and firefox on this machine (though\n # not chromium).\n #\n # Looks like X coordinates allocated by graphviz don't contain quite\n # enough space for the elements.\n #\n # Presumably a font selection/font metrics issue\n fmt = 'svg'\n\n p = Popen(['dot', '-T%s' % fmt, '-o', 'test.%s' % fmt],\n stdin=PIPE)\n p.communicate(dot.encode('ascii'))\n\n p = Popen(['xdg-open', 'test.%s' % fmt])\n p.communicate()\n\ndef pprint(obj):\n pp = TextualPrettyPrinter()\n pp.pprint(obj)\n\ndef pformat(obj):\n pp = TextualPrettyPrinter()\n return pp.pformat(obj)\n\n\nclass 
PrettyPrinter(object):\n def __init__(self):\n self.show_addr = False\n\n def attr_to_str(self, name, value):\n if name == 'addr':\n return hex(value)\n if isinstance(value, str):\n return repr(value)\n return str(value)\n\n def iter_tree_attrs(self, obj):\n # Iterate through the interesting attributes of the object:\n for name in dir(obj):\n # Ignore private and \"magic\" attributes:\n if name.startswith('_'):\n continue\n value = getattr(obj, name)\n # Ignore methods:\n if hasattr(value, '__call__'):\n continue\n if not self.show_addr:\n if name == 'addr':\n continue\n # Don't follow infinite chains, e.g.\n # ptr to ptr to ... of a type:\n if isinstance(obj, gcc.Type):\n if (name == 'pointer' or\n name.endswith('equivalent')):\n continue\n\n #print 'attr %r obj.%s: %r' % (name, name, value)\n yield (name, value)\n\n\nclass TextualPrettyPrinter(PrettyPrinter):\n \"\"\"Convert objects to nice textual dumps, loosely based on Python's pprint\n module\"\"\"\n def __init__(self):\n super(TextualPrettyPrinter, self).__init__()\n self.maxdepth = 5\n\n def pprint(self, obj):\n import sys\n sys.stdout.write(self.pformat(obj))\n\n def make_indent(self, indent):\n return indent * ' '\n\n def pformat(self, obj):\n return self._recursive_format_obj(obj, set(), 0)\n\n def indent(self, prefix, txt):\n return '\\n'.join([prefix + line for line in txt.splitlines()])\n\n def _recursive_format_obj(self, obj, visited, depth):\n def str_for_kv(key, value):\n return ' %s = %s\\n' % (key, value)\n\n check_isinstance(obj, gcc.Tree)\n visited.add(obj.addr)\n\n result = '<%s\\n' % obj.__class__.__name__\n r = repr(obj)\n s = str(obj)\n result += str_for_kv('repr()', r)\n if s != r:\n result += str_for_kv('str()', '%r' % s)\n\n # Show MRO, stripping off this type from front and \"object\" from end:\n superclasses = obj.__class__.__mro__[1:-1]\n result += str_for_kv('superclasses',\n superclasses)\n for name, value in self.iter_tree_attrs(obj):\n if depth < self.maxdepth:\n if 
isinstance(value, gcc.Tree):\n if value.addr in visited:\n result += str_for_kv('.%s' % name,\n '... (%s)' % self.attr_to_str(name, repr(value)))\n else:\n # Recurse\n formatted_value = self._recursive_format_obj(value,\n visited, depth + 1)\n indented_value = self.indent(' ' * (len(name) + 6),\n formatted_value)\n result += str_for_kv('.%s' % name,\n indented_value.lstrip())\n continue\n # Otherwise: just print short version of the attribute:\n result += str_for_kv('.%s' % name,\n self.attr_to_str(name, value))\n\n result += '>\\n'\n return result\n\nclass DotPrettyPrinter(PrettyPrinter):\n # Base class for various kinds of data visualizations that use graphviz\n # (aka \".dot\" source files)\n def to_html(self, text):\n html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n \n # 'dot' doesn't seem to like these:\n '{': '&#123;',\n '}': '&#125;',\n\n ']': '&#93;',\n }\n return \"\".join(html_escape_table.get(c,c) for c in str(text))\n\n def _dot_td(self, text, align=\"left\", colspan=1, escape=1, bgcolor=None,\n port=None):\n if escape:\n text = self.to_html(text)\n attribs = 'align=\"%s\" colspan=\"%i\"' % (align, colspan)\n if bgcolor:\n attribs += ' bgcolor=\"%s\"' % bgcolor\n if port:\n attribs += ' port=\"%s\"' % port\n return ('%s'\n % (attribs, text))\n\n def _dot_tr(self, td_text):\n return ('%s\\n' % self._dot_td(td_text))\n\ntry:\n from pygments.formatter import Formatter\n from pygments.token import Token\n from pygments.styles import get_style_by_name\n\n class GraphvizHtmlFormatter(Formatter, DotPrettyPrinter):\n \"\"\"\n A pygments Formatter to turn source code fragments into graphviz's\n pseudo-HTML format.\n \"\"\"\n def __init__(self, style):\n Formatter.__init__(self)\n self.style = style\n\n def style_for_token(self, token):\n # Return a (hexcolor, isbold) pair, where hexcolor could be None\n\n # Lookup up pygments' color for this token type:\n col = self.style.styles[token]\n\n 
isbold = False\n\n # Extract a pure hex color specifier of the form that graphviz can\n # deal with\n if col:\n if col.startswith('bold '):\n isbold = True\n col = col[5:]\n return (col, isbold)\n\n def format_unencoded(self, tokensource, outfile):\n from pprint import pprint\n for t, piece in tokensource:\n # graphviz seems to choke on font elements with no inner text:\n if piece == '':\n continue\n\n # pygments seems to add this:\n if piece == '\\n':\n continue\n\n # avoid croaking on '\\n':\n if t == Token.Literal.String.Escape:\n continue\n\n color, isbold = self.style_for_token(t)\n if 0:\n print ('(color, isbold): (%r, %r)' % (color, isbold))\n\n if isbold:\n outfile.write('')\n\n # Avoid empty color=\"\" values:\n if color:\n outfile.write('' % color\n + self.to_html(piece)\n + '')\n else:\n outfile.write(self.to_html(piece))\n\n if isbold:\n outfile.write('')\n\n from pygments import highlight\n from pygments.lexers import CLexer\n from pygments.formatters import HtmlFormatter\n\n def code_to_graphviz_html(code):\n style = get_style_by_name('default')\n return highlight(code,\n CLexer(), # FIXME\n GraphvizHtmlFormatter(style))\n\n using_pygments = True\nexcept ImportError:\n using_pygments = False\n\nclass CfgPrettyPrinter(DotPrettyPrinter):\n # Generate graphviz source for this gcc.Cfg instance, as a string\n def __init__(self, cfg, name=None):\n self.cfg = cfg\n if name:\n self.name = name\n\n def block_id(self, b):\n if b is self.cfg.entry:\n return 'entry'\n if b is self.cfg.exit:\n return 'exit'\n return 'block%i' % id(b)\n\n def block_to_dot_label(self, bb):\n # FIXME: font setting appears to work on my machine, but I invented\n # the attribute value; it may be exercising a failure path\n result = '\\n'\n result += '\\n' % bb.index\n curloc = None\n if isinstance(bb.phi_nodes, list):\n for stmtidx, phi in enumerate(bb.phi_nodes):\n result += '' + self.stmt_to_html(phi, stmtidx) + '\\n'\n if isinstance(bb.gimple, list) and bb.gimple != []:\n for 
stmtidx, stmt in enumerate(bb.gimple):\n if curloc != stmt.loc:\n curloc = stmt.loc\n code = get_src_for_loc(stmt.loc).rstrip()\n pseudohtml = self.code_to_html(code)\n # print('pseudohtml: %r' % pseudohtml)\n result += ('')\n \n result += '' + self.stmt_to_html(stmt, stmtidx) + '\\n'\n else:\n # (prevent graphviz syntax error for empty blocks):\n result += self._dot_tr(self.block_id(bb))\n result += '
BLOCK %i
'\n + self.to_html('%4i ' % stmt.loc.line)\n + pseudohtml\n + '
'\n + (' ' * (5 + stmt.loc.column-1)) + '^'\n + '
\\n'\n return result\n\n def code_to_html(self, code):\n if using_pygments:\n return code_to_graphviz_html(code)\n else:\n return self.to_html(code)\n\n def stmt_to_html(self, stmt, stmtidx):\n text = str(stmt).strip()\n text = self.code_to_html(text)\n bgcolor = None\n\n # Work towards visualization of CPython refcounting rules.\n # For now, paint assignments to (PyObject*) vars and to ob_refcnt\n # fields, to highlight the areas needing tracking:\n # print 'stmt: %s' % stmt\n if 0: # hasattr(stmt, 'lhs'):\n # print 'stmt.lhs: %s' % stmt.lhs\n # print 'stmt.lhs: %r' % stmt.lhs\n if stmt.lhs:\n # print 'stmt.lhs.type: %s' % stmt.lhs.type\n\n # Color assignments to (PyObject *) in red:\n if str(stmt.lhs.type) == 'struct PyObject *':\n bgcolor = 'red'\n\n # Color assignments to PTR->ob_refcnt in blue:\n if isinstance(stmt.lhs, gcc.ComponentRef):\n # print(dir(stmt.lhs))\n # print 'stmt.lhs.target: %s' % stmt.lhs.target\n # print 'stmt.lhs.target.type: %s' % stmt.lhs.target.type\n # (presumably we need to filter these to structs that are\n # PyObject, or subclasses)\n # print 'stmt.lhs.field: %s' % stmt.lhs.field\n if stmt.lhs.field.name == 'ob_refcnt':\n bgcolor = 'blue'\n\n return self._dot_td(text, escape=0, bgcolor=bgcolor, port='stmt%i' % stmtidx)\n\n def edge_to_dot(self, e):\n if e.true_value:\n attrliststr = '[label = true]'\n elif e.false_value:\n attrliststr = '[label = false]'\n elif e.loop_exit:\n attrliststr = '[label = loop_exit]'\n elif e.fallthru:\n attrliststr = '[label = fallthru]'\n elif e.dfs_back:\n attrliststr = '[label = dfs_back]'\n else:\n attrliststr = ''\n return (' %s -> %s %s;\\n'\n % (self.block_id(e.src), self.block_id(e.dest), attrliststr))\n\n def extra_items(self):\n # Hook for expansion\n return ''\n\n def to_dot(self):\n if hasattr(self, 'name'):\n name = self.name\n else:\n name = 'G'\n result = 'digraph %s {\\n' % name\n result += ' subgraph cluster_cfg {\\n'\n #result += ' label=\"CFG\";\\n'\n result += ' node [shape=box];\\n'\n 
for block in self.cfg.basic_blocks:\n\n result += (' %s [label=<%s>];\\n'\n % (self.block_id(block), self.block_to_dot_label(block)))\n\n for edge in block.succs:\n result += self.edge_to_dot(edge)\n # FIXME: this will have duplicates:\n #for edge in block.preds:\n # result += edge_to_dot(edge)\n result += ' }\\n'\n\n # Potentially add extra material:\n result += self.extra_items()\n result += '}\\n'\n return result\n\nclass TreePrettyPrinter(DotPrettyPrinter):\n # Generate a graphviz visualization of this gcc.Tree and the graphs of\n # nodes it references, as a string\n def __init__(self, root):\n print('root: %s' % root)\n check_isinstance(root, gcc.Tree)\n self.root = root\n self.show_addr = False\n self.maxdepth = 6 # for now\n\n def tr_for_kv(self, key, value):\n return (' %s %s\\n'\n % (self._dot_td(key),\n self._dot_td(value)))\n\n def label_for_tree(self, obj):\n result = '\\n'\n r = repr(obj)\n s = str(obj)\n result += self.tr_for_kv('repr()', r)\n if s != r:\n result += self.tr_for_kv('str()', '%r' % s)\n\n # Show MRO, stripping off this type from front and \"object\" from end:\n superclasses = obj.__class__.__mro__[1:-1]\n result += self.tr_for_kv('superclasses',\n superclasses)\n\n for name, value in self.iter_tree_attrs(obj):\n result += (' %s %s \\n'\n % (self._dot_td(name),\n self._dot_td(self.attr_to_str(name, value))))\n result += '
\\n'\n return result\n\n def tree_id(self, obj):\n return 'id%s' % id(obj)\n\n def tree_to_dot(self, obj):\n check_isinstance(obj, gcc.Tree)\n return (' %s [label=<%s>];\\n'\n % (self.tree_id(obj), self.label_for_tree(obj)))\n\n def recursive_tree_to_dot(self, obj, visited, depth):\n print('recursive_tree_to_dot(%r, %r)' % (obj, visited))\n check_isinstance(obj, gcc.Tree)\n result = self.tree_to_dot(obj)\n visited.add(obj.addr)\n if depth < self.maxdepth:\n for name, value in self.iter_tree_attrs(obj):\n if isinstance(value, gcc.Tree):\n if value.addr not in visited:\n # Recurse\n result += self.recursive_tree_to_dot(value,\n visited, depth + 1)\n # Add edge:\n result += (' %s -> %s [label = %s];\\n'\n % (self.tree_id(obj),\n self.tree_id(value),\n name))\n return result\n\n def to_dot(self):\n self.root.debug()\n result = 'digraph G {\\n'\n result += ' node [shape=record];\\n'\n result += self.recursive_tree_to_dot(self.root, set(), 0)\n result += '}\\n'\n return result\n\ndef cfg_to_dot(name, cfg):\n pp = CfgPrettyPrinter(name, cfg)\n return pp.to_dot()\n\n\ndef tree_to_dot(tree):\n pp = TreePrettyPrinter(tree)\n return pp.to_dot()\n\nclass Table(object):\n '''A table of text/numbers that knows how to print itself'''\n def __init__(self, columnheadings=None, rows=[], sepchar='-'):\n self.numcolumns = len(columnheadings)\n self.columnheadings = columnheadings\n self.rows = []\n self._colsep = ' '\n self._sepchar = sepchar\n\n def add_row(self, row):\n assert len(row) == self.numcolumns\n self.rows.append(row)\n\n def write(self, out):\n colwidths = self._calc_col_widths()\n\n self._write_separator(out, colwidths)\n\n self._write_row(out, colwidths, self.columnheadings)\n\n self._write_separator(out, colwidths)\n\n for row in self.rows:\n self._write_row(out, colwidths, row)\n\n self._write_separator(out, colwidths)\n\n def _calc_col_widths(self):\n result = []\n for colIndex in xrange(self.numcolumns):\n result.append(self._calc_col_width(colIndex))\n return 
result\n\n def _calc_col_width(self, idx):\n cells = [str(row[idx]) for row in self.rows]\n heading = self.columnheadings[idx]\n return max([len(c) for c in (cells + [heading])])\n\n def _write_row(self, out, colwidths, values):\n for i, (value, width) in enumerate(zip(values, colwidths)):\n if i > 0:\n out.write(self._colsep)\n formatString = \"%%-%ds\" % width # to generate e.g. \"%-20s\"\n out.write(formatString % value)\n out.write('\\n')\n\n def _write_separator(self, out, colwidths):\n for i, width in enumerate(colwidths):\n if i > 0:\n out.write(self._colsep)\n out.write(self._sepchar * width)\n out.write('\\n')\n\nclass CallgraphPrettyPrinter(DotPrettyPrinter):\n def node_id(self, cgn):\n return 'cgn%i' % id(cgn)\n\n def node_to_dot_label(self, cgn):\n return str(cgn.decl.name)\n\n def edge_to_dot(self, e):\n attrliststr = ''\n return (' %s -> %s %s;\\n'\n % (self.node_id(e.caller),\n self.node_id(e.callee),\n attrliststr))\n\n def to_dot(self):\n result = 'digraph Callgraph {\\n'\n #result += ' subgraph cluster_callgraph {\\n'\n result += ' node [shape=box];\\n'\n for cgn in gcc.get_callgraph_nodes():\n result += (' %s [label=<%s>];\\n'\n % (self.node_id(cgn), self.node_to_dot_label(cgn)))\n for edge in cgn.callers:\n result += self.edge_to_dot(edge)\n #result += ' }\\n'\n result += '}\\n'\n return result\n\ndef callgraph_to_dot():\n pp = CallgraphPrettyPrinter()\n return pp.to_dot()\n\ndef check_isinstance(obj, types):\n \"\"\"\n Like:\n assert isinstance(obj, types)\n but with better error messages\n \"\"\"\n if not isinstance(obj, types):\n raise TypeError('%s / %r is not an instance of %s' % (obj, obj, 
types))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":628,"cells":{"__id__":{"kind":"number","value":790274022311,"string":"790,274,022,311"},"blob_id":{"kind":"string","value":"acd4027afe6f8299db74a23b5d143b417779529d"},"directory_id":{"kind":"string","value":"240efd23bff4397d3ac799121632819c43378d70"},"path":{"kind":"string","value":"/bonzo/errors.py"},"content_id":{"kind":"string","value":"e09a32e3c17e462519355adc3ae5ee722b446b0d"},"detected_licenses":{"kind":"list like","value":["Apache-2.0","LicenseRef-scancode-public-domain"],"string":"[\n \"Apache-2.0\",\n \"LicenseRef-scancode-public-domain\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"rwdim/bonzo"},"repo_url":{"kind":"string","value":"https://github.com/rwdim/bonzo"},"snapshot_id":{"kind":"string","value":"857826beb670cf730bd9feba30b0483cc4c79cd3"},"revision_id":{"kind":"string","value":"9c28fdb27647c82baf880dfaf058fcb95540a625"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T21:13:29.155635","string":"2021-01-19T21:13:29.155635"},"revision_date":{"kind":"timestamp","value":"2014-09-14T04:03:51","string":"2014-09-14T04:03:51"},"committer_date":{"kind":"timestamp","value":"2014-09-14T04:03:51","string":"2014-09-14T04:03:51"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"k
ind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"SMTP exceptions for response to the client.\"\"\"\n\n\nclass SMTPError(Exception):\n \"\"\"An exception that will turn into an SMTP error response.\n\n :arg int status_code: SMTP status code. For a status codes list, see:\n http://www.greenend.org.uk/rjk/tech/smtpreplies.html.\n :arg string message: Message to be written to the stream in order to\n response to the client.\n :arg string log_message: Message to be written to the log for this error.\n May contain ``%s``-style placeholders, which will be filled in with\n remaining positional parameters.\n \"\"\"\n\n def __init__(self, status_code, message, log_message=None, *args):\n self.status_code = status_code\n self.message = message\n self.log_message = log_message\n self.args = args\n\n def __str__(self):\n message = 'SMTP %d: %s' % (self.status_code, self.message)\n if not self.log_message:\n return message\n return message + ' (' + (self.log_message % self.args) + ')'\n\n\nclass InternalConfusion(SMTPError):\n \"\"\"Used to return a ``451`` status code.\n \"\"\"\n\n def __init__(self):\n super(InternalConfusion, self).__init__(451, 'Internal confusion')\n\n\nclass UnrecognisedCommand(SMTPError):\n \"\"\"Used to return a ``500`` status code.\n \"\"\"\n\n def __init__(self):\n super(UnrecognisedCommand, self).__init__(500, 'Error: bad syntax')\n\n\nclass BadArguments(SMTPError):\n \"\"\"Used to return a ``501`` status code.\n\n :arg string syntax: Syntax returned to the client.\n \"\"\"\n\n def __init__(self, syntax):\n super(BadArguments, self).__init__(501, 'Syntax: %s' % syntax)\n\n\nclass NotImplementedCommand(SMTPError):\n \"\"\"Used to return a ``502`` status code.\n\n :arg string command: Command not implemented for the server.\n \"\"\"\n\n def __init__(self, command):\n message = 'Error: command 
\"%s\" not implemented' % command\n super(NotImplementedCommand, self).__init__(502, message)\n\n\nclass BadSequence(SMTPError):\n \"\"\"Used to return a ``503`` status code.\n\n :arg string message: Message to be written to the stream and to response to\n the client.\n \"\"\"\n\n def __init__(self, message):\n super(BadSequence, self).__init__(503, message)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":629,"cells":{"__id__":{"kind":"number","value":13589276547722,"string":"13,589,276,547,722"},"blob_id":{"kind":"string","value":"e9c31a1751b2f5e302a1acdd5367a15adfe7c987"},"directory_id":{"kind":"string","value":"944587dc229af167e7bdb9988c6f19e4d00eb664"},"path":{"kind":"string","value":"/resultat.py"},"content_id":{"kind":"string","value":"754c9d70d57a2b0736aa94b4de00f1c2b64ad682"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"guennobzh/vetathlon"},"repo_url":{"kind":"string","value":"https://github.com/guennobzh/vetathlon"},"snapshot_id":{"kind":"string","value":"bb87ab7563949823097cd4cce71b145502a151ba"},"revision_id":{"kind":"string","value":"430a3f57ae77e65ebe0ecf0a53bdad86ebb0def4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T06:05:31.535768","string":"2021-01-01T06:05:31.535768"},"revision_date":{"kind":"timestamp","value":"2014-05-11T15:49:32","string":"2014-05-11T15:49:32"},"committer_date":{"kind":"timestamp","value":"2014-05-11T15:49:32","string":"2014-05-11T15:49:32"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom mod_python import apache, Session\nfrom datab import _Datab\nfrom tools import _Head\n\ndef index(req, categorie):\n #Cré l'objet base de donnée\n objBdd = _Datab()\n\n if categorie == 'seniorsg': #Général sénior\n titre = 'Classement général sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as 
`particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n \n elif categorie == 'juniorsg': #Général junior\n titre = 'Classement général junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont 
l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n \n elif categorie == 'seniorse': #sénior equipe\n titre = 'Classement par équipe sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorsi': #sénior individuel\n titre = 'Classement individuel sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n 
`particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorsp': #sénior pieton\n titre = 'Classement piéton sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait 
correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'seniorsv': #sénior vtt\n titre = 'Classement vtt sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_vtt` /*Trie les resultat 
par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'masculinese': #equipe masculine senior\n titre = 'Classement par équipe sénior masculine'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorshi': #sénior homme individuel\n titre = 'Classement individuel sénior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, 
`particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorshp': #sénior homme pieton\n titre = 'Classement piéton sénior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait 
correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'seniorshv': #sénior homme vtt\n titre = 'Classement vtt sénior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and 
`tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'femininese': #equipe féminine sénior\n titre = 'Classement par équipe sénior féminine'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n order by 
`tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorsfi': #sénior femme individuel\n titre = 'Classement individuel sénior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'seniorsfp': #sénior femme pieton\n titre = 'Classement piéton sénior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n 
`particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'seniorsfv': #sénior femme vtt\n titre = 'Classement vtt sénior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du 
vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'juniorse': #equipe junior\n titre = 'Classement par équipe junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les 
fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorsi': #junior individuel\n titre = 'Classement individuel junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorsp': #junior pieton\n titre = 'Classement piéton junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les 
nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'juniorsv': #junior vtt\n titre = 'Classement vtt junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du 
pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'masculinesje': #equipe junior masculine\n titre = 'Classement par équipe junior masculine'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un 
homme*/\n and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorshi': #junior homme individuel\n titre = 'Classement individuel junior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorshp': #junior homme pieton\n titre = 'Classement piéton junior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de 
dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'juniorshv': #junior homme vtt\n titre = 'Classement vtt junior homme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id 
du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'femininesje': #equipe junior feminine\n titre = 'Classement par équipe junior féminine'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 
/*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorsfi': #junior femme individuel\n titre = 'Classement individuel junior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste = pieton donc les individuel*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une 
femme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'juniorsfp': #junior femme pieton\n titre = 'Classement piéton junior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'juniorsfv': #junior femme vtt\n titre = 'Classement vtt junior femme'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` 
as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/\n ;''')\n\n elif categorie == 'mixtess': # mixte sénior\n titre = 'Classement par équipe mixte sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and 
year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and ((`particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n and `particpv`.`sexe` = 1) /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n or (`particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n and `particpv`.`sexe` = 2)) /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ''')\n\n elif categorie == 'mistesj': # mixte junior\n titre = 'Classement par équipe mixte junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` 
/*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n and ((`particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\\'on le coureur est un homme*/\n and `particpv`.`sexe` = 1) /*selectionne uniquement les fiches d\\'on le vetetiste est une femme*/\n or (`particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\\'on le coureur est une femme*/\n and `particpv`.`sexe` = 2)) /*selectionne uniquement les fiches d\\'on le vetetiste est un homme*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ''')\n\n elif categorie == 'sj': #Sénior/Junior\n titre = 'Classement par équipe Sénior/Junior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est >= a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est < a l\\'age junior*/\n and 
`tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n elif categorie == 'js': #Junior/Sénior\n titre = 'Classement par équipe Junior/Sénior'\n listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/\n `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/\n `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/\n from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/\n where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\\'id du pieton a sa fiche*/\n and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\\'id du vetetiste a sa fiche*/\n and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du pieton est < a l\\'age junior*/\n and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\\'ont l\\'age du vetetiste est >= a l\\'age junior*/\n and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/\n and `etat` = 0 /*selectionne uniquement les fiches d\\'on l\\'etat est partant*/\n and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\\'on le vététiste != pieton donc les equipe*/\n order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/\n ;''')\n\n retour = ''\n \n #Définie le type mine\n req.content_type = \"text/html;charset=UTF-8\"\n\n #début du tableau\n 
retour = '''
\n \n

%s

\n \n \n \n \n \n \n \n \n \n '''%(titre)\n \n #crée la vatiable pour la couleur des ligne paire\n lclass = 0\n\n #crée la vatiable pour le numero de place\n place = 1\n\n for infodossard in listDossard:\n #test si il s'agit d'une equipe\n if infodossard[1] == infodossard[2]:\n nomc = infodossard[8]+' '+infodossard[9]\n nomv = ''\n fusionc = '2'\n else:\n nomc = infodossard[8]+' '+infodossard[9]\n nomv = ''%(infodossard[10], infodossard[11])\n fusionc = '1'\n\n #débinie la class css de la ligne\n if lclass == 0:\n classl = 'l-impaire'\n lclass = 1\n else:\n classl = 'l-paire'\n lclass = 0\n\n retour += '''\n \n \n \n %s\n \n \n \n \n '''%(classl, place, infodossard[0], fusionc, nomc, nomv, infodossard[5], infodossard[6], infodossard[7])\n\n place += 1\n\n retour += '
PlaceDossardCourreurVététisteTemps pieton Temps vttTemps total
%s %s
%s%s&nbsp&nbsp&nbsp&nbsp&nbsp&nbsp&nbsp%s%s%s%s
'\n\n return _Head(retour)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":630,"cells":{"__id__":{"kind":"number","value":8091718399451,"string":"8,091,718,399,451"},"blob_id":{"kind":"string","value":"832a5bff1b880530f7a113aec3ef698f06c99339"},"directory_id":{"kind":"string","value":"4f9faebed856e704cea05d593f03f40a9bded5ca"},"path":{"kind":"string","value":"/dumbo/timelines.py"},"content_id":{"kind":"string","value":"9f73a24a5dfb68da8c9926484e5de1aabea8cc05"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jazzwang/hadoop-timelines"},"repo_url":{"kind":"string","value":"https://github.com/jazzwang/hadoop-timelines"},"snapshot_id":{"kind":"string","value":"c86f457d1b2bc2d2cbf7befbc79e5d5fc32f0896"},"revision_id":{"kind":"string","value":"1946ba52b0fd3cf00108d48e088e6d4b7f8d00fd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-12T19:51:20.050417","string":"2021-01-12T19:51:20.050417"},"revision_date":{"kind":"timestamp","value":"2009-06-29T05:18:01","string":"2009-06-29T05:18:01"},"committer_date":{"kind":"timestamp","value":"2009-06-29T05:18:01","string":"2009-06-29T05:18:01"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_l
anguage":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sys\nimport re\nimport urllib, httplib\nimport dumbo\n\nATTRIBUTES_PATTERN = re.compile('(?P[^=]+)=\"(?P[^\"]*)\" *')\nINT_PROPS = frozenset(('SUBMIT_TIME','START_TIME','FINISH_TIME','SHUFFLE_FINISHED','SORT_FINISHED'))\n\nscale = 1000\n\ndef mapper(key, line):\n event, rest = line.split(\" \",1)\n attrs = {}\n for name, value in re.findall(ATTRIBUTES_PATTERN, rest):\n attrs.setdefault(name, [])\n attrs[name].append(int(value)/scale if name in INT_PROPS else value)\n\n if 'JOBNAME' in attrs:\n if event == 'Job':\n # Job has multiple JOBNAME, taking the longest for now. lame.\n names = sorted(attrs['JOBNAME'],lambda x,y: cmp(len(y), len(x)))\n yield names[0], (event, attrs)\n else:\n yield attrs['JOBNAME'][0], (event, attrs)\n \ndef reducer(key, values):\n \n mapStartTime = {}\n mapEndTime = {}\n reduceStartTime = {}\n reduceShuffleTime = {}\n reduceSortTime = {}\n reduceEndTime = {}\n finalAttempt = {}\n wastedAttempts = []\n submitTime = None\n finishTime = None\n\n for event, attrs in values:\n attrs = dict((k,v[0]) for k,v in attrs.items())\n if event == 'Job':\n #print >> sys.stderr, 'reduce', key, attrs.keys()\n if \"SUBMIT_TIME\" in attrs:\n submitTime = attrs[\"SUBMIT_TIME\"]\n if \"FINISH_TIME\" in attrs:\n finishTime = attrs[\"FINISH_TIME\"]\n elif event == 'MapAttempt':\n attempt = attrs[\"TASK_ATTEMPT_ID\"]\n time = attrs.get(\"START_TIME\", 0)\n if time != 0:\n mapStartTime[attempt] = time\n elif \"FINISH_TIME\" in attrs:\n mapEndTime[attempt] = attrs[\"FINISH_TIME\"]\n if attrs.get(\"TASK_STATUS\", \"\") == \"SUCCESS\":\n task = attrs[\"TASKID\"]\n if task in finalAttempt:\n wastedAttempts.append(finalAttempt[task])\n finalAttempt[task] = attempt\n else:\n wastedAttempts.append(attempt)\n elif event == 'ReduceAttempt':\n attempt = attrs[\"TASK_ATTEMPT_ID\"]\n time = attrs.get(\"START_TIME\", 0)\n if time != 0:\n 
reduceStartTime[attempt] = time\n elif \"FINISH_TIME\" in attrs:\n task = attrs[\"TASKID\"]\n if attrs.get(\"TASK_STATUS\", \"\") == \"SUCCESS\":\n if task in finalAttempt:\n wastedAttempts.append(finalAttempt[task])\n finalAttempt[task] = attempt\n else:\n wastedAttempts.append(attempt)\n reduceEndTime[attempt] = attrs[\"FINISH_TIME\"]\n if \"SHUFFLE_FINISHED\" in attrs:\n reduceShuffleTime[attempt] = attrs[\"SHUFFLE_FINISHED\"]\n if \"SORT_FINISHED\" in attrs:\n reduceSortTime[attempt] = attrs[\"SORT_FINISHED\"]\n\n final = frozenset(finalAttempt.values())\n\n runningMaps = []\n shufflingReduces = []\n sortingReduces = []\n runningReduces = []\n waste = []\n\n if not submitTime or not finishTime:\n dumbo.core.incrcounter('Timelines', 'Incomplete Jobs', 1)\n return\n\n for t in range(submitTime, finishTime):\n runningMaps.append(0)\n shufflingReduces.append(0)\n sortingReduces.append(0)\n runningReduces.append(0)\n waste.append(0)\n\n for task in mapEndTime.keys():\n if task in mapStartTime:\n for t in range(mapStartTime[task]-submitTime, mapEndTime[task]-submitTime):\n if task in final:\n runningMaps[t] += 1\n else:\n waste[t] += 1\n\n for task in reduceEndTime.keys():\n if task in reduceStartTime:\n if task in final:\n for t in range(reduceStartTime[task]-submitTime, reduceShuffleTime[task]-submitTime):\n shufflingReduces[t] += 1\n for t in range(reduceShuffleTime[task]-submitTime, reduceSortTime[task]-submitTime):\n sortingReduces[t] += 1\n for t in range(reduceSortTime[task]-submitTime, reduceEndTime[task]-submitTime):\n runningReduces[t] += 1\n else:\n for t in range(reduceStartTime[task]-submitTime, reduceEndTime[task]-submitTime):\n waste[t] += 1\n\n params = {'maps': runningMaps, 'shuffles': shufflingReduces,\n 'merges': sortingReduces, 'reducers': runningReduces,\n 'waste': waste }\n \n params = dict([(k,\",\".join(str(c) for c in v)) for k,v in params.items()])\n \n params['start'] = submitTime\n params['end'] = finishTime\n \n params['mapcount'] = 
len([k for k in mapEndTime.keys() if k in mapStartTime and k in final])\n params['redcount'] = len([k for k in reduceEndTime.keys() if k in reduceStartTime and k in final])\n \n conn = httplib.HTTPConnection(\"hadoop-timelines.appspot.com:80\")\n conn.request(\"POST\", \"/timelines\", urllib.urlencode(params))\n response = conn.getresponse()\n yield key, response.getheader('location')\n \nif __name__ == \"__main__\":\n dumbo.run(mapper, reducer)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":631,"cells":{"__id__":{"kind":"number","value":1168231139682,"string":"1,168,231,139,682"},"blob_id":{"kind":"string","value":"b8b77c7979011d60dedfa890d543d1134269a7b0"},"directory_id":{"kind":"string","value":"f35013a2907f0527b6408da65d81fbad43bc4270"},"path":{"kind":"string","value":"/foreign/admin.py"},"content_id":{"kind":"string","value":"d8421a9f8a65c0f620ccae9c1e1e8eef4b9cb570"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"hero007asd/million_ads"},"repo_url":{"kind":"string","value":"https://github.com/hero007asd/million_ads"},"snapshot_id":{"kind":"string","value":"167598f865d5cbeaf07fbeaa5ad65719370e407e"},"revision_id":{"kind":"string","value":"3b6da01554d043f9ea40ffbe79e2bdd96a661fca"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T03:29:35.823690","string":"2016-09-06T03:29:35.823690"},"revision_date":{"kind":"timestamp","value":"2014-05-03T07:13:17","string":"2014-05-03T07:13:17"},"committer_date":{"kind":"timestamp","value":"2014-05-03T07:13:17","string":"2014-05-03T07:13:17"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom django import forms\nfrom foreign import models\nfrom datetime import date\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.html import format_html_join\nfrom django.utils.safestring import mark_safe\n\n# class OrderForm(forms.ModelForm):\n# \tclass Meta:\n# \t\tmodel = models.Order\nclass OrderAdmin(admin.ModelAdmin):\n\tlist_display = ('user','balance','lots','start_pts','close_pts','start_time','orderProfit','orderPts','uppser_case_name')\n\tsearch_fields = ('user',)\t\n\t# list_filter = ('start_time','lots','user__ini_money')\n\tlist_filter = 
('start_time','lots',)\n\tdate_hierarchy = 'start_time'\n\tordering = ('-start_time','lots')\n\t# fields = ('user','start_pts','lots','balance')\n\t# filter_horizontal&filter_vertical; for many 2 many keys\n\traw_id_fields = ('user',)\n\tfieldsets=(\n\t\t(None,{\n\t\t\t'fields':('user','start_pts','lots','balance')\n\t\t\t}),\n\t ('Advanced options',{\n\t \t# 'classes':('extrapretty','wide'),\n\t \t'description':'advanced options test',\n\t \t'classes':('collapse','wide'),\n\t \t'fields':('currency_type',('stop_profit_pts','stop_loss_pts'),'close_pts')#,'close_time','profit')\n\t \t}),\n\t)\n\t# formfield_overrides = {\n\t# \tmodels.TextField: {'widget':RichTextEditorWidget},\n\t# }\n\t# exclude = ['close_pts']\n\t# form = OrderForm\n\n\tdef uppser_case_name(self,obj):\n\t\treturn ('%s' % obj.currency_type).upper()\n\tuppser_case_name.short_description = 'CurrencyType1'\n\n\n# class TypeAdmin(admin.ModelAdmin):\n# \tformfield_overrides={\n# \t\tmodels.TextField:{'widget':RichTextEditorWidget},\n# \t}\n\nclass DecadeBornListFilter(admin.SimpleListFilter):\n\ttitle = _('decade born')\n\tparameter_name='decade'\n\tdef lookups(self,request,model_admin):\n\t\treturn (\n\t\t\t('80',_('in the eighties')),\n\t\t\t('90',_('in the nineties')),\n\t\t\t)\n\tdef queryset(self,request,queryset):\n\t\tif self.value() == '80s':\n\t\t\treturn queryset.filter(birthday__gte=date(1980,1,1),birthday__lte=date(1989,12,31))\n\t\tif self.value() == '90s':\n\t\t\treturn queryset.filter(birthday__gte=date(1990,1,1),birthday__lte=date(1999,12,31))\n\nclass PersonAdmin(admin.ModelAdmin):\n\tlist_display = ('name','decade_born_in','is_active','born_in_fifities')\n\tlist_display_links = ('name','decade_born_in')\n\tlist_filter = (DecadeBornListFilter,)\n\tlist_per_page = 10\n\tprepopulated_fields = {'slug':('name',)}\n\treadonly_fields = ('address_report',)\n\tdef address_report(self,instance):\n\t\treturn format_html_join(mark_safe('
'), '{0}', ((line,) for line in instance.name), ) or 'i can\\'t determine this address.'\n\t\t# return format_html_join(mark_safe('
'), '{0}', ((line,) for line in instance.birthday), ) or 'i can\\'t determine this address.'\n\taddress_report.short_description = 'address'\n\taddress_report.allow_tags = True\n\t#TODO\n\t# README\n\t#list_max_show_all = 20\n\t#list_select_related = ('person','blog')\n\t#IS_OK\n\t# list_editable = ('is_active',)\n# Register your models here.\nadmin.site.register(models.UserTemp)\n# admin.site.register(models.Order)\nadmin.site.register(models.Order,OrderAdmin)\nadmin.site.register(models.CurrencyType)\nadmin.site.register(models.Person,PersonAdmin)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":632,"cells":{"__id__":{"kind":"number","value":3874060550232,"string":"3,874,060,550,232"},"blob_id":{"kind":"string","value":"37a02cff98df8becc540883d8a77140eafdcc624"},"directory_id":{"kind":"string","value":"3be8da1d39bef1e09e4c8e7a6b736d7fc74a3c0f"},"path":{"kind":"string","value":"/webserver/opentrain/ot_api/views.py"},"content_id":{"kind":"string","value":"a1656d035d88ab0be7336c6c7437f93a893b532a"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"amitzini/OpenTrain"},"repo_url":{"kind":"string","value":"https://github.com/amitzini/OpenTrain"},"snapshot_id":{"kind":"string","value":"bbe5b2fc1b1b118931f7aac94667083c1b5cf4da"},"revision_id":{"kind":"string","value":"25ff81df668a9eba1c4369f9a789e34c60b44096"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-01T22:36:01.131143","string":"2020-04-01T22:36:01.131143"},"revision_date":{"kind":"timestamp","value":"2014-10-27T22:07:40","string":"2014-10-27T22:07:40"},"committer_date":{"kind":"timestamp","value":"2014-10-27T22:07:40","string":"2014-10-27T22:07:40"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import json\nimport common.ot_utils\nimport datetime\nfrom django.http.response import HttpResponse, HttpResponseBadRequest\nfrom django.conf import settings\nimport urllib\n\nfrom django.views.generic import View\nfrom django.shortcuts import render\n\ndef show_docs(request):\n ctx = dict()\n ctx['apis'] = ApiView.get_api_insts()\n \n return render(request,'ot_api/docs.html',ctx)\n\nclass ApiView(View):\n def _prepare_list_resp(self,req,items,info=None):\n info = info or dict()\n count = len(items)\n total_count = info.get('total_count',len(items))\n meta=dict(count=count,total_count=total_count)\n if total_count > count:\n if 
total_count > info['offset'] + info['limit']:\n d = req.GET.dict()\n d['offset'] = info['offset'] + info['limit']\n meta['next'] = req.path + '?' + urllib.urlencode(d)\n content = dict(objects=items,meta=meta)\n return HttpResponse(content=json.dumps(content),content_type='application/json',status=200)\n\n\n def get_json_resp(self,content,status=200):\n return HttpResponse(content=json.dumps(content),content_type='application/json',status=status)\n\n\n def get_bool(self,key,defval=None):\n val = self.GET.get(key,None)\n if val is None:\n return defval\n val = val.lower()\n if val == 'false':\n return False\n if val == 'true':\n return True\n return bool(int(val))\n\n\n\n def get_doc(self):\n return self.__doc__\n \n def get_api_url_nice(self):\n u = self.api_url\n u = u.replace('$','').replace('^','')\n u = '/api/1/' + u\n return u\n \n @classmethod\n def get_api_insts(cls):\n return [c() for c in cls.get_api_classes()]\n \n @classmethod\n def get_api_classes(cls):\n return cls.__subclasses__()\n \n \n @classmethod\n def get_urls(cls):\n from django.conf.urls import url\n urls = []\n for ac in cls.get_api_classes():\n urls.append(url(ac.api_url,ac.as_view()))\n return urls\n \nclass TripIdsForDate(ApiView):\n \"\"\" Return list of trips for given date given \n paramters: one of:\n date : in format dd/mm/yyyy \n today : 0/1 \n \"\"\"\n api_url = r'^trips/trips-for-date/$'\n def get(self,request):\n import timetable.services\n date = request.GET.get('date')\n today = self.get_bool('today',False)\n if not today and not date:\n raise Exception('Must have either today or date')\n if today:\n dt = common.ot_utils.get_localtime_now().date()\n else:\n day,month,year = date.split('/')\n dt = datetime.date(year=int(year),month=int(month),day=int(day))\n trips = timetable.services.get_all_trips_in_date(dt)\n objects=[trip.gtfs_trip_id for trip in trips]\n result = dict(objects=objects,\n meta=dict(total_count=len(objects)))\n return self.get_json_resp(result)\n\nclass 
TripDetails(ApiView):\n \"\"\" Return details for trip with id trip_id (given in url)\n details include the points in order to draw the trip on map\n \"\"\"\n api_url = r'^trips/(?P\\w+)/details/$'\n def get(self,request,gtfs_trip_id):\n import timetable.services\n trip = timetable.services.get_trip(gtfs_trip_id)\n result = trip.to_json_full()\n return self.get_json_resp(result)\n\nclass TripDetails(ApiView):\n \"\"\" Return details for trip with id trip_id (given in url)\n details include the points in order to draw the trip on map\n \"\"\"\n api_url = r'^trips/(?P\\w+)/stops/$'\n def get(self,request,gtfs_trip_id):\n import timetable.services\n from analysis.models import RtStop\n device_id = request.GET.get('device_id')\n if device_id is None:\n return HttpResponseBadRequest('Must specify device_id')\n trip = timetable.services.get_trip(gtfs_trip_id)\n rt_stops = RtStop.objects.filter(tracker_id=device_id,trip__gtfs_trip_id=gtfs_trip_id)\n result = trip.to_json_full(with_shapes=False,rt_stops=rt_stops)\n return self.get_json_resp(result)\n\n\nclass CurrentTrips(ApiView):\n \"\"\" Return current trips \"\"\"\n api_url = r'^trips/current/$'\n def get(self,request):\n import analysis.logic\n current_trips = analysis.logic.get_current_trips() \n return self._prepare_list_resp(request, current_trips)\n\nclass TripsLocation(ApiView):\n \"\"\" Return location (exp and cur) of trips given in comma separated GET paramter trip_ids \"\"\"\n api_url = r'^trips/cur-location/$'\n def get(self,request):\n import analysis.logic\n trip_ids = request.GET.get('trip_ids',None)\n if not trip_ids:\n return HttpResponseBadRequest('Must specify trip_ids')\n live_trips = analysis.logic.get_trips_location(trip_ids.split(',')) \n result = dict(objects=live_trips)\n result['meta'] = dict()\n return self.get_json_resp(result)\n\nclass Devices(ApiView): \n \"\"\" Return list of devices \"\"\"\n api_url = r'^devices/$'\n def get(self,request):\n import analysis.logic\n devices = 
analysis.logic.get_devices_summary()\n return self._prepare_list_resp(request,devices)\n\nclass DeviceReports(ApiView):\n \"\"\" Return reports for given device with id device_id\n
use stops_only=1 to get only stops\n
use full=1 to get also wifis\n \"\"\"\n api_url = r'^devices/(?P[\\w ]+)/reports/'\n def get(self,request,device_id):\n import analysis.logic\n info = dict()\n info['since_id'] = int(request.GET.get('since_id',0))\n info['limit'] = int(request.GET.get('limit',200))\n info['offset'] = int(request.GET.get('offset',0))\n info['stops_only'] = bool(int(request.GET.get('stops_only',0)))\n info['bssid'] = request.GET.get('bssid')\n info['full'] = bool(int(request.GET.get('full',0)))\n reports = analysis.logic.get_device_reports(device_id,info)\n return self._prepare_list_resp(request,reports,info)\n\nclass DeviceStatus(ApiView):\n \"\"\" Returns the status of curret device, e.g. its real time location
\n Should be used mainly for testing\n \"\"\"\n api_url = r'^devices/(?P[\\w ]+)/status/'\n def get(self,request,device_id):\n import algorithm.train_tracker\n result = algorithm.train_tracker.get_device_status(device_id)\n return self.get_json_resp(result)\n\nclass BssidsToStopIds(ApiView):\n \"\"\" returns map of bssids to stops \"\"\"\n api_url = r'^stops/bssids/'\n def get(self,request):\n import algorithm.bssid_tracker\n data = algorithm.bssid_tracker.get_bssid_data_for_app()\n return self.get_json_resp(data)\n\nclass AllStops(ApiView):\n \"\"\" return lists of stops with bssids\n \"\"\"\n api_url = r'^stops/$'\n def get(self,request):\n from timetable.models import TtStop\n stops = TtStop.objects.all().order_by('gtfs_stop_id')\n import algorithm.bssid_tracker\n data = algorithm.bssid_tracker.get_bssids_by_stop_ids()\n content = [stop.to_json(bssids=data.get(stop.gtfs_stop_id,[])) for stop in stops]\n return self.get_json_resp(content)\n\nclass DistBetweenShapes(ApiView):\n api_url = r'^stops/distance/$'\n def get(self,request):\n import timetable.services\n if 'gtfs_stop_id1' not in request.GET or 'gtfs_stop_id2' not in request.GET:\n return HttpResponse(status=400,content='gtfs_stop_id1 and gtfs_stop_id2 are mandatory')\n content = timetable.services.find_distance_between_gtfs_stops_ids(request.GET['gtfs_stop_id1'],request.GET['gtfs_stop_id2'])\n return self.get_json_resp(content)\n\nclass BssidToStop(ApiView):\n \"\"\" Returns stop info for each bssid \n get bssids as paramter\n \"\"\"\n api_url = r'^analysis/bssid-info/'\n def get(self,request):\n bssids = self.GET.get('bssids').split(',')\n all = self.get_bool('all',False)\n 
pass\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":633,"cells":{"__id__":{"kind":"number","value":14362370673035,"string":"14,362,370,673,035"},"blob_id":{"kind":"string","value":"bfbc9143120fa72c8a09613d9f9085e295b9ed70"},"directory_id":{"kind":"string","value":"da9567a1a19352a18ab3d8318bdf178fea794fb3"},"path":{"kind":"string","value":"/Code/Python/sandbox/pipe/pypipe.py"},"content_id":{"kind":"string","value":"11b5b02ac1c293f00528e4aa89963a2eedddbd2b"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"antroy/Home"},"repo_url":{"kind":"string","value":"https://github.com/antroy/Home"},"snapshot_id":{"kind":"string","value":"4b2a03360c06ac859563152d51f88c243f443f69"},"revision_id":{"kind":"string","value":"0a08b553601b9828ed4f85536e0e22e8aabf812b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T14:50:57.469866","string":"2016-09-06T14:50:57.469866"},"revision_date":{"kind":"timestamp","value":"2009-11-09T21:47:30","string":"2009-11-09T21:47:30"},"committer_date":{"kind":"timestamp","value":"2009-11-09T21:47:30","string":"2009-11-09T21:47:30"},"github_id":{"kind":"number","value":366737,"string":"366,737"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"
},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\r\nimport sys\r\n\r\n\r\ndef suck_input():\r\n lines = []\r\n source = sys.stdin\r\n\r\n while True:\r\n data = source.read(1024)\r\n lines.append(data)\r\n if not data:\r\n break\r\n\r\n return ''.join(lines)\r\n\r\ndef main(out_f):\r\n text = suck_input()\r\n out_handle = open(out_f, 'w')\r\n out_handle.write(text)\r\n\r\n out_handle.close()\r\n\r\nif __name__ == \"__main__\":\r\n out_f = sys.argv[1]\r\n main(out_f)\r\n\r\n\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":634,"cells":{"__id__":{"kind":"number","value":14345190807721,"string":"14,345,190,807,721"},"blob_id":{"kind":"string","value":"180fa949bb3235c600599141acfff4cc51ce86ac"},"directory_id":{"kind":"string","value":"639494a7ac8fcd9f7cd00533669b1888cc626cd2"},"path":{"kind":"string","value":"/src/servlets/closed/Main.py"},"content_id":{"kind":"string","value":"811691b255376550326f8e4b416ed5ba8a6fbf2d"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"kstaken/Syncato"},"repo_url":{"kind":"string","value":"https://github.com/kstaken/Syncato"},"snapshot_id":{"kind":"string","value":"17fb859b1e0b7a8373b919f0e1a8d12b916881e3"},"revision_id":{"kind":"string","value":"860822e08b08b88b749961a1d61be902ce3b8ea3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-05T22:48:28.727982","string":"2020-04-05T22:48:28.727982"},"revision_date":{"kind":"timestamp","value":"2013-05-19T05:49:27","string":"2013-05-19T05:49:27"},"committer_date":{"kind":"timestamp","value":"2013-05-19T05:49:27","string":"2013-05-19T05:49:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#\n# See the file LICENSE for redistribution information.\n#\n# Copyright (c) 2003 Kimbro Staken. 
All rights reserved.\n# \n# $Id: Main.py,v 1.6 2003/12/15 14:57:48 kstaken Exp $\n\nfrom string import replace\n\nimport libxml2\n\nfrom XMLFormServlet import XMLFormServlet\n\nfrom DocumentBuilder import DocumentBuilder\nfrom XMLFragment import XMLFragment\n\nfrom BaseDatabase import NotFoundError\n\nclass Main(XMLFormServlet): \n def __init__(self):\n XMLFormServlet.__init__(self)\n \n def respondToGet(self, transaction):\n request = transaction.request()\n response = transaction.response()\n \n pathInfo = request.extraURLPath()\n \n if (pathInfo == \"\" or pathInfo == \"/\"):\n # We handle gets by just returning the basic edit page \n content = self.weblog.db.runTransform(\"\", \"item/editor\", \"\")\n self.sendResponseText(response, content) \n elif (pathInfo.startswith(\"/collection\")):\n # The type of collection is part of the url\n type = replace(pathInfo, \"/collection/\", \"\")\n \n collection = type + \"/collection\" \n \n # Locate the collection description\n file = open(self.weblog.db.locateFile(collection, \".xml\")).read()\n\n # Convert it to HTML\n content = self.weblog.db.runTransform(file, \"admin/collection\", \"\")\n self.sendResponseText(response, content)\n \n def respondToPost(self, transaction): \n request = transaction.request()\n response = transaction.response()\n\n entryType = request.field('type', \"\")\n \n fields = request.fields()\n \n try:\n entryDocument = DocumentBuilder(self.weblog, fields)\n content = entryDocument.serialize()\n \n print content\n # Convert the rest of the form into an XML form spec.\n formDocument = self._buildFormDocument(fields)\n \n errorText = \"\"\n try:\n # If there was a problem building the initial document we just\n # want to display an error.\n errorText = entryDocument.getErrorText()\n if (errorText != \"\"):\n raise ProcessingError()\n # Otherwise we continue processing the document.\n else: \n # Add the entry document into the form specification.\n 
formDocument.getDocument().addNode(\"/form/content\", entryDocument.getRootElement())\n \n # Hand the form spec to the action processor for this entry \n # type.\n content = formDocument.serialize()\n # print content\n result = self.weblog.db.runTransform(content, entryType + \"/action\", \"\", \"admin/action\")\n \n print result\n actionResult = XMLFragment(result)\n \n # If there were any errors in processing we send an error \n # to the user.\n errors = actionResult.xpathEval(\"/results/error-text\")\n if (len(errors) > 0): \n for error in errors:\n errorText = error.content + \"
\"\n \n raise ProcessingError()\n # Otherwise we figure ou what button was clicked so that we \n # forward the user to the proper place.\n else:\n button = formDocument.xpathEval(\"/form/button/node()\")[0].name\n #print button\n \n style = self.getStyle(request, actionResult, \"/results/action/\" + button)\n #print style\n self.sendResponse(response, entryDocument, style)\n \n except ProcessingError, e:\n # Make sure the document actually has content and then add the \n # error message to it.\n try:\n root = entryDocument.getRootElement()\n entryDocument.getDocument().addNode(\"/node()/error-text\", errorText, 1)\n except:\n entryDocument.getDocument().addNode(\"/dummy/error-text\", errorText, 1)\n \n print entryDocument.serialize()\n style = self.getStyle(request, formDocument, \"/form/action/error\")\n \n self.sendResponse(response, entryDocument, style)\n \n except NotFoundError, e:\n doc = XMLFragment(\"Document not found\")\n self.sendResponse(response, doc, \"admin/error\") \n \n def _buildFormDocument(self, fields):\n updatedFields = {}\n for field in fields:\n if (not field.startswith(\"/\") and not field.startswith(\"#\")):\n updatedFields[\"/form/\" + field] = fields[field]\n\n return DocumentBuilder(self.weblog, updatedFields)\n\n def getStyle(self, request, document, path, defaultStyle = \"admin/error\"):\n style = document.xpathEval(path)\n if (len(style) > 0):\n style = style[0].content\n if (style == \"referer\"):\n style = request.environ()['HTTP_REFERER']\n else:\n style = defaultStyle\n \n return style\n \n def sendResponse(self, response, document, target):\n # if the target is a URL we send a redirect\n if (target.startswith(\"http://\")):\n self.sendRedirect(response, target)\n # Otherwise we're forwarding to a stylesheet\n else:\n content = document.serialize()\n # print target\n print content\n result = self.weblog.db.runTransform(content, target, \"\")\n self.sendResponseText(response, result) \n \nclass ProcessingError (Exception):\n 
\"\"\"\n Exception that is thrown when an error occurs during processing.\n \"\"\"\n pass\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":635,"cells":{"__id__":{"kind":"number","value":283467879641,"string":"283,467,879,641"},"blob_id":{"kind":"string","value":"7db779f7c72be69a673fd058dd969ed2c36791bc"},"directory_id":{"kind":"string","value":"bec07c5c10ff0d3aa13a2a21d886215dcfa3757b"},"path":{"kind":"string","value":"/server.py"},"content_id":{"kind":"string","value":"95baef265784092ab582fc28307fa56d72b12b2b"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"nourlcn/icome-pyzmq"},"repo_url":{"kind":"string","value":"https://github.com/nourlcn/icome-pyzmq"},"snapshot_id":{"kind":"string","value":"869a638902af79792cee8a1b44eae43653dd5d32"},"revision_id":{"kind":"string","value":"39e7d29a9363858a9cd5a6c7bfa9b5e66bafc405"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-25T03:27:46.076718","string":"2021-01-25T03:27:46.076718"},"revision_date":{"kind":"timestamp","value":"2012-08-09T16:52:33","string":"2012-08-09T16:52:33"},"committer_date":{"kind":"timestamp","value":"2012-08-09T16:52:33","string":"2012-08-09T16:52:33"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_op
en_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport zmq \nimport time \nimport os\nimport Image\n\nimages_list = file('/your/file/path','r').readlines()\ntotal = len(images_list)\nindex=0\n\ndef get_one_filename():\n global index\n return images_list[index][:-1]\n\ndef simple_main():\n global index\n global total\n context = zmq.Context()\n socket = context.socket(zmq.REP)\n socket.bind(\"tcp://localhost:5555\")\n\n while (index < total-1):\n # Wait for next request from client\n message = socket.recv()\n print \"Current, No. \", index+1\n time.sleep (3) # Do some 'work'\n filename = get_one_filename()\n socket.send(filename)\n index += 1\n\n while(True):\n msg = socket.recv()\n socket.send(\"END\")\n \n return 0\n\nif __name__ == '__main__':\n simple_main()\n\n\n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":636,"cells":{"__id__":{"kind":"number","value":16767552346670,"string":"16,767,552,346,670"},"blob_id":{"kind":"string","value":"196c2edd4d139bddc084f3d2c6bb66d8a34d2547"},"directory_id":{"kind":"string","value":"19e5118d622ccf89cdcb27d19e4eb8a9c0cca908"},"path":{"kind":"string","value":"/exercices/solution.py"},"content_id":{"kind":"string","value":"dd8dbf3661b0b83f635e68e2d0a1c971aa188673"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jeanbcaron/course-material"},"repo_url":{"kind":"string","value":"https://github.com/jeanbcaron/course-material"},"snapshot_id":{"kind":"string","value":"492662989de689b35f49770529828bd484bc6480"},"revision_id":{"kind":"string","value":"810cdf9f71e412bdf5fbb0444064d4b3f61eae67"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-14T12:07:33.883350","string":"2021-01-14T12:07:33.883350"},"revision_date":{"kind":"timestamp","value":"2014-09-27T09:43:45","string":"2014-09-27T09:43:45"},"committer_date":{"kind":"timestamp","value":"2014-09-27T09:43:45","string":"2014-09-27T09:43:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def is_alpha(inp):\n import string\n alphab = string.ascii_letters\n for letter in inp \n if inp in alphab:\n return True\n else:\n return 
False\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":637,"cells":{"__id__":{"kind":"number","value":12945031437825,"string":"12,945,031,437,825"},"blob_id":{"kind":"string","value":"85f913ed7ad21dfcc850750aa74af470416929a7"},"directory_id":{"kind":"string","value":"c6bfed62f906316569801f199227d90676a739f9"},"path":{"kind":"string","value":"/pulp_rpm/test/unit/test_extension_admin_iso_repo_list.py"},"content_id":{"kind":"string","value":"feabb18ed50ec6906bb087eeecf15f6bfd08f2d6"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"chaobin/rcm-pulp-rpm"},"repo_url":{"kind":"string","value":"https://github.com/chaobin/rcm-pulp-rpm"},"snapshot_id":{"kind":"string","value":"eb9dfce275d301e8951f9bd60240b385edfa869c"},"revision_id":{"kind":"string","value":"180c4f713e8e7490cf54122af7cf051be1c55ea4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-04T20:21:07.321354","string":"2016-08-04T20:21:07.321354"},"revision_date":{"kind":"timestamp","value":"2014-01-02T20:43:48","string":"2014-01-02T20:43:48"},"committer_date":{"kind":"timestamp","value":"2014-01-02T20:43:48","string":"2014-01-02T20:43:48"},"github_id":{"kind":"number","value":16002762,"string":"16,002,762"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"nul
l"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# Copyright © 2013 Red Hat, Inc.\n#\n# This software is licensed to you under the GNU General Public\n# License as published by the Free Software Foundation; either version\n# 2 of the License (GPLv2) or (at your option) any later version.\n# There is NO WARRANTY for this software, express or implied,\n# including the implied warranties of MERCHANTABILITY,\n# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should\n# have received a copy of GPLv2 along with this software; if not, see\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.\n\nfrom copy import deepcopy\nfrom gettext import gettext as _\nimport mock\nfrom pulp_rpm.devel import rpm_support_base\n\nfrom pulp.common.plugins import importer_constants\n\nfrom pulp_rpm.common import constants\nfrom pulp_rpm.extension.admin.iso import repo_list\n\n\n# These are some test repos that are returned by the repo_mock() repo object. 
They were copied from\n# real repositories from a working database and then massaged into this form for testing purposes\nTEST_REPOS = [\n {'display_name': 'test_iso_repo', 'description': None,\n 'distributors': [\n {'repo_id': 'test_iso_repo', '_ns': 'repo_distributors',\n 'last_publish': '2013-05-21T12:41:17-04:00', 'auto_publish': True,\n 'scheduled_publishes': [], 'distributor_type_id': 'iso_distributor',\n '_id': {'$oid': '519ba0a0b1a8a15a1fcae0b1'}, 'config': {}, 'id': 'iso_distributor'}],\n '_ns': 'repos', 'notes': {'_repo-type': 'iso-repo'}, 'content_unit_counts': {'iso': 3},\n 'importers': [\n {'repo_id': 'test_iso_repo', '_ns': 'repo_importers', 'importer_type_id': 'iso_importer',\n 'last_sync': '2013-05-21T12:44:52-04:00', 'scheduled_syncs': [],\n '_id': {'$oid': '519ba0a0b1a8a15a1fcae0b0'},\n 'config': {\n importer_constants.KEY_FEED: 'http://pkilambi.fedorapeople.org/test_file_repo/',\n importer_constants.KEY_MAX_DOWNLOADS: 1, importer_constants.KEY_MAX_SPEED: 50000},\n 'id': 'iso_importer'}],\n '_id': {'$oid': '519ba0a0b1a8a15a1fcae0af'}, 'id': 'test_iso_repo',\n '_href': '/pulp/api/v2/repositories/test_iso_repo/'},\n # This is an ISO repository that uses SSL certificates. 
This helps us test that the certificates\n # get scrubbed appropriately by the ISORepoListCommand.\n {'display_name': 'cdn', 'description': None,\n 'distributors': [\n {'repo_id': 'cdn', '_ns': 'repo_distributors', 'last_publish': None,\n 'auto_publish': False, 'scheduled_publishes': [], 'distributor_type_id': 'iso_distributor',\n '_id': {'$oid': '5163309cb1a8a160d0117efd'},\n 'config': {constants.CONFIG_SSL_AUTH_CA_CERT: 'A cert',\n constants.CONFIG_SERVE_HTTPS: True, constants.CONFIG_SERVE_HTTP: False},\n 'id': 'iso_dist'}],\n '_ns': 'repos', 'notes': {'_repo-type': 'iso-repo'}, 'content_unit_counts': {'iso': 5},\n 'importers': [\n {'repo_id': 'cdn', '_ns': 'repo_importers', 'importer_type_id': 'iso_importer',\n 'last_sync': '2013-04-08T18:12:20-04:00', 'scheduled_syncs': [],\n '_id': {'$oid': '5163309cb1a8a160d0117ef3'},\n 'config': {\n importer_constants.KEY_FEED: 'https://cdn.redhat.com/iso',\n importer_constants.KEY_SSL_CA_CERT: 'CA Certificate',\n importer_constants.KEY_SSL_CLIENT_CERT: 'Client Certificate',\n 'id': 'cdn', importer_constants.KEY_SSL_CLIENT_KEY: 'Client Key'},\n 'id': 'iso_importer'}],\n '_id': {'$oid': '5163309cb1a8a160d0117eea'}, 'id': 'cdn',\n '_href': '/pulp/api/v2/repositories/cdn/'},\n # This is an RPM repository. 
It should get filtered out by get_repositories(), and it should be\n # shown by get_other_repositories().\n {'display_name': 'zoo', 'description': None,\n 'distributors': [\n {'repo_id': 'zoo', '_ns': 'repo_distributors', 'last_publish': '2013-04-30T10:27:31-04:00',\n 'auto_publish': True, 'scheduled_publishes': [], 'distributor_type_id': 'yum_distributor',\n '_id': {'$oid': '517fd4c3b1a8a112da54b1ba'},\n 'config': {'http': False, 'relative_url': '/repos/pulp/pulp/demo_repos/zoo/',\n 'https': True}, 'id': 'yum_distributor'},\n {'repo_id': 'zoo', '_ns': 'repo_distributors', 'last_publish': None, 'auto_publish': False,\n 'scheduled_publishes': [], 'distributor_type_id': 'export_distributor',\n '_id': {'$oid': '517fd4c3b1a8a112da54b1bb'}, 'config': {'http': False, 'https': True},\n 'id': 'export_distributor'}],\n '_ns': 'repos', 'notes': {'_repo-type': 'rpm-repo'},\n 'content_unit_counts': {'package_group': 2, 'package_category': 1, 'rpm': 32, 'erratum': 4},\n 'importers': [\n {'repo_id': 'zoo', '_ns': 'repo_importers', 'importer_type_id': 'yum_importer',\n 'last_sync': '2013-04-30T10:27:29-04:00', 'scheduled_syncs': [],\n '_id': {'$oid': '517fd4c3b1a8a112da54b1b9'},\n 'config': {'feed_url': 'http://repos.fedorapeople.org/repos/pulp/pulp/demo_repos/zoo/'},\n 'id': 'yum_importer'}],\n '_id': {'$oid': '517fd4c3b1a8a112da54b1b8'}, 'id': 'zoo',\n '_href': '/pulp/api/v2/repositories/zoo/'}]\n\n\ndef repo_mock():\n repo = mock.MagicMock()\n repo.repositories = mock.MagicMock()\n response = mock.MagicMock()\n response.response_body = deepcopy(TEST_REPOS)\n repo.repositories.return_value = response\n return repo\n\n\nclass TestISORepoListCommand(rpm_support_base.PulpClientTests):\n \"\"\"\n Test the ISORepoListCommand class.\n \"\"\"\n @mock.patch('pulp_rpm.extension.admin.iso.repo_list.ListRepositoriesCommand.__init__',\n side_effect=repo_list.ListRepositoriesCommand.__init__, autospec=True)\n def test___init__(self, list_repo_init):\n \"\"\"\n Test the __init__() method.\n 
\"\"\"\n list_command = repo_list.ISORepoListCommand(self.context)\n\n list_repo_init.assert_called_once_with(list_command, self.context,\n repos_title=_('ISO Repositories'))\n self.assertEqual(list_command.all_repos_cache, None)\n\n def test__all_repos(self):\n \"\"\"\n Test the _all_repos() method.\n \"\"\"\n self.context.server.repo = repo_mock()\n list_command = repo_list.ISORepoListCommand(self.context)\n query_params = {}\n\n all_repos = list_command._all_repos(query_params)\n\n # The mock should have been called, and all_repos should just be our TEST_REPOS\n self.context.server.repo.repositories.assert_call_once_with(query_params)\n self.assertEqual(all_repos, TEST_REPOS)\n\n # The cache should be filled now\n self.assertEqual(list_command.all_repos_cache, TEST_REPOS)\n\n # Calling it again should not increase the mock's call count since the cache should be used\n list_command._all_repos(query_params)\n self.assertEqual(self.context.server.repo.repositories.call_count, 1)\n\n def test_get_other_repositories(self):\n \"\"\"\n Test the get_other_repositories() method.\n \"\"\"\n self.context.server.repo = repo_mock()\n list_command = repo_list.ISORepoListCommand(self.context)\n query_params = {}\n\n other_repos = list_command.get_other_repositories(query_params)\n\n # The only \"other repo\" is the third test one, the \"zoo\" RPM repo\n self.assertEqual(other_repos, [TEST_REPOS[2]])\n\n def test_get_repositories(self):\n \"\"\"\n Test the get_repositories() method.\n \"\"\"\n self.context.server.repo = repo_mock()\n list_command = repo_list.ISORepoListCommand(self.context)\n query_params = {}\n\n iso_repos = list_command.get_repositories(query_params)\n\n # Let's inspect the repos to make sure they have all the correct properties\n # There should be two ISO repos (cdn and iso). 
zoo was an RPM repo\n self.assertEqual(len(iso_repos), 2)\n\n # The first repo should be test_iso_repo, unaltered\n self.assertEqual(iso_repos[0], TEST_REPOS[0])\n\n # The second repo should be cdn, but the SSL certificates should have been removed\n expected_cdn = deepcopy(TEST_REPOS[1])\n expected_cdn['importers'][0]['config']['feed_ssl_configured'] = 'True'\n expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CLIENT_CERT)\n expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CLIENT_KEY)\n expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CA_CERT)\n expected_cdn['distributors'][0]['config'].pop(constants.CONFIG_SSL_AUTH_CA_CERT)\n expected_cdn['distributors'][0]['config']['repo_protected'] = 'True'\n self.assertEqual(iso_repos[1], expected_cdn)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":638,"cells":{"__id__":{"kind":"number","value":16234976403170,"string":"16,234,976,403,170"},"blob_id":{"kind":"string","value":"cca173a1d79cec7d7eb36298984b4a25fe9e682c"},"directory_id":{"kind":"string","value":"3e2c82189e007b53752e162b4997e1fedd9c5cb1"},"path":{"kind":"string","value":"/inputModule.py"},"content_id":{"kind":"string","value":"e7e6448ddef9c13d8c3fbbb348834fbdbfdfb85e"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"cdepatie/uoguelph"},"repo_url":{"kind":"string","value":"https://github.com/cdepatie/uoguelph"},"snapshot_id":{"kind":"string","value":"3b1100465547734dd83ca0294b9ca41309032e20"},"revision_id":{"kind":"string","value":"1d2ea0a6d331289d98fd1bdc81595236d91020ef"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-08-14T16:58:22.446020","string":"2015-08-14T16:58:22.446020"},"revision_date":{"kind":"timestamp","value":"2014-11-23T22:08:29","string":"2014-11-23T22:08:29"},"committer_date":{"kind":"timestamp","value":"2014-11-23T22:08:29","string":"2014-11-23T22:08:29"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nimport os\nimport sys\nfrom output import *\nfrom calculations import *\n\ndef parseFunction(function):\n\n parsedArray = function.partition(\"=\")\n if len(function) == 1:\n inputString = function\n elif parsedArray[1] == \"\":\n\tinputString = parsedArray[0]\n else:\n\tinputString = parsedArray[2]\n \t\n validChars = ['pow', '^', 'sin', 'cos', 'tan', 'csc', 'sec', 'cot', 'asin', 'acos', 'atan', 'acsc',\n 'asec', 'acot', 'e', 'pi', '.', '+', '/', '\\\\', '*', '%', '-', 'log', 'ln', 'sqrt',\n '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '!','x','X', ',']\n inputStringTemp = ''\n \n #strip 
newlines/carriage returns, whitespace, convert backslashes to forward slashes, and convert the string to lower case, in that order\n inputString = inputString.replace('\\n','').replace('\\r','')\n inputString = inputString.strip()\t\t\n inputString = inputString.replace('\\\\','/')\n inputString = inputString.replace(' ','')\n inputString = inputString.lower()\n\n #iterate through the string, counting each instance of open and closed bracket. After the entire string has been traversed, compare the counts for equality \n leftBracket = 0\n rightBracket = 0\n for character in inputString:\n if(character == '('):\n leftBracket += 1\n continue\n elif(character == ')'):\n rightBracket += 1\n continue\n else:\n continue\n if(rightBracket != leftBracket):\n logger([\"ERROR\",\"The equation you input does not have a matching number of brackets!\"])\n return [\"ERROR\",\"The equation you input does not have a matching number of brackets!\"]\n \n #iterate through the input string, replacing instances of values in validChars with empty strings. If anything remains afterwards, it is an invalid input and an error is returned\n inputStringTemp = inputString\n for element in validChars:\n inputStringTemp = inputStringTemp.replace(element, '')\n if(len(inputStringTemp) != 0):\n logger([\"ERROR\",\"The equation you have input contains one or more invalid expressions: \" + inputStringTemp])\n return [\"ERROR\",\"The equation you have input contains one or more invalid expressions: \" + inputStringTemp]\n\n #iterate through each character of the input string, setting operator to True if the character is a mathematical operator\n #if the subsequent character is another operator, return an error. 
otherwise set operator to False and continue\n operator = False\n for character in inputString:\n if(character == '+' or character == '-' or character == '/' or character == '*' or character == '%'):\n if(operator == True):\n\t logger([\"ERROR\",\"The equation you have input contains one or more instances of adjacent operators. eg. +*\"])\n return [\"ERROR\",\"The equation you have input contains one or more instances of adjacent operators. eg. +*\"]\n else:\n operator = True\n continue\n if(operator == True):\n operator = False\n\n return [\"GRAPH\",inputString]\n\ndef parseInputString(inputString):\n #a list used as a dictionary for valid content\n validChars = ['pow', '^', 'sin', 'cos', 'tan', 'csc', 'sec', 'cot', 'asin', 'acos', 'atan', 'acsc',\n 'asec', 'acot', 'e', 'pi', '.', '+', '/', '\\\\', '*', '%', '-', 'log', 'ln', 'sqrt',\n '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '!', ',']\n inputStringTemp = ''\n \n #check if the inputString is a file. If so, check the extension to ensure it is .txt\n inputFile = os.path.isfile(inputString)\n if(inputFile):\n\tisTextFile = inputString.endswith(\".txt\")\n if(isTextFile):\n return [\"FILE\",inputString]\n else:\n\t logger([\"ERROR\",\"User input a file other than .txt\"])\n return [\"ERROR\",\"Please ensure your input file is a .txt file.\"]\n\n #strip newlines/carriage returns, whitespace, convert backslashes to forward slashes, and convert the string to lower case, in that order\n inputString = inputString.replace('\\n', '').replace('\\r', '')\n inputString = inputString.strip()\n inputString = inputString.replace('\\\\','/')\n inputString = inputString.replace(' ','')\n inputString = inputString.lower()\n \n #iterate through the string, counting each instance of open and closed bracket. 
After the entire string has been traversed, compare the counts for equality\n leftBracket = 0\n rightBracket = 0\n for character in inputString:\n if(character == '('):\n leftBracket += 1\n continue\n elif(character == ')'):\n rightBracket += 1\n continue\n else:\n continue\n if(rightBracket != leftBracket):\n\tlogger([\"ERROR\",\"The equation input does not have a matching number of brackets.\"])\n return [\"ERROR\",\"The equation you input does not have a matching number of brackets!\"]\n\n #iterate through the input string, replacing instances of values in validChars with empty strings. If anything remains afterwards, it is an invalid input and an error is returned\n inputStringTemp = inputString\n for element in validChars:\n inputStringTemp = inputStringTemp.replace(element, '')\n if(len(inputStringTemp) != 0):\n logger([\"ERROR\",\"The equation input contains one or more invalid expressions: \" + inputStringTemp])\n return [\"ERROR\",\"The equation you have input contains one or more invalid expressions: \" + inputStringTemp]\n\n #iterate through each character of the input string, setting operator to True if the character is a mathematical operator\n #if the subsequent character is another operator, return an error. otherwise set operator to False and continue\n operator = False\n for character in inputString:\n if(character == '+' or character == '-' or character == '/' or character == '*' or character == '%'):\n if(operator == True):\n\t\tlogger([\"ERROR\",\"The equation input contains one or more instances of adjacent operators. eg. +*\"])\n return [\"ERROR\",\"The equation you have input contains one or more instances of adjacent operators. eg. 
+*\"]\n else:\n operator = True\n continue\n if(operator == True):\n operator = False\n\n return [\"GOOD\",inputString]\n\n#a simple function that can be passed a list, and inputHandler will send the data to the relevant function based on the zeroeth element of the list\ndef inputHandler(inputList):\n if(inputList[0] == \"GOOD\"):\n\treturnList = mathHandler(inputList[1])\n\treturn returnList\n\n elif(inputList[0] == \"FILE\"):\n\treturnList = readFile(inputList[1])\n\treturn returnList\n\n elif(inputList[0] == \"GRAPH\"):\n\treturnList = graphHandler(inputList[1])\n\treturn returnList\n\n elif(inputList[0] == \"ERROR\"):\n\treturn inputList\n\n else:\n\tlogger([\"ERROR\",\"inputHandler received an unexpected input: [\" + inputList[0] + \",\" + inputList[1] + \"]\"])\n\treturn [\"ERROR\",\"inputHandler received an unexpected input: \" + inputList[0] + \",\" + inputList[1]]\t\n\ndef readFile(filePath):\n returnList = [\"FILE_R\"]\n tempString = ''\n lineCounter = 1\n\n #basic try/except to open the file\n try:\n inputFile = open(filePath, 'r')\n except IOError:\n logger([\"ERROR\",\"Could not open input file, IOError exception.\"])\n return [\"ERROR\",\"Could not open input file.\"]\n\n #read the entire file into a list, storing each line as an element\n inputList = inputFile.readlines()\n\n #partition the line around the @ delimiter, using the 0th element of the result as a tag for graph/calculation\n for e in inputList:\n element = e.strip()\n if(len(element) != 0):\n tempString = element.partition('@')\n if(tempString[0] == 'GRPH'):\n outputList = parseFunction(tempString[2])\n if(outputList[0] == 'ERROR'):\n outputList.append(outputList[1])\n outputList[1] = \"LINE \" + str(lineCounter) + \": \" + tempString[2]\n elif(tempString[0] == 'CALC'):\n outputList = parseInputString(tempString[2])\n if(outputList[0] == 'ERROR'):\n outputList.append(outputList[1])\n outputList[1] = \"LINE \" + str(lineCounter) + \": \" + tempString[2]\n else:\n logger([\"ERROR\", \"LINE: 
\" + str(lineCounter) ,\"Failure to import file contents: please compare function format with README.\"])\n outputList = [\"ERROR\", \"LINE: \" + str(lineCounter) ,\"Failure to import file contents: please compare function format with README.\"]\n if(outputList == None):\n logger([\"ERROR\",\"parseInputString return a None object when reading the input file.\"])\n continue\n else:\n returnList.append(outputList)\n lineCounter += 1\n\n return returnList\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":639,"cells":{"__id__":{"kind":"number","value":8358006405928,"string":"8,358,006,405,928"},"blob_id":{"kind":"string","value":"7e48a297512da16ebfcceb8024c2f758896def10"},"directory_id":{"kind":"string","value":"4ce702c1b6c6019be3abc95903d54dffbe7a9ede"},"path":{"kind":"string","value":"/fraction.py"},"content_id":{"kind":"string","value":"0af394abf47894fbfa58c29adb08b6ae7d5590bb"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"fromMars/fraction"},"repo_url":{"kind":"string","value":"https://github.com/fromMars/fraction"},"snapshot_id":{"kind":"string","value":"6644fbbfd4a38e76f1e43cb6b8d5beb0831d2653"},"revision_id":{"kind":"string","value":"65228a3944959a8ecc5a6fdcc45491b2d1071fff"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T21:07:28.193393","string":"2021-01-15T21:07:28.193393"},"revision_date":{"kind":"timestamp","value":"2014-12-26T04:56:33","string":"2014-12-26T04:56:33"},"committer_date":{"kind":"timestamp","value":"2014-12-26T04:56:33","string":"2014-12-26T04:56:33"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sys\n\ndef seprate_number(number):\n int_part = str(number).split(\".\")[0]\n dec_part = str(number).split(\".\")[1]\n dec_type = int(dec_part[-1:])\n print \"seprate integer and decimal:\", int_part, dec_part, dec_type\n return (int_part, dec_part, dec_type)\n \n\ndef seprate_decimal(decimal_number):\n base = int(\"1\" + \"0\"*len(str(decimal_number)))\n base0 = base\n decimal_number0 = decimal_number\n gcd = g_c_d(base, decimal_number, 0, [0])\n gcd_number = gcd[-1:][0]\n print \"base:{0}, decimal_number:{1}\".format(base, decimal_number)\n \n return str(decimal_number/gcd_number) + \"/\" + 
str(base/gcd_number)\n\n\ndef g_c_d(base, decimal_number, count, tmp_mod):\n count += 1\n mod = base % decimal_number\n if mod==0:\n\tprint \"Got Result:\" + str(tmp_mod)\n\t#return tmp_mod\n else:\n\ttmp_mod.append(mod)\n\tprint str(count) + \": mod: {0}, tmp_mod: {1}\".format(str(mod), str(tmp_mod))\n\tg_c_d(decimal_number, mod, count, tmp_mod)\n return tmp_mod\n\ndef to_fraction(number):\n seprated = seprate_number(number)\n dec_result = seprate_decimal(int(seprated[1]))\n result = str(seprated[0]) + \"|\" + dec_result\n \n print result\n \n \nif __name__ == \"__main__\":\n w = float(sys.argv[1])\n h = float(sys.argv[2])\n \n to_fraction(w/h)\n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":640,"cells":{"__id__":{"kind":"number","value":11716670786155,"string":"11,716,670,786,155"},"blob_id":{"kind":"string","value":"b43cecc93635d703ce9b059cd6e28afa7f718e13"},"directory_id":{"kind":"string","value":"0733df4c3fb92762b484c6f4c4600d7a308e3572"},"path":{"kind":"string","value":"/cphct/plugins.py"},"content_id":{"kind":"string","value":"4e55c4c491391fa4654bc52aced88191e713fdd1"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only"],"string":"[\n 
\"GPL-2.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"josh-gree/cphcttoolbox"},"repo_url":{"kind":"string","value":"https://github.com/josh-gree/cphcttoolbox"},"snapshot_id":{"kind":"string","value":"efed90e063b9c3c1b30a68b42c068b6707e16615"},"revision_id":{"kind":"string","value":"406fc60fe075a9fb0a2a10231bac684a4f88bbaa"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-04T15:45:52.524279","string":"2016-08-04T15:45:52.524279"},"revision_date":{"kind":"timestamp","value":"2014-12-04T11:15:56","string":"2014-12-04T11:15:56"},"committer_date":{"kind":"timestamp","value":"2014-12-04T11:15:56","string":"2014-12-04T11:15:56"},"github_id":{"kind":"number","value":39253012,"string":"39,253,012"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#\n# --- BEGIN_HEADER ---\n#\n# plugins - application plugin framework\n# Copyright (C) 2012-2014 The Cph CT Toolbox Project lead by Brian Vinter\n#\n# This file is part of Cph CT Toolbox.\n#\n# Cph CT Toolbox is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Cph CT Toolbox is distributed in the hope that it will be useful,\n# but WITHOUT ANY 
WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n#\n# -- END_HEADER ---\n#\n\n\"\"\"Plugin framework for optional application extensions that may be provided\nby users.\n\nAll plugin modules should implement the mandatory init/exit functions:\nplugin_init(conf, ..)\nplugin_exit(conf, ..)\n\nthat will automatically be called once with the actual plugin arguments before\nand after all plugin use respectively. They may be used to set up and tear\ndown plugin data structures but can just return without any actions otherwise.\n\nThe actual plugin actions are implemented in the four functions:\nload_input(input_data, input_meta, conf, ..)\npreprocess_input(input_data, input_meta, conf, ..)\npostprocess_output(output_data, output_meta, conf, ..)\nsave_output(output_data, output_meta, conf, ..)\n\nthat take at least an array, a meta data list and a configuration dictionary\nas input, but where '..' should include any other valid positional or named\narguments that may be passed to the plugin. They should modify the array\ninline if possible and always return the resulting array.\n\nPlease note that the plugin functions will typically be called on a chunk of\nthe complete input and output. 
Thus the functions should be flexible enough to\nhandle variable chunks, or detect and fail if chunk is incompatible with the\nprocessing.\n\"\"\"\n\nimport os\nimport sys\nimport traceback\n\nallowed_plugin_hooks = ['load_input', 'preprocess_input',\n 'postprocess_output', 'save_output']\ninternal_plugin_hooks = ['plugin_init', 'plugin_exit']\n\n\ndef unsupported_handler(*args, **kwargs):\n \"\"\"Shared helper to mark plugin unsuitable for particular operations.\n\n Parameters\n ----------\n *args : positional arguments\n Any positional arguments.\n **kwargs : keyword arguments\n Any keyword arguments.\n\n Raises\n -------\n ValueError :\n Always as operation doesn't make sense.\n \"\"\"\n\n raise ValueError('unsupported operation for plugin!')\n\n\ndef add_unsupported(plugin_mod, handler):\n \"\"\"Adds the *handler* function for any targets from allowed_plugin_hooks\n that *plugin_mod* does not implement itself. This is in order to allow\n plugins to only explicitly implement relevant handlers yet still gracefully\n handle cases where a user tries to execute a plugin in an unhandled\n context.\n\n Parameters\n ----------\n plugin_mod : module object\n A plugin module previously loaded.\n handler : function\n A function to assign to targets that *plugin_mod* does not implement.\n \"\"\"\n\n for target in allowed_plugin_hooks:\n if not hasattr(plugin_mod, target):\n setattr(plugin_mod, target, handler)\n\n\ndef plugin_base_dirs(conf):\n \"\"\"Return a list of base plugin search directories.\n Plugins are picked up from sub directories of the:\n * global toolbox installation path (/path/to/cphcttoolbox)\n * toolbox dot directory in the user home (~/.cphcttoolbox)\n * current directory (./)\n and in that order.\n\n Parameters\n ----------\n conf : dict\n Configuration dictionary.\n \n Returns\n -------\n output : list of str\n Returns a list of plugin search directory paths\n \"\"\"\n\n # Parent dir of this module dir is toolbox base\n\n global_base = 
conf['cphcttoolbox_base']\n user_base = os.path.expanduser(os.path.join('~', '.cphcttoolbox'))\n local_base = os.path.abspath('.')\n return [global_base, user_base, local_base]\n\n\ndef app_plugin_dirs(app_names, engine_dir, conf):\n \"\"\"Return a list of application-specific plugin search directories for the\n application registering with the names in the *app_names* list and the\n given *engine_dir*.\n The list contains plugin directory paths sorted in growing priority order.\n\n Parameters\n ----------\n app_names : list of str\n List of application names.\n engine_dir : str\n Back end calculation engine sub directory name.\n conf : dict\n Configuration dictionary.\n \n Returns\n -------\n output : list of str\n Returns a list of engine-specific plugin directories for the given\n application.\n \"\"\"\n\n plugin_paths = []\n plugin_dirs = plugin_base_dirs(conf)\n\n # Search and add plugin directories in increasing priority order\n\n plugin_prefixes = ['cphct', ''] + app_names\n for base_path in plugin_dirs:\n for plugin_dir in ['%splugins' % pre for pre in plugin_prefixes]:\n dir_path = os.path.join(base_path, plugin_dir, engine_dir)\n plugin_paths.append(dir_path)\n return plugin_paths\n\n\ndef app_plugin_paths(app_names, engine_dir, conf):\n \"\"\"Return a list of available plugins for the application registering with\n the names in the *app_names* list and the given *engine_dir*.\n The list contains tuples of plugin location and name and it is sorted in\n growing priority order in case a plugin name appears more than once.\n Plugins are picked up from the:\n * global toolbox installation path (/path/to/cphcttoolbox)\n * toolbox dot directory in the user home (~/.cphcttoolbox)\n * current directory (./)\n and in that order.\n For each search location any 'cphctplugins', 'plugins' and 'Xplugins'\n directories (where X is a name in *app_names*) will be searched for python\n modules in the *engine_dir* sub directories. I.e. 
cphctplugins/npy will be\n searched for numpy plugins.\n\n Parameters\n ----------\n app_names : list of str\n List of application names.\n engine_dir : str\n Back end calculation engine sub directory name.\n conf : dict\n Configuration dictionary.\n \n Returns\n -------\n output : list of tuple\n Returns a list of plugin directory and name tuples\n \"\"\"\n\n plugin_paths = []\n plugin_dirs = app_plugin_dirs(app_names, engine_dir, conf)\n\n # Search and add plugins in increasing priority order\n\n for dir_path in plugin_dirs:\n if os.path.isdir(dir_path):\n dir_files = os.listdir(dir_path)\n for file_name in dir_files:\n (mod_name, ext) = os.path.splitext(file_name)\n mod_path = os.path.join(dir_path, mod_name)\n if ext == '.py' or os.path.isdir(mod_path):\n plugin_paths.append((dir_path, mod_name))\n\n return plugin_paths\n\n\ndef load_plugins(app_names, engine_dir, conf):\n \"\"\"Load plugins specified in conf using plugin paths based on *app_names*\n and *engine_dir*. In case of multiple plugins with the same base name the\n plugin_paths list is used as growing priority order so that the last\n matching plugin is used.\n Returns a tuple containing a dictionary with loaded plugins and a\n dictionary containing any loading errors encountered. Both dictionaries\n map from names in allowed_plugin_hooks to the actual data.\n Enabled hooks automatically get their optional internal plugin hooks set\n for init and clean up. 
The *engine_dir* parameter is used as a prefix for\n the inserted *conf* values.\n Plugin arguments from the corresponding *conf* entries are copied so that\n all subsequent plugin actions can rely solely on the returned dictionary.\n\n Parameters\n ----------\n app_names : list of str\n List of application names.\n engine_dir : str\n Back end calculation engine sub directory name.\n conf : dict\n Configuration dictionary.\n\n Returns\n -------\n output : (dict, dict)\n Returns a 2-tuple of a plugin dictionary and load error dictionary.\n \"\"\"\n\n (plugins, errors) = ({}, {})\n orig_sys_path = sys.path\n internal_targets = ['%s_%s' % (engine_dir, i) for i in\n internal_plugin_hooks]\n external_targets = ['%s_%s' % (engine_dir, i) for i in\n allowed_plugin_hooks]\n\n # For automatic init and clean up of enabled plugins\n\n for auto_target in internal_targets:\n plugins[auto_target] = []\n\n # Locate and load plugins in increasing priority order\n\n plugin_paths = app_plugin_paths(app_names, engine_dir, conf)\n for target in external_targets:\n (plugins[target], errors[target]) = ([], [])\n\n # Conf entry is (name, args, kwargs) tuple\n\n index = 0\n for (req_mod, args, kwargs) in conf.get(target, []):\n use_plugin = None\n\n # Search backwards from the end to apply priority\n\n for (dir_path, mod_name) in plugin_paths[::-1]:\n if req_mod == mod_name:\n use_plugin = (dir_path, mod_name)\n break\n\n if use_plugin:\n (plugin_dir, plugin_name) = use_plugin\n\n # Load plugin with plugin_dir as first source but with\n # original module path appended for external dependencies.\n # Automatically add init and exit hooks for all enabled\n # plugins. 
Please note that we repeat init and exit for every\n # occurrence of a plugin because it may require individual init\n # and exit for each set of arguments.\n # Immediately remove module from sys.modules after import to\n # avoid caching when loading module of same name for another\n # engine.\n # We insert the plugin call arguments from conf here for\n # complete information in returned plugins dictionary.\n # Finally we prepare the plugin instance __plugin_state__\n # dictionary for use in individual plugin executions.\n\n sys.path = [plugin_dir] + orig_sys_path\n try:\n plugin_mod = __import__(plugin_name)\n plugin_mod.__plugin_state__['target'] = target\n plugin_mod.__plugin_state__['id'] = index\n add_unsupported(plugin_mod, unsupported_handler)\n del sys.modules[plugin_name]\n plugin_tuple = (plugin_name, plugin_mod, args,\n kwargs)\n plugins[target].append(plugin_tuple)\n for auto_target in internal_targets:\n plugins[auto_target].append(plugin_tuple)\n except Exception, exc:\n err = 'Failed to load %s plugin from %s:\\n%s' \\\n % (plugin_name, plugin_dir,\n traceback.format_exc(exc))\n errors[target].append((plugin_name, err))\n else:\n err = 'No such plugin \"%s\" in plugin directories %s' \\\n % (req_mod, ', '.join(app_plugin_dirs(app_names,\n engine_dir, conf)))\n errors[target].append((req_mod, err))\n index += 1\n sys.path = orig_sys_path\n return (plugins, errors)\n\n\ndef execute_plugin(\n hook,\n name,\n plugin_mod,\n args,\n kwargs,\n ):\n \"\"\"Execute matching *hook* function from *plugin_mod* plugin module with\n the provided positional *args* and named *kwargs*.\n\n Parameters\n ----------\n hook : str\n Name of hook function\n name : str\n Name of plugin\n plugin_mod : module\n Plugin module previously loaded and prepared.\n args : list of str\n List of arguments for plugin\n kwargs : dict\n Dictionary of keyword and value pair arguments for plugin\n\n Returns\n -------\n output : ndarray or None\n The processed ndarray for main hooks and None 
for init/exit hooks.\n\n Raises\n -------\n ValueError\n If plugin or hook does not match any supplied plugins.\n \"\"\"\n\n if hook.endswith('_plugin_init'):\n return plugin_mod.plugin_init(*args, **kwargs)\n elif hook.endswith('_load_input'):\n return plugin_mod.load_input(*args, **kwargs)\n elif hook.endswith('_preprocess_input'):\n return plugin_mod.preprocess_input(*args, **kwargs)\n elif hook.endswith('_postprocess_output'):\n return plugin_mod.postprocess_output(*args, **kwargs)\n elif hook.endswith('_save_output'):\n return plugin_mod.save_output(*args, **kwargs)\n elif hook.endswith('_plugin_exit'):\n return plugin_mod.plugin_exit(*args, **kwargs)\n else:\n raise ValueError('invalid plugin hook %s for %s (%s)' % (hook,\n name, plugin_mod))\n\n\ndef set_plugin_var(\n conf,\n key,\n value,\n replace=False,\n ):\n \"\"\"Set plugin variable *key* to *value*.\n This is used to share variables between plugins.\n\n Parameters\n ----------\n conf : dict\n Configuration dictionary.\n key : str\n Variable name.\n value : object\n Variable value.\n replace : bool, optional\n If True, existing variable *key* is replaced by *value*.\n \n Raises\n -------\n ValueError\n If *replace* is False and *key* already exists\n \"\"\"\n\n if replace or key not in conf['plugin_shared_vars']:\n conf['plugin_shared_vars'][key] = value\n else:\n msg = \\\n \"plugin var: '%s' already exists, use replace=True to overwrite\" \\\n % key\n raise ValueError(msg)\n\n\ndef get_plugin_var(conf, key):\n \"\"\"Get plugin variable value for *key*.\n This is used to share variables between plugins.\n \n Parameters\n ----------\n conf : dict\n Configuration dictionary.\n key : str\n Variable name.\n \n Returns\n -------\n output : object or None\n Returns the value for *key* or None if *key* was not previously set\n \n \"\"\"\n\n output = None\n if key in conf['plugin_shared_vars']:\n output = conf['plugin_shared_vars'][key]\n\n return 
output\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":641,"cells":{"__id__":{"kind":"number","value":5153960790793,"string":"5,153,960,790,793"},"blob_id":{"kind":"string","value":"d6bb0b6afb6681b0e90f3a3fe9f8dfb8d5b3da65"},"directory_id":{"kind":"string","value":"b875f61a0aac6c87d6256f8eaef54211a261a7e5"},"path":{"kind":"string","value":"/src/www/tools/connectdb.py"},"content_id":{"kind":"string","value":"110cc92d39d910e16cf89b2f475a89684407c59d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jakewan/kestava"},"repo_url":{"kind":"string","value":"https://github.com/jakewan/kestava"},"snapshot_id":{"kind":"string","value":"f7afc812db1b02861b384b026f0cef957efb4549"},"revision_id":{"kind":"string","value":"d5e5559621baa8eb1d39ba3851589967f6ac53dd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-05-21T23:45:47.413223","string":"2016-05-21T23:45:47.413223"},"revision_date":{"kind":"timestamp","value":"2010-12-11T18:52:18","string":"2010-12-11T18:52:18"},"committer_date":{"kind":"timestamp","value":"2010-12-11T18:52:18","string":"2010-12-11T18:52:18"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gh
a_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import cherrypy\n\nfrom model.database import make_connection, close_connection\n\nclass ConnectDb(cherrypy.Tool):\n \n def __init__(self):\n super(ConnectDb, self).__init__('before_request_body', self.__connect)\n \n def __connect(self):\n make_connection()\n\nclass DisconnectDb(cherrypy.Tool):\n \n def __init__(self):\n super(DisconnectDb, self).__init__('on_end_request', self.__disconnect)\n\n def __disconnect(self):\n if hasattr(cherrypy.request, 'db'):\n close_connection()\n \ncherrypy.tools.connect_db = ConnectDb()\ncherrypy.tools.disconnect_db = DisconnectDb()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":642,"cells":{"__id__":{"kind":"number","value":2388001865196,"string":"2,388,001,865,196"},"blob_id":{"kind":"string","value":"839bcba20ae6342284bf76349d48f311deaa935b"},"directory_id":{"kind":"string","value":"3f04a8f4ab3f4aaaa288f93c6e8fdaa1fc3dd044"},"path":{"kind":"string","value":"/1.download/ex11.py"},"content_id":{"kind":"string","value":"1fd4e92e1c2185a876109f8dfff83640dd2fb29c"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"gengletao/learn-python"},"repo_url":{"kind":"string","value":"https://github.com/gengletao/learn-python"},"snapshot_id":{"kind":"string","value":"7f8ae92c22d520312345ebd24969e6c75a834f6e"},"revision_id":{"kind":"string","value":"62b756f8184ed774fe0911e856994afee8c243ba"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-01T19:05:34.749283","string":"2016-09-01T19:05:34.749283"},"revision_date":{"kind":"timestamp","value":"2011-10-08T16:32:33","string":"2011-10-08T16:32:33"},"committer_date":{"kind":"timestamp","value":"2011-10-08T16:32:33","string":"2011-10-08T16:32:33"},"github_id":{"kind":"number","value":2416030,"string":"2,416,030"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: letao geng \n# Copyright (C) alipay.com 2011\n\n'''\n'''\n\nimport os\nimport sys\n\ndef main():\n ''' main function\n '''\n print \"How old are you?\",\n age = raw_input()\n print \"How tall are you?\",\n height = raw_input()\n print \"How much do you weight?\",\n weight = raw_input()\n\n print \"So, you're %r old, %r tall and %r heavy.\" % (\n age, height, weight)\n\n print 'Done'\n\nif __name__ == '__main__':\n 
main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":643,"cells":{"__id__":{"kind":"number","value":7378753856050,"string":"7,378,753,856,050"},"blob_id":{"kind":"string","value":"703288dd7d7e89e65a01a82cc5eaba103933cc0d"},"directory_id":{"kind":"string","value":"db258fdb98d36eef012f527f05270cb1eab8b5bf"},"path":{"kind":"string","value":"/ListeNumBBis.py"},"content_id":{"kind":"string","value":"d4d73c84030a4e602de1cc3fa005b4f9bd479a27"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"lisalam/Code_VRD"},"repo_url":{"kind":"string","value":"https://github.com/lisalam/Code_VRD"},"snapshot_id":{"kind":"string","value":"31440e4b4897deb705c578c1c9557b9160ffea4c"},"revision_id":{"kind":"string","value":"bda284754b1095cea07bbe231f53448dcb67d2d7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T08:10:12.278550","string":"2021-01-10T08:10:12.278550"},"revision_date":{"kind":"timestamp","value":"2013-05-15T13:59:23","string":"2013-05-15T13:59:23"},"committer_date":{"kind":"timestamp","value":"2013-05-15T13:59:23","string":"2013-05-15T13:59:23"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{
"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\nimport os, sys\nimport javax.swing as swing\nimport java.awt as awt\nfrom javax.swing import BorderFactory\nfrom javax.swing.border import EtchedBorder, TitledBorder\nfrom java.awt import Font\n\nfrom java.awt import TextField, Panel, GridLayout, ComponentOrientation, Label, Checkbox, BorderLayout, Button, Color, FileDialog, Frame, Font\n\n\nimport sys\nimport os\nimport time\nimport glob\nimport os.path as path\nimport getpass\nimport shutil\nimport random\nimport math\n\nusername=getpass.getuser()\n\nmypath=os.path.expanduser(os.path.join(\"~\",\"Dropbox\",\"Macros_Lisa\",\"Code_VRD\"))\nsys.path.append(mypath)\n\nfrom org.python.core import codecs\ncodecs.setDefaultEncoding('utf-8')\n\n\nclass ListeNumBBis(swing.JFrame):\n\n\t\t\t\tdef __init__(self, listnumb):\n\t\t\t\t\tswing.JFrame.__init__(self, title=\"Numero de Boite\")\n\t\t\t\t\tself.setDefaultCloseOperation(swing.JFrame.DISPOSE_ON_CLOSE)\n\t\t\t\t\tself.__listnumb = listnumb\n\t\t\t\t\tself.run()\n\n\t\t\t\tdef run(self):\n\t\t\t\t\tself.size = (200, 300)\n\t\t\t\t\tself.contentPane.layout = awt.BorderLayout()\n\t\t\t\t\tline = BorderFactory.createEtchedBorder(EtchedBorder.LOWERED)\n\n\n\t\t\t\t\tPanel1=swing.JPanel(awt.FlowLayout(awt.FlowLayout.CENTER))\n\t\t\t\t\tPanel1.setBorder(line)\n\t\t\t\t\tlabel=swing.JLabel(\"\")\n\t\t\t\t\tlabel.setText(\"Liste des numeros de boites\")\n\t\t\t\t\tPanel1.add(label)\n\t\t\t\n\t\t\t\t\tPanel2=swing.JPanel(awt.FlowLayout(awt.FlowLayout.CENTER))\n\t\t\t\t\tPanel2.setBorder(line)\n\t\t\t\t\tself.__displistnumb = swing.JList(self.__listnumb)\n\t\t\t\t\tself.__displistnumb.setVisibleRowCount(14)\n\t\t\t\t\tself.__displistnumb.setFixedCellWidth(75) \n\t\t\t\t\tPanel2.add(self.__displistnumb)\n\t\t\t\t\tbarre = 
swing.JScrollPane(self.__displistnumb)\n\t\t\t\t\tPanel2.add(barre)\n\t\t\t\n\t\t\t\n\t\t\t\t\tPanel3=swing.JPanel(awt.FlowLayout(awt.FlowLayout.RIGHT))\n\t\t\t\t\tPanel3.setBorder(line)\n\n\t\t\t\n\t\t\t\t\tself.contentPane.add(Panel1, awt.BorderLayout.NORTH)\n\t\t\t\t\tself.contentPane.add(Panel2, awt.BorderLayout.CENTER)\n\t\t\t\t\tself.contentPane.add(Panel3, awt.BorderLayout.SOUTH)\n\t\t\t\n\t\t\t\n\t\t\n\n\t\t\t\nif __name__ == \"__main__\":\n\n\n\tlistnumb=[]\n\tnum1= (\"1\")\n\tnum2 = (\"2\")\n\tnum3 = (\"3\")\n\tnum4 = (\"4\")\n\t\n\t\n\n\tlistnumb=[num1,num2,num3,num4]\n\t\n\tnumb = ListeNumBBis(listnumb)\n\tnumb.show()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":644,"cells":{"__id__":{"kind":"number","value":14499809602883,"string":"14,499,809,602,883"},"blob_id":{"kind":"string","value":"005c434784c32d1eeb9e7e9e8d4c3abbad79f782"},"directory_id":{"kind":"string","value":"8494e4c915de65ab270bfb04b53da6ed1b5b18f9"},"path":{"kind":"string","value":"/e43.py"},"content_id":{"kind":"string","value":"8cc0daded2af6460a777c080a73190d641b5c978"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"spanishbear/learnPythonTheHardWay"},"repo_url":{"kind":"string","value":"https://github.com/spanishbear/learnPythonTheHardWay"},"snapshot_id":{"kind":"string","value":"f2209fb3dd5825052231dc87f35b551bb94e204c"},"revision_id":{"kind":"string","value":"16dc1882c2f45473c2be4b03c7633d07095efaed"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T11:39:18.811606","string":"2021-01-20T11:39:18.811606"},"revision_date":{"kind":"timestamp","value":"2014-02-21T20:48:57","string":"2014-02-21T20:48:57"},"committer_date":{"kind":"timestamp","value":"2014-02-21T20:48:57","string":"2014-02-21T20:48:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from sys import exit\nfrom random import randint\n\nclass Scene(object):\n def enter(self):\n print \"This scene is not configured.\"\n exit(1)\n\nclass Engine(object):\n def __init__(self, scene_map):\n print \"Engine __init_ has scene_map\", scene_map\n self.scene_map = scene_map\n\n def play(self):\n current_scene = self.scene_map.opening_scene()\n print \"Play's first scene\", current_scene\n\n while True:\n print \"\\n-------------\"\n next_scene_name = current_scene.enter()\n print \"next scene\", next_scene_name\n current_scene = self.scene_map.next_scene(next_scene_name)\n print \"map 
returns new scene\", current_scene\n\nclass Death(Scene):\n def enter(self):\n print \"Whoops. You died.\"\n print \"Try again.\"\n \nclass CentralCorridor(Scene):\n def enter(self):\n print \"\"\"You are the last surviving member of your ship and your last mission is to get the neutron destruct bomb from the \n Weapons Armory and blow the ship up after getting into an escape pod. You're running down the central corridor as silent as\n a mouse. Really, it was so silent that it was deafening. You see, your shoes were designed to be completely soundless. \n Then, outta nowhere a Gothon jumps out. You are shocked for two reasons: One, how in the hell did this Gothon find you so \n easily? Second, you expected a terrible monster with sharp, jagged features. A creature straight from the depths of hell, with \n rows of teeth that one bite feels like a million bites at once, claws so sharp that one slice would behead a person, a stench\n so foul that would knock the breath out of you. Oddly enough though, these Gothons truly were what their name says. Literally.\n They were just gothic humans. Here you are with a smooth-faced gothic boy standing before you, blocking the armory door.\n But you simply do not have the time to think about this conundrum. How was the entire crew defeated by these Gothons? \n Anyway, what do you do? You have the options of \"shooting\", \"dodging his attack\", \"running away in confusion\", or \"telling a joke\".\n \"\"\" \n action = raw_input(\"> \") \n\n if action == \"shooting\":\n print \"\"\"Bang! Bang! Bang! You closed your eyes while shooting him because you didn't want to see the truth that you were\n shooting a young boy. Unfortunately, you emptied your clip and you did not hit him AT ALL. He's terribly upset that you \n tried to kill him. 
He goes into an extreme rage and kills you instantly with one punch.\n \"\"\"\n return 'death'\n elif action == \"running away in confusion\":\n print \"\"\"After finding out what a Gothon truly is, you immediately run away with the most puzzling look on your face. Suddenly,\n there are other goths. They're watching you run towards them and they almost laugh at the face you're making. \n But they shoot you instead.\n \"\"\"\n return 'death'\n elif action == \"dodging his attack\":\n print \"\"\"When the Gothon sees you, he tries to shoot with you his Gothic-themed gun. You dodge the gun shot successfully, but \n you didn't realize that he had a knife in his other hand which he quickly uses to kill you as your dodging. \"\"\"\n elif action == \"telling a joke\":\n print \"\"\"Lucky for you, you had a few gothic friends when you were growing up. You are accustomed to their ways and you know \n what will make them laugh. After all, they just want to be happy. You say a joke, a pretty lame joke actually, and the Gothon\n stops, tries not to laugh, then bursts out laughing and can't move. While he's laughing, you shoot him right between his eyes.\n Then you keep walking towards the armory door and you're in!\"\"\"\n return 'laser_weapon_armory'\n else:\n print \"Huh? Try again\"\n return 'central_corridor'\n\n\nclass LaserWeaponArmory(Scene):\n def enter(self):\n print \"\"\" You walk in the room and see a box lying in the middle of the room. You walk up to it and see that there's a lock on \n the box. You have to guess the 3-digit code . You have only 10 times to do so.\n \"\"\"\n code = \"%d%d%d\" % (randint(1,9), randint(1,9), randint(1,9))\n guess = raw_input(\"[keypad]> \")\n guesses = 0 \n\n while guess != code and guesses < 10: \n print \"bzzzzzzz\"\n guesses += 1\n guess = raw_input(\"[keypad]> \")\n if guess == code:\n print \"\"\"The box finally opens up and you pick up the neutron bomb. Then you run to the bridge and place it in the right spot. 
\n \"\"\"\n return 'the_bridge'\n else:\n print \"Obviously your will to live has dwindled. Incorrect code. The Gothons blow the ship.\"\n return 'death'\n\nclass TheBridge(Scene):\n def enter(self):\n print \"\"\"You are on the bridge with the bomb. You've planted the bomb so now all you want to do is set detonote the bomb. \n \"\"\"\n action = raw_input(\"> \")\n\n if action == \"throw the bomb\":\n print \"Bomb was thrown and you died\"\"\" \n return 'death'\n elif action == \"slowly place the bomb\":\n print \"Yay!\"\n else:\n print \"Does not compute. Try again.\"\n return 'the_bridge'\n \n\nclass EscapePod(Scene):\n def enter(self):\n print \"You try to escape. You jump into a pod and leave!\"\n return 'finished'\n \n\nclass Map(object):\n scenes = {\n 'central_corridor': CentralCorridor(),\n 'laser_weapon_armory': LaserWeaponArmory(),\n 'the_bridge': TheBridge(),\n 'escape_pod': EscapePod(),\n 'death': Death()\n }\n def __init__(self, start_scene):\n self.start_scene = start_scene\n\n def next_scene(self, scene_name):\n self.scene_name = scene_name\n print \"start_scene in next_scene\"\n val = Map.scenes.get(scene_name)\n print \"next_scene returns\", val\n\n def opening_scene(self):\n return self.next_scene(self.start_scene)\n\na_map = Map('central_corridor')\nprint \"THIS IS THE A_MAP VARIABLE\", a_map\na_game = Engine(a_map)\nprint \"THIS IS THE A_GAME VARIABLE\", 
a_game\na_game.play()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":645,"cells":{"__id__":{"kind":"number","value":2448131381726,"string":"2,448,131,381,726"},"blob_id":{"kind":"string","value":"7235d20d6dfd7422847c1b25a3b94a9835f4de4e"},"directory_id":{"kind":"string","value":"cb229e438db6f6e713e66edf947d44b2953aaca8"},"path":{"kind":"string","value":"/esrchsmry.py"},"content_id":{"kind":"string","value":"9810aeb6c8dfca20430a52d79f6f4418b840914d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"oscarpeterjohansson/Pyntrez"},"repo_url":{"kind":"string","value":"https://github.com/oscarpeterjohansson/Pyntrez"},"snapshot_id":{"kind":"string","value":"873e6738f4afc89f92351213bff88a72d5c14694"},"revision_id":{"kind":"string","value":"84aed747a7818bf5f0b29b68f8c83cf9227e26cb"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T20:45:05.381364","string":"2021-01-10T20:45:05.381364"},"revision_date":{"kind":"timestamp","value":"2014-12-02T09:52:26","string":"2014-12-02T09:52:26"},"committer_date":{"kind":"timestamp","value":"2014-12-02T09:52:26","string":"2014-12-02T09:52:26"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_langua
ge":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\"\"\" Extract the main elements from the XML Esearch output\n\nThe following elements will be parsed out and be written to a new\nfile in tabular format:\n\n QueryTranslation\n Count\n RetMax\n RetStart\n QueryKey\n WebEnv\n IdList\n\n\nFrom the command-line: use -h/--help for information on execution\n\n\"\"\"\n\n__author__ = \"Johansson, O.\"\n__email__ = \"oscarpeterjohansson@outlook.com\"\n__contributors__ = \"\"\n__version__ = \"1.0\"\n__licence__ = \"GPL-3\"\n\n\nimport argparse\nimport re\nimport sys\n\n\nESPATTERNS = ( # this an appropriate order\n \"QueryTranslation\",\n \"Count\",\n \"RetMax\",\n \"RetStart\",\n \"QueryKey\",\n \"WebEnv\",\n \"IdList\"\n)\n\n\ndef find_text(dta):\n \"\"\" Find patterns in xml string \"\"\"\n row = []\n for p in ESPATTERNS:\n # \".*?\", non-greedy (ie. first match);\n # DOTALL -> . matches all, including newline\n m = re.search(\"(?P<%s>(?<=<%s>).*?(?=))\" % (p,p,p), dta, re.DOTALL) \n v = \"\"\n if m != None:\n v = m.group(p)\n if p == \"IdList\":\n IdList = re.findall(\"(?<=).*(?=)\", v)\n v = ','.join(IdList)\n row.append(v)\n return row\n\n\ndef esrchsmry(inputfile,outputfile):\n \"\"\" \n Extract relevant information from the xml output of the Esearch \n utils\n \"\"\"\n try:\n with open(inputfile,'r') as fd1, open(outputfile,'w') as fd2:\n xml = fd1.read()\n t_res = find_text(xml)\n txt = '\\r' + '\\t'.join(ESPATTERNS) + \"\\r\\n\"\n s_res = \"\\t\".join([str(t) for t in t_res]) + \"\\r\\n\"\n txt += s_res\n fd2.write(txt)\n except (IOError,) as e:\n ms = \"\\rIOError: sorry, no output this time\\r\\n\"\n sys.stdout.write(ms)\n\n\nparser = argparse.ArgumentParser(\n prog = sys.argv[0],\n conflict_handler = \"resolve\",\n description = \"\"\"\n Extract the main elements from the XML Esearch output\n utils. 
The following elements will be parsed out and be written\n to a new file in tabular format:\n QueryTranslation, Count, RetMax, RetStart, QueryKey, WebEnv, \n IdList\n \"\"\",\n add_help = True\n)\n\nparser.add_argument(\n \"--input\",\n dest = \"inputfile\",\n required = True,\n help = \"\"\"\n Path to xml file with output returned by the Esearch utils\n \"\"\"\n)\n\nparser.add_argument(\n \"--output\",\n dest = \"outputfile\",\n required = True,\n help = \"\"\"\n Path to file to contain output returned by this program\n \"\"\"\n)\n\n\ndef main(argv):\n \"\"\" For command-line use\n \"\"\"\n n_argv = parser.parse_args(argv)\n d_argv = vars(n_argv)\n esrchsmry(**d_argv)\n parser.exit(status=0, message=None)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":646,"cells":{"__id__":{"kind":"number","value":18519899011331,"string":"18,519,899,011,331"},"blob_id":{"kind":"string","value":"2b6ad86de494e62df725260216805e3586ff01fb"},"directory_id":{"kind":"string","value":"a94f779b762463d80a775db7efc08b47ff60aac1"},"path":{"kind":"string","value":"/days/4/classes/extras/duck-typing/2.py"},"content_id":{"kind":"string","value":"52747fd8eacd3c9c809012fd85ca9a0be8b0850b"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"sivajipr/python-course"},"repo_url":{"kind":"string","value":"https://github.com/sivajipr/python-course"},"snapshot_id":{"kind":"string","value":"602a99d941dc6df1dabb17dfc284dcffd140e003"},"revision_id":{"kind":"string","value":"176c04426f0cbef1c4beb888300dd911eb708b97"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T09:29:30.649858","string":"2016-09-05T09:29:30.649858"},"revision_date":{"kind":"timestamp","value":"2014-09-19T08:18:36","string":"2014-09-19T08:18:36"},"committer_date":{"kind":"timestamp","value":"2014-09-19T08:18:36","string":"2014-09-19T08:18:36"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nclass Duck:\n def quack(self):\n print 'Duck: quack quack'\n\nclass LooksLikeADuck:\n def quack(self):\n print 'LooksLikeADuck: quack quack'\n\n def run(self):\n print 'LooksLikeADuck: run'\n\n\ndef handle_duck(d):\n d.quack()\n\n\nd = 
LooksLikeADuck()\nhandle_duck(d)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":647,"cells":{"__id__":{"kind":"number","value":40816,"string":"40,816"},"blob_id":{"kind":"string","value":"068caeacf2bc2a1c76ea182636f5f045b419d493"},"directory_id":{"kind":"string","value":"ea7d84b2cf9f72454e4c5223c4b30b0fc9df6210"},"path":{"kind":"string","value":"/rmse.py"},"content_id":{"kind":"string","value":"67887c8c644ea48b75ea02ae6e7120220f6586c1"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"AlexeyMK/senior_design"},"repo_url":{"kind":"string","value":"https://github.com/AlexeyMK/senior_design"},"snapshot_id":{"kind":"string","value":"17a415aaa29d15b48826f3db16d3028805b48dbd"},"revision_id":{"kind":"string","value":"91a1941d5af3da4a930287f073a4cea77679a905"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-20T00:46:21.788119","string":"2020-05-20T00:46:21.788119"},"revision_date":{"kind":"timestamp","value":"2012-04-18T07:43:34","string":"2012-04-18T07:43:34"},"committer_date":{"kind":"timestamp","value":"2012-04-18T07:43:34","string":"2012-04-18T07:43:34"},"github_id":{"kind":"number","value":3110578,"string":"3,110,578"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null
"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# rmse.py\r\n# Calculate error for experiments\r\n# Test: rmse.py [name of csv file]\r\n\r\n# !/usr/bin/python\r\n\r\nimport math\r\nimport sys\r\n\r\nimport util\r\n\r\n\r\ndef get_single_rmse(actual_val, data_val):\r\n return math.fabs(1 + (float)(actual_val) * 2/5 - (float)(data_val))\r\n\r\n\r\ndef calculate_rmse(actual, data):\r\n err_list = []\r\n for i in range(len(actual)):\r\n err_list.append(get_single_rmse(actual[i], data[i]))\r\n\r\n avg_err = sum(err_list) / len(err_list)\r\n \r\n sq_list = [x**2 for x in err_list]\r\n std_err = math.sqrt(sum(sq_list) / len(sq_list))\r\n\r\n return (avg_err, std_err)\r\n\r\n\r\ndef main(filename):\r\n actual, data = util.process_csv_list(filename)\r\n print \"average error: %f \\nstandard error: %f\" % (calculate_rmse(actual, data))\r\n \r\n\r\nif __name__ == '__main__':\r\n sys.exit(main(sys.argv[1]))\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":648,"cells":{"__id__":{"kind":"number","value":8400956044577,"string":"8,400,956,044,577"},"blob_id":{"kind":"string","value":"f78493f605ac3e6e4d0d100ba44950c0b3c59838"},"directory_id":{"kind":"string","value":"231131309cf5e5861b6a3972dab2570ba9107edb"},"path":{"kind":"string","value":"/buybread/src/test.py"},"content_id":{"kind":"string","value":"6492490b481eecd516a7c860c4bec8c4c2191406"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"enzolv/PythonExercises"},"repo_url":{"kind":"string","value":"https://github.com/enzolv/PythonExercises"},"snapshot_id":{"kind":"string","value":"a950ca189371f1d004afe5b3d9d884de5fe9e51a"},"revision_id":{"kind":"string","value":"17df7ae654fda3cb0d742931e937162aa4d2ab5e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T16:59:29.814579","string":"2020-12-24T16:59:29.814579"},"revision_date":{"kind":"timestamp","value":"2012-11-30T10:21:39","string":"2012-11-30T10:21:39"},"committer_date":{"kind":"timestamp","value":"2012-11-30T10:21:39","string":"2012-11-30T10:21:39"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"food=[]\r\nstore = [\"bread\",\"butter\",\"cheese\"]\r\n\r\ndef bbread ():\r\n if \"bread\" in store:\r\n return \"bread\"\r\n return None\r\n pass\r\n\r\ndef bsausage ():\r\n if \"sausage\" in store:\r\n return \"sausage\"\r\n return None\r\n pass\r\n\r\ndef bbutter ():\r\n if \"butter\" in store:\r\n return \"butter\"\r\n return None\r\n 
pass\r\n\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":649,"cells":{"__id__":{"kind":"number","value":12240656816958,"string":"12,240,656,816,958"},"blob_id":{"kind":"string","value":"d29c9814107e6cc57fa65f419bc5816a8a215074"},"directory_id":{"kind":"string","value":"32cfd6a8df9b24059ed7bee0b7bf99b6c0268f6e"},"path":{"kind":"string","value":"/framework/seocortex/utils/proxied_requests.py"},"content_id":{"kind":"string","value":"18c36b876880cde6c908516ca23f148d9e2e0d1a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"blorenz/seocortex"},"repo_url":{"kind":"string","value":"https://github.com/blorenz/seocortex"},"snapshot_id":{"kind":"string","value":"5cd7acb647fbc4908e6045d2a89bdd2ade922434"},"revision_id":{"kind":"string","value":"3f1f7e8ac4a12e24e7f2cb58407ce52babfe5cf8"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T21:36:01.039128","string":"2016-09-05T21:36:01.039128"},"revision_date":{"kind":"timestamp","value":"2012-04-23T13:33:46","string":"2012-04-23T13:33:46"},"committer_date":{"kind":"timestamp","value":"2012-04-23T13:33:46","string":"2012-04-23T13:33:46"},"github_id":{"kind":"number","value":3951299,"string":"3,951,299"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_
issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Python imports\nfrom requests.auth import HTTPProxyAuth\nfrom requests.sessions import session\nfrom random import randint\n\n#DEFAULT_PROXIES = ['72.8.142.216:62048', '68.168.215.94:62048', '68.168.214.126:62048', '68.168.215.228:62048', '72.8.191.131:62048', '72.8.142.20:62048', '72.8.191.57:62048', '72.8.191.18:62048', '72.8.191.146:62048', '72.8.191.158:62048', '72.8.142.213:62048', '68.168.215.65:62048', '72.8.191.254:62048', '68.168.215.70:62048', '68.168.214.150:62048', '72.8.142.229:62048', '68.168.214.171:62048', '68.168.215.241:62048', '72.8.142.32:62048', '68.168.214.67:62048', '68.168.215.80:62048', '68.168.215.173:62048', '68.168.214.186:62048', '68.168.214.121:62048', '72.8.191.105:62048']\nDEFAULT_PROXIES = [\n {'ip':'50.117.24.226', 'port':'3131', 'u' : '31a89a8cbaae43b6', 'p' : '56024ab39ee54dcf'},\n {'ip':'50.117.68.212', 'port':'3131', 'u' : 'cbf69c8854ba4d04', 'p' : 'b7bb6255996749dd'},\n {'ip':'50.117.69.0', 'port':'3131', 'u' : 'fb8c9c5ee75e4f57', 'p' : '4add5380d904463c'},\n {'ip':'50.117.69.212', 'port':'3131', 'u' : '4edb9bc1c2fa4cf2', 'p' : '192384efd5e24fa4'},\n {'ip':'50.117.70.1', 'port':'3131', 'u' : '9404854294b04337', 'p' : '784c84deb85c44f8'},\n {'ip':'50.117.70.212', 'port':'3131', 'u' : '4d4fff780c02423d', 'p' : 'd918ca12a0ed4a7b'},\n {'ip':'50.117.71.1', 'port':'3131', 'u' : '0baf28faba404420', 'p' : '189b850e3fe24d09'},\n {'ip':'50.117.71.213', 'port':'3131', 'u' : '791b457dd2414762', 'p' : '7ed5670febb34e0d'},\n {'ip':'173.208.130.10', 'port':'3131', 'u' : 'd947f173e03449aa', 'p' : '22853a3d90154a90'},\n {'ip':'173.208.130.249', 'port':'3131', 'u' : 'b5ad668b72a84b93', 'p' : '8601db9523f84456'},\n {'ip':'173.208.145.164', 'port':'3131', 'u' : 'abd0ed0ca4d24913', 'p' : 'a9431c8f37ba4509'},\n {'ip':'173.208.145.82', 'port':'3131', 'u' : '53b9e92da15247e5', 'p' : 
'f7250e69cfa845ef'},\n {'ip':'173.208.153.235', 'port':'3131', 'u' : 'c0fbf735c6fe4ca9', 'p' : 'f2aa81122bda4f6a'},\n {'ip':'173.208.158.167', 'port':'3131', 'u' : '82513de71c1248f3', 'p' : 'b01bfe42a4ff492c'},\n {'ip':'50.117.64.10', 'port':'3131', 'u' : 'e1a213b3c10d47c8', 'p' : '67da9da435384bc0'},\n {'ip':'50.117.64.24', 'port':'3131', 'u' : '1124e174b2274f35', 'p' : '4a4e1bd0c87444eb'},\n {'ip':'50.117.65.15', 'port':'3131', 'u' : '2882da2640de4f0a', 'p' : '07e47cdbd8714484'},\n {'ip':'50.117.65.74', 'port':'3131', 'u' : '37b82961ace9499a', 'p' : 'af015f7d99ae4ded'},\n {'ip':'50.117.66.233', 'port':'3131', 'u' : 'd7b536dbb5844906', 'p' : 'c302b1cf423a49d8'},\n {'ip':'50.117.67.167', 'port':'3131', 'u' : 'f0fab0d16a274379', 'p' : '9f4f024b325f4201'},\n]\n# Auth information\nDEFAULT_USERNAME = \"davindergrover\"\nDEFAULT_PASSWORD = \"uMP4FEvlWGGy\"\nDEFAULT_AUTH = \"%s:%s\" % (DEFAULT_USERNAME, DEFAULT_PASSWORD)\nFETCH_HEADERS = {\n 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21',\n 'Accept-Encoding': ', '.join(('identity', 'deflate', 'compress', 'gzip')),\n 'Accept': '*/*'\n}\n\n##\n# Just simply wrap all the main requests module's methods\n# this is brain dead code TBH\n##\n\n\n\ndef get_proxy(offset = 0, username = DEFAULT_USERNAME, password = DEFAULT_PASSWORD, proxy_list = DEFAULT_PROXIES):\n p = proxy_list[offset]\n ip = p['ip']\n port = p['port']\n username = p['u']\n proxy = {\"http\" : \"http://%s:%s\" % (ip, port)}\n proxy['auth'] = HTTPProxyAuth(p['u'], p['p'])\n return proxy\n \n \ndef get_proxies(*args, **kwargs):\n i = 0\n # Breaks when proxy is None\n while True:\n proxy = get_proxy(offset = i, *args, **kwargs)\n if proxy is None:\n break\n yield proxy\n i += 1\n \n##\n# @return A random proxy url\n##\ndef get_random_proxy(*args, **kwargs):\n p_list = kwargs.get('proxy_list', [None])\n offset = randint(0, len(p_list)-1)\n return get_proxy(offset = offset, *args, 
**kwargs)\n\n##\n# Throws in a proxy if none specified\n##\ndef request(method, url, params=None, data=None, headers = FETCH_HEADERS, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, return_response=True, config=None, use_proxies = True):\n # This is the only thing we do\n if proxies is None and use_proxies:\n proxies = get_random_proxy(proxy_list = DEFAULT_PROXIES)\n if not proxies is None:\n auth = proxies['auth']\n del proxies['auth']\n \n \n s = session()\n kwargs = {\n 'method' : method, \n 'url' : url,\n 'params' : params,\n 'data' : data,\n 'headers' : headers,\n 'cookies' : cookies,\n 'files' : files,\n 'auth' : auth,\n 'timeout' : timeout,\n 'allow_redirects' : allow_redirects,\n 'proxies' : proxies,\n 'hooks' : hooks,\n 'return_response' : return_response,\n 'config' : config,\n }\n \n return s.request(**kwargs)\n \n\ndef get(url, **kwargs):\n kwargs.setdefault('allow_redirects', True)\n return request('GET', url, **kwargs)\n\n\ndef head(url, **kwargs):\n kwargs.setdefault('allow_redirects', True)\n return request('HEAD', url, **kwargs)\n\n\ndef post(url, data='', **kwargs):\n return request('post', url, data=data, **kwargs)\n\n\ndef put(url, data='', **kwargs):\n return request('put', url, data=data, **kwargs)\n\n\ndef patch(url, data='', **kwargs):\n return request('patch', url, data='', **kwargs)\n\n\ndef delete(url, **kwargs):\n return request('delete', url, 
**kwargs)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":650,"cells":{"__id__":{"kind":"number","value":11536282190755,"string":"11,536,282,190,755"},"blob_id":{"kind":"string","value":"03eb412c976881cca60191c6f9c7f02e894234cd"},"directory_id":{"kind":"string","value":"d941938417bab130154c78f732606daa7b107e4a"},"path":{"kind":"string","value":"/testing_runtime/web/job.py"},"content_id":{"kind":"string","value":"87fc376cb88eaefc9edf4b1d979fae51563b2dc1"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"skliarpawlo/ganymede"},"repo_url":{"kind":"string","value":"https://github.com/skliarpawlo/ganymede"},"snapshot_id":{"kind":"string","value":"abc8c7fac03b51a41cf92efacdf4170dd271d890"},"revision_id":{"kind":"string","value":"3a847635634d383d01dbeb70ef969202b0b7a8c9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-07T18:55:51.680687","string":"2016-09-07T18:55:51.680687"},"revision_date":{"kind":"timestamp","value":"2013-11-01T15:18:25","string":"2013-11-01T15:18:25"},"committer_date":{"kind":"timestamp","value":"2013-11-01T15:18:25","string":"2013-11-01T15:18:25"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":
"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# coding: utf-8\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom testing_runtime.models import Job, Task, StoredTest, User, EnvScript\nfrom core import db\nimport json\nfrom testing_runtime.web import tests\nfrom django.utils.translation import ugettext as _\nfrom decorators import html\nfrom django.template import RequestContext\n\ndef json_to_envs( js, job_id=None ) :\n if js is None :\n return []\n\n envs_params = json.loads( js )\n envs = []\n\n for env_dict in envs_params :\n model = EnvScript( **env_dict )\n model.job_id = job_id\n envs.append( model )\n\n return envs\n\n\ndef json_to_users( js ) :\n if js is None :\n return []\n\n users_ids = json.loads( js )\n\n users_collect = []\n for user_id in users_ids :\n users_collect.append(\n db.user_session.query( User ).filter( User.user_id == user_id ).one()\n )\n return users_collect\n\n\ndef fetch_users_info( job = None ) :\n res = []\n users = db.user_session.query( User ).all()\n if not (job is None) :\n checked_users = json_to_users( job.users )\n for x in users :\n checked = False\n if not (job is None) :\n if x in checked_users :\n checked = True\n\n res.append( {\n \"id\" : x.user_id,\n \"name\" : x.username,\n \"email\" : x.email,\n \"checked\" : checked\n } )\n return res\n\ndef json_to_tests( js ) :\n tests_ids = json.loads( js )\n\n tests_collect = []\n for test_id in tests_ids :\n tests_collect.append(\n db.session.query( StoredTest ).filter( StoredTest.test_id == test_id ).one()\n )\n return tests_collect\n\ndef add_job(request) :\n title = html.title([ _('Add job'), _('Jobs'), 'Ganymede' ])\n request.page = \"job.add\"\n\n if request.method == 'POST' :\n name = request.POST[ 'name' ]\n repo = request.POST[ 'repo' ]\n branch = request.POST[ 'branch' ]\n users = request.POST[ 'users' ]\n deploy = request.POST[ 'deploy' ]\n github = request.POST[ 
'github' ]\n job_tests = json_to_tests(request.POST[ 'tests' ])\n whose = request.user.username\n job = Job(\n name=name, repo=repo,\n branch=branch, tests=job_tests,\n users=users, deploy=deploy,\n whose=whose, github=github\n )\n\n job.envs = json_to_envs( request.POST[ 'envs' ] )\n\n db.session.add(job)\n\n for env in job.envs :\n db.session.add( env )\n\n json_resp = json.dumps( { \"status\" : \"ok\" } )\n return HttpResponse(json_resp, mimetype=\"application/json\")\n else :\n tests_data = tests.gather_tests_info()\n users_data = fetch_users_info()\n return render_to_response( 'job/add/add_job.html', {\n 'title' : title,\n 'tests' : tests_data,\n 'users' : users_data,\n 'repos' : [],\n 'branches' : ['develop', 't-kz']\n }, context_instance=RequestContext(request) )\n\ndef list_jobs(request) :\n title = html.title( [ _('Jobs'), 'Ganymede' ] )\n\n jobs = []\n for job in db.session.query(Job).all() :\n try :\n last_task = db.session.query(Task).\\\n filter(Task.job_id == job.job_id).\\\n order_by(Task.add_time.desc()).limit(1).one()\n except :\n last_task = None\n\n jobs.append( {\n \"job_id\" : job.job_id,\n \"name\" : job.name,\n \"repo\" : job.repo,\n \"branch\" : job.branch,\n \"whose\" : job.whose if not job.whose is None else \"-\",\n \"last_status\" : _( \"Not executed\" ) if (last_task is None) else last_task.status.capitalize(),\n \"last_task_id\" : None if (last_task is None) else last_task.task_id\n } )\n return render_to_response( 'job/list/list.html', {\n 'title' : title,\n 'jobs' : jobs\n }, context_instance=RequestContext(request) )\n\ndef remove_job(request) :\n job_id = request.POST[ 'job_id' ]\n db.session.query(Job).filter(Job.job_id == job_id).delete()\n\n json_resp = json.dumps( { \"status\" : \"ok\" } )\n return HttpResponse(json_resp, mimetype=\"application/json\")\n\ndef update_job(request, job_id) :\n title = html.title( [ _('Update job') + \" #\" + str(job_id), _('Jobs'), 'Ganymede' ] )\n\n if request.method == 'POST' :\n job = 
db.session.query(Job).filter( Job.job_id == int(job_id) ).one()\n job.name = request.POST[ 'name' ]\n job.repo = request.POST[ 'repo' ]\n job.branch = request.POST[ 'branch' ]\n job.exec_time = request.POST[ 'exec_time' ] if request.POST[ 'exec_time' ] != \"\" else None\n job.tests = json_to_tests( request.POST[ 'tests' ] )\n job.users = request.POST[ 'users' ]\n job.deploy = request.POST[ 'deploy' ] if not request.POST[ 'deploy' ] == u'' else None\n job.github = request.POST[ 'github' ]\n\n for env in job.envs :\n db.session.delete( env )\n\n db.session.commit()\n\n new_envs = json_to_envs( request.POST[ 'envs' ], job.job_id )\n\n for env in new_envs :\n job.envs.append( env )\n\n try :\n db.session.commit()\n json_resp = json.dumps( { \"status\" : \"ok\" } )\n except Exception as e :\n db.session.rollback()\n json_resp = json.dumps( { \"status\" : \"error\", \"content\" : str(e) } )\n\n return HttpResponse(json_resp, mimetype=\"application/json\")\n else :\n job_model = db.session.query( Job ).filter( Job.job_id == job_id ).one()\n job = {\n \"job_id\" : job_model.job_id,\n \"name\" : job_model.name,\n \"repo\" : job_model.repo,\n \"branch\" : job_model.branch,\n \"envs\" : job_model.envs,\n \"exec_time\" : job_model.exec_time.strftime(\"%H:%M\") if not job_model.exec_time is None else \"\",\n \"tests\" : job_model.tests,\n \"deploy\" : job_model.deploy,\n \"github\" : job_model.github\n }\n\n tests_ids = []\n for x in job_model.tests :\n tests_ids.append( x.test_id )\n\n tests_data = tests.gather_tests_info( tests_ids )\n users_data = fetch_users_info( job_model )\n\n return render_to_response(\n 'job/update/update_job.html', {\n 'title' : title,\n 'job' : job,\n 'users' : users_data,\n 'tests' : tests_data,\n 'repos' : [],\n 'branches' : ['develop', 't-kz']\n }, context_instance=RequestContext(request) 
)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":651,"cells":{"__id__":{"kind":"number","value":14285061246046,"string":"14,285,061,246,046"},"blob_id":{"kind":"string","value":"e08375f6d23fa98b08bde59e3650754be7820f99"},"directory_id":{"kind":"string","value":"38a7ff3c3c1c0c78473266ab271b27d2d02f383e"},"path":{"kind":"string","value":"/ccorr_mat_test.py"},"content_id":{"kind":"string","value":"3ddc7556ed92367f8fe88b15b0a8437283422067"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"mailletf/brain-parcellater"},"repo_url":{"kind":"string","value":"https://github.com/mailletf/brain-parcellater"},"snapshot_id":{"kind":"string","value":"7a95e6c88e7cda8265c69aebbe2fb121428c9213"},"revision_id":{"kind":"string","value":"e7d1415192c224712db2c459e167481ffddee1aa"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T10:57:15.085794","string":"2020-05-18T10:57:15.085794"},"revision_date":{"kind":"timestamp","value":"2014-08-01T18:56:05","string":"2014-08-01T18:56:05"},"committer_date":{"kind":"timestamp","value":"2014-08-01T18:56:05","string":"2014-08-01T18:56:05"},"github_id":{"kind":"number","value":10571730,"string":"10,571,730"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"k
ind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nimport numpy as N\nimport scipy.spatial.distance as D\nimport scipy.sparse as S\nimport scipy.stats as SS\nimport conn_ccorr_mat as CCM\n\nimport unittest\n\nsize = 4\n\nclass CrossCorrelationTest(unittest.TestCase):\n def setUp(self):\n def computeCrossCorrMat(m):\n cm = N.zeros(m.shape)\n for i in xrange(size):\n for j in xrange(size):\n #cm[i,j] = N.correlate(m[i], m[j])\n cm[i,j] = 1 - D.cosine(m[i], m[j])\n #cm[i,j] = SS.pearsonr(m[i], m[j])[0]\n return cm\n\n self.mat = N.random.random((size,size))\n self.cc_mat = computeCrossCorrMat(self.mat)\n\n self.bin_mat = N.round(self.mat)\n self.cc_bin_mat = computeCrossCorrMat(self.bin_mat)\n\n def test_nonBinaryTest(self):\n test_mat = [S.dok_matrix((1,size), int) for x in xrange(size)]\n for i in xrange(size):\n for j in xrange(size):\n test_mat[i][0,j] = self.mat[i,j]\n\n cc_test_mat = CCM.cross_correlate_matrix_nonbinary(test_mat)\n N.testing.assert_almost_equal(cc_test_mat, self.cc_mat)\n\n\n def test_binaryTest(self):\n test_mat = [S.dok_matrix((1,size), int) for x in xrange(size)]\n for i in xrange(size):\n for j in xrange(size):\n if self.mat[i,j]>0:\n test_mat[i][0,j] = self.bin_mat[i,j]\n\n cc_test_mat = CCM.cross_correlate_matrix_binary(test_mat)\n N.testing.assert_almost_equal(cc_test_mat, self.cc_bin_mat)\n\n \n\nif __name__ == '__main__':\n 
unittest.main()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":652,"cells":{"__id__":{"kind":"number","value":7344394095729,"string":"7,344,394,095,729"},"blob_id":{"kind":"string","value":"acf6ba39039944ffc94d45306620a0506e2be5d3"},"directory_id":{"kind":"string","value":"d7ee1fd4ec2d1a4704c7eaed798962868aebd290"},"path":{"kind":"string","value":"/myproject/app/announcement/views.py"},"content_id":{"kind":"string","value":"3b0b0fd15c2482db93bef2de3318ef7bdcf7c55f"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"wwj718/sklok"},"repo_url":{"kind":"string","value":"https://github.com/wwj718/sklok"},"snapshot_id":{"kind":"string","value":"04146259fd102390875ed8ec4fa2840df2644f9c"},"revision_id":{"kind":"string","value":"fbd5cbf3826d7d01cee1a68493d9c4e6df75491f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T14:39:11.142418","string":"2021-01-22T14:39:11.142418"},"revision_date":{"kind":"timestamp","value":"2013-11-07T13:19:29","string":"2013-11-07T13:19:29"},"committer_date":{"kind":"timestamp","value":"2013-11-07T13:19:29","string":"2013-11-07T13:19:29"},"github_id":{"kind":"number","value":13374341,"string":"13,374,341"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count"
:{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding=utf-8\nfrom django.shortcuts import render_to_response\nfrom models import Announcement\nfrom django.template import RequestContext\nfrom django.shortcuts import get_object_or_404 \n\n'''\ndef show_all_news(request): #包含分页功能\n\tlist_items = News.objects.filter(categories='N')\t\n\tvariables = RequestContext(request,{'list_items':list_items})\n\treturn render_to_response(\"Introduction.html\",variables) \n'''\n#分类取:\ndef get_by_id(request,id): #包含分页功能\n\tid=int(id)\n\tannouncement = get_object_or_404(Announcement, pk=id)\n\tvariables = RequestContext(request,{'announcement':announcement})\n\treturn render_to_response(\"announcement_detail.html\",variables)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":653,"cells":{"__id__":{"kind":"number","value":11038065998556,"string":"11,038,065,998,556"},"blob_id":{"kind":"string","value":"3d80989ef44e4cb6b0397cc90f62c96987a6e1b4"},"directory_id":{"kind":"string","value":"a704892d86252dde1bc0ff885ea5e7d23b45ce84"},"path":{"kind":"string","value":"/addons-community/partner_category_view/__terp__.py"},"content_id":{"kind":"string","value":"9942153504e544ddb60d15affbcf8f28038b5106"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"oneyoung/openerp"},"repo_url":{"kind":"string","value":"https://github.com/oneyoung/openerp"},"snapshot_id":{"kind":"string","value":"5685bf8cce09131afe9b9b270f6cfadf2e66015e"},"revision_id":{"kind":"string","value":"7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-03-31T18:22:41.917881","string":"2016-03-31T18:22:41.917881"},"revision_date":{"kind":"timestamp","value":"2013-05-24T06:10:53","string":"2013-05-24T06:10:53"},"committer_date":{"kind":"timestamp","value":"2013-05-24T06:10:53","string":"2013-05-24T06:10:53"},"github_id":{"kind":"number","value":9902716,"string":"9,902,716"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"{\n 'name': 'Partner Category View',\n 'version': '1.0',\n 'category': 'Generic Modules/Others',\n 'description': \"\"\"This module add list of partner to the partner category view.\n\nWarning current version of OpenERP server (22nd of April 2010) have a bug (the view is mixed up).\nhttps://bugs.launchpad.net/openobject-server/+bug/455547\n\nTo quick fix it:\ngo into you server directory\nand then\n\nif server version is 5.0 do:\nwget http://launchpadlibrarian.net/45016785/patch\npatch -p0 < patch\n\nif server version is trunk do:\nwget 
http://launchpadlibrarian.net/45017686/trunk.patch\npatch -p0 < trunk.patch\n\"\"\",\n 'author': 'Nicolas De Smet',\n 'website': 'http://ndesmet.be',\n 'depends': ['base'],\n 'update_xml': ['view.xml'],\n 'installable': True,\n 'active': False,\n}\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":654,"cells":{"__id__":{"kind":"number","value":10101763126357,"string":"10,101,763,126,357"},"blob_id":{"kind":"string","value":"6538982a64732d353dce9fbbfa54266cc671ea0c"},"directory_id":{"kind":"string","value":"118c3d42606b5c582485bfd437fc514dee78b226"},"path":{"kind":"string","value":"/vt.py"},"content_id":{"kind":"string","value":"4327981d78b2b981cd8a7b6c172921d6639d5175"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"investlab/virustotal-scanner-python"},"repo_url":{"kind":"string","value":"https://github.com/investlab/virustotal-scanner-python"},"snapshot_id":{"kind":"string","value":"0e00f816bb246eb4a7d55fd58e8f2d62e4ea1e63"},"revision_id":{"kind":"string","value":"fd6f3cda45d1f4a631040e9a7a5ed223396ea5c9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2022-08-12T15:56:28.974684","string":"2022-08-12T15:56:28.974684"},"revision_date":{"kind":"timestamp","value":"2014-12-07T21:34:21","string":"2014-12-07T21:34:21"},"committer_date":{"kind":"timestamp","value":"2014-12-07T21:34:21","string":"2014-12-07T21:34:21"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created
_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import json\nimport urllib\nimport urllib2\nimport hashlib\nimport postfile\nimport time\nimport sys\n\n########################################################################## \n\n#global var error codes used in three classes below\nIS_BATCH_ERROR = 2\nINVALID_TYPE_ERROR = 3\nBAD_RESPONSE_ERROR = 4\nSTAT_FAILED = 5\nINVALID_RESULTS_ERROR = 6\n\n########################\n# fileinfo class #\n# #\n########################\nclass fileinfo():\n \"\"\"\n fileinfo class contains static methods to return hash values to \n Scan and VirusTotal classes below\n \"\"\"\n #md5hash - reurn md5 hash of file \n @staticmethod\n def md5hash(path):\n block_size = 128\n f = open(path, \"rb\")\n md5 = hashlib.md5()\n while True:\n data = f.read(block_size)\n if not data:\n break\n md5.update(data)\n f.close()\n return md5.hexdigest()\n \n\n########################\n# Virus Total Class #\n# #\n########################\nclass VirusTotal(object):\n\n def __init__(self, api_key, resource=\"NA\", path=\"NA\"):\n \"\"\" \n VirusTotal object which interacts directly with the VirusTotal public\n API\n Should not be used directly:\n Use Scan object below to work with files/urls/reports\n \n attributes: \n resource: can be scan_id/hash (optional)\n path: full path to single file (optional)\n api_key (required)\n \"\"\"\n self.resource = resource\n self.path = path\n self.api_key = api_key\n \n #######################################################################\n # _submit_resource #\n # internal method to submit values to VT to query info #\n # takes method argument which can be either 'report' or 'rescan' #\n 
#######################################################################\n def _submit_resource(self, method):\n url = \"https://www.virustotal.com/vtapi/v2/file/\" + method\n parameters = {\"resource\": self.resource,\n \"apikey\": self.api_key}\n \n #send request\n data = urllib.urlencode(parameters)\n req = urllib2.Request(url, data)\n response = urllib2.urlopen(req)\n if(response.getcode() != 200):\n return BAD_RESPONSE_ERROR\n json_out = response.read()\n \n return json.loads(json_out)\n \n #######################################################################\n # _is_batch #\n # internal method to determine if json output from VT #\n # was a batch scan or single #\n # requires json output from vt as argument #\n #######################################################################\n @staticmethod\n def _isbatch(json_output):\n if(type(json_output) == list):\n return True\n else:\n return False\n \n #######################################################################\n # _is_notbatch #\n # internal method to determine if json output from VT #\n # was a batch scan or single #\n # requires json output from vt as argument #\n #######################################################################\n @staticmethod\n def _isnotbatch(json_output):\n if(type(json_output) == dict):\n return True\n else:\n return False\n \n #######################################################################\n # has_file #\n # method to determine if VirusTotal has file in it's DB #\n # #\n # Note: this will send a web request to virustotal everytime # \n # function is called #\n #######################################################################\n def has_file(self):\n method = \"report\"\n json_out = self._submit_resource(method)\n #batch - list\n #reg - dict\n if(self._isbatch(json_out)):\n return IS_BATCH_ERROR\n if(self._isnotbatch(json_out)):\n if(json_out[\"response_code\"]) == 1:\n return True\n else:\n return False\n else:\n return INVALID_TYPE_ERROR\n \n 
#######################################################################\n # submit_file #\n # method to submit file to VT for analysis. #\n # This will return scan_id immediatley but will take several minutes#\n # to analyze fully - see query_status() #\n # #\n # Returns dictionary with status code, message, and scan_id # \n # #\n # Note: this will send a web request to virustotal everytime # \n # function is called #\n #######################################################################\n def submit_file(self):\n host = \"www.virustotal.com\"\n selector = \"http://www.virustotal.com/vtapi/v2/file/scan\"\n fields = [(\"apikey\", self.api_key)]\n file_to_send = open(self.path, \"rb\").read()\n files = [(\"file\", self.path, file_to_send)]\n json_out = postfile.post_multipart(host, selector, fields, files)\n json_out = json.loads(json_out)\n \n response = json_out[\"response_code\"]\n msg = json_out[\"verbose_msg\"]\n if(response != 1):\n return_json = {\"code\":0,\"val\":msg}\n return return_json\n\n elif(response == 1):\n return_json = {\"code\":1,\"val\":msg,\"scan_id\":json_out[\"scan_id\"]}\n return return_json\n\n #######################################################################\n # rescan_file #\n # method to rescan file already in DB using scan_id or hash #\n # This will return scan_id immediatley but will take several minutes#\n # to analyze fully - see query_status() #\n # #\n # Returns scan_id # \n # #\n # Note: this will send a web request to virustotal everytime # \n # function is called #\n #######################################################################\n def rescan_file(self):\n method = \"rescan\"\n json_output = self._submit_resource(method)\n if(json_output[\"response_code\"]==1):\n return json_output[\"scan_id\"]\n else:\n return BAD_RESPONSE_ERROR\n \n #######################################################################\n # query_status #\n # check status of scan already submiited using scan_id/hash #\n # #\n # Returns 1 if 
successful # \n # #\n # Note: this will send a web request to virustotal everytime # \n # function is called and can take several minutes to complete #\n #######################################################################\n def query_status(self):\n found = 0\n count = 0\n method = \"report\"\n while (found == 0):\n count += 1\n time.sleep(60)\n json_out = self._submit_resource(method)\n if(count > 6):\n return STAT_FAILED\n if((json_out[\"response_code\"] == 1 and \n json_out[\"verbose_msg\"] != \"Scan request successfully queued, come back later for the report\")):\n found = 1\n return 1\n \n #######################################################################\n # get_report #\n # method to get report from hash/scan_id #\n # #\n # Returns raw json report # \n # #\n # Note: this will send a web request to virustotal everytime # \n # function is called #\n #######################################################################\n def get_report(self):\n method = \"report\"\n report_json = self._submit_resource(method)\n if(report_json == BAD_RESPONSE_ERROR):\n print(\"failed at get_report() - bad response from VT\")\n elif not report_json:\n return INVALID_RESULTS_ERROR\n else:\n return report_json\n \n #######################################################################\n # gather_report_details #\n # internal method to get positive vt result details #\n # such as vendor names,malware names, and dates #\n # #\n # Returns dictonary of values if successful # \n # Returns 1 if no vendors flagged the resource #\n # # \n #######################################################################\n @staticmethod \n def _gather_report_details(json):\n if(json['response_code'] == 1):\n scans = json['scans']\n \n failed_flag = 0\n detect_flag = 0\n vendors = []\n detect_list = []\n not_detect_list = []\n output = {}\n \n for key in scans:\n vendors.append(key)\n for val in vendors:\n detect = scans[val][\"detected\"]\n if(detect):\n detect_flag = 1\n 
detect_list.append(val)\n else:\n failed_flag = 1\n not_detect_list.append(val)\n \n if(detect_flag == 0):\n return 1\n elif(failed_flag):\n for val in detect_list:\n output[val] = {\"version\":scans[val]['version'], \"result\":scans[val]['result'], \"update\":scans[val]['update']}\n output['detect_list'] = detect_list\n output['failed'] = 1\n output['failedlist_key'] = {\"failed_list\":not_detect_list}\n else:\n for val in detect_list:\n output[val] = {\"version\":scans[val]['version'], \"result\":scans[val]['result'], \"update\":scans[val]['update']}\n output['failed'] = 0\n return output\n \n else:\n return 0\n \n #######################################################################\n # make_awesome_report #\n # Print full scan report (uses _gather_report_details) #\n # #\n #######################################################################\n def make_awesome_report(self):\n raw_json = self.get_report()\n \n if(raw_json == INVALID_RESULTS_ERROR): #this will happen if invalid hash is submitted\n print \"Resouce not found - invalid hash probably\"\n elif(raw_json['response_code'] == 1):\n #display status information\n print(\"Scan Date \" + str(raw_json['scan_date']))\n print(\"sha1: \" + str(raw_json['sha1']) + \" \\n\" + \"md5: \" + str(raw_json['md5']))\n ratio = float(raw_json['positives'])/float(raw_json['total']) * float(100)\n print(\"Positive results: \" + repr(raw_json['positives']) + '\\n' + \"Total Results: \" + repr(raw_json['total'])) \n print(\"Detection rate: {0:.0f}%\".format(ratio))\n \n #print detailed report\n output = self._gather_report_details(raw_json)\n if(output == 1):\n print \"No vendors flagged this resource as malicious\"\n elif(output != 0):\n vendor = output['detect_list']\n for val in vendor:\n print \"\\nVendor: %s \\n Result: %s Version: %s Update: %s \\n\" % (val, str(output[val]['result']), str(output[val]['version']),str(output[val]['update']))\n if(output['failed']): \n print \"The following vendors did not flag this as 
malicious: \\n %s\" % (str(\" -- \".join(output['failedlist_key']['failed_list'])))\n else:\n print \"ERROR: Not found.\" \n \n #Resource not found in DB \n elif(raw_json['response_code'] == 0):\n print \" Resource not found in Virus Total database \"\n print \" resource: \" + raw_json['resource']\n \n \n########################\n# Scan Class #\n# #\n######################## \nclass Scan(object):\n \n def __init__(self,api_key,path=\"NA\"):\n \"\"\" \n This object is a wrapper for the 'VirusTotal' object which\n defines most of the logic regarding VT scanning\n \n This object can be called directly to invoke scanning functions.\n available methods:\n object.scan_file(api_key,path)\n object.get_report(api_key,hash)\n \n attributes:\n path to single file (optional)\n api key (required)\n \"\"\"\n self.path = path\n self.api_key = api_key\n \n \n #######################################################################\n # _get_filehash #\n # method to get file hash #\n # #\n # Returns md5hash # \n # #\n #######################################################################\n def _get_filehash(self):\n md5hash = fileinfo.md5hash(self.path)\n return md5hash\n \n #######################################################################\n # scan_file - call this directly #\n # method to submit file for analysis - set_verbose flag to 1 #\n # for more info printed, default is 0 #\n # #\n # If file exists in DB - file will not be submitted or rescanned #\n # #\n # Prints report # \n # #\n #######################################################################\n def scan_file(self,verbose_flag=0):\n md5hash = self._get_filehash()\n if(verbose_flag): print(\"md5 hash of file: \" + md5hash)\n vt = VirusTotal(self.api_key,md5hash,self.path)\n has_file = vt.has_file()\n if(has_file == IS_BATCH_ERROR or has_file == INVALID_TYPE_ERROR):\n print(\"scan_file() failed at has_file()\")\n elif(has_file == True):\n if(verbose_flag): print(\"VT already has file -- querying report:\")\n \n 
vt.make_awesome_report()\n else:\n if(verbose_flag): print(\"submitting file - this could take several minutes\")\n return_dict = vt.submit_file()\n if(return_dict['code']== 1):\n scan_id = return_dict['scan_id']\n if(verbose_flag): print(\"scan_id: \" + scan_id)\n if(vt.query_status() != STAT_FAILED):\n \n vt.make_awesome_report()\n else:\n print(\"scan_file() failed because we were unable to query the status of the file sent (note that file was sent)\")\n else:\n print(\"scan_file() failed because we were unable to send file -- \" + return_dict['msg'])\n \n #######################################################################\n # scan_report - call this directly #\n # method to submit hash to get report- set_verbose flag to 1 #\n # for more info printed, default is 0 #\n # #\n # If hash does not exist in DB - exit with error #\n # #\n # Prints report # \n # #\n #######################################################################\n def scan_report(self,hash,verbose_flag=0):\n if(verbose_flag): print(\"resource to search: \" + hash + '\\n')\n vt = VirusTotal(self.api_key,hash)\n vt.make_awesome_report()\n \n########################################################################## \n \n\ndef example():\n api = \"\"\n \n resource = \"d74b1df3ab16b36d48850f5d57b346b0\" #dexter malware hash\n path = \"testfile.txt\"\n \n #upload/scan file\n scanner = Scan(api,path)\n scanner.scan_file(verbose_flag=1)\n \n #check report based on file hash or scanid\n scanner2 = Scan(api)\n scanner2.scan_report(resource,verbose_flag=1)\n \n \ndef main():\n print \"This script contains the class implementations to interact with VirusTotal - do not call directly\"\nif __name__ == \"__main__\":\n 
main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":655,"cells":{"__id__":{"kind":"number","value":13812614865636,"string":"13,812,614,865,636"},"blob_id":{"kind":"string","value":"e999c6117b005784087bf3ab01f62aae28d911cb"},"directory_id":{"kind":"string","value":"2dc33f2fd71c1a0063183f26751a8ef4a2f2cfe9"},"path":{"kind":"string","value":"/backend/documents/clustering.py"},"content_id":{"kind":"string","value":"c47dd661b2c3012042cbfedb895cafbc464ae36a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ruiaf/sumnews"},"repo_url":{"kind":"string","value":"https://github.com/ruiaf/sumnews"},"snapshot_id":{"kind":"string","value":"40c6ab773738ec3b75474372d9a8bdab85022a4b"},"revision_id":{"kind":"string","value":"a93e0757046015b5fa785c6fcf95467b505a6912"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-08T02:35:21.091167","string":"2016-09-08T02:35:21.091167"},"revision_date":{"kind":"timestamp","value":"2014-04-05T17:17:30","string":"2014-04-05T17:17:30"},"committer_date":{"kind":"timestamp","value":"2014-04-05T17:17:30","string":"2014-04-05T17:17:30"},"github_id":{"kind":"number","value":17527511,"string":"17,527,511"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"n
ull"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import logging\nimport threading\nimport time\nimport settings\nimport utils\n\n\nclass ClusterMaker(threading.Thread):\n def __init__(self, index, *args, **kwargs):\n self.index = index\n self.comparator = DocComparator(self.index)\n self.objects = []\n self.responsibility = {}\n self.availability = {}\n self.lock = threading.Lock()\n self.add_list = []\n self.add_list_lock = threading.Lock()\n threading.Thread.__init__(self, *args, **kwargs)\n\n def add(self, doc):\n self.add_list_lock.acquire()\n self.add_list.append(doc)\n self.add_list_lock.release()\n\n def process_add_list(self):\n self.add_list_lock.acquire()\n self.lock.acquire()\n\n ts = time.time()\n for obj in self.add_list:\n self.objects.append(obj)\n self.responsibility[obj] = {}\n self.availability[obj] = {}\n self.responsibility[obj][obj] = 0.0\n self.availability[obj][obj] = 0.0\n for other_doc in self.objects:\n if self.comparator.similarity(obj, other_doc) >= settings.CLUSTERING_MINIMUM_SIMILARITY:\n self.responsibility[obj][other_doc] = 0.0\n self.availability[obj][other_doc] = 0.0\n self.responsibility[other_doc][obj] = 0.0\n self.availability[other_doc][obj] = 0.0\n\n te = time.time()\n if len(self.add_list):\n logging.info(\"Finished adding %d objects to clustering in %2.2f seconds\", len(self.add_list), te - ts)\n\n self.add_list = []\n self.add_list_lock.release()\n self.lock.release()\n\n def clear(self):\n logging.info(\"Clearing clusters\")\n self.lock.acquire()\n self.add_list_lock.acquire()\n self.comparator = DocComparator(self.index)\n self.objects = []\n self.responsibility = {}\n self.availability = {}\n self.add_list = []\n self.lock.release()\n self.add_list_lock.release()\n\n def run(self):\n while True:\n ts = time.time()\n self.iterate_affinity()\n te = time.time()\n\n logging.info(\"Finished affinity iteration for: %d documents in %2.2f 
seconds\", len(self.objects), te - ts)\n\n self.process_add_list()\n time.sleep(settings.CLUSTERING_INTERVAL)\n\n def run_for_unittest(self):\n for i in range(10):\n ts = time.time()\n self.iterate_affinity()\n te = time.time()\n logging.info(\"Finished affinity iteration for: %d documents in %2.2f seconds\", len(self.objects), te - ts)\n self.process_add_list()\n\n def iterate_affinity(self):\n self.lock.acquire()\n for i in self.objects:\n values = utils.max2((self.availability[i][k_prime] + self.comparator.similarity(i, k_prime),\n k_prime) for k_prime in self.responsibility[i].keys())\n\n for k in self.responsibility[i].keys():\n sim = self.comparator.similarity(i, k)\n\n max_value = values[0][0]\n if values[0][1] is k:\n if len(values) > 2: max_value = values[1][0]\n else: max_value = 0.0\n\n self.responsibility[i][k] = ((settings.CLUSTERING_DUMPING_FACTOR * self.responsibility[i][k]) +\n (1 - settings.CLUSTERING_DUMPING_FACTOR) * (sim - max_value))\n\n for k in self.objects:\n sum_value = 0.0\n for i_prime in self.responsibility[k].keys():\n sum_value += max(0.0, self.responsibility[i_prime][k])\n\n for i in self.responsibility[k].keys():\n self.availability[i][k] = (settings.CLUSTERING_DUMPING_FACTOR * self.availability[i][k] +\n (1 - settings.CLUSTERING_DUMPING_FACTOR) *\n min(0.0, (self.responsibility[k][k] +\n sum_value -\n max(0.0, self.responsibility[i][k]) -\n max(0.0, self.responsibility[k][k]))))\n\n self.availability[k][k] = ((settings.CLUSTERING_DUMPING_FACTOR *\n self.availability[k][k]) +\n ((1 - settings.CLUSTERING_DUMPING_FACTOR) *\n (sum_value - max(0.0, self.responsibility[k][k]))))\n\n for i in self.objects:\n if not i.exemplar is i and i in i.exemplar.children:\n i.exemplar.children.remove(i)\n\n exemplar = max((self.availability[i][k_prime] + self.responsibility[i][k_prime], k_prime)\n for k_prime in self.availability[i].keys())\n\n i.exemplar = exemplar[1]\n i.responsibility_parent = self.responsibility[i][exemplar[1]]\n 
i.availability_parent = self.availability[i][exemplar[1]]\n i.similarity_parent = self.comparator.similarity(i, exemplar[1])\n if not i.exemplar is i:\n i.exemplar.children.append(i)\n\n self.lock.release()\n\nclass DocComparator(object):\n def __init__(self, inverted_index):\n self.index = inverted_index\n self.cache = {}\n\n def similarity(self, doc1, doc2):\n if doc1 is doc2:\n return settings.CLUSTERING_DEFAULT_PREFERENCE\n\n if (doc1, doc2) not in self.cache:\n intersection_words = doc1.words() & doc2.words()\n other_words = doc1.words() ^ doc2.words()\n intersection_word_weight = sum(self.index.tf_idf(word) for word in intersection_words)\n other_words_weight = sum(self.index.tf_idf(word) for word in other_words)\n sim = intersection_word_weight / (intersection_word_weight + other_words_weight + 0.01)\n\n if sim >= settings.CLUSTERING_MINIMUM_SIMILARITY:\n self.cache[(doc1, doc2)] = sim\n self.cache[(doc2, doc1)] = sim\n\n return sim\n\n return self.cache[(doc1, doc2)]\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":656,"cells":{"__id__":{"kind":"number","value":15513421922416,"string":"15,513,421,922,416"},"blob_id":{"kind":"string","value":"feb6a6af974cc8e469aeffb0f0b4282fb68531d0"},"directory_id":{"kind":"string","value":"a555236d3ae5725e37aa6b5c5d8b50593f74ed69"},"path":{"kind":"string","value":"/python/while.py"},"content_id":{"kind":"string","value":"1fdd940f65af43d8ea944af988a0b56cfb9e5bd0"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"xc145214/practise"},"repo_url":{"kind":"string","value":"https://github.com/xc145214/practise"},"snapshot_id":{"kind":"string","value":"82b2e2adeafbf3aa29dcdef10613bec82e52a0b8"},"revision_id":{"kind":"string","value":"959f6882ae7d367353ea172972298d29ec713219"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T12:08:23.951316","string":"2016-09-06T12:08:23.951316"},"revision_date":{"kind":"timestamp","value":"2014-12-15T09:42:58","string":"2014-12-15T09:42:58"},"committer_date":{"kind":"timestamp","value":"2014-12-15T09:42:58","string":"2014-12-15T09:42:58"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\n\"\"\"\n\tusage:while 循环demo\n\"\"\"\n\nnumber = 23\nrunning = True\t#true大写\n\nwhile running:\n\tguess = int(input(\"please input a int number:\"))\n\tif guess == number:\n\t\tprint('Congradulations, you guessed it')\n\t\trunning = False\t#stop the loop\n\telif guess < number:\n\t\tprint('No, it is a little higher than you guess')\n\telse:\n\t\tprint(\"No, it is a little lower than you guess\")\nelse:\n\tprint(\"the while loop is 
over\")\nprint(\"Down\")\n\t\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":657,"cells":{"__id__":{"kind":"number","value":5403068860103,"string":"5,403,068,860,103"},"blob_id":{"kind":"string","value":"09f820e7af3095637395471af50fea7d7cee25e8"},"directory_id":{"kind":"string","value":"db2b114efae7a1b75a42680d19429b71f8240436"},"path":{"kind":"string","value":"/directory_app.py"},"content_id":{"kind":"string","value":"b040481487908e7dfd54a83078089b7532e2190d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ikon42/reddit-unite"},"repo_url":{"kind":"string","value":"https://github.com/ikon42/reddit-unite"},"snapshot_id":{"kind":"string","value":"2b036f3f904977b4a4b99b7c6f17c1ed8eb436e2"},"revision_id":{"kind":"string","value":"1fc42de11eeb0180e734bbc822288ccaa62a15a2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-13T21:07:19.713565","string":"2020-04-13T21:07:19.713565"},"revision_date":{"kind":"timestamp","value":"2010-11-11T03:24:27","string":"2010-11-11T03:24:27"},"committer_date":{"kind":"timestamp","value":"2010-11-11T03:24:27","string":"2010-11-11T03:24:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{
"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport web\nimport template\nimport util\n\nfrom models import User\n\nfrom forms import search_form\n\nurls = (\n '/search/?', 'search',\n '', 'redis',\n '/([a-zA-Z0-9_]+)?/?', 'index',\n '/map/(.*)/?', 'user_map',\n)\n\nclass index:\n '''Displays a user list or a list of user lists'''\n def GET(self, name):\n t = template.env.get_template('user_list.html')\n list_list = [ # list_list should be generated somehow\n 'global', 'Lists all users from all locales.',\n ]\n user_list = []\n if (name is None):\n return t.render(util.data(\n title='User Lists',\n instructions='''Users are grouped into \"user lists\" that group them geographically.\n These lists are automatically generated and will change based upon\n the relative size of various user populations.''',\n list_list=map(\n lambda (li): {'name': li[0], 'scope': li[1]},\n zip(*[list_list[i::2] for i in range(2)]),\n ),\n ))\n elif (name.lower() in list_list[::2]):\n for i in User.all():\n x = util.strip_private_data(i)\n if x is not None:\n user_list.append(x)\n else:\n raise web.notfound()\n return t.render(util.data(\n title='Display all members',\n instructions='''Public member listing''',\n users=user_list,\n ))\n\n\nclass user_map:\n '''Generates all different kinds of maps!'''\n def GET(self, name):\n t = template.env.get_template('user_map.html')\n return t.render(util.data())\n\n\nclass search:\n '''Allows users to search for other users based on public information'''\n def GET(self):\n q = web.input()\n t = template.env.get_template('search.html')\n f = search_form()\n try:\n if q.query:\n results = []\n user_list = []\n query = q.query.split(' ')\n for i in User.all():\n x = util.strip_private_data(i)\n if x is not None:\n user_list.append(x)\n for p in user_list:\n for i in query:\n if i in dict(p).values():\n results.append(p)\n return t.render(util.data(\n 
title='Find who you\\'re looking for!',\n form=f,\n results=results if results else None,\n ))\n else:\n web.debug('q.query doesn\\'t exist and it didn\\'t thow an exception!')\n raise Warning('Odd, huh?')\n except:\n return t.render(util.data(\n title='Find who you\\'re looking for!',\n form=f,\n ))\n\n\nclass redis:\n def GET(self):\n raise web.seeother('/')\n\n\napp = web.application(urls, locals())\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":658,"cells":{"__id__":{"kind":"number","value":6064493859900,"string":"6,064,493,859,900"},"blob_id":{"kind":"string","value":"33fa695d11439397d0c5a2ff77827a728c6c0f80"},"directory_id":{"kind":"string","value":"c9d677cfd20117259680dc4cf3420274519be768"},"path":{"kind":"string","value":"/buildUT2004Mod.py"},"content_id":{"kind":"string","value":"67e7b74d93d9b10941b88d3071b32d07b7d7e341"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"brobinson9999/pyun-build"},"repo_url":{"kind":"string","value":"https://github.com/brobinson9999/pyun-build"},"snapshot_id":{"kind":"string","value":"c143f092a6d175c583e3f1f2768fedd8c0a075d2"},"revision_id":{"kind":"string","value":"70e226da546a6f2b650bfa31582c6bfbd7d35b35"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T16:21:04.753731","string":"2021-01-01T16:21:04.753731"},"revision_date":{"kind":"timestamp","value":"2011-09-11T17:37:20","string":"2011-09-11T17:37:20"},"committer_date":{"kind":"timestamp","value":"2011-09-11T17:37:20","string":"2011-09-11T17:37:20"},"github_id":{"kind":"number","value":645196,"string":"645,196"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nfrom buildUTMod import *\n\ndef buildUT2k4modINI(filePath,modName,modTitle=\"DummyBuild\",modLogo=\"DummyBuildLogo\",modDesc=\"DummyBuildDesc\",modCmdLine=\"DummyBuildCmdLine\",modURL=\"DummyURL\"):\n fileData = \"[MOD]\\nModTitle=\" + modTitle + \"\\nModLogo=\" + modLogo + \"\\nModDesc=\" + modDesc + \"\\nModCmdLine=\" + modCmdLine + \"\\nModURL=\" + modURL + \"\\n\"\n\n bruteIO.removeFile(filePath)\n bruteIO.writeFile(filePath, fileData)\n \ndef buildUT2k4modSystemINI(baseINIPath, filePath, modName, dependencies, nondependencies):\n fileData = 
bruteIO.readFile(baseINIPath)\n \n editPackagesString = \"\"\n for dependency in dependencies:\n editPackagesString = editPackagesString + \"EditPackages=\" + dependency + \"\\n\"\n editPackagesString = editPackagesString + \"EditPackages=\" + modName + \"\\n\"\n \n searchString = \"CutdownPackages=Core\"\n modifiedFileData = fileData.replace(searchString, editPackagesString + searchString)\n\n for nondependency in nondependencies:\n modifiedFileData = modifiedFileData.replace(\"EditPackages=\" + nondependency, \"\")\n\n bruteIO.removeFile(filePath)\n bruteIO.writeFile(filePath, modifiedFileData)\n\ndef buildUT2k4mod(ut2004baseDir, modName, sourceDirectories, dependencies, nondependencies, exportcache=True, deleteAfterBuild=True,modTitle=\"DummyBuild\",modLogo=\"DummyBuildLogo\",modDesc=\"DummyBuildDesc\",modCmdLine=\"DummyBuildCmdLine\",modURL=\"DummyURL\"):\n systemDirectory = os.path.join(ut2004baseDir, \"System\")\n modSystemDirectory = os.path.join(ut2004baseDir, modName, \"System\")\n\n # Clear old files and directories.\n print \"Clearing old files and directories...\"\n baseModPath = os.path.join(ut2004baseDir, modName)\n bruteIO.removeFile(os.path.join(systemDirectory, modName + \".u\"))\n bruteIO.removeDirectory(baseModPath)\n\n # Create needed directories and files.\n print \"Creating environment for \" + modName + \"...\"\n sourceCodeDestinationDirectory = os.path.join(baseModPath, modName, \"classes\")\n os.makedirs(sourceCodeDestinationDirectory)\n os.makedirs(modSystemDirectory)\n\n buildUT2k4modINI(os.path.join(ut2004baseDir, modName, \"UT2k4mod.ini\"), modTitle, modLogo, modDesc, modCmdLine, modURL)\n buildUT2k4modSystemINI(os.path.join(systemDirectory, \"UT2004.ini\"), os.path.join(modSystemDirectory, modName + \".ini\"), modName, dependencies, nondependencies)\n\n # Copy source code.\n for sourceDirectory in sourceDirectories:\n print \"Copying files from \" + sourceDirectory + \"...\"\n copyAll.copyAll(sourceDirectory, 
sourceCodeDestinationDirectory, makeLinksInsteadOfCopying=False)\n\n # Do the actual compile.\n # We don't need to pipe stdin for any reason - it is a workaround. If it isn't specified as a pipe Python attempts to duplicate\n # the input handle and in some circumstances that can fail.\n print \"Compiling \" + modName + \"...\"\n uccPath = os.path.join(systemDirectory, \"UCC.exe\")\n p = subprocess.Popen([uccPath + \" make -mod=\" + modName],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)\n# p = subprocess.Popen([uccPath, \"make\", \"-mod=\" + modName],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)\n outputTuple = p.communicate()\n outputText = outputTuple[0] + outputTuple[1]\n\n import re\n outputText = re.sub(r'\\r\\n', \"\\n\", outputText)\n outputText = re.sub(r'Analyzing...\\n', \"\", outputText)\n outputText = re.sub(r'-*[a-zA-Z0-9_]* - Release-*\\n', \"\", outputText)\n outputText = re.sub(r'Parsing [a-zA-Z0-9_]*\\n', \"\", outputText)\n outputText = re.sub(r'Compiling [a-zA-Z0-9_]*\\n', \"\", outputText)\n outputText = re.sub(r'Importing Defaults for [a-zA-Z0-9_]*\\n', \"\", outputText)\n \n print outputText\n \n compileReturnCode = p.returncode\n if (compileReturnCode != 0):\n return False\n \n print \"Deploying \" + modName + \"...\"\n shutil.copyfile(os.path.join(modSystemDirectory, modName + \".u\"), os.path.join(systemDirectory, modName + \".u\"))\n\n print \"Running Tests...\"\n executeUnrealscriptTestCommandlets(sourceCodeDestinationDirectory, modName, systemDirectory, uccPath)\n\n if (exportcache): \n print \"Generating Cache...\"\n subprocess.call([uccPath, \"dumpint\", modName + \".u\"])\n subprocess.call([uccPath, \"exportcache\", modName + \".u\"])\n\n print \"Cleaning up...\"\n if (deleteAfterBuild):\n bruteIO.removeDirectory(baseModPath)\n \n print \"Finished building \" + modName + \".\"\n\n return 
True\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":659,"cells":{"__id__":{"kind":"number","value":6150393169216,"string":"6,150,393,169,216"},"blob_id":{"kind":"string","value":"c2bbd1f65c1e4824c48f54c4f2c36064c27d8863"},"directory_id":{"kind":"string","value":"07ae5f3577aa53b19a31cd648ecb7ad4c868943c"},"path":{"kind":"string","value":"/time-measurer.py"},"content_id":{"kind":"string","value":"4034b7167c8cdaf9fb69ff8aa78c0297769062f0"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"anatolyburtsev/script_time_measurer"},"repo_url":{"kind":"string","value":"https://github.com/anatolyburtsev/script_time_measurer"},"snapshot_id":{"kind":"string","value":"c302a3f29ce5533cacc20b36b30864f5eeefc98c"},"revision_id":{"kind":"string","value":"ba0ef603fd24d15e21cce6bbb4fe04a6f774bd98"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-13T02:16:24.675626","string":"2021-01-13T02:16:24.675626"},"revision_date":{"kind":"timestamp","value":"2014-10-20T12:31:23","string":"2014-10-20T12:31:23"},"committer_date":{"kind":"timestamp","value":"2014-10-20T12:31:23","string":"2014-10-20T12:31:23"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_langu
age":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# запускает переданную программу со всеми переданными ключами, меряет время выполнения и шлет в графит\n# !!!important | - не обрабатывается\n# 12/10/2014 Anatoly Burtsev onotole@yandex-team.ru\n#TODO read GRAPHITE_HOST and PORT from config\n\nimport os\nimport sys\nimport time\nimport subprocess as sb\nimport socket\n\nGRAPHITE_HOST = \"localhost\"\nGRAPHITE_PORT = 2003\nHOST = socket.gethostname().split('.')[0]\n\nif len(sys.argv) == 1: \n exit(0)\ncommand = sys.argv[1:]\n\nscript_name_position=1\n# skip \"/usr/bin/flock -w 0 /tmp/.flock_tmp\" \n# may be need improve\nif sys.argv[script_name_position] == \"/usr/bin/flock\":\n script_name_position = 5\n\nSCRIPT = sys.argv[script_name_position]\n\n#/usr/bin/gdb -> gdb\nSCRIPT=SCRIPT.split('/')[-1]\n\n#special for differ \"get_smth.py thing1\" and \"get_smth.py thing2\"\nif len(sys.argv[script_name_position:]) >= 2 and sys.argv[script_name_position+1][0] not in '0123456789-': \n SCRIPT = SCRIPT + '_' + sys.argv[script_name_position+1]\n\n#delele extenstion\nSCRIPT = SCRIPT.replace('.sh','').replace('.py','')\n\nP = sb.Popen( command, stdout=sb.PIPE, stderr=sb.PIPE )\n\nT0 = time.time()\nout, err = P.communicate()\ndT = time.time() - T0\n\nsys.stdout.write(out)\nsys.stderr.write(err)\n\n#send to graphite\nMESSAGE = 'stats.timemeasurer.%s.%s %d %d\\n' % (HOST, SCRIPT, int(dT), int(time.time()))\n\n#print(MESSAGE)\n\nsock = socket.create_connection( (GRAPHITE_HOST, GRAPHITE_PORT))\nsock.sendall( MESSAGE 
)\nsock.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":660,"cells":{"__id__":{"kind":"number","value":12343736014204,"string":"12,343,736,014,204"},"blob_id":{"kind":"string","value":"6b54b1dea646098d46efe9c760d00b2838d356c5"},"directory_id":{"kind":"string","value":"2b616c1d43329e30bf4b70e9b42ff6d9ab26ab45"},"path":{"kind":"string","value":"/apk_exporter_tool.py"},"content_id":{"kind":"string","value":"12e0be12d3c7657f82cc474c02ffe3cf6966d237"},"detected_licenses":{"kind":"list like","value":["GPL-1.0-or-later","LGPL-2.1-or-later","GPL-3.0-or-later","GPL-3.0-only","LGPL-2.0-or-later","LicenseRef-scancode-warranty-disclaimer"],"string":"[\n \"GPL-1.0-or-later\",\n \"LGPL-2.1-or-later\",\n \"GPL-3.0-or-later\",\n \"GPL-3.0-only\",\n \"LGPL-2.0-or-later\",\n 
\"LicenseRef-scancode-warranty-disclaimer\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"coskundeniz/apk-exporter"},"repo_url":{"kind":"string","value":"https://github.com/coskundeniz/apk-exporter"},"snapshot_id":{"kind":"string","value":"8b8fd6624ec701cdcc0b22aa3dcb3055bb63dd86"},"revision_id":{"kind":"string","value":"3529f3bee20dc503323e532dec66eb1ffc032bbc"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-17T23:09:15.835138","string":"2020-04-17T23:09:15.835138"},"revision_date":{"kind":"timestamp","value":"2013-09-20T13:11:28","string":"2013-09-20T13:11:28"},"committer_date":{"kind":"timestamp","value":"2013-09-20T13:11:28","string":"2013-09-20T13:11:28"},"github_id":{"kind":"number","value":12931584,"string":"12,931,584"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom Tkinter import *\nfrom tkFileDialog import askdirectory\nfrom apk_exporter import ApkExporter\n\n\nclass ApkExporterGui(Frame):\n\n def __init__(self, parent):\n Frame.__init__(self, parent, background=\"white\")\n\n self.parent = parent\n\n self.dirname = None\n self.info = StringVar()\n self.package_name_var = StringVar()\n self.version_code_var = StringVar()\n self.version_name_var = StringVar()\n self.version_code_entry = StringVar()\n self.version_code_entry.set(\"\")\n 
self.version_name_entry = StringVar()\n self.version_name_entry.set(\"\")\n self.selected_unsigned = IntVar()\n\n self.init_ui()\n\n self.app = ApkExporter()\n\t\t\n def init_ui(self):\n \"\"\" initialize gui \"\"\"\n self.parent.title(\"Apk Exporter\")\n self.pack(fill=BOTH, expand=1)\n\n # place widgets\n self.place_browse_button()\n\n self.place_package_name_lbl()\n self.place_version_code_lbl()\n self.place_version_name_lbl()\n\n self.place_package_name_var()\n self.place_version_code_var()\n self.place_version_name_var()\n\n self.place_version_code_entry()\n self.place_version_name_entry()\n\n self.place_sign_checkbox()\n self.place_export_button()\n self.place_exit_button()\n\n self.place_info()\n\n # add horizontal line\n frame = Frame(self, relief=RAISED, bd=1)\n frame.grid(row=6, columnspan=3, sticky=W+E)\n\n def place_browse_button(self):\n browse_button = Button(self,\n text=\"BROWSE PROJECT DIRECTORY\",\n command=self.ask_directory)\n browse_button.grid(row=0, columnspan=3, padx=120, pady=10)\n\n def place_package_name_lbl(self):\n package_name_lbl = Label(self,\n text=\"Package Name: \",\n bg=\"white\")\n package_name_lbl.grid(row=1, column=0, pady=5, padx=25, ipady=5, sticky=E)\n\n def place_version_code_lbl(self):\n version_code_lbl = Label(self,\n text=\"Version Code: \",\n bg=\"white\")\n version_code_lbl.grid(row=2, column=0, pady=5, padx=25, ipady=5, sticky=E)\n\n def place_version_name_lbl(self):\n version_name_lbl = Label(self,\n text=\"Version Name: \",\n bg=\"white\")\n version_name_lbl.grid(row=3, column=0, pady=5, padx=25, ipady=5, sticky=E)\n\n def place_package_name_var(self):\n package_name_var = Label(self,\n textvariable=self.package_name_var,\n bg=\"white\")\n package_name_var.grid(row=1, column=1, pady=5, sticky=W)\n\n def place_version_code_var(self):\n version_code_var = Label(self,\n textvariable=self.version_code_var,\n bg=\"white\")\n version_code_var.grid(row=2, column=1, pady=5, sticky=W)\n\n def 
place_version_name_var(self):\n version_name_var = Label(self,\n textvariable=self.version_name_var,\n bg=\"white\")\n version_name_var.grid(row=3, column=1, pady=5, sticky=W)\n\n def place_version_code_entry(self):\n version_code_entry = Entry(self,\n textvariable=self.version_code_entry,\n bg=\"white\",\n justify=CENTER)\n version_code_entry.grid(row=2, column=2, pady=5, sticky=W)\n\n def place_version_name_entry(self):\n version_code_entry = Entry(self,\n textvariable=self.version_name_entry,\n bg=\"white\",\n justify=CENTER)\n version_code_entry.grid(row=3, column=2, pady=5, sticky=W)\n\n def place_sign_checkbox(self):\n sign_checkbox = Checkbutton(self,\n text=\"Unsigned Release\",\n variable=self.selected_unsigned,\n bg=\"white\")\n sign_checkbox.grid(row=4, columnspan=2, padx=25, pady=5, sticky=W)\n\n def place_export_button(self):\n self.export_button = Button(self,\n text=\"Export Apk\",\n command=self.export_apk)\n self.export_button.grid(row=5, column=1, pady=20, sticky=E)\n\n def place_exit_button(self):\n exit_button = Button(self,\n text=\"Exit\",\n width=9,\n command=self.parent.quit)\n exit_button.grid(row=5, column=2, pady=20)\n\n def place_info(self):\n info = Label(self,\n textvariable=self.info,\n bg=\"white\")\n info.grid(row=7, columnspan=3, padx=15, pady=5, sticky=W)\n\n def ask_directory(self):\n if(os.name == \"nt\"):\n self.dirname = askdirectory(initialdir='C:\\\\',\n title='Select Project Directory')\n elif(os.name == \"mac\"):\n self.dirname = askdirectory(initialdir='/Users/%s' % os.getlogin(),\n title='Select Project Directory')\n else:\n self.dirname = askdirectory(initialdir='/home/%s' % os.environ['USER'],\n title='Select Project Directory')\n self.app.dirname = self.dirname\n\n #extract package name, version code and name\n self.app.extract_info()\n\n self.info.set(\"Extracted manifest info\")\n\n # update screen\n self.package_name_var.set(self.app.package)\n self.version_code_var.set(self.app.version_code)\n 
self.version_name_var.set(self.app.version_name)\n\n def export_apk(self):\n\n # get version changes\n if(self.version_code_entry != \"\"):\n self.app.version_code = self.version_code_entry.get()\n self.app.change_version_code(self.app.get_root())\n\n if(self.version_name_entry != \"\"):\n self.app.version_name = self.version_name_entry.get()\n self.app.change_version_name(self.app.get_root())\n\n if(self.version_code_entry.get() == \"\" and\n self.version_name_entry.get() == \"\"):\n pass\n else:\n self.app.write_changes()\n\n # handle signing changes and export apk\n self.app.unsigned = self.selected_unsigned.get()\n self.app.export_apk()\n self.info.set(\"Finished exporting apk\")\n\n\ndef run():\n root = Tk()\n\n width = 450\n height = 300\n\n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n\n x = (screen_width - width) / 2\n y = (screen_height - height) / 2\n\n root.geometry(\"%dx%d+%d+%d\" %(width, height, x, y))\n root.resizable(width=FALSE, height=FALSE)\n\n app = ApkExporterGui(root)\n root.mainloop()\n\n\nif __name__ == '__main__':\n run()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":661,"cells":{"__id__":{"kind":"number","value":12996571070828,"string":"12,996,571,070,828"},"blob_id":{"kind":"string","value":"9c9f5a295e3d157192c35f0e92a7f7ba347ca2cb"},"directory_id":{"kind":"string","value":"0e2f7f9f51fd066f5dd7c7ac334e7cbbccb337d8"},"path":{"kind":"string","value":"/hinstagram.py"},"content_id":{"kind":"string","value":"9695ab00f0e06f4d322bcf55e299797e41193807"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jayhack/Hinstagram"},"repo_url":{"kind":"string","value":"https://github.com/jayhack/Hinstagram"},"snapshot_id":{"kind":"string","value":"ab73a3487bc9fa827f070161f84b97c495795a48"},"revision_id":{"kind":"string","value":"09e1641d17c36f7e7b25d945fc2a2a920cf559d7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T02:49:14.638736","string":"2020-05-18T02:49:14.638736"},"revision_date":{"kind":"timestamp","value":"2014-05-04T14:30:20","string":"2014-05-04T14:30:20"},"committer_date":{"kind":"timestamp","value":"2014-05-04T14:30:20","string":"2014-05-04T14:30:20"},"github_id":{"kind":"number","value":19409348,"string":"19,409,348"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Hinstagram: Visualizing Images as Histograms\n# --------------------------------------------\n# by Jay Hack (jhack@stanford.edu), Spring 2014\nfrom collections import Counter\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport Image\nimport seaborn as sns\n\ndef make_gaussian_img (height, width, sample_size=(1000000,)):\n\n\tgx = np.random.normal (loc=width/2, scale=width/4, size=sample_size).astype(np.int)\n\tgy = np.random.normal (loc=height/2, scale=height/4, size=sample_size).astype(np.int)\n\tdef in_range (x, dim):\n\t\treturn x >= 0 and x <= dim\n\tintensities = 
Counter([(y, x) for y, x in zip(gy, gx) if in_range(x, width) and in_range(y, height)])\n\tgaussian_img = np.zeros ((height, width))\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tgaussian_img[i][j] = intensities[(i, j)]\n\treturn gaussian_img\n\n\nif __name__ == '__main__':\n\n\t#=====[ Step 1: load image as grayscale\t]=====\n\timage_name = 'IMG_0519.jpg'\n\traw_img = 255 - np.array(Image.open (image_name).convert('L').resize((240, 360))) \n\theight, width = raw_img.shape\n\n\t#=====[ Step 2: *sample* a gaussian distribution\t]=====\n\tgaussian_img = make_gaussian_img (height, width)\n\n\t#=====[ Step 3: convolve image with gaussian, normalize\t]=====\n\tconvolved_img = np.multiply (raw_img, gaussian_img).astype (np.float)\n\tconvolved_img = convolved_img / (np.max(np.max(convolved_img)) / 255)\n\tconvolved_img = convolved_img.astype (np.uint8)\n\n\t#=====[ Step 4: unpack into points - apparently this is the only way seaborn accepts...\t]=====\n\tX, Y = [], []\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tY += [height - i] * convolved_img[i][j]\n\t\t\tX += [j] * convolved_img[i][j]\n\n\t#=====[ Step 5: make and display histogram ]=====\n\tsns.jointplot (np.array(X), np.array(Y), color='#219f85')\n\tplt.axis ('off')\n\tplt.show 
()\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":662,"cells":{"__id__":{"kind":"number","value":9242769659558,"string":"9,242,769,659,558"},"blob_id":{"kind":"string","value":"b11f60373a6b9bd8f14db3db3cf0a3dfb4515c4c"},"directory_id":{"kind":"string","value":"c81c47b78a7ee94b745f90974ab34d9e31e13855"},"path":{"kind":"string","value":"/apps/imageproxy/proxy.py"},"content_id":{"kind":"string","value":"666e7d2476bcb2045c99d329b390de77811dcd94"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"brstgt/cream-testsite"},"repo_url":{"kind":"string","value":"https://github.com/brstgt/cream-testsite"},"snapshot_id":{"kind":"string","value":"9a0b2972b3aa0c1bf42747c42ce209586cc8f6e5"},"revision_id":{"kind":"string","value":"f2712b28ec12e737175c1898890ecc7ce49a9e00"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-08T03:48:49.673692","string":"2016-08-08T03:48:49.673692"},"revision_date":{"kind":"timestamp","value":"2012-03-22T13:02:44","string":"2012-03-22T13:02:44"},"committer_date":{"kind":"timestamp","value":"2012-03-22T13:02:44","string":"2012-03-22T13:02:44"},"github_id":{"kind":"number","value":3604724,"string":"3,604,724"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"k
ind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import re, os, urllib2, tempfile\nfrom django.core.files.base import File\nfrom PIL import Image as PilImage\n\nfrom models import Image\nfrom settings import SIZES, OWN, EXCLUDE, URL_RETRIEVE_TIMEOUT\n\nclass ProxyError(Exception):\n pass\n\nclass FormatError(ProxyError):\n pass\n\nclass RetrieveError(ProxyError):\n pass\n\nclass ExcludeError(ProxyError):\n pass\n\nclass Proxy:\n\n ProxyError = ProxyError\n FormatError = FormatError\n RetrieveError = RetrieveError\n ExcludeError = ExcludeError\n\n def __init__(self, user):\n self.user = user\n\n def store_set(self, set):\n img = Image()\n img.userId = self.user\n img.url = set['url']\n img.save()\n name = \"%d.%s\" % (img.id, set['format'].lower())\n size = set['sizes']\n img.full.save(name, File(size['full']), save=False)\n img.medium.save(name, File(size['medium']), save=False)\n img.thumb.save(name, File(size['thumb']), save=False)\n img.save()\n\n return img\n\n def upload(self, upload):\n converter = Converter(SIZES)\n\n fileset = converter.process_upload(upload)\n\n set = self.store_set(fileset)\n converter.clean_set(fileset)\n return set\n\n def resolve(self, url):\n resolver = Resolver()\n try:\n set = resolver.resolve(url)\n except Image.DoesNotExist:\n converter = Converter(SIZES)\n\n fileset = converter.process_url(url)\n\n set = self.store_set(fileset)\n converter.clean_set(fileset)\n\n return set\n\n @staticmethod\n def elect_size(set, geometry):\n if set.fullWidth < geometry[0] and set.fullHeight < geometry[1]:\n return set.full\n\n if set.mediumWidth < geometry[0] and set.mediumHeight < geometry[1]:\n return set.medium\n\n return set.thumb\n\nclass Resolver:\n\n def find_set_by_id(self, id):\n return Image.objects.get(pk=id)\n\n def find_set_by_url(self, url):\n return Image.objects.get(url=url)\n\n def resolve(self, url):\n for match in OWN:\n matches = 
re.search(match, url)\n if matches:\n id = matches.group(1)\n return self.find_set_by_id(id)\n\n for match in EXCLUDE:\n matches = re.search(match, url)\n if matches:\n raise ExcludeError(\"This URL may not be proxied\")\n\n return self.find_set_by_url(url)\n\nclass Converter:\n\n def __init__(self, sizes):\n self.sizes = sizes\n\n def process_gif(self, source, target, size):\n from pgmagick import ImageList, Geometry\n list = ImageList()\n list.readImages(source)\n list.scaleImages(Geometry(size['size'][0], size['size'][1]))\n list.writeImages(target.name)\n pass\n\n def process(self, source, target, size):\n try:\n image = PilImage.open(source)\n except Exception:\n raise FormatError(\"Format not supported\")\n\n if image.format == 'GIF' and size['allowAnimation']:\n self.process_gif(source, target, size)\n return image.format\n\n image.thumbnail(size['size'], PilImage.ANTIALIAS)\n format = 'JPEG'\n extension = 'JPG'\n if image.format == 'GIF':\n format = 'GIF'\n extension = 'GIF'\n\n image.save(target.name, format)\n return extension\n\n def get_temp_file(self):\n # Use tempfile.mkstemp, since it will actually create the file on disk.\n (filedescriptor, filepath) = tempfile.mkstemp()\n # Close the open file using the file descriptor, since file objects\n # returned by os.fdopen don't work, either\n os.close(filedescriptor)\n\n # Open the file on disk\n return open(filepath, \"w+b\")\n\n def process_set(self, source):\n set = {\n 'sizes': {}\n }\n for key, size in self.sizes.items():\n target = self.get_temp_file()\n set['format'] = self.process(source, target, size)\n set['sizes'][key] = target\n return set\n\n def process_upload(self, upload):\n original = self.get_temp_file()\n original.write(upload.read())\n original.close()\n set = self.process_set(original.name)\n set['url'] = ''\n os.remove(original.name)\n return set\n\n def process_url(self, url):\n original = self.get_temp_file()\n try:\n remote = urllib2.urlopen(url, None, URL_RETRIEVE_TIMEOUT)\n 
original.write(remote.read())\n original.close()\n except Exception:\n raise RetrieveError(\"URL \" + url + \" could not be retrieved\")\n\n set = self.process_set(original.name)\n os.remove(original.name)\n set['url'] = url\n return set\n\n def clean_set(self, set):\n for key, file in set['sizes'].items():\n os.remove(file.name)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":663,"cells":{"__id__":{"kind":"number","value":18597208415605,"string":"18,597,208,415,605"},"blob_id":{"kind":"string","value":"212d40b810f42b2b398ccf53524ecf192225f5ec"},"directory_id":{"kind":"string","value":"243e2a25fe12f9f7e0fda524af4968ec0500f513"},"path":{"kind":"string","value":"/test_scripts/test_sep_method.py"},"content_id":{"kind":"string","value":"f2a1ffcb6bbaed92dd347924885edac8664a9225"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only","GPL-1.0-or-later","LGPL-2.0-or-later","LicenseRef-scancode-warranty-disclaimer","LicenseRef-scancode-generic-exception","LicenseRef-scancode-other-copyleft","LicenseRef-scancode-proprietary-license","GPL-2.0-or-later"],"string":"[\n \"GPL-2.0-only\",\n \"GPL-1.0-or-later\",\n \"LGPL-2.0-or-later\",\n \"LicenseRef-scancode-warranty-disclaimer\",\n \"LicenseRef-scancode-generic-exception\",\n \"LicenseRef-scancode-other-copyleft\",\n \"LicenseRef-scancode-proprietary-license\",\n 
\"GPL-2.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"pez2001/sVimPy"},"repo_url":{"kind":"string","value":"https://github.com/pez2001/sVimPy"},"snapshot_id":{"kind":"string","value":"e6e515167ce510c1f9150d963af381fa826da285"},"revision_id":{"kind":"string","value":"01adfffcaf80ed5deb33c24fb31d6de105aef834"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T05:33:35.417567","string":"2021-01-01T05:33:35.417567"},"revision_date":{"kind":"timestamp","value":"2013-10-29T21:10:14","string":"2013-10-29T21:10:14"},"committer_date":{"kind":"timestamp","value":"2013-10-29T21:10:14","string":"2013-10-29T21:10:14"},"github_id":{"kind":"number","value":3014874,"string":"3,014,874"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"class test():\n\tdef main(self,x,l):\n\t\tif(l==2):\n\t\t\treturn\n\t\tself.main(\"2nd\",l+1)\n\t\tprint(\"main:\",x,\",l:\",l)\nt = 
test()\nt.main(\"1st\",0)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":664,"cells":{"__id__":{"kind":"number","value":377957170924,"string":"377,957,170,924"},"blob_id":{"kind":"string","value":"eb9bd953f9d3aa0c985bb991c82071df54858c74"},"directory_id":{"kind":"string","value":"92b68c1c0f671f2435a8579e8115c9cc685f1def"},"path":{"kind":"string","value":"/gnutrition-0.31/src/gnutr_stock.py"},"content_id":{"kind":"string","value":"4f9e89e6f6a2a0cc9504a7127449c253f5efa743"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"ckoch786/GNUstuff"},"repo_url":{"kind":"string","value":"https://github.com/ckoch786/GNUstuff"},"snapshot_id":{"kind":"string","value":"714db3772fa16831e07ad1c4e0330d7d23998d78"},"revision_id":{"kind":"string","value":"4b63a2755c52fcb44413a2977de26b399db967f6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T23:45:50.350764","string":"2021-01-15T23:45:50.350764"},"revision_date":{"kind":"timestamp","value":"2011-12-28T14:54:17","string":"2011-12-28T14:54:17"},"committer_date":{"kind":"timestamp","value":"2011-12-28T14:54:17","string":"2011-12-28T14:54:17"},"github_id":{"kind":"number","value":3063133,"string":"3,063,133"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_f
orks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# gnutrition - a nutrition and diet analysis program.\n# Copyright( C) 2000-2002 Edgar Denny (edenny@skyweb.net)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\nimport gtk\nimport install\n\ndef create_stock():\n drct = install.dir + '/pixmaps/'\n recipe_pixbuf = gtk.gdk.pixbuf_new_from_file( drct + 'cake.png')\n plan_pixbuf = gtk.gdk.pixbuf_new_from_file( drct + 'plan.png')\n food_pixbuf = gtk.gdk.pixbuf_new_from_file( drct + 'banana.png')\n\n recipe_iconset = gtk.IconSet( recipe_pixbuf)\n plan_iconset = gtk.IconSet( plan_pixbuf)\n food_iconset = gtk.IconSet( food_pixbuf)\n\n icon_factory = gtk.IconFactory()\n icon_factory.add( 'gnutr-recipe', recipe_iconset)\n icon_factory.add( 'gnutr-plan', plan_iconset)\n icon_factory.add( 'gnutr-food', food_iconset)\n\n icon_factory.add_default()\n\n gtk.stock_add(( \n ('gnutr-recipe', '_Recipe', gtk.gdk.MOD1_MASK, ord( \"r\"), \"uk\"), \n ('gnutr-plan', '_Plan', gtk.gdk.MOD1_MASK, ord( \"p\"), \"uk\"), \n ('gnutr-food', '_Food', gtk.gdk.MOD1_MASK, ord( \"f\"), 
\"uk\")))\n\ncreate_stock()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":665,"cells":{"__id__":{"kind":"number","value":1640677549939,"string":"1,640,677,549,939"},"blob_id":{"kind":"string","value":"4d320d2e36f0f17b6fff5a932562e60c5b63ebc3"},"directory_id":{"kind":"string","value":"7c141cca004e389a4249c79bc1aa95217a2ed1fe"},"path":{"kind":"string","value":"/30-Cms/arab/arabpy/arab/channel.py"},"content_id":{"kind":"string","value":"88f1df4b70add8d9c2b83072c8a98b69a66e9fa9"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"EbenZhang/arab"},"repo_url":{"kind":"string","value":"https://github.com/EbenZhang/arab"},"snapshot_id":{"kind":"string","value":"bd2d29e3f4cf618d710dbec4716cb8928e92b562"},"revision_id":{"kind":"string","value":"986be1018e2cab6b1c7fd9d4242dfc1cda489c0a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-03-30T08:29:28.686074","string":"2020-03-30T08:29:28.686074"},"revision_date":{"kind":"timestamp","value":"2011-10-15T15:06:16","string":"2011-10-15T15:06:16"},"committer_date":{"kind":"timestamp","value":"2011-10-15T15:06:16","string":"2011-10-15T15:06:16"},"github_id":{"kind":"number","value":2561638,"string":"2,561,638"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_is
sues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#coding=utf-8\n\nfrom google.protobuf.service import *\nimport zmq\nimport error\nimport types\n\nclass Channel(RpcChannel):\n def __init__(self,server_address,timeout = 10000):\n '''server_address: base on zmq protocol, eg.tcp://127.0.0.1:5555\n timeout: in milliseconds\n '''\n self.zmq_ctx = zmq.Context()\n self.rpc_socket = self.zmq_ctx.socket(zmq.REQ)\n self.rpc_socket.connect(server_address)\n self.timeout = timeout\n def CallMethod(self, method_descriptor, rpc_controller,\n request, response_class, done):\n self.rpc_socket.send(method_descriptor.full_name +\n '\\0' +\n request.SerializeToString())\n poller = zmq.Poller()\n poller.register(self.rpc_socket,zmq.POLLIN)\n sockets = dict(poller.poll(self.timeout))\n\n if self.rpc_socket in sockets:\n data = self.rpc_socket.recv()\n\n dummyResp = types.ErrResp()\n\n dummyResp.ParseFromString(data)\n if dummyResp.error.error_code != types.ErrorCode.OK:\n return dummyResp\n\n response = response_class()\n response.ParseFromString(data)\n else:\n #time for msg stay in the queue\n self.rpc_socket.setsockopt(zmq.LINGER, 0)\n self.rpc_socket.close()\n dummyResp = types.ErrResp()\n dummyResp.error.error_code = types.ErrorCode.TIME_OUT\n return dummyResp\n\n response.error.error_code = types.ErrorCode.OK\n return 
response\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":666,"cells":{"__id__":{"kind":"number","value":9990093953289,"string":"9,990,093,953,289"},"blob_id":{"kind":"string","value":"f59c1d59f2c57dd2072c62983dc80a50944b8403"},"directory_id":{"kind":"string","value":"480754b93cf677a218c5ef0650a30febecd1d8b6"},"path":{"kind":"string","value":"/src/205.py"},"content_id":{"kind":"string","value":"2d8e38f026af5c6b3340e057d1e9f29569bfe1cd"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"kaizensoze/project-euler"},"repo_url":{"kind":"string","value":"https://github.com/kaizensoze/project-euler"},"snapshot_id":{"kind":"string","value":"5e90c576fa24b27bacbf30dd21a611c0e8f080fb"},"revision_id":{"kind":"string","value":"7cbd0ba10f938a85d4d3fc7550942293a644f054"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-14T13:50:54.601979","string":"2020-04-14T13:50:54.601979"},"revision_date":{"kind":"timestamp","value":"2014-06-08T04:54:15","string":"2014-06-08T04:54:15"},"committer_date":{"kind":"timestamp","value":"2014-06-08T04:54:15","string":"2014-06-08T04:54:15"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"}
,"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import decimal\n\npCount = 0\npt = {}\n\ncCount = 0\nct = {}\n\nfor a in range(1,5):\n for b in range(1,5):\n for c in range(1,5):\n for d in range(1,5):\n for e in range(1,5):\n for f in range(1,5):\n for g in range(1,5):\n for h in range(1,5):\n for i in range(1,5):\n sum = a+b+c+d+e+f+g+h+i\n if sum in pt.keys():\n pt[sum] += 1\n else:\n pt[sum] = 1\n pCount += 1\n\nfor a in range(1,7):\n for b in range(1,7):\n for c in range(1,7):\n for d in range(1,7):\n for e in range(1,7):\n for f in range(1,7):\n sum = a+b+c+d+e+f\n if sum in ct.keys():\n ct[sum] += 1\n else:\n ct[sum] = 1\n cCount += 1\n\nprob = 0\ndecimal.getcontext().prec = 15\nc = decimal.Decimal(cCount)\np = decimal.Decimal(pCount)\n\nfor pk in pt.keys():\n for ck in ct.keys():\n if pk > ck:\n pResult = decimal.Decimal(pt[pk])\n cResult = decimal.Decimal(ct[ck])\n lProb = (pResult*cResult) / (c*p)\n prob += lProb\n\nprint(prob)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":667,"cells":{"__id__":{"kind":"number","value":15281493651547,"string":"15,281,493,651,547"},"blob_id":{"kind":"string","value":"864231f0797ad8b34ca0204cc9fa5baebaec5378"},"directory_id":{"kind":"string","value":"d732556164a432e22472ea3aa43c48dd611ffb74"},"path":{"kind":"string","value":"/scripts/create_genome_index.py"},"content_id":{"kind":"string","value":"bdb365ff1cc0839751df62af40f5261ad6001a38"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"verdurin/gimmemotifs"},"repo_url":{"kind":"string","value":"https://github.com/verdurin/gimmemotifs"},"snapshot_id":{"kind":"string","value":"30c94ccdda33bcfd4701a8cff5f514619e37aeb1"},"revision_id":{"kind":"string","value":"83c99ddb60d1f7414b2ffe0f23e6690a245f57cd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T07:49:29.020055","string":"2021-01-18T07:49:29.020055"},"revision_date":{"kind":"timestamp","value":"2011-03-17T09:52:48","string":"2011-03-17T09:52:48"},"committer_date":{"kind":"timestamp","value":"2011-03-17T09:52:48","string":"2011-03-17T09:52:48"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2009-2010 Simon van Heeringen \n#\n# This module is free software. 
You can redistribute it and/or modify it under \n# the terms of the MIT License, see the file COPYING included with this \n# distribution.\n\nfrom gimmemotifs.genome_index import *\nfrom gimmemotifs.config import *\nfrom optparse import OptionParser\nimport sys\n\ndefault_index = \"/usr/share/gimmemotifs/genome_index/\"\ntry:\t\n\tconfig = MotifConfig()\n\tdefault_index = config.get_index_dir()\nexcept:\n\tpass\n\nparser = OptionParser()\nparser.add_option(\"-i\", \"--indexdir\", dest=\"indexdir\", help=\"Index dir (default %s)\" % default_index, metavar=\"DIR\", default=default_index)\nparser.add_option(\"-f\", \"--fastadir\", dest=\"fastadir\", help=\"Directory containing fastafiles\", metavar=\"DIR\")\nparser.add_option(\"-n\", \"--indexname\", dest=\"indexname\", help=\"Name of index\", metavar=\"NAME\")\n\t\n(options, args) = parser.parse_args()\n\nif not options.fastadir or not options.indexname:\n\tparser.print_help()\n\tsys.exit(1)\n\t\n\nif not os.path.exists(options.indexdir):\n\tprint \"Index_dir %s does not exist!\" % (options.indexdir)\n\tsys.exit(1)\n\nfasta_dir = options.fastadir\nindex_dir = os.path.join(options.indexdir, options.indexname)\n\ng = GenomeIndex()\ng = g.create_index(fasta_dir, index_dir)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":668,"cells":{"__id__":{"kind":"number","value":10385230971350,"string":"10,385,230,971,350"},"blob_id":{"kind":"string","value":"51f45833ff4804a3a0235d336b85db5208cc4b65"},"directory_id":{"kind":"string","value":"8e630ecff5c9022fd1ac80af5b23271a6ce2ad59"},"path":{"kind":"string","value":"/ecolect/items.py"},"content_id":{"kind":"string","value":"9f79757b043a7d7cca01cfbbe977a299bf2a0d8f"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"treejames/ecolect"},"repo_url":{"kind":"string","value":"https://github.com/treejames/ecolect"},"snapshot_id":{"kind":"string","value":"a2bc5f6c5e338afaac829b360f2d1ce0072a6432"},"revision_id":{"kind":"string","value":"decf311016bb94f589d673459079cecef8c62962"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-01-15T12:30:10.802382","string":"2018-01-15T12:30:10.802382"},"revision_date":{"kind":"timestamp","value":"2014-04-30T07:09:29","string":"2014-04-30T07:09:29"},"committer_date":{"kind":"timestamp","value":"2014-04-30T07:11:21","string":"2014-04-30T07:11:21"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\n\nclass PostItem(Item):\n # define the fields for your item here like:\n # name = Field()\n title = Field()\n url = Field()\n project_name = Field()\n project_address = Field()\n project_investment= Field()\n builder_name = Field()\n builder_address = Field()\n eia_name = Field()\n eia_address = Field()\n page_content = Field()\n pollutions = Field()\n start_date = Field()\n post_start_date = Field()\n post_end_date = Field()\n\n crawled = Field()\n spider = 
Field()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":669,"cells":{"__id__":{"kind":"number","value":8151847953971,"string":"8,151,847,953,971"},"blob_id":{"kind":"string","value":"55e9d612ec8333a98ccdfe1c7115dc510c2d277c"},"directory_id":{"kind":"string","value":"f30f510abec73c5270325d7e0e0891270269e1ec"},"path":{"kind":"string","value":"/modules/viewer/viewerimage.py"},"content_id":{"kind":"string","value":"1de589358c7479035425634a1ad278cfa0cf5f6b"},"detected_licenses":{"kind":"list like","value":["LGPL-2.0-or-later","GPL-2.0-only","GPL-2.0-or-later"],"string":"[\n \"LGPL-2.0-or-later\",\n \"GPL-2.0-only\",\n \"GPL-2.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"pombredanne/dff"},"repo_url":{"kind":"string","value":"https://github.com/pombredanne/dff"},"snapshot_id":{"kind":"string","value":"46b94ca4ab1ff363934cc3624a99f8b334783155"},"revision_id":{"kind":"string","value":"a0453b38b79c96a68b3ee241a490488092fb5a8d"},"branch_name":{"kind":"string","value":"HEAD"},"visit_date":{"kind":"timestamp","value":"2017-05-26T12:01:00.985089","string":"2017-05-26T12:01:00.985089"},"revision_date":{"kind":"timestamp","value":"2010-03-18T23:18:19","string":"2010-03-18T23:18:19"},"committer_date":{"kind":"timestamp","value":"2010-03-18T23:18:19","string":"2010-03-18T23:18:19"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"n
ull"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# DFF -- An Open Source Digital Forensics Framework\n# Copyright (C) 2009-2010 ArxSys\n# This program is free software, distributed under the terms of\n# the GNU General Public License Version 2. See the LICENSE file\n# at the top of the source tree.\n# \n# See http://www.digital-forensic.org for more information about this\n# project. Please do not directly contact any of the maintainers of\n# DFF for assistance; the project provides a web site, mailing lists\n# and IRC channels for your use.\n# \n# Author(s):\n# Solal Jacob \n# \n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import Qt, QSize, QString, SIGNAL, QThread\nfrom PyQt4.QtGui import QPixmap, QImage, QPushButton, QLabel, QWidget, QHBoxLayout, QVBoxLayout, QScrollArea, QIcon, QMatrix\n\nfrom api.vfs import *\nfrom api.module.module import *\nfrom api.module.script import *\nfrom api.magic.filetype import FILETYPE\n\nimport sys\nimport time\nimport re\n\n\nclass QRotateButton(QPushButton):\n def __init__(self, angle, icon):\n QPushButton.__init__(self, QIcon(QString(icon)), \"\")\n self.angle = angle\n\n def mousePressEvent(self, event):\n self.animateClick()\n self.emit(SIGNAL(\"clicked\"), self.angle)\n\n\nclass QZoomButton(QPushButton):\n def __init__(self, zoom, icon):\n QPushButton.__init__(self, QIcon(QString(icon)), \"\")\n self.zoom = zoom\n\n\n def mousePressEvent(self, event):\n self.animateClick()\n self.emit(SIGNAL(\"zoomed\"), self.zoom)\n\n\nclass LoadedImage(QLabel):\n def __init__(self):\n QLabel.__init__(self)\n #self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)\n self.node = None\n self.angle = 0\n self.factor = 1\n self.imgWidth = 0\n self.baseImage = QImage()\n self.cpixmap = QPixmap()\n self.matrix = QMatrix()\n\n\n def load(self, node, type):\n self.node = node\n 
file = self.node.open()\n buff = file.read()\n file.close()\n self.baseImage.loadFromData(buff, type)\n\n\n def adjust(self, imgwidth):\n self.imgWidth = imgwidth\n self.currentImage = self.baseImage.scaled(QSize(self.imgWidth, self.imgWidth), Qt.KeepAspectRatio, Qt.FastTransformation)\n self.setPixmap(QPixmap.fromImage(self.currentImage))\n self.adjustSize()\n\n\n def resize(self, zoomer):\n w = self.currentImage.width() * zoomer\n self.currentImage = self.baseImage.scaled(QSize(w, w), Qt.KeepAspectRatio, Qt.FastTransformation)\n self.setPixmap(QPixmap.fromImage(self.currentImage))\n self.adjustSize()\n\n\n def rotate(self, angle):\n matrix = QMatrix()\n matrix.rotate(angle)\n self.currentImage = self.currentImage.transformed(matrix)\n self.baseImage = self.baseImage.transformed(matrix)\n self.setPixmap(QPixmap.fromImage(self.currentImage))\n self.adjustSize()\n\n\n def fitbest(self):\n self.currentImage = self.baseImage.scaled(QSize(self.imgWidth, self.imgWidth), Qt.KeepAspectRatio, Qt.FastTransformation)\n self.setPixmap(QPixmap.fromImage(self.currentImage))\n self.adjustSize()\n\n\n def notSupported(self):\n #self.setPixmap(None)\n self.setText(\"Format Not Supported\")\n self.adjustSize()\n\nclass Metadata(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n\n\nimport time\n\n\n#class SortImages(QThread):\n# def __init__(self):\n# QThread.__init__(self)\n# self.images = {}\n# self.ft = FILETYPE()\n# self.reg_viewer = re.compile(\".*(JPEG|JPG|jpg|jpeg|GIF|gif|bmp|BMP|png|PNG|pbm|PBM|pgm|PGM|ppm|PPM|xpm|XPM|xbm|XBM).*\", re.IGNORECASE)\n\n\n# def getImageType(self, node):\n# type = None\n\n# if node.attr.size != 0:\n# map = node.attr.smap\n# try:\n #XXX temporary patch for windows magic\n# f = node.attr.smap[\"type\"]\n# except IndexError:\n# #XXX temporary patch for windows magic\n# self.ft.filetype(node)\n# f = node.attr.smap[\"type\"]\n# res = self.reg_viewer.match(f)\n# if res != None:\n# type = f[:f.find(\" \")]\n# return type\n\n\n# def 
setFolder(self, folder):\n# self.folder = folder\n# self.images = {}\n\n\n# def run(self):\n# self.images = {}\n# for node in self.folder:\n# type = self.getImageType(node)\n# if type != None:\n# self.images[node] = type\n\n\nclass ImageView(QWidget, Script):\n def __init__(self):\n Script.__init__(self, \"viewerimage\")\n self.type = \"imageview\"\n self.icon = None\n self.vfs = vfs.vfs()\n self.ft = FILETYPE()\n self.reg_viewer = re.compile(\".*(JPEG|JPG|jpg|jpeg|GIF|gif|bmp|BMP|png|PNG|pbm|PBM|pgm|PGM|ppm|PPM|xpm|XPM|xbm|XBM).*\", re.IGNORECASE)\n self.loadedImage = LoadedImage()\n self.sceneWidth = 0\n #self.sorter = SortImages()\n\n\n def start(self, args):\n self.node = args.get_node(\"file\")\n self.curnode = self.node\n #self.parent = self.node.parent\n #self.sorter.setFolder(self.parent)\n #self.sorter.start()\n #self.getImage()\n\n\n def createMenuItems(self):\n self.l90button = QRotateButton(-90, \":rotate-left.png\")\n self.r90button = QRotateButton(90, \":rotate-right.png\")\n self.rotate180button = QRotateButton(180, \":rotate-180.png\")\n self.zoomin = QZoomButton(float(1.25), \":zoom-in.png\")\n self.zoomout = QZoomButton(float(0.8), \":zoom-out.png\")\n self.fitbest = QPushButton(\"fitbest\")\n #self.previous = QPushButton(\"previous\")\n #self.next = QPushButton(\"next\")\n\n self.connect(self.l90button, SIGNAL(\"clicked\"), self.rotate)\n self.connect(self.r90button, SIGNAL(\"clicked\"), self.rotate)\n self.connect(self.rotate180button, SIGNAL(\"clicked\"), self.rotate)\n self.connect(self.zoomin, SIGNAL(\"zoomed\"), self.zoom)\n self.connect(self.zoomout, SIGNAL(\"zoomed\"), self.zoom)\n self.connect(self.fitbest, SIGNAL(\"clicked()\"), self.fitbestgeom)\n #self.connect(self.previous, SIGNAL(\"clicked()\"), self.setPreviousImage)\n #self.connect(self.next, SIGNAL(\"clicked()\"), self.setNextImage)\n\n\n def drawMenu(self):\n self.hbox = QHBoxLayout()\n self.setLayout(self.vbox)\n self.hbox.addWidget(self.l90button)\n 
self.hbox.addWidget(self.r90button)\n self.hbox.addWidget(self.rotate180button)\n self.hbox.addWidget(self.zoomin)\n self.hbox.addWidget(self.zoomout)\n #self.hbox.addWidget(self.previous)\n #self.hbox.addWidget(self.next)\n self.hbox.addWidget(self.fitbest)\n self.vbox.addLayout(self.hbox)\n\n \n #def getIdx(self):\n # idx = 0\n # res = -1\n # for node in self.parent.next:\n # if node.name == self.node.name:\n # res = idx\n # idx += 1\n # return res\n\n\n\n #type: 0 = forward, 1 = backward\n #def getImage(self, type=1):\n # pass\n #idx = self.parent.next.(self.curnode)\n #print nodes\n #for node in self.parent.next[self.idx:]:\n # type = getImageType(node)\n # if type != None:\n #self.setImage()\n \n\n #def setPreviousImage(self):\n # if self.idx == 0:\n # self.idx = len(self.parent.next)\n # self.node = self.parent.next[self.idx]\n # else:\n # self.idx -= 1\n # self.node = self.parent.next[self.idx]\n # self.setImage()\n\n\n #def setNextImage(self):\n # pass\n\n\n def setImage(self):\n if self.node.attr.size != 0:\n map = self.node.attr.smap\n try:\n #XXX temporary patch for windows magic\n f = self.node.attr.smap[\"type\"]\n except IndexError:\n #XXX temporary patch for windows magic\n self.ft.filetype(node)\n f = self.node.attr.smap[\"type\"]\n res = self.reg_viewer.match(f)\n if res != None:\n type = f[:f.find(\" \")]\n self.loadedImage.load(self.node, type)\n else:\n self.loadedImage.notSupported()\n #not supported format\n #self.loadedImage.notSupported()\n\n\n def g_display(self):\n QWidget.__init__(self, None)\n self.factor = 1\n self.vbox = QVBoxLayout()\n self.setLayout(self.vbox)\n self.scrollArea = QScrollArea()\n self.scrollArea.setWidget(self.loadedImage)\n self.scrollArea.setAlignment(Qt.AlignCenter)\n self.vbox.addWidget(self.scrollArea)\n self.createMenuItems()\n self.drawMenu()\n self.setImage()\n\n\n def zoom(self, zoomer):\n self.factor *= zoomer\n self.loadedImage.resize(zoomer)\n if self.factor > 3.33:\n self.zoomin.setEnabled(False)\n elif 
self.factor < 0.33:\n self.zoomout.setEnabled(False)\n else:\n self.zoomin.setEnabled(True)\n self.zoomout.setEnabled(True)\n\n \n def fitbestgeom(self):\n self.factor = 1\n self.loadedImage.adjust(self.sceneWidth)\n self.zoomin.setEnabled(True)\n self.zoomout.setEnabled(True)\n\n\n def rotate(self, angle):\n self.loadedImage.rotate(angle)\n\n\n def updateWidget(self):\n self.sceneWidth = self.scrollArea.geometry().width()\n self.loadedImage.adjust(self.sceneWidth)\n\n\n def resizeEvent(self, e):\n self.sceneWidth = self.scrollArea.geometry().width()\n self.loadedImage.adjust(self.sceneWidth)\n\n\nclass viewerimage(Module):\n def __init__(self):\n Module.__init__(self, \"viewerimage\", ImageView)\n self.conf.add(\"file\", \"node\")\n self.conf.add_const(\"mime-type\", \"JPEG\")\n self.conf.add_const(\"mime-type\", \"GIF\")\n self.conf.add_const(\"mime-type\", \"PNG\")\n self.tags = \"viewer\"\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":670,"cells":{"__id__":{"kind":"number","value":1683627200041,"string":"1,683,627,200,041"},"blob_id":{"kind":"string","value":"9f472aabe345931f1bc7a1ffcd8fbfbf8954ee29"},"directory_id":{"kind":"string","value":"c1c004b9721f7c16075e289870a35730377543ab"},"path":{"kind":"string","value":"/musicdb/nonclassical/models.py"},"content_id":{"kind":"string","value":"d132228134e8402dc839eab51ce569979d40c781"},"detected_licenses":{"kind":"list like","value":["AGPL-3.0-only"],"string":"[\n 
\"AGPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"MechanisM/musicdb"},"repo_url":{"kind":"string","value":"https://github.com/MechanisM/musicdb"},"snapshot_id":{"kind":"string","value":"acc7411b2a53ba18df39b22aeb1cbfaac0541e14"},"revision_id":{"kind":"string","value":"0c779da9ae67b505fc5ed3722d1cbef32404c18d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T01:38:10.204813","string":"2021-01-20T01:38:10.204813"},"revision_date":{"kind":"timestamp","value":"2011-12-02T17:56:12","string":"2011-12-02T17:56:12"},"committer_date":{"kind":"timestamp","value":"2011-12-02T17:56:12","string":"2011-12-02T17:56:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os\nimport urllib\n\nfrom mutagen import mp3, easyid3, File as MutagenFile\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.files import File as DjangoFile\nfrom django.db.models.aggregates import Sum\n\nfrom musicdb.common.models import AbstractArtist, Nationality, MusicFile, File\n\nfrom musicdb.db.mixins import NextPreviousMixin\nfrom musicdb.db.fields import MySlugField, FirstLetterField, DirNameField\nfrom musicdb.db.std_image.fields import StdImageField\n\nfrom .managers import AlbumManager, TrackManager\n\n\"\"\"\nNon-classical models.\n\"\"\"\n\n__all__ = ('Artist', 'Album', 
'CD', 'Track')\n\nclass Artist(AbstractArtist, NextPreviousMixin):\n name = models.CharField(max_length=250)\n\n is_solo_artist = models.BooleanField(\n 'Artist represents a single person',\n default=False,\n )\n\n nationality = models.ForeignKey(\n Nationality, blank=True, null=True,\n related_name='nonclassical_artists',\n )\n\n name_first = FirstLetterField('name')\n dir_name = DirNameField('name')\n\n class Meta:\n ordering = ('name',)\n\n def __unicode__(self):\n return self.name\n\n @models.permalink\n def get_absolute_url(self):\n return ('nonclassical-artist', (self.slug,))\n\n def long_name(self):\n if self.is_solo_artist:\n try:\n last, first = self.name.split(', ', 1)\n return \"%s %s\" % (first, last)\n except ValueError:\n return self.name\n return self.name\n slug_name = long_name\n\nclass Album(models.Model, NextPreviousMixin):\n title = models.CharField(max_length=200)\n artist = models.ForeignKey(Artist, related_name='albums')\n year = models.IntegerField(blank=True, null=True)\n\n cover = StdImageField(upload_to='album_covers', size=(300, 300), thumbnail_size=(125, 125), blank=True)\n\n slug = MySlugField('title')\n dir_name = DirNameField('get_dir_name')\n\n objects = AlbumManager()\n\n class Meta:\n ordering = ('year', 'title')\n\n def __unicode__(self):\n if self.year:\n return u\"%s (%d)\" % (self.title, self.year)\n return self.title\n\n def delete(self, *args, **kwargs):\n for track in self.get_tracks():\n track.delete()\n\n super(Album, self).delete(*args, **kwargs)\n\n @models.permalink\n def get_absolute_url(self):\n return ('nonclassical-album', (self.artist.slug, self.slug))\n\n def get_dir_name(self):\n if self.year:\n return \"%d %s\" % (self.year, self.title)\n return self.title\n\n def get_tracks(self):\n return MusicFile.objects.filter(track__cd__album=self). \\\n order_by('track__cd', 'track')\n\n def get_nonclassical_tracks(self):\n return Track.objects.filter(cd__album=self). 
\\\n order_by('cd__num', 'track')\n\n def total_duration(self):\n return self.get_tracks().aggregate(Sum('length')).values()[0] or 0\n\n def next(self):\n return super(Album, self).next(artist=self.artist)\n\n def previous(self):\n return super(Album, self).previous(artist=self.artist)\n\n def set_artwork_from_url(self, url):\n tempfile, headers = urllib.urlretrieve(url)\n try:\n self.cover = DjangoFile(open(tempfile))\n self.save()\n except:\n self.cover.delete()\n raise\n finally:\n try:\n os.unlink(tempfile)\n except:\n pass\n\nclass CD(models.Model):\n album = models.ForeignKey(Album, related_name='cds')\n num = models.IntegerField()\n\n class Meta:\n ordering = ('num',)\n unique_together = ('album', 'num')\n verbose_name_plural = 'CDs'\n\n def __unicode__(self):\n return u\"CD %d of %d from %s\" % \\\n (self.num, self.album.cds.count(), self.album)\n\n def get_tracks(self):\n return MusicFile.objects.filter(track__cd=self).order_by('track')\n\n def total_duration(self):\n return self.get_tracks().aggregate(Sum('length')).values()[0] or 0\n\nclass Track(models.Model):\n title = models.CharField(max_length=250)\n cd = models.ForeignKey(CD, related_name='tracks')\n num = models.IntegerField()\n music_file = models.OneToOneField(MusicFile, related_name='track')\n\n dir_name = DirNameField('get_dir_name')\n\n objects = TrackManager()\n\n class Meta:\n ordering = ('num',)\n unique_together = ('cd', 'num')\n\n def __unicode__(self):\n return self.title\n\n def get_dir_name(self):\n return \"%02d %s.mp3\" % (self.num, self.title)\n\n def metadata(self):\n album = self.cd.album\n return {\n 'title': self.title,\n 'album': unicode(album.title),\n 'artist': unicode(album.artist.long_name()),\n 'tracknumber': str(self.num),\n 'date': str(album.year) or '',\n }\n\n @classmethod\n def quick_create(cls, abspath, cd, track_title, track_num):\n audio = MutagenFile(abspath)\n\n if isinstance(audio, mp3.MP3):\n extension = 'mp3'\n\n location = os.path.join(\n 'albums',\n '%d' % 
cd.id,\n '%.2d.%s' % (track_num, extension),\n )\n\n file = File.create_from_path(abspath, location)\n\n music_file = MusicFile.objects.create(\n file=file,\n rev_model='track',\n )\n\n return cls.objects.create(\n cd=cd,\n num=track_num,\n title=track_title,\n music_file=music_file,\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":671,"cells":{"__id__":{"kind":"number","value":3040836853491,"string":"3,040,836,853,491"},"blob_id":{"kind":"string","value":"3a01f9dd22eaf042760cc0d71d562a18ee3213e6"},"directory_id":{"kind":"string","value":"1b98c70426580d6cebf36b6f9ed807fe7a9c0729"},"path":{"kind":"string","value":"/rl-overhead.py"},"content_id":{"kind":"string","value":"378dcc7ba1e834f8041a900f03f1bf347ce6a4f1"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jvimal/eyeq-tests"},"repo_url":{"kind":"string","value":"https://github.com/jvimal/eyeq-tests"},"snapshot_id":{"kind":"string","value":"54a1bba50d7019c07b09fdd147b831a5a823b5ba"},"revision_id":{"kind":"string","value":"d99d05d001d5a3d9fce53b66c6587f605245b555"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-06T17:31:13.127695","string":"2020-06-06T17:31:13.127695"},"revision_date":{"kind":"timestamp","value":"2013-03-21T17:24:46","string":"2013-03-21T17:24:46"},"committer_date":{"kind":"timestamp","value":"2013-03-21T17:24:46","string":"2013-03-21T17:24:46"},"github_id":{"kind":"number","value":8303987,"string":"8,303,987"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"g
ha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\nimport sys\nimport argparse\nimport multiprocessing\nfrom common import *\nimport termcolor as T\nfrom expt import Expt\nfrom iperf import Iperf\nfrom time import sleep\nfrom host import *\nimport os\n\nparser = argparse.ArgumentParser(description=\"RL overhead.\")\n\nparser.add_argument('--rate',\n dest=\"rate\",\n action=\"store\",\n help=\"Aggregate rate of RL.\",\n required=True)\n\nparser.add_argument('--dir',\n dest=\"dir\",\n action=\"store\",\n help=\"Directory to store outputs.\",\n required=True)\n\nparser.add_argument('-n',\n dest=\"n\",\n action=\"store\",\n type=int,\n help=\"Number of RLs.\",\n default=1)\n\nparser.add_argument('-P',\n dest=\"P\",\n action=\"store\",\n type=int,\n help=\"Number of TCP connections.\",\n default=20000)\n\nparser.add_argument('-t',\n dest=\"t\",\n action=\"store\",\n type=int,\n help=\"Time to run expt in seconds.\",\n default=120)\n\nparser.add_argument('--rl',\n dest=\"rl\",\n choices=[\"newrl\", \"htb\"],\n help=\"Choose rate limiter\")\n\nparser.add_argument(\"--profile\",\n dest=\"profile\",\n help=\"Directory to store profile data. 
Omit if you don't want to profile\",\n default=None)\n\nargs = parser.parse_args()\n\nclass RlOverhead(Expt):\n def start(self):\n h1 = Host(\"10.0.1.1\")\n h2 = Host(\"10.0.1.2\")\n hlist = HostList(h1, h2)\n self.hlist = hlist\n n = self.opts('n')\n\n hlist.cmd(\"rmmod newrl\")\n hlist.remove_qdiscs()\n hlist.configure_rps()\n n = self.opts('n')\n\n if self.opts(\"rl\") == \"newrl\":\n dev = h1.get_10g_dev()\n rate = self.opts(\"rate\")\n h1.cmd(\"insmod ~/vimal/exports/newrl.ko rate=%s dev=%s\" % (rate, dev))\n else:\n # insert htb qdiscs in hierarchy\n dev = h1.get_10g_dev()\n ceil = '%sGbit' % (int(self.opts('rate')) / 1000)\n cmd = \"tc qdisc add dev %s root handle 1: htb default 1\" % dev\n h1.cmd(cmd)\n cmd = \"tc class add dev %s classid 1:1 parent 1: htb\" % dev\n cmd += \" rate %s ceil %s mtu 64000\" % (ceil, ceil)\n h1.cmd(cmd)\n\n h1.start_monitors(self.opts(\"dir\"))\n # Start iperf server\n iperf = Iperf({'-p': 5001})\n self.procs.append(iperf.start_server(h2))\n sleep(1)\n\n # Start all iperf clients\n iperf = Iperf({'-p': 5001,\n '-P': self.opts(\"P\"),\n '-i': '10',\n '-c': h2.get_10g_ip(),\n 'dir': os.path.join(self.opts(\"dir\"), \"iperf\"),\n '-t': self.opts('t')})\n self.procs.append(iperf.start_client(h1))\n if self.opts(\"profile\"):\n self.h1 = h1\n h1.start_profile(dir=self.opts(\"profile\"))\n return\n\n def stop(self):\n if self.opts(\"profile\"):\n self.h1.stop_profile(dir=self.opts(\"profile\"))\n\n self.hlist.killall()\n self.hlist.remove_qdiscs()\n self.hlist.cmd(\"rmmod newrl\")\n\nif __name__ == \"__main__\":\n 
RlOverhead(vars(args)).run()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":672,"cells":{"__id__":{"kind":"number","value":2929167709339,"string":"2,929,167,709,339"},"blob_id":{"kind":"string","value":"02339c95bfaf7ffd198283c6afe0914310c7ae51"},"directory_id":{"kind":"string","value":"87502d05a8fc8408aa50554d170dac06c634c797"},"path":{"kind":"string","value":"/ui/ranklistitemwidget.py"},"content_id":{"kind":"string","value":"fe7a6c6da5265f6dce9d024b23ba4cf20217da2b"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only"],"string":"[\n \"GPL-2.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"Jexxar/software-center"},"repo_url":{"kind":"string","value":"https://github.com/Jexxar/software-center"},"snapshot_id":{"kind":"string","value":"d986f65f17d807e3b413913209171662f5ac9289"},"revision_id":{"kind":"string","value":"d5b73c689a39721d6483f949f085204261c9d86a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-01-13T18:24:37.774564","string":"2018-01-13T18:24:37.774564"},"revision_date":{"kind":"timestamp","value":"2014-09-11T15:55:05","string":"2014-09-11T15:55:05"},"committer_date":{"kind":"timestamp","value":"2014-09-11T15:55:05","string":"2014-09-11T15:55:05"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_
open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n### BEGIN LICENSE\n\n# Copyright (C) 2013 National University of Defense Technology(NUDT) & Kylin Ltd\n\n# Author:\n# Shine Huang\n# Maintainer:\n# Shine Huang\n\n# This program is free software: you can redistribute it and/or modify it\n# under the terms of the GNU General Public License version 3, as published\n# by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranties of\n# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR\n# PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program. If not, see .\n\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom ui.ukrliw import Ui_RankListWidget\n\n\nclass RankListItemWidget(QWidget):\n\n def __init__(self, name, rank, parent=None):\n QWidget.__init__(self,parent)\n self.ui_init()\n\n self.ui.name.setText(name)\n self.ui.number.setText(str(rank))\n self.ui.number.setAlignment(Qt.AlignCenter)\n\n # letter spacing\n # font = QFont()\n # font.setLetterSpacing(QFont.PercentageSpacing, 95.0)\n # self.ui.name.setFont(font)\n\n self.ui.name.setStyleSheet(\"QLabel{font-size:13px;color:#666666;}\")\n self.ui.number.setStyleSheet(\"QLabel{font-size:15px;font-style:italic;color:#999999;}\")\n\n def ui_init(self):\n self.ui = Ui_RankListWidget()\n self.ui.setupUi(self)\n self.show()\n\n def enterEvent(self, event):\n self.resize(200, 52)\n\n def leaveEvent(self, event):\n self.resize(200, 
24)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":673,"cells":{"__id__":{"kind":"number","value":3272765104823,"string":"3,272,765,104,823"},"blob_id":{"kind":"string","value":"f66b69d407cfc217f4b330f6f22f8036c3a99f96"},"directory_id":{"kind":"string","value":"67b01421b5058dc3d9df4a7ac52169984938bb16"},"path":{"kind":"string","value":"/Examples/outer()/outer.py # import inner"},"content_id":{"kind":"string","value":"de5d6b281c36e6ed3087cf1daabbcf4cf0132953"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"cancerhermit/outer.py"},"repo_url":{"kind":"string","value":"https://github.com/cancerhermit/outer.py"},"snapshot_id":{"kind":"string","value":"d2176551554c6f2445816fb665245e2110dd0ad5"},"revision_id":{"kind":"string","value":"c1dad052311d85f2de84ebabaab212e9bdc78eec"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-08-10T16:08:27.262190","string":"2015-08-10T16:08:27.262190"},"revision_date":{"kind":"timestamp","value":"2014-07-16T08:05:34","string":"2014-07-16T08:05:34"},"committer_date":{"kind":"timestamp","value":"2014-07-16T08:05:34","string":"2014-07-16T08:05:34"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"
kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport inner\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":674,"cells":{"__id__":{"kind":"number","value":1906965517628,"string":"1,906,965,517,628"},"blob_id":{"kind":"string","value":"90b52a098a98da7c6f9b4fceb14a7a6d2f7b290b"},"directory_id":{"kind":"string","value":"07208e00e630949d378c0a97311b7f34024cf8ed"},"path":{"kind":"string","value":"/yts/cryptopage/models.py"},"content_id":{"kind":"string","value":"7cb4f8476d87194d129d3e5ff330cc6abb833924"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ytsschool/main"},"repo_url":{"kind":"string","value":"https://github.com/ytsschool/main"},"snapshot_id":{"kind":"string","value":"494d402f7d4cbb73c7f13e0e1a6b3367959b235c"},"revision_id":{"kind":"string","value":"386f66043d348d4acfca8bd4e4718e09044d7fe4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T09:44:23.414087","string":"2021-01-19T09:44:23.414087"},"revision_date":{"kind":"timestamp","value":"2014-03-20T18:49:02","string":"2014-03-20T18:49:02"},"committer_date":{"kind":"timestamp","value":"2014-03-20T18:49:02","string":"2014-03-20T18:49:02"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazer
s_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.db import models\nfrom django import forms\n\nimport random\n\ntexts = {\n\t1: \"Not to know is bad, not to wish to know is worse.\",\n\t2: \"Success doesn't come to you... you go to it.\",\n\t3: \"Formal education will make you a living. Self-education will make you a fortune.\",\n\t4: \"Those who cannot change their minds cannot change anything.\",\n\t5: \"If anything is worth trying at all, it's worth trying at least 10 times.\",\n\t6: \"Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.\",\n\t7: \"Being entirely honest with oneself is a good exercise.\",\n\t8: \"The possession of unlimited power will make a despot of almost any man. There is a possible Nero in the gentlest human creature that walks.\",\n\t9: \"Every English poet should master the rules of grammar before he attempts to bend or break them.\",\n\t10: \"To limit the press is to insult a nation; to prohibit reading of certain books is to declare the inhabitants to be either fools or slaves.\",\n}\n\n# Create your models here.\nclass Cryptotext(models.Model):\n\ttext1 = \"let it be let it be speaking words of wisdom let it be\"\n\n\n\t@staticmethod\n\tdef encrypt(text):\n\t\tencrypted = \"\"\n\t\tfor i in text:\n\t\t\tencrypted += chr(ord(i) + 3)\n\t\treturn encrypted\n\n\t@staticmethod\n\tdef random_text():\n\t\treturn texts[random.randint(1,10)]\n\nclass ContactForm(forms.Form):\n\tdecrypted = 
forms.CharField(max_length=200)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":675,"cells":{"__id__":{"kind":"number","value":10462540361492,"string":"10,462,540,361,492"},"blob_id":{"kind":"string","value":"b86f1509d034c7686fda6434f91e7c1c53e69071"},"directory_id":{"kind":"string","value":"07b8a01fc847348b276a576c57ae38d21114e414"},"path":{"kind":"string","value":"/Revolutionary Technology/Prime Number Generator.py"},"content_id":{"kind":"string","value":"49707fc5fc74be6fefb9384cd1eb38e60b0cc4ba"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"AngryNerds/general-projects"},"repo_url":{"kind":"string","value":"https://github.com/AngryNerds/general-projects"},"snapshot_id":{"kind":"string","value":"6df27cf26d33a82f9712482e30965727eae01c0c"},"revision_id":{"kind":"string","value":"70a24e94c1c508b03670d5866bd61de99c1b4984"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T19:41:59.059106","string":"2021-01-01T19:41:59.059106"},"revision_date":{"kind":"timestamp","value":"2014-12-10T08:03:04","string":"2014-12-10T08:03:04"},"committer_date":{"kind":"timestamp","value":"2014-12-10T08:03:04","string":"2014-12-10T08:03:04"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_
open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"num = 2\nremainders = []\n\nwhile True:\n \n print num\n \n num = num + 1\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":676,"cells":{"__id__":{"kind":"number","value":3178275851248,"string":"3,178,275,851,248"},"blob_id":{"kind":"string","value":"9dda823777e9931fc23a2fd21cd49f42ccfb4d70"},"directory_id":{"kind":"string","value":"0cbd1b5a91cf0b1c763656d8c90f8ac081efe674"},"path":{"kind":"string","value":"/rules.py"},"content_id":{"kind":"string","value":"2d389b482361d43404c6b8005423092daa6335ed"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"taliesinb/chartreuse"},"repo_url":{"kind":"string","value":"https://github.com/taliesinb/chartreuse"},"snapshot_id":{"kind":"string","value":"ec3259a59640aa0dce5a9331aa4c91ad025502ea"},"revision_id":{"kind":"string","value":"c8f152f6ef80fdcca2749e5c89c600a0caa4bc97"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T05:35:43.645149","string":"2021-01-01T05:35:43.645149"},"revision_date":{"kind":"timestamp","value":"2011-08-27T23:18:29","string":"2011-08-27T23:18:29"},"committer_date":{"kind":"timestamp","value":"2011-08-27T23:18:29","string":"2011-08-27T23:18:29"},"github_id":{"kind":"number","value":1395527,"string":"1,395,527"},"star_events_count":{"kind":"number","value":4,"string":"4"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_a
t":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from utils import list_replace, identity\n\ndef singleton(x):\n if type(x) not in [list]:\n return [x]\n else:\n return x\n\ndef rules(symbol, patterns, action=identity):\n return [rule(symbol, singleton(p), action) for p in patterns]\n\nclass rule(object):\n def __init__(self, symbol, pattern, action=identity):\n self.symbol = symbol\n if type(pattern) == str: pattern = [pattern]\n self.pattern = pattern\n self.action = action\n \n def first_symbol(self):\n return self.pattern[0]\n \n def replace(self, reps):\n if self.symbol in reps:\n del self # is this okay?\n else:\n list_replace(self.pattern, reps)\n \n def __repr__(self):\n return self.symbol.ljust(15) + \"->\\t\" + str(self.pattern).ljust(20)\n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":677,"cells":{"__id__":{"kind":"number","value":11776800347765,"string":"11,776,800,347,765"},"blob_id":{"kind":"string","value":"d85c438f2fb35cd659b0f8163fb539af1bf6fba5"},"directory_id":{"kind":"string","value":"4d3fe4579fd39a8042e87de0132f1824d8627c19"},"path":{"kind":"string","value":"/myapps/forms.py"},"content_id":{"kind":"string","value":"554abc986814e15448f11efab84b46513df0b28d"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jzxyouok/yunfan"},"repo_url":{"kind":"string","value":"https://github.com/jzxyouok/yunfan"},"snapshot_id":{"kind":"string","value":"185a061cab6cee5d83a976e3afc86bebbccf49a5"},"revision_id":{"kind":"string","value":"62e99261d4d2d41fb2de65f9c3d8eb7aee80913b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-30T23:47:02.821912","string":"2020-06-30T23:47:02.821912"},"revision_date":{"kind":"timestamp","value":"2014-03-21T02:45:41","string":"2014-03-21T02:45:41"},"committer_date":{"kind":"timestamp","value":"2014-03-21T02:45:41","string":"2014-03-21T02:45:41"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding=utf-8\n#todo 从前台表单传来问题id数据\n\nfrom django import forms \nfrom models import Answer,Question\n \nclass AnswerForm(forms.ModelForm): #ModelForm 使form能save数据到数据库\n \n class Meta: \n model = Answer#关联起来 \n \n description = forms.CharField(label=(\"问题描述\"),\n max_length=200,\n widget=forms.Textarea(),\n \n error_messages={'invalid': (\"\")}\n ) \n\n\n def __init__(self, *args, **kwargs): \n super(AnswerForm, self).__init__(*args, **kwargs) \n \n def clean_description(self): \n description = self.cleaned_data['description'] \n if len(description) <=8: \n raise forms.ValidationError(\"拜托,多些几个字会死啊\") \n return description \n 
def clean_owner(self): \n owner = self.cleaned_data['owner'] \n if owner == 'lzj' :\n raise forms.ValidationError('这个问题让wwj来回答') \n return owner \n \n \nclass QuestionForm(forms.ModelForm): #ModelForm 使form能save数据到数据库\n \n class Meta: \n model = Question#关联起来 \n exclude = ('owner')\n\n title = forms.CharField(label=(\"\"),\n max_length=30,\n widget=forms.TextInput(attrs={'placeholder': '问题名称', }),\n\n \n error_messages={'invalid': (\"\")}\n ) \n description = forms.CharField(label=(\"\"),\n max_length=200,\n widget=forms.Textarea(attrs={'placeholder': '问题描述', }),\n \n error_messages={'invalid': (\"字数不够\")}\n ) \n\n \n def __init__(self, *args, **kwargs): \n super(QuestionForm, self).__init__(*args, **kwargs) \n \n def clean_description(self): \n description = self.cleaned_data['description'] \n if len(description) <=8: \n raise forms.ValidationError(\"字数不够\") \n return description \n def clean_owner(self): \n owner = self.cleaned_data['owner'] \n if owner == 'lzj' :\n raise forms.ValidationError('这个问题让wwj来回答') \n return owner"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":678,"cells":{"__id__":{"kind":"number","value":15994458249751,"string":"15,994,458,249,751"},"blob_id":{"kind":"string","value":"b760564f14d3520adfe5b299f512dd4868ae43df"},"directory_id":{"kind":"string","value":"9a5b06660494299dfa9711d1ffce43ca3af45e57"},"path":{"kind":"string","value":"/aurora/webcomponents/session.py"},"content_id":{"kind":"string","value":"4479dd5e895c37657feb1612c5394d1b95ad0c85"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"yeiniel/aurora"},"repo_url":{"kind":"string","value":"https://github.com/yeiniel/aurora"},"snapshot_id":{"kind":"string","value":"9e32097e70333d0e5034fd48d8020ab0a88ac5f4"},"revision_id":{"kind":"string","value":"77cd9cda2493f5a8c9b6ee759e5c2855c6a892f5"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-17T07:59:00.330797","string":"2020-05-17T07:59:00.330797"},"revision_date":{"kind":"timestamp","value":"2013-01-21T13:34:17","string":"2013-01-21T13:34:17"},"committer_date":{"kind":"timestamp","value":"2013-01-21T13:34:17","string":"2013-01-21T13:34:17"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Copyright (c) 2011, Yeiniel Suarez Sosa.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Yeiniel Suarez Sosa. 
nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport collections\nimport hashlib\nimport hmac\nimport random\nimport time\n\nfrom aurora.webapp import foundation\n\n__all__ = ['SessionProvider']\n\n\nclass SessionProvider:\n \"\"\" Provide state to the HTTP protocol.\n\n This component has two use cases:\n\n * In a Web application that use the Aurora Web framework infrastructure.\n * In a Web application with a custom infrastructure.\n\n In the first case you need to provide an implementation for the\n `get_request` optional dependency service and add the `after_handle`\n listener service to the application instance `after_handle` event. Under\n this conditions you can use the `get_session` provided service without\n the Web request object argument. This is known as the higher level\n service.\n\n In the second case the exposed services are the ones known as the lower\n level services: the `get_session` and `persist_session` services. 
As\n long as there is no implementation for the optional Web request object\n provisioning service you need to pass explicitly the Web request object\n as argument for the `get_session` service and call the `persist_session`\n service in all you handlers.\n \"\"\"\n\n #\n # stubs for component dependencies\n #\n\n secret = None # secret string used to create the (id, hash) pair.\n\n cookie_name = 'aurora-sid' # name of the cookie used to persist\n # information on the client browser.\n\n max_age = 3300 # browser cookie maximum age\n\n def get_cache(self) -> collections.MutableMapping:\n \"\"\" Return the session cache.\n\n The session cache is a mapping that use as key the session id. By\n default this service return an in-memory session cache and this\n implementation is not suitable in the use case of multiple\n application instances behind a load balancing proxy.\n :return: A mapping.\n \"\"\"\n try:\n return self.__cache\n except AttributeError:\n self.__cache = {}\n\n return self.__cache\n\n def get_request(self) -> foundation.Request:\n \"\"\" Web request been handled by the application.\n\n The Web application can handle one Web request at a time. 
This service\n return the Web request currently been handled.\n \"\"\"\n raise NotImplementedError()\n\n #\n # component implementation\n #\n\n def __init__(self, secret: str, get_request=None):\n \"\"\" Initialize the session provider component.\n\n If the `get_request` service is provided then the `after_handle`\n service is exposed by the component.\n :param secret: The secret used to create the (id, hash) pair.\n :param get_request: A\n :func:`aurora.webapp.infrastructure.Application.get_request`\n compliant service.\n \"\"\"\n self.secret = secret\n\n if get_request:\n self.get_request = get_request\n\n def generate_id(self) -> str:\n rnd = ''.join((str(time.time()), str(random.random()), self.secret))\n\n return hashlib.sha1(rnd.encode()).hexdigest()[:8]\n\n def make_hash(self, id: str) -> str:\n return hmac.new(\n self.secret.encode(), id.encode(), hashlib.sha1).hexdigest()[:8]\n\n def get_session_info(self, request: foundation.Request) -> (str, str):\n if hasattr(request, '_session_id'):\n id = request._session_id\n hash = request._session_hash\n else:\n cn = self.cookie_name\n if cn in request.cookies and \\\n request.cookies[cn][8:] == self.make_hash(\n request.cookies[cn][:8]):\n id = request.cookies[self.cookie_name][:8]\n hash = request.cookies[self.cookie_name][8:]\n else:\n id = self.generate_id()\n hash = self.make_hash(id)\n\n request._session_id = id\n request._session_hash = hash\n\n return id, hash\n\n #\n # services provided by the component\n #\n\n def get_session(self, request=None) -> collections.MutableMapping:\n \"\"\" Return the session mapping associated to a Web request object.\n\n If the Web request object is not given then the one returned by the\n `on_request` optional dependency service will be used as default.\n This method create the session mapping on first access if needed.\n :param request: A Web request object.\n :return: The session mapping.\n \"\"\"\n if not request:\n request = self.get_request()\n\n id, hash = 
self.get_session_info(request)\n return self.get_cache().setdefault(id, {})\n\n\n def persist_session(self, request: foundation.Request,\n response: foundation.Response):\n \"\"\" Persist session identification information on the client browser.\n\n If the session cache mapping is empty then it is destroyed and no\n session information is sent to the browser.\n :param request: A Web request object.\n :param response: A Web response object.\n \"\"\"\n id, hash = self.get_session_info(request)\n if id in self.get_cache() and len(self.get_cache()[id]) > 0:\n response.set_cookie(self.cookie_name, ''.join((id, hash)),\n self.max_age, request.script_name)\n elif id in self.get_cache():\n del self.get_cache()[id]\n\n\n def post_dispatch(self, response: foundation.Response):\n \"\"\" Service meant to be used as a application `after_handle` listener.\n\n It is only available if the `get_request` optional dependency service\n is provided because it use that service to make Web request\n provisioning.\n :param response: The Web response object.\n \"\"\"\n self.persist_session(self.get_request(), response)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":679,"cells":{"__id__":{"kind":"number","value":5446018554043,"string":"5,446,018,554,043"},"blob_id":{"kind":"string","value":"681f7e04284f409ffd65161de2b6e23900424df6"},"directory_id":{"kind":"string","value":"0b5c0259b13821e6a8990929dceee92caf043217"},"path":{"kind":"string","value":"/lib/db/game.py"},"content_id":{"kind":"string","value":"b8f0693397efb84a845af1d1c2f124bc98090244"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"bfellers/game_collector"},"repo_url":{"kind":"string","value":"https://github.com/bfellers/game_collector"},"snapshot_id":{"kind":"string","value":"91e2aabbb3c345cad1e12ce16a995e64b2fc56bd"},"revision_id":{"kind":"string","value":"bc01af753d69596afe2da941082c56e3d8d07aeb"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-05T07:09:55.103148","string":"2016-08-05T07:09:55.103148"},"revision_date":{"kind":"timestamp","value":"2012-06-10T23:11:25","string":"2012-06-10T23:11:25"},"committer_date":{"kind":"timestamp","value":"2012-06-10T23:11:25","string":"2012-06-10T23:11:25"},"github_id":{"kind":"number","value":3294184,"string":"3,294,184"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from lib.util import load_image\r\n\r\n\r\nclass Game():\r\n def __init__(self,\r\n description=None,\r\n developer=None,\r\n genre=None,\r\n id=None,\r\n image=None,\r\n isbn=None,\r\n location=None,\r\n notes=None,\r\n platform=None,\r\n price=None,\r\n publisher=None,\r\n rating=None,\r\n release_date=None,\r\n score=None,\r\n theme=None,\r\n title=None):\r\n\r\n self.description = description\r\n self.developer = developer\r\n self.genre = genre\r\n self.id = id\r\n self.image = load_image(image)\r\n self.isbn = isbn\r\n self.location = location\r\n self.notes = 
notes\r\n self.platform = platform\r\n self.price = price\r\n self.publisher = publisher\r\n self.rating = rating\r\n self.release_date = release_date\r\n self.score = score\r\n self.theme = theme\r\n self.title = title\r\n\r\n def __repr__(self):\r\n '''\r\n What printing the object displays.\r\n '''\r\n output = \"ID:{id}, Title:{title}, Rating:{rating}, \" \\\r\n \"Developer:{developer}\"\r\n output = output.format(id=self.id,\r\n title=self.title,\r\n rating=self.rating,\r\n developer=self.developer)\r\n return output\r\n\r\n def get_id(self):\r\n '''\r\n Return game id\r\n '''\r\n return self.id\r\n\r\n def get_title(self):\r\n '''\r\n Return game title\r\n '''\r\n return self.title\r\n\r\n def get_rating(self):\r\n '''\r\n Return game rating\r\n '''\r\n return self.rating\r\n\r\n def get_image(self):\r\n '''\r\n Return game image\r\n '''\r\n return self.image\r\n\r\n def set_id(self, id):\r\n '''\r\n @param id is new id value\r\n\r\n Set new game id.\r\n '''\r\n self.id = id\r\n return self.id\r\n\r\n def set_title(self, title, is_key=False):\r\n '''\r\n @param title is new title value\r\n @param is_key denotes whether new value is key or value\r\n\r\n Set game title\r\n return updated title\r\n\r\n add logic to update db record (maybe make commit function)\r\n '''\r\n self.title = title\r\n return self.title\r\n\r\n def set_rating(self, rating, is_key=False):\r\n '''\r\n @param rating is new rating value\r\n @param is_key denotes whether new value is key or value\r\n\r\n set game rating\r\n return updated rating\r\n\r\n make so you can pass\r\n\r\n add logic to update db record (maybe make commit function)\r\n '''\r\n self.rating = rating\r\n return self.rating\r\n\r\n def set_image(self, image):\r\n '''\r\n @param image is an image file\r\n\r\n set new image file\r\n\r\n make sure we add logic here to update db immediately\r\n '''\r\n self.image = image\r\n return 
self.image\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":680,"cells":{"__id__":{"kind":"number","value":14147622298474,"string":"14,147,622,298,474"},"blob_id":{"kind":"string","value":"36cdd7366db7c415caa3f51a6f911bdcd9f21b26"},"directory_id":{"kind":"string","value":"bd827d8135634b637a4b647f5161593136057a00"},"path":{"kind":"string","value":"/trunk/src/lib/python/RepoSuiteComponent.py"},"content_id":{"kind":"string","value":"a1b5fdcab57ca1ff26e727c4fc29253969db7f35"},"detected_licenses":{"kind":"list like","value":["GPL-1.0-or-later","GPL-3.0-or-later","GPL-3.0-only"],"string":"[\n \"GPL-1.0-or-later\",\n \"GPL-3.0-or-later\",\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"alvesev/apt-satellites"},"repo_url":{"kind":"string","value":"https://github.com/alvesev/apt-satellites"},"snapshot_id":{"kind":"string","value":"fb9b4156b451b001001ab9363041cb1a26d74b7b"},"revision_id":{"kind":"string","value":"57477c099119f3532b9c7952c09c6e3f76f3ef7c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T13:46:00.586972","string":"2021-01-18T13:46:00.586972"},"revision_date":{"kind":"timestamp","value":"2014-03-30T10:52:12","string":"2014-03-30T10:52:12"},"committer_date":{"kind":"timestamp","value":"2014-03-30T10:52:12","string":"2014-03-30T10:52:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"ki
nd":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python -B\n\n#\n# Copyright 2013-2014 Alex Vesev\n#\n# This file is part of Apt Satellites.\n#\n# Apt Satellites is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Apt Satellites is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Apt Satellites. If not, see .\n#\n##\n\n\nclass RepoSuiteComponent:\n name = 'default'\n architecturesPool = ['i386', 'amd64', 'all', 'source']\n\n def __init__(self, newName = '', newArchPool = []):\n if newName: self.name = newName\n if newArchPool: self.architecturesPool = newArchPool\n\n if not type(newName) == type(''):\n raise Exception(\"New name must be a string but it is not.\")\n if not type(newArchPool) == type([]):\n raise Exception(\"New architectures pool must be a list but it is 
not.\")\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":681,"cells":{"__id__":{"kind":"number","value":11879879557354,"string":"11,879,879,557,354"},"blob_id":{"kind":"string","value":"56eb2035275721d0acafe1fbf6ed8c7608f02978"},"directory_id":{"kind":"string","value":"694c8f0bb14dabd58516dcbadfa93c99d9dcbb8b"},"path":{"kind":"string","value":"/problem20.py"},"content_id":{"kind":"string","value":"5c5be4bf2f77dd8b857154a4a322d2ae144b07d4"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"JacAbreu/ProjetoEuler"},"repo_url":{"kind":"string","value":"https://github.com/JacAbreu/ProjetoEuler"},"snapshot_id":{"kind":"string","value":"4484cb2dcb1942e848fdeab68413d1d1abc23a83"},"revision_id":{"kind":"string","value":"8d284446aca2d74eecd14af5276f5b6626d504db"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-25T03:19:49.722077","string":"2021-01-25T03:19:49.722077"},"revision_date":{"kind":"timestamp","value":"2012-11-04T02:16:08","string":"2012-11-04T02:16:08"},"committer_date":{"kind":"timestamp","value":"2012-11-04T02:16:08","string":"2012-11-04T02:16:08"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_a
rchived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#Find the sum of the digits in the number 100!\n\nfrom math import *\n\ndef factorial (n):\n\tif n == 0:\n\t\treturn 1\n\telse:\n\t\treturn n*factorial(n-1)\n\t\t\nresult = factorial(100)\n\nsum_digits_factorial_100 = 0\n\nresult_aux = result\nwhile(result_aux > 0):\n\n\tsum_digits_factorial_100 += result_aux % 10\n\t\n\tresult_aux /=10\n\t\nprint sum_digits_factorial_100\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":682,"cells":{"__id__":{"kind":"number","value":8160437893980,"string":"8,160,437,893,980"},"blob_id":{"kind":"string","value":"d9f4394800730f755dacbf2be3b1ba982d8f17b3"},"directory_id":{"kind":"string","value":"abaceea434f17b543688f4d6b1b077fbb9d02789"},"path":{"kind":"string","value":"/CoreNLP/pretraining_code/PretrainSTB.py"},"content_id":{"kind":"string","value":"d1b16a0dcaba5dab531069f050ab4f1e7edecf74"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-or-later"],"string":"[\n 
\"GPL-2.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"kkawabat/Pseudo-Ensembles"},"repo_url":{"kind":"string","value":"https://github.com/kkawabat/Pseudo-Ensembles"},"snapshot_id":{"kind":"string","value":"3150aad8350f51b5b61840456f443af4f642a83e"},"revision_id":{"kind":"string","value":"6fa81df05b64528b92b262e7d7882d0033e75f24"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T09:55:47.535261","string":"2021-01-18T09:55:47.535261"},"revision_date":{"kind":"timestamp","value":"2014-11-06T01:42:17","string":"2014-11-06T01:42:17"},"committer_date":{"kind":"timestamp","value":"2014-11-06T01:42:17","string":"2014-11-06T01:42:17"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\nimport numpy.random as npr\nimport NLMLayers as nlml\nimport NLModels as nlm\nimport cPickle as pickle\nfrom HelperFuncs import zeros, ones, randn, rand_word_seqs\nimport CorpusUtils as cu\n\n#######################\n# Test scripting code #\n#######################\n\ndef some_nearest_words(keys_to_words, sample_count, W1=None, W2=None):\n assert(not (W1 is None))\n if not (W2 is None):\n W = np.hstack((W1, W2))\n else:\n W = W1\n norms = np.sqrt(np.sum(W**2.0,axis=1,keepdims=1))\n W = W / (norms + 1e-5)\n max_valid_key = np.max(keys_to_words.keys())\n W = W[0:(max_valid_key+1),:]\n # \n 
source_keys = np.zeros((sample_count,)).astype(np.uint32)\n neighbor_keys = np.zeros((sample_count, 10)).astype(np.uint32)\n all_keys = np.asarray(keys_to_words.keys()).astype(np.uint32)\n for s in range(sample_count):\n i = npr.randint(0,all_keys.size)\n source_k = all_keys[i]\n neg_cos_sims = -1.0 * np.sum(W * W[source_k], axis=1)\n sorted_k = np.argsort(neg_cos_sims)\n source_keys[s] = source_k\n neighbor_keys[s,:] = sorted_k[1:11]\n source_words = []\n neighbor_words = []\n for s in range(sample_count):\n source_words.append(keys_to_words[source_keys[s]])\n neighbor_words.append([keys_to_words[k] for k in neighbor_keys[s]])\n return [source_keys, neighbor_keys, source_words, neighbor_words]\n\ndef record_word_vectors(w2k, cam, file_name):\n \"\"\"Write some trained word vectors to the given file.\"\"\"\n wv_file = open(file_name, 'w')\n all_words = w2k.keys()\n all_words.sort()\n wv_std_dev = 0.0\n for word in all_words:\n key = w2k[word]\n word_vec = cam.word_layer.params['W'][key]\n wv_std_dev += np.mean(word_vec**2.0)\n word_vals = [word]\n word_vals.extend([str(val) for val in word_vec])\n wv_file.write(\" \".join(word_vals))\n wv_file.write(\"\\n\")\n wv_file.close()\n wv_std_dev = np.sqrt(wv_std_dev / len(all_words))\n print(\"approximate word vector std-dev: {0:.4f}\".format(wv_std_dev))\n return\n\ndef init_biases_with_lups(cam, w2lup, w2k):\n \"\"\"Init class layer biases in cam with log unigram probabilities.\"\"\"\n for w in w2lup:\n cam.class_layer.params['b'][w2k[w]] = max(w2lup[w], -6.0)\n return\n\n\nif __name__==\"__main__\":\n # select source of phrases to pre-train word vectors for.\n data_dir = './training_text/train_and_dev' # TO USE TRAIN AND DEV SETS\n #data_dir = './training_text/train_only' # TO USE ONLY TRAIN SET\n # set some parameters.\n min_count = 2 # lower-bound on frequency of words in kept vocab\n sg_window = 5 # context size or skip-gram sampling\n ns_count = 15 # number of negative samples for negative sampling\n wv_dim = 70 # 
dimensionality of vectors to pre-train\n cv_dim = 10 # this won't be used. it's safe to ignore\n lam_l2 = 0.5 * wv_dim**0.5 # will be used to constrain vector norms\n\n # generate the training vocabulary\n sentences = cu.SentenceFileIterator(data_dir)\n key_dicts = cu.build_vocab(sentences, min_count=2, compute_hs_tree=True, \\\n compute_ns_table=True, down_sample=0.0)\n w2k = key_dicts['words_to_keys']\n k2w = key_dicts['keys_to_words']\n w2lups = key_dicts['word_log_probs']\n neg_table = key_dicts['ns_table']\n unk_word = key_dicts['unk_word']\n hsm_code_dict = key_dicts['hs_tree']\n sentences = cu.SentenceFileIterator(data_dir)\n tr_phrases = cu.sample_phrases(sentences, w2k, unk_word=unk_word, \\\n max_phrases=100000)\n # get some important properties of the generated training vocabulary\n max_cv_key = len(tr_phrases) + 1\n max_wv_key = max(w2k.values())\n max_hs_key = key_dicts['hs_tree']['max_code_key']\n\n\n # initialize the model to-be-trained\n cam = nlm.CAModel(wv_dim, cv_dim, max_wv_key, max_cv_key, \\\n use_ns=True, max_hs_key=max_hs_key, \\\n lam_wv=lam_l2, lam_cv=lam_l2, lam_cl=lam_l2)\n # init parameters in word, context, and classification layers\n cam.use_tanh = True\n cam.init_params(0.02)\n # set parameters in context layer to 0s, across the board\n cam.context_layer.init_params(0.0)\n # tell the model to train subject to dropout and weight fuzzing\n cam.set_noise(drop_rate=0.5, fuzz_scale=0.02)\n # init prediction layer biases with log unigram probabilities\n init_biases_with_lups(cam, w2lups, w2k)\n # NOTE: given the properties of negative sampling, initializing with the\n # log unigram probabilities is actually kind of silly. 
But, we'll leave it\n # in there because I didn't know better at the time, and the resulting\n # vectors performed adequately.\n\n # initialize samplers for drawing positive pairs and negative contrastors\n pos_sampler = cu.PhraseSampler(tr_phrases, sg_window)\n neg_sampler = cu.NegSampler(neg_table=neg_table, neg_count=ns_count)\n\n # train all parameters using the training set phrases\n learn_rate = 1e-2\n decay_rate = 0.975\n for i in range(50):\n cam.train(pos_sampler, neg_sampler, 250, 50001, train_ctx=False, \\\n train_lut=True, train_cls=True, learn_rate=learn_rate)\n learn_rate = learn_rate * decay_rate\n record_word_vectors(w2k, cam, \"wv_d{0:d}_mc{1:d}.txt\".format(wv_dim, min_count))\n [s_keys, n_keys, s_words, n_words] = some_nearest_words( k2w, 10, \\\n W1=cam.word_layer.params['W'], W2=None)\n for w in range(10):\n print(\"{0:s}: {1:s}\".format(s_words[w],\", \".join(n_words[w])))\n\n\n\n\n\n\n\n##############\n# EYE BUFFER #\n##############\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":683,"cells":{"__id__":{"kind":"number","value":3650722201768,"string":"3,650,722,201,768"},"blob_id":{"kind":"string","value":"191cc70add324b414a8335c68879f2456047a05f"},"directory_id":{"kind":"string","value":"57fd2fc08771c8707ab567dba66f4a70ba0caec3"},"path":{"kind":"string","value":"/src/manozodynas/views.py"},"content_id":{"kind":"string","value":"dc24db4c41ea92ea1d3c10ad2fd9306d25b30642"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"mariukz3/manozodynas"},"repo_url":{"kind":"string","value":"https://github.com/mariukz3/manozodynas"},"snapshot_id":{"kind":"string","value":"d970148ebd9ba8214e19662bb022e839203f3414"},"revision_id":{"kind":"string","value":"907a6751a9ebc13563fbd20a969fbe620f6d4c4b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-16T20:48:52.464467","string":"2021-01-16T20:48:52.464467"},"revision_date":{"kind":"timestamp","value":"2014-05-26T16:40:54","string":"2014-05-26T16:40:54"},"committer_date":{"kind":"timestamp","value":"2014-05-26T16:40:54","string":"2014-05-26T16:40:54"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom .forms import LoginForm\nfrom django.contrib.auth import login\nfrom manozodynas.models import Word\nfrom manozodynas.models import Translation\nfrom django.views.generic import CreateView\n\ndef index_view(request):\n return render(request, 'manozodynas/index.html', {})\n\ndef login_view(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n user = form.cleaned_data['user']\n if user is not None and user.is_active:\n login(request, user)\n return 
HttpResponseRedirect(reverse('index'))\n else:\n form = LoginForm()\n #import ipdb; ipdb.set_trace()\n return render(request, 'manozodynas/login.html', {'form':form})\n\ndef words_view(request):\n return render(request, 'manozodynas/list.html', {'list':Word.objects.all()})\n\nclass TypeWord(CreateView):\n model = Word\n template_name = 'manozodynas/type_word.html'\n success_url = '/list_words/'\n\nclass TypeTranslation(CreateView):\n model = Translation\n template_name = 'manozodynas/type_translation.html'\n success_url = '/list_words/'\n\n def get_context_data(self, **kwargs):\n sarasas=super(TypeTranslation, self).get_context_data(**kwargs)\n a=self.kwargs.get(\"pk\")\n sarasas[\"pk\"]=a\n zod=Word.objects.get(pk=a)\n sarasas[\"var\"]=zod\n return sarasas\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":684,"cells":{"__id__":{"kind":"number","value":12764642834188,"string":"12,764,642,834,188"},"blob_id":{"kind":"string","value":"0fe86d81dcae1f2102ed4b4ccd9cbe4386e8ee04"},"directory_id":{"kind":"string","value":"7bb2971188326cf78f33f02588f7229798f1a5e8"},"path":{"kind":"string","value":"/ksp_login/templatetags/ksp_login.py"},"content_id":{"kind":"string","value":"21a758283a83f9ca057d5640ba1dda6c76988d55"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"black3r/ksp_login"},"repo_url":{"kind":"string","value":"https://github.com/black3r/ksp_login"},"snapshot_id":{"kind":"string","value":"a01ec49b455347737866f98a8e3f29d7ad5aefaa"},"revision_id":{"kind":"string","value":"0e16707c48b79d78e16e43cc83e8fa8ccd6427f6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-24T01:13:54.791000","string":"2021-01-24T01:13:54.791000"},"revision_date":{"kind":"timestamp","value":"2014-05-16T12:17:21","string":"2014-05-16T12:17:21"},"committer_date":{"kind":"timestamp","value":"2014-05-16T12:17:52","string":"2014-05-16T12:17:52"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django import template\nfrom django.contrib.auth.forms import AuthenticationForm\n\n\nregister = template.Library()\n\n@register.simple_tag(takes_context=True)\ndef ksp_login_next(context):\n request = context['request']\n if 'next' in context:\n next_page = context['next']\n elif 'next' in request.REQUEST:\n next_page = request.REQUEST['next']\n else:\n next_page = request.get_full_path()\n\n return next_page\n\n@register.assignment_tag(takes_context=True)\ndef ksp_login_auth_form(context):\n return 
AuthenticationForm\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":685,"cells":{"__id__":{"kind":"number","value":18811956787860,"string":"18,811,956,787,860"},"blob_id":{"kind":"string","value":"281ce20f43c629cecae7a1dba2c403f19ee14c20"},"directory_id":{"kind":"string","value":"91cef398a5df7d0775bbd88a8af5a8bebc1874b0"},"path":{"kind":"string","value":"/scripts/CalcBDT.py"},"content_id":{"kind":"string","value":"890ea8653325f1750ee4a8c00446d708fe518a33"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"EmmanueleSalvati/RazorCombinedFit_Git"},"repo_url":{"kind":"string","value":"https://github.com/EmmanueleSalvati/RazorCombinedFit_Git"},"snapshot_id":{"kind":"string","value":"eceed652ef9562752475103ddb75f06b24c8b6e1"},"revision_id":{"kind":"string","value":"a8184aedffedc5a68cf1eb82132db3f09607b1fa"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-17T15:35:46.927516","string":"2020-05-17T15:35:46.927516"},"revision_date":{"kind":"timestamp","value":"2014-11-19T21:17:47","string":"2014-11-19T21:17:47"},"committer_date":{"kind":"timestamp","value":"2014-11-19T21:17:47","string":"2014-11-19T21:17:47"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"ki
nd":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import ROOT as rt\nimport array\n\n\nclass BranchDumper(object):\n\n def __init__(self, tree, norm=None, xs=None):\n\n self.tree = tree\n self.norm = norm\n\n self.lumi = 19300.\n self.filter = 1.0\n\n self.weightVal = 1.0\n if norm is not None:\n point = (-1, -1)\n #point = (200,0)\n self.weightVal = (self.lumi*float(xs)*self.filter)/(1.0*int(self.norm[point]))\n print self.weightVal\n\n self.vars = None\n self.jets = range(6)\n\n def tag_jets(self):\n #order by btag output - high to low\n jets = sorted([(self.tree.jet_csv.at(i), i) for i in xrange(len(self.tree.jet_csv))], reverse=True)\n self.jets = [i for b, i in jets]\n\n def select(self): # TTbar MC\n return self.tree.metFilter and self.tree.hadBoxFilter and self.tree.hadTriggerFilter and self.tree.nCSVM > 0 and self.tree.MR >= 450 and self.tree.RSQ >= 0.03 and\\\n self.tree.nMuonTight == 0 and self.tree.nElectronTight == 0 and not self.tree.isolatedTrack10Filter and self.tree.nMuonLoose == 0 and self.tree.nElectronLoose == 0\n# def select(self): # QCD CR from data\n# return self.tree.metFilter and self.tree.hadBoxFilter and self.tree.hadTriggerFilter and self.tree.nCSVL == 0 and self.tree.MR >= 450 and self.tree.RSQ >= 0.03 and\\\n# self.tree.nMuonTight == 0 and self.tree.nElectronTight == 0 and not self.tree.isolatedTrack10Filter and self.tree.nMuonLoose == 0 and self.tree.nElectronLoose == 0\n\n def weight(self):\n return self.weightVal\n #return 1.0\n\n def weightPU(self):\n return self.tree.pileUpWeightABCD\n\n def thetaH1(self):\n if self.tree.bestHemi == 1:\n return self.tree.hemi1ThetaH\n return self.tree.hemi2ThetaH\n\n def thetaH2(self):\n if self.tree.bestHemi == 2:\n return self.tree.hemi1ThetaH\n return self.tree.hemi2ThetaH\n\n def topMass1(self):\n if self.tree.bestHemi == 1:\n return self.tree.hemi1TopMass\n return self.tree.hemi2TopMass\n\n def 
topMass2(self):\n if self.tree.bestHemi == 2:\n return self.tree.hemi1TopMass\n return self.tree.hemi2TopMass\n\n def wMass1(self):\n if self.tree.bestHemi == 1:\n return self.tree.hemi1WMass\n return self.tree.hemi2WMass\n\n def wMass2(self):\n if self.tree.bestHemi == 2:\n return self.tree.hemi1WMass\n return self.tree.hemi2WMass\n\n def jetNpt(self, n):\n return self.tree.jet_pt.at(self.jets[n])\n\n def jet1pt(self):\n return self.jetNpt(0)\n\n def jet2pt(self):\n return self.jetNpt(1)\n\n def jet3pt(self):\n return self.jetNpt(2)\n\n def jet4pt(self):\n return self.jetNpt(3)\n\n def jet5pt(self):\n return self.jetNpt(4)\n\n def jet6pt(self):\n return self.jetNpt(5)\n\n def jet1mult(self):\n return self.tree.jet_mult.at(self.jets[0])\n\n def jet2mult(self):\n return self.tree.jet_mult.at(self.jets[1])\n\n def jet3mult(self):\n return self.tree.jet_mult.at(self.jets[2])\n\n def jet4mult(self):\n return self.tree.jet_mult.at(self.jets[3])\n\n def jet5mult(self):\n return self.tree.jet_mult.at(self.jets[4])\n\n def jet6mult(self):\n return self.tree.jet_mult.at(self.jets[5])\n\n def jet1girth(self):\n return self.tree.jet_girth_ch.at(self.jets[0])\n\n def jet2girth(self):\n return self.tree.jet_girth_ch.at(self.jets[1])\n\n def jet3girth(self):\n return self.tree.jet_girth_ch.at(self.jets[2])\n\n def jet4girth(self):\n return self.tree.jet_girth_ch.at(self.jets[3])\n\n def jet5girth(self):\n return self.tree.jet_girth_ch.at(self.jets[4])\n\n def jet6girth(self):\n return self.tree.jet_girth_ch.at(self.jets[5])\n\n def nVertex(self):\n return self.tree.nVertex\n\n def headers(self):\n return ['nVertex', 'weightPU', 'weight', 'thetaH1', 'thetaH2', 'topMass1', 'topMass2', 'wMass1', 'wMass2',\n 'jet1pt', 'jet2pt', 'jet3pt', 'jet4pt', 'jet5pt', 'jet6pt',\n 'jet1mult', 'jet2mult', 'jet3mult', 'jet4mult', 'jet5mult', 'jet6mult',\n 'jet1girth', 'jet2girth', 'jet3girth', 'jet4girth', 'jet5girth', 'jet6girth']\n\n #@staticmethod\n def headers_for_MVA(self):\n return 
['thetaH1', 'thetaH2', 'topMass1', 'topMass2', 'wMass1', 'wMass2',\n 'jet1mult', 'jet2mult', 'jet3mult', 'jet4mult', 'jet5mult', 'jet6mult',\n 'jet1girth', 'jet2girth', 'jet3girth', 'jet4girth', 'jet5girth', 'jet6girth']\n\n def values(self):\n values = []\n for h in self.headers():\n values.append(getattr(self, h)())\n return values\n\n def make_tree(self, clone=False):\n\n rt.gROOT.ProcessLine(\"\"\"\nstruct BranchDumper{\\\n Double_t weight;\\\n Double_t weightPU;\\\n Double_t thetaH1;\\\n Double_t thetaH2;\\\n Double_t topMass1;\\\n Double_t topMass2;\\\n Double_t wMass1;\\\n Double_t wMass2;\\\n Double_t jet1pt;\\\n Double_t jet2pt;\\\n Double_t jet3pt;\\\n Double_t jet4pt;\\\n Double_t jet5pt;\\\n Double_t jet6pt;\\\n Double_t jet1mult;\\\n Double_t jet2mult;\\\n Double_t jet3mult;\\\n Double_t jet4mult;\\\n Double_t jet5mult;\\\n Double_t jet6mult;\\\n Double_t jet1girth;\\\n Double_t jet2girth;\\\n Double_t jet3girth;\\\n Double_t jet4girth;\\\n Double_t jet5girth;\\\n Double_t jet6girth;\\\n Double_t MR;\\\n Double_t RSQ;\\\n Double_t nVertex;\\\n Double_t BDT;\\\n};\"\"\")\n from ROOT import BranchDumper\n if not clone:\n tree = rt.TTree('RMRTree', 'Multijet events')\n else:\n tree = self.tree.CloneTree(0)\n tree.SetDirectory(0)\n\n def setAddress(obj, flag):\n for branch in dir(obj):\n if branch.startswith('__'):\n continue\n tree.Branch(branch, rt.AddressOf(obj, branch), '%s/%s' % (branch, flag))\n\n self.vars = BranchDumper()\n setAddress(self.vars, 'D')\n return tree\n\n def set_tree(self, tree, fill=True):\n #self.tag_jets()\n for h in self.headers():\n setattr(self.vars, h, getattr(self, h)())\n self.vars.MR = self.tree.MR\n self.vars.RSQ = self.tree.RSQ\n if fill:\n tree.Fill()\n\n\nclass CalcBDT(object):\n\n def __init__(self, oldTree):\n\n self.sel = BranchDumper(oldTree)\n self.tree = self.sel.make_tree(False)\n\n self.reader = rt.TMVA.Reader()\n self.bdt_vars = {}\n for h in self.sel.headers_for_MVA():\n self.bdt_vars['%s_var' % h] = 
array.array('f', [0])\n self.reader.AddVariable(h, self.bdt_vars['%s_var' % h])\n self.mr_var = array.array('f', [0])\n self.rsq_var = array.array('f', [0])\n self.nvertex_var = array.array('f', [0])\n\n self.reader.AddSpectator('MR', self.mr_var)\n self.reader.AddSpectator('RSQ', self.rsq_var)\n self.reader.AddSpectator('nVertex', self.nvertex_var)\n # self.reader.BookMVA('BDT','/afs/cern.ch/user/w/wreece/public/Razor2012/BDT/Had/TMVAClassification_BDT.weights.xml')\n self.reader.BookMVA('BDT', 'TMVAClassification_BDT.weights.xml')\n\n self.bdt_val = 0.0\n\n def select(self):\n return self.sel.select()\n\n def bdt(self):\n\n for h in self.sel.headers_for_MVA():\n self.bdt_vars['%s_var'%h][0] = getattr(self.sel,h)()\n self.bdt_val = self.reader.EvaluateMVA('BDT')\n return self.bdt_val\n\n def Fill(self):\n self.sel.set_tree(self.tree, False)\n self.sel.vars.BDT = self.bdt()\n self.tree.Fill()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":686,"cells":{"__id__":{"kind":"number","value":10986526389569,"string":"10,986,526,389,569"},"blob_id":{"kind":"string","value":"93f7f9ff28fbdb0303e3c0cc3c47c72f3595bae2"},"directory_id":{"kind":"string","value":"f155cbaade22c2253de6dc475c94ede1a08ee212"},"path":{"kind":"string","value":"/tento/web/app.py"},"content_id":{"kind":"string","value":"b2f77f548b347abe097af0f2687ef4e610802fd2"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"nl-seoultech/tento-server"},"repo_url":{"kind":"string","value":"https://github.com/nl-seoultech/tento-server"},"snapshot_id":{"kind":"string","value":"78e8d31ceacedca3c9b134e7986f1670c6303627"},"revision_id":{"kind":"string","value":"0c6e62953348c23ab92f15b774e71eb82dc97b8f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T23:15:52.894300","string":"2016-09-05T23:15:52.894300"},"revision_date":{"kind":"timestamp","value":"2014-09-22T06:38:37","string":"2014-09-22T06:38:37"},"committer_date":{"kind":"timestamp","value":"2014-09-22T06:41:32","string":"2014-09-22T06:41:32"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom flask import Flask\n\nfrom . 
import user, login, music\n\n\napp = Flask(__name__)\napp.register_blueprint(user.bp, url_prefix='/users')\napp.register_blueprint(login.bp, url_prefix='/login')\napp.register_blueprint(music.bp, url_prefix='/musics')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":687,"cells":{"__id__":{"kind":"number","value":6743098677068,"string":"6,743,098,677,068"},"blob_id":{"kind":"string","value":"83f1d2360cab9cdae95ebb4a7e945de87a063364"},"directory_id":{"kind":"string","value":"f2cd8e94cd609349f591631fa233b2a62c980e4f"},"path":{"kind":"string","value":"/heightmaps/test.py"},"content_id":{"kind":"string","value":"413cf2532fb66c8aa2a3efec69a8d9e39d91f7bd"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"JoeClacks/Khopesh"},"repo_url":{"kind":"string","value":"https://github.com/JoeClacks/Khopesh"},"snapshot_id":{"kind":"string","value":"ec3fc44f2a21f1810560066c75098a5838f44c9e"},"revision_id":{"kind":"string","value":"f9d9a9d583e162cfa8def856532b18f18d30ab66"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-05T04:46:17.585882","string":"2020-06-05T04:46:17.585882"},"revision_date":{"kind":"timestamp","value":"2014-01-16T23:47:09","string":"2014-01-16T23:47:09"},"committer_date":{"kind":"timestamp","value":"2014-01-16T23:47:09","string":"2014-01-16T23:47:09"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gh
a_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#this file converts the 16-bit signed format into 16-bit unsigned greyscale\n#the command to get the png file from the orgional tiff is:\n#convert ETOPO1_Bed_g.tif -depth 16 -type Grayscale ETOPO1_Bed_g2.png\n\nimport Image\n\nim = Image.open('ETOPO1_Bed_g2.png')\n#im = Image.open('test.png')\n\nxSize, ySize = im.size\n\nprint im.size\n\nim2 = Image.new('I', im.size)\n\nfor x in xrange(xSize):\n for y in xrange(ySize):\n pix = im.getpixel((x,y)) \n \n if(pix >= 32768):\n pix = pix - 32768\n else:\n pix = pix + 32768\n \n \n im2.putpixel((x,y),pix)\n\n print x,\n \nim2.save('ETOPO1_Bed_g3.png')\n\n#"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":688,"cells":{"__id__":{"kind":"number","value":1236950601852,"string":"1,236,950,601,852"},"blob_id":{"kind":"string","value":"3014aef6b55093549681cdcaa3efc6738ac74cea"},"directory_id":{"kind":"string","value":"626525770a69c9b8c7725bd2baa7b2f85b7c6e88"},"path":{"kind":"string","value":"/declare/__init__.py"},"content_id":{"kind":"string","value":"e198153a1e60b40a90ad0543961db4b35b58311a"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"madjar/declare"},"repo_url":{"kind":"string","value":"https://github.com/madjar/declare"},"snapshot_id":{"kind":"string","value":"8bce643eb032c471f28d2d37735144f3d385505a"},"revision_id":{"kind":"string","value":"72a0cd5940fce3f003618ae2979fb6b93660810d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-21T13:11:54.761492","string":"2021-01-21T13:11:54.761492"},"revision_date":{"kind":"timestamp","value":"2013-09-30T09:09:39","string":"2013-09-30T09:09:39"},"committer_date":{"kind":"timestamp","value":"2013-09-30T09:10:46","string":"2013-09-30T09:10:46"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from .core import Item, Field, MapField, prepare # noqa\nfrom .magic import MagicField\n\nMAGIC = 
MagicField()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":689,"cells":{"__id__":{"kind":"number","value":8856222591750,"string":"8,856,222,591,750"},"blob_id":{"kind":"string","value":"c1c0b61046a3d9c366fe22b117ca44c91a7f6ae3"},"directory_id":{"kind":"string","value":"8ab04c839dc51bcf0a830d45bbebb9f930e57429"},"path":{"kind":"string","value":"/PieClockScreenlet.py"},"content_id":{"kind":"string","value":"72b027f470f39505c7ad7583bcc672c38cbc965e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ITikhonov/pieclock"},"repo_url":{"kind":"string","value":"https://github.com/ITikhonov/pieclock"},"snapshot_id":{"kind":"string","value":"a8aef6bc0726e2b4ad2c2b77c2ef77c5172fc280"},"revision_id":{"kind":"string","value":"56490877f5a4c95c6ed85c35e00b4710dc80d17e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T09:12:45.689611","string":"2016-09-05T09:12:45.689611"},"revision_date":{"kind":"timestamp","value":"2010-06-21T06:47:00","string":"2010-06-21T06:47:00"},"committer_date":{"kind":"timestamp","value":"2010-06-21T06:47:00","string":"2010-06-21T06:47:00"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"
gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport screenlets\nfrom screenlets import Screenlet\nfrom screenlets.options import IntOption, BoolOption, TimeOption, FloatOption\nfrom screenlets.options import StringOption, FontOption, ColorOption\nfrom screenlets.services import ScreenletService\nfrom cairo import OPERATOR_DEST_OUT,OPERATOR_OVER\n\nfrom math import pi\nfrom datetime import datetime\nfrom gobject import timeout_add\n\nfrom calendar import monthrange,isleap\n\ndef deg(a): return a*(pi/180)\n\ndef circle(ctx,parts,r,w,c,v,b=10,d=0.5):\n\tctx.set_operator(OPERATOR_OVER)\n\twl=360.0/parts\n\tfor i in range(0,parts):\n\t\tdr=1-d*(i%2)\n\t\tpie(ctx,(wl*i,wl*i+wl-b),(r,r+w*dr),c)\n\tctx.set_operator(OPERATOR_DEST_OUT)\n\tpie(ctx, (0,wl*int(v) + (wl-b)*(v-int(v)) ), (r-0.01,r+w+0.01),(1.0,1.0,1.0,0.8))\n\ndef pie(ctx,a,r,c):\n\ta=deg(a[0]-90),deg(a[1]-90)\n\tif len(c)==3: ctx.set_source_rgb(*c)\n\telse: ctx.set_source_rgba(*c)\n\tctx.arc(0,0,r[0], a[0],a[1])\n\tctx.arc_negative(0,0,r[1], a[1],a[0])\n\tctx.fill()\n\nclass PieClockScreenlet (Screenlet):\n\t__name__\t= 'PieClockScreenlet'\n\t__version__\t= '0.0'\n\t__author__\t= 'Ivan Tikhonov'\n\t__desc__\t= 'Pie Clocks'\n\n\tdef __init__ (self, parent_window=None, **keyword_args):\n\t\t\"\"\"Create a new ClockScreenlet instance.\"\"\"\n\t\t# call super (we define to use our own service here)\n\t\tScreenlet.__init__(self,**keyword_args)\n\t\tself.__timeout = timeout_add(500, self.update)\n\t\n\tdef on_init (self):\n\t\tprint \"OK - Clock has been initialized.\"\n\t\tself.add_default_menuitems()\n\n\tdef update(self):\n\t\tself.redraw_canvas()\n\t\treturn True\n\n\tdef on_draw (self, ctx):\n\t\tctx.scale(self.scale,self.scale)\n\t\tctx.scale(self.width/2,self.height/2)\n\t\tctx.translate(1,1)\n\n\t\tself._color_yd = (0.0,0.0,0.0)\n\t\tself._color_md = (0.2,0.2,0.0)\n\t\tself._color_wd = (0.5,0.0,1.0)\n\t\tself._color_hr = 
(0.0,0.5,1.0)\n\t\tself._color_mn = (0.0,0.5,0.0)\n\t\tself._color_sc = (0.8,0.8,0.8)\n\n\t\tnow=datetime.now()\n\n\t\tmr=monthrange(now.year,now.month)[1]\n\t\tdr=365+isleap(now.year)\n\n\t\tcircle(ctx, 4, 0.9,0.1, self._color_yd, (now-datetime(now.year,1,1)).days/float(dr/4.0),2,d=0)\n\t\tcircle(ctx, mr, 0.775,0.1, self._color_md, now.day+now.hour/24.0+now.minute/(24*60.0), 4, 0.2)\n\t\tcircle(ctx, 7, 0.65,0.1, self._color_wd, now.weekday()+now.hour/24.0+now.minute/(24*60.0),d=0)\n\t\tcircle(ctx, 24, 0.35,0.15, self._color_hr, now.hour+now.minute/60.0, 4, 0.2)\n\t\tcircle(ctx, 3, 0.2,0.1, self._color_mn, now.minute/20.0,d=0)\n\t\tcircle(ctx, 2, 0.01,0.1, self._color_sc, (now.second + now.microsecond/1000000.0)/30.0,d=0)\n\n\nif __name__ == \"__main__\":\n\timport screenlets.session\n\tscreenlets.session.create_session(PieClockScreenlet)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":690,"cells":{"__id__":{"kind":"number","value":13091060352257,"string":"13,091,060,352,257"},"blob_id":{"kind":"string","value":"1a0c1c0bd7a9804d8518f8990e8ab913da360484"},"directory_id":{"kind":"string","value":"7543fb0f4ce3a8b2643538d9016156c73ee0a564"},"path":{"kind":"string","value":"/mpm_old/src/materialmodel2d.py"},"content_id":{"kind":"string","value":"42f8b9a8d183824c25615a2f0d53623d228fc71f"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"raulrn/simple-mpm"},"repo_url":{"kind":"string","value":"https://github.com/raulrn/simple-mpm"},"snapshot_id":{"kind":"string","value":"e213a3e8a2670a602e931f48a52e3abc21639e1e"},"revision_id":{"kind":"string","value":"139d9c5b9c19701da2a92059dbe1bfdca32869f7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-25T23:10:00.350591","string":"2020-12-25T23:10:00.350591"},"revision_date":{"kind":"timestamp","value":"2013-10-04T04:18:29","string":"2013-10-04T04:18:29"},"committer_date":{"kind":"timestamp","value":"2013-10-04T04:18:29","string":"2013-10-04T04:18:29"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\n\n\n#===============================================================================\nclass MaterialModel:\n # Defines material models - accessed using getStress \n # - actual computation done in static methods \n # Returns stress tensor and jacobian of deformation\n def __init__(self, modelName, props):\n self.modelName = modelName # Selects Material Model\n self.props = props\n \n def getStress( self, F ):\n model = getattr( self, self.modelName )\n S,Ja = model(self.props, F); \n return (S,Ja)\n \n def changeProps( self, props ):\n self.props = props\n\n\n @staticmethod\n def planeStrainNeoHookean( 
props, F ):\n # Props - poisson, E\n I2 = F*0.\n I2[0,0] = I2[1,1] = 1.\n v = props['poisson']\n E = props['modulus']\n l = E * v / ((1.+v)*(1.-2.*v))\n m = 0.5 * E / (1.+v)\n Ja = F[0,0]*F[1,1] - F[1,0]*F[0,1]\n S = I2*l*np.log(Ja)/Ja + m/Ja * (np.dot(F, F.T) - I2)\n \n return (S,Ja)\n \n @staticmethod\n def planeStrainNeoHookeanMaxStress( props, F ):\n # Props - poisson, E\n I2 = np.eye(2)\n v = props['poisson']\n E = props['modulus']\n sMax = props['maxStress']\n l = E * v / ((1.+v)*(1.-2.*v))\n m = 0.5 * E / (1.+v)\n Ja = np.linalg.det(F)\n S = I2*l*np.log(Ja)/Ja + m/Ja * (np.dot(F, F.T) - I2)\n if vonMises(S) > sMax: \n S = I2*0.\n Ja = 1.\n \n return (S,Ja)\n \n @staticmethod\n def vonMises( S ):\n return np.sqrt( S[0,0]*S[0,0] - S[0,0]*S[1,1] + S[1,1]*S[1,1] +\n 3.*S[1,0]*S[0,1] )"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":691,"cells":{"__id__":{"kind":"number","value":1812476244333,"string":"1,812,476,244,333"},"blob_id":{"kind":"string","value":"d91b4189aa806e844e7aaef70ad124c8086dfb8c"},"directory_id":{"kind":"string","value":"577bbca525cf6d3fdcf9859597e40edbe1a312ac"},"path":{"kind":"string","value":"/Python_learning/str2dict.py"},"content_id":{"kind":"string","value":"9f9a9a2ad1b8e5c61548607a46d25bb8583186da"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"elenmax0607/test"},"repo_url":{"kind":"string","value":"https://github.com/elenmax0607/test"},"snapshot_id":{"kind":"string","value":"a5e9eb25eaf6531a240c52757d9a07ad423e1205"},"revision_id":{"kind":"string","value":"102beeb78a273142b20bb68d8a7443131d038623"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-07T04:33:46.039090","string":"2016-08-07T04:33:46.039090"},"revision_date":{"kind":"timestamp","value":"2014-02-15T03:57:36","string":"2014-02-15T03:57:36"},"committer_date":{"kind":"timestamp","value":"2014-02-15T03:57:36","string":"2014-02-15T03:57:36"},"github_id":{"kind":"number","value":16855713,"string":"16,855,713"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#from time import time \n#t = time() \n#lista = [1,2,3,4,5,6,7,8,9,10] \n#listb =[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.01] \n#len1=len(lista) \n#len2=len(listb) \n#for i in xrange (1000000): \n# for a in xrange(len1): \n# temp=lista[a] \n# for b in xrange(len2): \n# x=temp+listb[b] \n#print x\n#print \"total run time:\"\n#print time()-t \n\na = 'abcd1234'\nprint 
a[:-1]\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":692,"cells":{"__id__":{"kind":"number","value":10170482593879,"string":"10,170,482,593,879"},"blob_id":{"kind":"string","value":"f2324d2973deddfa6982de14e68968ced0fa5910"},"directory_id":{"kind":"string","value":"20d24dbb49e1e1a62bc1c1a1ad1803681821bf06"},"path":{"kind":"string","value":"/tools/bson_splitter.py"},"content_id":{"kind":"string","value":"1aa9494b62e389bb53bd282fe9f2282fe3e2f9e7"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"criteo/mongo-hadoop"},"repo_url":{"kind":"string","value":"https://github.com/criteo/mongo-hadoop"},"snapshot_id":{"kind":"string","value":"7790dc9c63baf778c43cbbfff3c42e548aaae560"},"revision_id":{"kind":"string","value":"f5426411d06967a10279b07e7bf9de49bda282aa"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-21T00:43:50.856829","string":"2021-01-21T00:43:50.856829"},"revision_date":{"kind":"timestamp","value":"2013-09-12T11:41:48","string":"2013-09-12T11:41:48"},"committer_date":{"kind":"timestamp","value":"2013-09-12T11:41:48","string":"2013-09-12T11:41:48"},"github_id":{"kind":"number","value":12541149,"string":"12,541,149"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":true,"string":"true"},"gha_event_created_at":{"kind":"timestamp","value":"2013-09-12T11:41:48","string":"2013-09-12T11:41:48"},"gha_created_at":{"kind":"timestamp","value":"2013-09-02T13:53:23","string":"2013-09-02T13:53:23"},"gha
_updated_at":{"kind":"timestamp","value":"2013-09-12T11:41:48","string":"2013-09-12T11:41:48"},"gha_pushed_at":{"kind":"timestamp","value":"2013-09-12T11:41:48","string":"2013-09-12T11:41:48"},"gha_size":{"kind":"number","value":80231,"string":"80,231"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"number","value":0,"string":"0"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"string","value":"Java"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sys\nimport struct\nimport pymongo\nfrom bson import BSON\nimport os\n\nSPLIT_SIZE = 64 * 1024 * 1024\n\ndef main(argv):\n split_bson(argv[0])\n\ndef split_bson(path):\n bsonfile_path = os.path.abspath(path)\n splitsfile_out = os.path.join(os.path.dirname(bsonfile_path), \".\" + os.path.basename(bsonfile_path) + \".splits\")\n bsonfile = open(bsonfile_path,'r')\n splitsfile = open(splitsfile_out,'w')\n\n file_position = 0\n cur_split_start = 0\n cur_split_size = 0\n while True:\n size_bits = bsonfile.read(4)\n if len(size_bits) < 4:\n if cur_split_size > 0:\n #print {\"start\":cur_split_start, \"length\": bsonfile.tell() - cur_split_start}\n splitsfile.write(BSON.encode({\"s\":long(cur_split_start), \"l\": long(bsonfile.tell() - cur_split_start)}))\n break\n size = struct.unpack(\" SPLIT_SIZE:\n #print {\"start\":cur_split_start, \"length\": bsonfile.tell() - 4 - cur_split_start}\n splitsfile.write(BSON.encode({\"s\":long(cur_split_start), \"l\": long(bsonfile.tell() - 4 - cur_split_start)}))\n cur_split_start = bsonfile.tell() - 4\n cur_split_size = 0\n else:\n pass\n\n bsonfile.seek(file_position + size)\n file_position += size\n cur_split_size += 4 + size\n \nif __name__ == '__main__':\n 
main(sys.argv[1:])\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":693,"cells":{"__id__":{"kind":"number","value":10823317635030,"string":"10,823,317,635,030"},"blob_id":{"kind":"string","value":"07331370e3d693d643ffbab2080288af21c1ceb8"},"directory_id":{"kind":"string","value":"9d795fb88259782cee7da9a9dfdb1d085634a4a7"},"path":{"kind":"string","value":"/Tests/dna_starts_with.py"},"content_id":{"kind":"string","value":"988fac882666e94f44ca231b35d3aebf0b11e3c8"},"detected_licenses":{"kind":"list like","value":["CC-BY-SA-3.0"],"string":"[\n \"CC-BY-SA-3.0\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"nataliethorne/adelaide_swc"},"repo_url":{"kind":"string","value":"https://github.com/nataliethorne/adelaide_swc"},"snapshot_id":{"kind":"string","value":"83e485ba512a4874a77424ae776eeccdd5a8c0e8"},"revision_id":{"kind":"string","value":"5560f857119b8c2776a8500ce04b8769c6304030"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-03T04:56:45.138802","string":"2020-05-03T04:56:45.138802"},"revision_date":{"kind":"timestamp","value":"2013-09-26T06:55:36","string":"2013-09-26T06:55:36"},"committer_date":{"kind":"timestamp","value":"2013-09-26T06:55:36","string":"2013-09-26T06:55:36"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_
open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def dna_starts_with(input_dna,start_dna):\n return input_dna[0:len(start_dna)]==start_dna\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":694,"cells":{"__id__":{"kind":"number","value":2791728752057,"string":"2,791,728,752,057"},"blob_id":{"kind":"string","value":"b75e298c4e094d3e7cd544d73efcb59e9772d380"},"directory_id":{"kind":"string","value":"495171fa04685c63c61f630531a583ce46bb87d6"},"path":{"kind":"string","value":"/Final_Lab_Python/Commands.py"},"content_id":{"kind":"string","value":"e4de67522f4821c664be9265c50dd8f09eff7c68"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"hlfshell/Robot-Framework-RBE3002"},"repo_url":{"kind":"string","value":"https://github.com/hlfshell/Robot-Framework-RBE3002"},"snapshot_id":{"kind":"string","value":"6165ba734c4a0ed1ec42739bb0cad1a3532ae4cb"},"revision_id":{"kind":"string","value":"cefebdb5819446c4637c3e0f4bdda274d6147e0d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T22:10:32.516888","string":"2021-01-23T22:10:32.516888"},"revision_date":{"kind":"timestamp","value":"2012-04-26T01:52:21","string":"2012-04-26T01:52:21"},"committer_date":{"kind":"timestamp","value":"2012-04-26T01:52:21","string":"2012-04-26T01:52:21"},"github_id":{"kind":"number","value":4143152,"string":"4,143,152"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"
null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''\nCommands.py\n\nAuthor: Keith Chester\n\nThis holds all possible commands - should be kept\nin sync with Commands.h on robot side.\n\n'''\n\nsetID = chr(0x00)\nrequestID = chr(0x01)\nInitializeRobot = chr(0x02)\nreset = chr(0x03)\n\nrequestPinValue = chr(0xA0)\nrequestADCchannel = chr(0xA1)\n\nenableMotors = chr(0xC0)\ndisableMotors = chr(0xC1)\nsetPID = chr(0xC3)\nsetMotorRight = chr(0xC4)\nsetMotorLeft = chr(0xC5)\nsendStepperTo = chr(0xC6)\nsetServoTo = chr(0xD0)\nsetMotorLeftForward = chr(0xD1)\nsetMotorRightForward = chr(0xD3)\nsetMotorLeftReverse = chr(0xD2)\nsetMotorRightReverse = chr(0xD4)\n\nPING = chr(0xFF)\n\n\n\nSUCCESS = chr(0xFF)\nFAILURE = chr(0x00)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":695,"cells":{"__id__":{"kind":"number","value":16870631568839,"string":"16,870,631,568,839"},"blob_id":{"kind":"string","value":"82a30ba9cbc1bfffdf71c89d757519610a03fb78"},"directory_id":{"kind":"string","value":"310f654682ed30207994d4eebc61a5d0bfd1ccdf"},"path":{"kind":"string","value":"/example_func.py"},"content_id":{"kind":"string","value":"21dc62392b6fdf62de2dd2f585692ca6e0e3aecc"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"AndyTimmins/adelaide_swc2"},"repo_url":{"kind":"string","value":"https://github.com/AndyTimmins/adelaide_swc2"},"snapshot_id":{"kind":"string","value":"607b8f1be70264dd2fa672c7c8aae9cdabfe6d24"},"revision_id":{"kind":"string","value":"ab7700bebf1e89ffd37582ee749fbca1b7221c3d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T14:59:08.352273","string":"2021-01-19T14:59:08.352273"},"revision_date":{"kind":"timestamp","value":"2013-09-25T04:51:23","string":"2013-09-25T04:51:23"},"committer_date":{"kind":"timestamp","value":"2013-09-25T04:51:23","string":"2013-09-25T04:51:23"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def sum_two_numbers(first_number, second_number):\n result = first_number + second_number\n return result\n\nprint sum_two_numbers(2, 
7)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":696,"cells":{"__id__":{"kind":"number","value":15092515107573,"string":"15,092,515,107,573"},"blob_id":{"kind":"string","value":"6730806b61ad3487068f96f5b71a0703ad2a1052"},"directory_id":{"kind":"string","value":"cd3ef1af3167d3a16f37157156dede151bf99f34"},"path":{"kind":"string","value":"/ranking.py"},"content_id":{"kind":"string","value":"dfb003b3f09f1570f9fd19211c863e416df140cd"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tclh123/SSSTA-Ranking"},"repo_url":{"kind":"string","value":"https://github.com/tclh123/SSSTA-Ranking"},"snapshot_id":{"kind":"string","value":"6820ad75d850f21424febd57f38a5aa503d5f2e7"},"revision_id":{"kind":"string","value":"d27c69b253d2e92b38568989f90a993f3c9ebb1d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T01:34:29.001982","string":"2016-09-06T01:34:29.001982"},"revision_date":{"kind":"timestamp","value":"2012-11-30T20:34:49","string":"2012-11-30T20:34:49"},"committer_date":{"kind":"timestamp","value":"2012-11-30T20:34:49","string":"2012-11-30T20:34:49"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived
":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'tclh123'\n\nfrom scoring import *\n\ndef ranking(html, prob_num, player_num, max_pts=1000.0):\n \"\"\"\n return [('mizuki_tw', 1503), ('brotherroot', 1498), ...\n \"\"\"\n rank = {}\n scores = scoring(html, prob_num, player_num, max_pts)\n for username in scores:\n tmp = 0.0\n for prob_id in scores[username]:\n tmp += scores[username][prob_id]\n rank[username] = int(tmp+0.5)\n return sorted(rank.iteritems(), key = lambda asd:asd[1] ,reverse = True)\n # return rank\n\ndef test():\n htmltest = \"
RankIdSolvePenaltyABCDEFG
1mizuki_tw723:49:572:24:52(-5)2:09:32(-3)2:30:493:20:24(-2)2:57:19(-1)3:17:453:29:16
2biamgo724:10:132:27:22(-1)2:46:143:05:14(-3)3:14:223:31:13(-1)3:39:163:46:32
3ktboyyy725:23:412:57:30(-5)2:27:092:38:574:35:03(-2)3:16:023:29:383:39:22
4brotherroot727:00:371:34:50(-2)2:17:51(-5)2:28:18(-1)2:38:203:17:40(-7)3:07:47(-1)3:55:51(-7)
5cripout734:08:042:38:23(-5)3:02:46(-1)3:25:04(-1)4:40:00(-2)5:11:09(-5)5:22:054:48:37(-1)
6neveralso734:23:342:43:47(-12)2:35:282:40:376:01:28(-8)3:11:445:32:14(-2)3:58:16(-1)
7gswxw743:00:435:54:53(-13)2:40:144:15:51(-5)5:46:58(-4)5:23:495:02:10(-1)6:16:48
8h549570564745:21:085:56:42(-4)3:55:32(-5)4:12:006:53:57(-7)5:41:32(-5)5:05:376:35:48
9perfect28526:03:154:55:46(-5)5:19:31(-3)5:23:16(-1) (-4) (-8)3:44:303:40:12
10tclh123411:56:442:35:18(-1)3:00:143:00:283:00:44
11lkid314:13:103:31:45(-2)4:49:11(-1)4:52:14 (-2)
12z45153847313:15:392:55:39(-1)
13frustratingman13:45:073:45:07 (-3)
14zzs132400:00:00 (-7) (-1)
\"\n\n print ranking(htmltest, 7, 13)\n\nif __name__ == \"__main__\":\n test()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":697,"cells":{"__id__":{"kind":"number","value":9036611222138,"string":"9,036,611,222,138"},"blob_id":{"kind":"string","value":"825663fda964de648ba36260d186e5bc902e6993"},"directory_id":{"kind":"string","value":"aeb910d12562abdbd428fafb43106f15ac2649e6"},"path":{"kind":"string","value":"/homepage/cached_templates/templates/index.html.py"},"content_id":{"kind":"string","value":"ad9cbde189a1fc24eb10e96b9b407c3a52914d83"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tk4d/MyStuffWebpage"},"repo_url":{"kind":"string","value":"https://github.com/tk4d/MyStuffWebpage"},"snapshot_id":{"kind":"string","value":"6482a5d02cdd64c21e80ab1842c11e1f438bf1e2"},"revision_id":{"kind":"string","value":"31f696649ad749145b345b8d06267a2a723f6f97"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T17:30:32.188655","string":"2021-01-23T17:30:32.188655"},"revision_date":{"kind":"timestamp","value":"2014-01-21T14:59:57","string":"2014-01-21T14:59:57"},"committer_date":{"kind":"timestamp","value":"2014-01-21T14:59:57","string":"2014-01-21T14:59:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_
count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 9\n_modified_time = 1389655070.646056\n_enable_loop = True\n_template_filename = 'C:\\\\Python33\\\\mystuff\\\\homepage\\\\templates/index.html'\n_template_uri = 'index.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['center']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def center():\n return render_center(context._locals(__M_locals))\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 2\n __M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'center'):\n context['self'].center(**pageargs)\n \n\n # SOURCE LINE 9\n __M_writer(' \\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_center(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def center():\n return render_center(context)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n __M_writer = context.writer()\n # SOURCE LINE 4\n __M_writer('\\n \\n

The #1 Local Photo Store

\\n At Mystuff we offer the best products and services for your everyday photo needs.
\\n Be sure to check out our daily deals for incredible savings!\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":698,"cells":{"__id__":{"kind":"number","value":16587163713010,"string":"16,587,163,713,010"},"blob_id":{"kind":"string","value":"e57a5a76519eae230eacdc0d7896ccedb9cf2262"},"directory_id":{"kind":"string","value":"79b3036b797bbb1b49fe4cca33e9cc752a332cc5"},"path":{"kind":"string","value":"/wallet/model_signup.py"},"content_id":{"kind":"string","value":"24a8316238665048f08e17c260f97daf7c8268c9"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"mohakrajendra/dbmswallet"},"repo_url":{"kind":"string","value":"https://github.com/mohakrajendra/dbmswallet"},"snapshot_id":{"kind":"string","value":"ffe64f43cf3e0f12c9a03e6df7bba607430c7239"},"revision_id":{"kind":"string","value":"b66f17cd70c7ad0a08e112d4ca3a875af6f9b840"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-30T05:37:40.769931","string":"2020-05-30T05:37:40.769931"},"revision_date":{"kind":"timestamp","value":"2012-07-12T08:23:40","string":"2012-07-12T08:23:40"},"committer_date":{"kind":"timestamp","value":"2012-07-12T08:23:40","string":"2012-07-12T08:23:40"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_star
gazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.db import models\n\nclass userprofile(models.Model):\n name=models.CharField(max_length=100)\n website=models.CharField(max_length=200)\n email=models.EmailField()\n birth_date=models.DateField()\n sex=models.CharField(max_length=50)\n country=models.CharField(max_length=100)\n password=models.CharField(max_length=100)\n mailing=models.BooleanField()\n \n\n def __unicode__(self):\n return self.name\n#admin.site.register(userprofile)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":699,"cells":{"__id__":{"kind":"number","value":7679401554646,"string":"7,679,401,554,646"},"blob_id":{"kind":"string","value":"9b2f496554e746965b542d59d158e1e7d3bb6c9f"},"directory_id":{"kind":"string","value":"ba1066b0860a73020eb5c4ee0021f68e3639327c"},"path":{"kind":"string","value":"/Sujet 1/machine.py"},"content_id":{"kind":"string","value":"dbb27b2dc8f0730571f59bba3d99f69f9ba0fd36"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Hiestaa/TARP-ODNL"},"repo_url":{"kind":"string","value":"https://github.com/Hiestaa/TARP-ODNL"},"snapshot_id":{"kind":"string","value":"cf51678ce4940d2d84a167317eb70298863cc9b1"},"revision_id":{"kind":"string","value":"3a09054558ddc188f80abfd13ea51e1e99d64d68"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-25T07:27:54.313545","string":"2021-01-25T07:27:54.313545"},"revision_date":{"kind":"timestamp","value":"2014-01-13T01:14:33","string":"2014-01-13T01:14:33"},"committer_date":{"kind":"timestamp","value":"2014-01-13T01:14:33","string":"2014-01-13T01:14:33"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import Log\nfrom task import Task\n\nclass Machine:\n\t\"\"\"Represente une machine capable d'executer une operation\n\tDe plus, assigner une tache a un machine sous la forme d'une liste d'operation\n\tpermet d'automatiser le passage de l'operation suivante a la machine suivante lorsque l'operation courante est terminee\"\"\"\n\tdef __init__(self, mid, log):\n\t\tself.id = mid\n\t\tself.next = None # prochaine machine de la liste\n\t\tself.currentoptime = 0 # temps total necessaire pour l'operation en cours\n\t\tself.currentopstate = 0 # etat du travail sur l'operation en cours\n\t\tself.working = False # la machine 
est-elle en train de travailler ?\n\t\tself.currenttask = None\n\t\tself.waitingtask = []\n\t\tself.onopdone = None # callback a appeler lorsque l'operation est terminee\n\t\tself.ontaskdone = None # callback a appeler lorsque la tache est terminee\n\t\tself.time = 0\n\n\t\tself.total_working_time = 0\n\t\tself.total_waiting_time = 0\n\t\tself.work_history = [] # list of tupples (taskid, opid, time of work)\n\t\tself.log = log\n\n\tdef assignTask(self, task, onopdone, ontaskdone):\n\t\tself.log.log_event(self.time, \"MachineEvent\", \"Machine \"+str(self.id)+\": assign task \"+str(task.id)+\" operation \"+str(task.opdone))\n\t\tself.currenttask = task\n\t\tself.assign_operation(self.currenttask.get_next_op())\n\t\tself.start()\n\t\tself.onopdone = onopdone\n\t\tself.ontaskdone = ontaskdone\n\n\t\tself.work_history.append((task.id, task.opdone, self.currentoptime))\n\n\tdef assign_operation(self, optime):\n\t\tself.log.log_event(self.time, \"MachineEvent\", \"Machine \"+str(self.id)+\": assign op time=\"+str(optime))\n\t\tself.currentoptime = optime\n\t\tself.currentopstate = optime\n\t\tself.working = False\n\n\tdef start(self):\n\t\tself.working = True\n\n\tdef on_next_op_done(self):\n\t\tif self.waitingtask:\n\t\t\ttask = self.waitingtask.pop(0)\n\t\t\tself.next.assignTask(task, self.on_next_op_done, self.ontaskdone)\n\n\tdef update(self, time):\n\t\tself.time = time\n\n\t\tif self.working:\n\t\t\tself.total_working_time += 1\n\t\t\t# if machine is working, decrease current op counter\n\t\t\tself.currentopstate -= 1\n\t\t\tif self.currentopstate == 0:\n\t\t\t\t# the machine ended it's work\n\n\t\t\t\t# log that the operation has finished\n\t\t\t\tself.log.log_event_info(time, 'MachineEvent', \"Machine \" + str(self.id) +\n\t\t\t\t\t\" just finished task \" + str(self.currenttask.id) +\n\t\t\t\t\t\" operation \" + str(self.currenttask.opdone) +\n\t\t\t\t\t\" (time: \" + str(self.currentoptime)+')')\n\n\t\t\t\tif self.next:\n\t\t\t\t\t# notify task that it is 
complete\n\t\t\t\t\tself.currenttask.op_done()\n\t\t\t\t\t# add to the fifo of task waiting for the next machine to be free\n\t\t\t\t\tself.waitingtask.append(self.currenttask)\n\t\t\t\telse:\n\t\t\t\t\t# if next is None, this is the last machine of the chain\n\t\t\t\t\t# notify that the current task is done\n\t\t\t\t\tself.ontaskdone(self.currenttask)\n\n\t\t\t\t# note that we are not working anymore\n\t\t\t\tself.working = False\n\t\t\t\t# notify that the operation is done and machine is free\n\t\t\t\tself.onopdone()\n\t\telse:\n\t\t\tself.total_waiting_time += 1\n\t\t# if there is a machine after this\n\t\tif self.next:\n\t\t\t# update the machine\n\t\t\tself.next.update(self.time)\n\t\t\tif self.waitingtask and self.next.working == False:\n\t\t\t\t# assign the first\n\t\t\t\ttask = self.waitingtask.pop(0)\n\t\t\t\tself.next.assignTask(task, self.on_next_op_done, self.ontaskdone)\n\n\tdef getNbMachines(self):\n\t\tif not self.next:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 1 + self.next.getNbMachines()\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":6,"numItemsPerPage":100,"numTotalItems":42509,"offset":600,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjIyMzk1Miwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU2MjI3NTUyLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.HxOEpr_DjWsvE-pzu9MFYr8ZTAceJm25RcMQWYW6g60zhL9GZp4c-TXheHCn1rKE3zvyYQaz9qJA3L67R6v-Cw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
__id__
int64
3.09k
19,722B
blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
256
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
3 values
repo_name
stringlengths
5
109
repo_url
stringlengths
24
128
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
42
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
6.65k
581M
star_events_count
int64
0
1.17k
fork_events_count
int64
0
154
gha_license_id
stringclasses
16 values
gha_fork
bool
2 classes
gha_event_created_at
timestamp[ns]
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_size
int64
0
5.76M
gha_stargazers_count
int32
0
407
gha_forks_count
int32
0
119
gha_open_issues_count
int32
0
640
gha_language
stringlengths
1
16
gha_archived
bool
2 classes
gha_disabled
bool
1 class
content
stringlengths
9
4.53M
src_encoding
stringclasses
18 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
year
int64
1.97k
2.01k
6,081,673,708,889
77318b90eecc34a9f7e2d1e8e926c720c26ebf6e
d89cf9eae7c12fc626818b74a23c94deffcd6483
/FreeOrion/Xcode/dep/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/modulegraph/util.py
1627eb1feb4083536202482dbfd6e4568c8c219a
[]
no_license
zweizeichen/freeorion
https://github.com/zweizeichen/freeorion
136ebe6b35397b291b6fa86cf6593163645c8b32
e6aa5dce2dd4c5752958ed6d096d1923a5bd9d68
refs/heads/master
2017-04-28T19:23:53.101664
2013-02-22T14:05:12
2013-02-22T14:05:12
2,568,469
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import os import imp import sys from compat import B def imp_find_module(name, path=None): """ same as imp.find_module, but handles dotted names """ names = name.split('.') if path is not None: path = [os.path.realpath(path)] for name in names: result = imp.find_module(name, path) path = [result[1]] return result def _check_importer_for_path(name, path_item): try: importer = sys.path_importer_cache[path_item] except KeyError: for path_hook in sys.path_hooks: try: importer = path_hook(path_item) break except ImportError: pass else: importer = None sys.path_importer_cache.setdefault(path_item, importer) if importer is None: try: return imp.find_module(name, [path_item]) except ImportError, e: return None return importer.find_module(name) def imp_walk(name): """ yields namepart, tuple_or_importer for each path item raise ImportError if a name can not be found. """ if name in sys.builtin_module_names: yield name, (None, None, ("", "", imp.C_BUILTIN)) return paths = sys.path res = None for namepart in name.split('.'): for path_item in paths: res = _check_importer_for_path(namepart, path_item) if hasattr(res, 'find_module'): break elif isinstance(res, tuple): break else: break yield namepart, res paths = [os.path.join(path_item, namepart)] else: return raise ImportError('No module named %s' % (name,)) if sys.version_info[0] != 2: import re cookie_re = re.compile(B("coding[:=]\s*([-\w.]+)")) def guess_encoding(fp): for i in range(2): ln = fp.readline() m = cookie_re.search(ln) if m is not None: return m.group(1).decode('ascii') return 'utf-8'
UTF-8
Python
false
false
2,013
10,539,849,794,894
67a7a00e9c09cacbb63625a2210f3d9baeb61d19
41606501b2002d9efac8c4908d61df5a696da361
/pyDetect.py
c1ae83d1c1261f6c8c3cacd2e454abc96262fce2
[]
no_license
ssjoleary/PiRecogniserPython
https://github.com/ssjoleary/PiRecogniserPython
5aee1185e9639ca16e3f07ca8fa5828ad9b0c67c
8ec3b46a7e7c3a096410d6f54b20f1a7b36b74c1
refs/heads/master
2016-09-10T19:27:58.918133
2013-04-03T20:12:25
2013-04-03T20:12:25
9,203,843
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on 2 Apr 2013 @author: samoleary ''' #/usr/bin/env python import cv2 import cv2.cv as cv HAAR_CASCADE_PATH = "/Users/samoleary/Documents/opencv/data/haarcascades/haarcascade_frontalface_alt_tree.xml" SCALE = 2 def detect_faces(image): faces = [] # create temp storage, used during object detection storage = cv.CreateMemStorage() # create a face detector from the cascade file in the resources directory cascade = cv.Load(HAAR_CASCADE_PATH) print '\nDetecting faces' detected = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100)) if detected: for (x,y,w,h),n in detected: faces.append((x,y,w,h)) return faces if __name__ == '__main__': print '\nPython: Running pyDetect' image = cv.LoadImage('/Users/samoleary/Documents/Images/Python/InputImages/wholeGROUP.jpg', cv.CV_LOAD_IMAGE_COLOR) #convert to grayscale grayImage = cv.CreateImage((image.width,image.height), cv.IPL_DEPTH_8U, 1) cv.CvtColor(image, grayImage, cv.CV_BGR2GRAY) # equalize the small grayscale (to speed up face detection) smallImage = cv.CreateImage((cv.Round(grayImage.width / SCALE), cv.Round(image.height / SCALE)), cv.IPL_DEPTH_8U, 1) cv.Resize(grayImage, smallImage, cv.CV_INTER_LINEAR) # equalize the small grayscale equImage = cv.CreateImage((smallImage.width, smallImage.height), cv.IPL_DEPTH_8U, 1) cv.EqualizeHist(smallImage, equImage) faces = [] faceRectCollection = [] faces = detect_faces(equImage) total = (len(faces)) print '\nDetected ' + `total` + ' faces' for (x,y,w,h) in faces: cv.Rectangle(image, (x*SCALE, y*SCALE), ((x+w)*SCALE, (y+h)*SCALE), 255, 6, cv.CV_AA, 0) cv.Rectangle(equImage, (x, y), ((x+w), (y+h)), 255, 6, cv.CV_AA, 0) faceRect = cv.GetSubRect(equImage, (x, y, w, h)) faceRectCollection.append(faceRect) cv.SaveImage('/Users/samoleary/Documents/Images/Python/OutputImages/out.jpg', image) cv.SaveImage('/Users/samoleary/Documents/Images/Python/OutputImages/equOut.jpg', equImage) total = len(faceRectCollection) for total in range(0, 
total): cv.SaveImage('/Users/samoleary/Documents/Images/Python/OutputImages/' + `total` +'.jpg', faceRectCollection[total])
UTF-8
Python
false
false
2,013
3,126,736,201,882
0c487d947508840da87ffe40bfa45b5b8f326fbc
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
/Kepivance_WC500040541.1.py
61230960c466944c0a8fa6b7ff75241aec9e8f20
[]
no_license
urudaro/data-ue
https://github.com/urudaro/data-ue
2d840fdce8ba7e759b5551cb3ee277d046464fe0
176c57533b66754ee05a96a7429c3e610188e4aa
refs/heads/master
2021-01-22T12:02:16.931087
2013-07-16T14:05:41
2013-07-16T14:05:41
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
{'_data': [['Very common', [['Nervous system', u'Dysgeusi'], ['GI', u'Oral slemhinnehypertrofi/hypertrofi i tungans papiller, missf\xe4rgning av orala slemhinnor/tungan'], ['Skin', u'Utslag, kl\xe5da och hudrodnad'], ['Musculoskeletal', u'Artralgi'], ['General', u'\xd6dem, perifert \xf6dem, sm\xe4rta och pyrexi'], ['Investigations', u'F\xf6rh\xf6jda niv\xe5er av amylas, f\xf6rh\xf6jda']]], ['Common', [['Nervous system', u'Oral parestesi'], ['Skin', u'Hyperpigmentering av huden'], ['General', u'Svullna l\xe4ppar, \xf6gonlocks\xf6dem']]], ['Unknown', [['Immune system', u'Anafylaktiska reaktioner/\xf6verk\xe4nslighet'], ['GI', u'Tungf\xf6r\xe4ndringar (t.ex. rodnad, kn\xf6lar), tung\xf6dem'], ['Skin', u'Palmar-plantart erytrodysestesi- syndrom (sm\xe4rta, erytem, \xf6dem i handflator och fotsulor)'], ['Reproductive system', u'Vaginalt \xf6dem och vulvovaginalt erytem'], ['General', u'Ansikts\xf6dem och mun\xf6dem']]]], '_pages': [3, 4], u'_rank': 14, u'_type': u'TSFU'}
UTF-8
Python
false
false
2,013
16,758,962,420,207
f4ade29c79393049d00ef925905d1d8d0fd97564
825fc5f6686e783d221d237ed3a1f23eaf87f559
/applications/TerminalProgs/fit_gauss.py
1c675bbed0fc1c9c416e472ff350591c8c0e88db
[]
no_license
minrk/phoenix
https://github.com/minrk/phoenix
20174ebf5833ad6aa2d5b0dafa83e39ce728cd6d
856a3dc1108b5d9f89610be25a9d3d9e43f095ba
refs/heads/master
2023-06-08T11:52:17.601535
2013-03-16T04:01:49
2013-03-16T04:01:49
8,548,936
2
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import phmath, phm import scipy p=phm.phm() amplitude = 15 mean = 10 sigma = 0.5 NP = 200 par = [amplitude, mean, sigma] deviation = scipy.rand(NP)/1.0 x = scipy.linspace(mean-10*sigma, mean +10*sigma, 200) y = phmath.gauss_eval(x,par) + deviation data = [] for k in range(NP): data.append( [x[k], y[k]]) res = phmath.fit_gauss(data) print par, res[1] p.plot(res[0]) p.set_scale(0, 0, mean*2, amplitude*1.2) raw_input()
UTF-8
Python
false
false
2,013
6,992,206,783,792
3206571055cc902d5e4ef07dd4ab4f4271ce2080
719bf19e4a4af26c8172f41e82d19485f4ed1445
/algorithms/warmup/service_lane.py
68252b7c61be3cfd304530d2d01ac58ab55b706d
[]
no_license
mertkasar/hackerrank-solutions
https://github.com/mertkasar/hackerrank-solutions
db53a8bcda0c42e98d4ba357b02fcccbb3667d3d
dbbe64bb92b86a8e3919fefefcbc8219e2b575e7
refs/heads/master
2021-01-01T17:27:07.205240
2014-06-05T03:53:21
2014-06-05T03:53:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
N, M = map(int, input().split()) lane = list(map(int, input().split())) for i in range(M): ent, out = map(int, input().split()) test = lane[ent:out + 1] print(min(test))
UTF-8
Python
false
false
2,014
18,717,467,477,911
d2964392b13bdec17a682c2a624e042dea34c896
73c3035496a0b3c7d9bbb3e9155a6976cbe29704
/bcast.py
77ae6399596939426d1c699dd3318c940f6d43bc
[]
no_license
steder/pyccsm
https://github.com/steder/pyccsm
da83567f710a6e4f78e156a5f9fc031e8a13ee7d
a71d2dc087b21b1b7ed15b5b70c6978795722c33
refs/heads/master
2021-01-02T09:09:10.197592
2007-01-24T23:12:56
2007-01-24T23:12:56
32,210,873
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys,mpi rank,size = mpi.init(len(sys.argv),sys.argv) sigma = mpi.bcast( 7, 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD ) print "broadcasted result:",sigma mpi.finalize()
UTF-8
Python
false
false
2,007
7,206,955,161,780
079b7aadaf67d3351161b192a1296b7e55f4681e
c8c89e5bd4892f0abf3aa63265e730db115e25fb
/raptus/torii/ipython/__init__.py
5be1b1408f5ef77938fd12ca6d3e248f04d21a77
[ "GPL-1.0-or-later", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
Raptus/raptus.torii.ipython
https://github.com/Raptus/raptus.torii.ipython
e1653c32f57782b78f9fa894ea919ccf840a63e5
cd6611b5d7a93faed738fa62ab96dbba793b6196
refs/heads/master
2016-09-11T06:38:35.606067
2012-11-26T11:29:27
2012-11-26T11:29:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from raptus.torii.ipython.ipython import IPython interpreter = IPython
UTF-8
Python
false
false
2,012
9,835,475,109,036
43188e3f724ef4a22083718303dbea0bd2df57f3
046cafbfb4b2d025b61c30aa5805f134a51c7b7b
/ljn/Repository.py
af2fad61e3bc30db188d45defc1f80bcbce6bfa8
[]
no_license
leojay/ljn
https://github.com/leojay/ljn
d9834b50a567fd7f2e1f72fe64735e73ee092c58
322c220bccb0108410b02601b34a8f7741904823
refs/heads/master
2019-01-01T05:39:06.623174
2012-03-19T09:12:35
2012-03-19T09:12:35
3,626,188
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding:utf8 from ljn.Model import Article, ArticleNewWord, Word session_maker = None def add_test_data(): from ljn.Model import Category s = session_maker() if not len(Category.all(s)): s.add(Category(u'c1')) s.add(Category(u'c2')) if not len(Article.all(s)): s.add(Article('this is content 1', Category.find_by_name(s, u'c1'), u'title of a1')) s.add(Article('this is content 2', Category.find_by_name(s, u'c1'), u'title of a2')) s.add(Article('this is content 3', Category.find_by_name(s, u'c2'), u'title of a3')) article = Category.find_by_name(s, u'c1').articles[0] if not len(article.new_words): w = Word('is') article.new_words.append(ArticleNewWord(article, w, 'is')) s.commit() def init(): global session_maker from ljn.Model import init as model_init, BaseModel model_init() from sqlalchemy.orm import sessionmaker session_maker = sessionmaker(BaseModel.metadata.bind) # add_test_data() def get_session(): """ @rtype: sqlalchemy.orm.session.Session """ return session_maker()
UTF-8
Python
false
false
2,012
3,590,592,697,032
2b116c896253b92de5bc77fcfba034aecc8f64ca
b918f01efe27ca93cfc2a2722090a76c2db96ab5
/gears/compressors/uglifyjs.py
7692d8e1a30345b69b046539b96e72db35d80ab1
[ "ISC" ]
permissive
xobb1t/gears
https://github.com/xobb1t/gears
358d8bed82620dbed57d1eeed815ac8ae4f6d40f
b63908d6efe9277a5c0964bc9cc60fb94c1a69bf
refs/heads/master
2021-01-19T05:21:03.295305
2012-02-27T16:50:12
2012-02-27T16:50:12
3,557,741
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os from .base import ExecCompressor class UglifyJSCompressor(ExecCompressor): executable = 'node' params = [os.path.join(os.path.dirname(__file__), 'uglifyjs.js')]
UTF-8
Python
false
false
2,012
4,123,168,634,965
4eea55d5fa4b4e9b4979ce778292294121bbd489
de7d6d0c97b041d2233e33b6abaf2367b8bc79fb
/pra5.py
7612a6fa22d93c5997d296f329278145f8431853
[]
no_license
eduOSS/osforce_Python_L1
https://github.com/eduOSS/osforce_Python_L1
9abe94c31d48fe89f6663b05066f080609344ccf
e83fb2656125c7673e0afb69fa80943d7e0893e0
refs/heads/master
2020-05-20T11:44:12.852451
2014-07-06T12:43:05
2014-07-06T12:43:05
20,670,943
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
def decomaker(arg): def newDeco(func): print func,arg return func return newDeco @decomaker(deco_args) def foo():pass foo()
UTF-8
Python
false
false
2,014
13,434,657,710,564
2ac9b4017ba7bfff32c4122eb841009262dbd9ea
2e908072b624c46240ee9e7fcb993b2f21aee0da
/plugins/rpc_handler.py
4a4d2adbe5161017701a4970e14a6e5140ac2ce1
[]
no_license
bogobog/steerage
https://github.com/bogobog/steerage
d7e562c5d7cf8a730b0de5e8176c78cb7ecd1147
ce3d0e39efb26615ad114195d9675401e9cac99f
refs/heads/master
2016-09-06T13:50:15.830892
2014-01-08T05:15:56
2014-01-08T05:15:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from twisted.words.xish import domish from twisted.words.protocols.jabber import xmlstream from twisted.python import log from wokkel.subprotocols import XMPPHandler import xmlrpclib, xml.dom.minidom, logging from common import CommonClientManager NS_RPC = 'jabber:iq:rpc' def removeWhitespaceNodes(dom): """ This method is called recursively for each element within the submitted dom and is used to remove empty whitespace elements """ for child in list( dom.childNodes ): if child.nodeType == child.TEXT_NODE and child.data.strip() == '': dom.removeChild(child) else: removeWhitespaceNodes(child) class RPCProtocolHandler( XMPPHandler ): subscribed_methods = {} def __init__( self, client ): super( RPCProtocolHandler, self ).__init__() self.my_client = client def connectionInitialized(self): RPC_SET = "/iq[@type='set']/query[@xmlns='%s']" % NS_RPC self.xmlstream.addObserver(RPC_SET, self.onMethodCall) def onMethodCall(self, iq): log.msg( 'onMethodCall', level = logging.DEBUG ) method_name = str( iq.query.methodName ) if not method_name in self.subscribed_methods: # send error response err_iq = FaultResponse( self.xmlstream, 1, 'method not implemented' ) err_iq['id'] = iq.getAttribute('id') err_iq['to'] = iq.getAttribute('from') err_iq.send() return log.msg( 'method found', level = logging.DEBUG ) target_method = self.subscribed_methods[ method_name ] converted_data = xmlrpclib.loads( iq.query.params.toXml() )[0] log.msg( 'converted_data', level = logging.DEBUG ) log.msg( iq.query.params.toXml(), level = logging.DEBUG ) log.msg( converted_data, level = logging.DEBUG ) method_result = target_method( iq, *converted_data ) if isinstance( method_result, RPCFault ): response_iq = FaultResponse( self.xmlstream, method_result.error_code, method_result.error_string ) else: response_iq = MethodResponse( self.xmlstream, method_result ) response_iq['id'] = iq.getAttribute('id') response_iq['to'] = iq.getAttribute('from') response_iq.send() def callMethod(self, recipient, method_name, 
params ): new_iq = xmlstream.IQ( self.xmlstream, 'set' ) new_iq['to'] = recipient.full() q = new_iq.addElement( ( NS_RPC, 'query') ) q.addElement( (None, 'methodName' ), content = str( method_name ) ) params_dom = xml.dom.minidom.parseString( xmlrpclib.dumps( tuple( params ), allow_none = True ) ) removeWhitespaceNodes( params_dom ) q.addRawXml( params_dom.documentElement.toxml( 'utf-8' ) ) log.msg( 'callMethod', level = logging.DEBUG ) log.msg( tuple( params ), level = logging.DEBUG ) log.msg( xmlrpclib.dumps( tuple( params ), allow_none = True ), level = logging.DEBUG ) return new_iq.send() def subscribeMethod(self, method_name, method ): self.subscribed_methods[ method_name ] = method def unsubscribeMethod(self, method_name): if method_name in self.subscribed_methods: del self.subscribed_methods[ method_name ] class RPCFault( object ): def __init__(self, error_code, error_string): self.error_code = error_code self.error_string = error_string class FaultResponse( xmlstream.IQ ): def __init__(self, stream, error_code, error_string ): xmlstream.IQ.__init__(self, stream, 'result') q = self.addElement( ( NS_RPC, 'query') ) st = q.addElement( ( None, 'methodResponse') ).addElement( ( None, 'fault' ) ).addElement( ( None, 'value' ) ).addElement( ( None, 'struct' ) ) cm = st.addElement( (None, 'member' ) ) cm.addElement( (None, 'name'), content = 'faultCode' ) cm.addElement( (None, 'value') ).addElement( (None, 'int'), content = str( error_code ) ) sm = st.addElement( (None, 'member' ) ) sm.addElement( (None, 'name'), content = 'faultString' ) sm.addElement( (None, 'value') ).addElement( (None, 'string'), content = str( error_string ) ) class MethodResponse( xmlstream.IQ ): def __init__(self, stream, params ): xmlstream.IQ.__init__(self, stream, 'result') q = self.addElement( ( NS_RPC, 'query') ) q.addElement( ( None, 'methodResponse') ) params_dom = xml.dom.minidom.parseString( xmlrpclib.dumps( ( params, ), allow_none = True ) ) removeWhitespaceNodes( params_dom ) 
q.addRawXml( params_dom.documentElement.toxml( 'utf-8' ) ) def objectToElement( item ): if isinstance( item, type( () ) ) or isinstance( item, type( [] ) ) and len( item ): new_ele = domish.Element( (None, 'array') ) d = new_ele.addElement( (None, 'data') ) for x in item: d.addElement( (None, 'value') ).addChild( objectToElement( x ) ) elif isinstance( item, type( {} ) ): new_ele = domish.Element( (None, 'struct') ) for name, value in item.items(): if not isinstance( name, type( str() ) ): continue m = new_ele.addElement( (None, 'member') ) m.addElement( (None, 'name'), content = str( name ) ) m.addElement( (None, 'value') ).addChild( objectToElement( value ) ) elif item is True or item is False: new_ele = domish.Element( (None, 'boolean') ) new_ele.addContent( ( item and '1' ) or '0' ) elif isinstance( item, type( int() ) ): new_ele = domish.Element( (None, 'int') ) new_ele.addContent( str( item ) ) elif isinstance( item, type( float() ) ): new_ele = domish.Element( (None, 'double') ) new_ele.addContent( str( item ) ) else: new_ele = domish.Element( (None, 'string') ) new_ele.addContent( str( item ) ) return new_ele CommonClientManager.addHandler( 'rpc', RPCProtocolHandler )
UTF-8
Python
false
false
2,014
15,599,321,262,858
1276f16028fab302a4c60e68875116e87beb02bd
3628824faecbb9831c8ba43a45ceb2cc47de065d
/src/mymod/cgifunc.py
046187dd57f191832d7491c0b2e6fe39830cb91e
[]
no_license
TRYang/Yagra
https://github.com/TRYang/Yagra
8139875f179ea03c5099a0872160dd15dd7fd0a7
d8b2142f7732e67a8ad90bc468ee41adb8b273ab
refs/heads/master
2020-05-31T00:43:33.834569
2014-03-03T08:48:51
2014-03-03T08:48:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python def content_type(ctype = 'text/html'): return 'Content-Type: %s\n\n' % ctype def html_header(title = "cgi-response"): return """<html> <head> <title>%s</title> </head> <body>""" % title def html_tail(): return '</body></html>' def back_button(back_page = 'back_to_login.py'): return """<FORM METHOD=GET ACTION=%s> <INPUT TYPE="SUBMIT" VALUE="BACK" NAME="Back"> </FORM>""" % back_page def link(url): print content_type() print """ <HTML> <head> <meta http-equiv="Refresh" content="0;URL=%s"> </head> <body> </body> </HTML> """ % url def output_error(message, back_page): print content_type() print html_header('Error') print '<H1>An error occured</H1><br/>' print '<h3> the error message is : %s</h3><br/>' % message print back_button(back_page) print html_tail()
UTF-8
Python
false
false
2,014
1,958,505,089,897
e24f90ad40ee46ae80b0a36c5bebb049d5db53c3
25f36d92ed3bfb3b341848360f13caff8661ea3c
/traitlets/test/test_text.py
c0d758c65f2b09901b2e213b6be2ea1a9933d6db
[ "BSD-3-Clause" ]
permissive
uservidya/traitlets
https://github.com/uservidya/traitlets
aff1c3550e0cd9aed6aafde64fbc6a414d637335
171e400914f8d3ca5584ca27fafafda1c4c1fe3a
refs/heads/master
2021-01-22T17:03:21.123486
2013-12-11T07:49:37
2013-12-11T07:49:37
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# encoding: utf-8 """Tests for traitlets.text""" from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import os import math import random import nose.tools as nt from traitlets import text #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- def eval_formatter_check(f): ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u"café", b="café") s = f.format("{n} {n//4} {stuff.split()[0]}", **ns) nt.assert_equal(s, "12 3 hello") s = f.format(' '.join(['{n//%i}'%i for i in range(1,8)]), **ns) nt.assert_equal(s, "12 6 4 3 2 2 1") s = f.format('{[n//i for i in range(1,8)]}', **ns) nt.assert_equal(s, "[12, 6, 4, 3, 2, 2, 1]") s = f.format("{stuff!s}", **ns) nt.assert_equal(s, ns['stuff']) s = f.format("{stuff!r}", **ns) nt.assert_equal(s, repr(ns['stuff'])) # Check with unicode: s = f.format("{u}", **ns) nt.assert_equal(s, ns['u']) # This decodes in a platform dependent manner, but it shouldn't error out s = f.format("{b}", **ns) nt.assert_raises(NameError, f.format, '{dne}', **ns) def eval_formatter_slicing_check(f): ns = dict(n=12, pi=math.pi, stuff='hello there', os=os) s = f.format(" {stuff.split()[:]} ", **ns) nt.assert_equal(s, " ['hello', 'there'] ") s = f.format(" {stuff.split()[::-1]} ", **ns) nt.assert_equal(s, " ['there', 'hello'] ") s = f.format("{stuff[::2]}", **ns) nt.assert_equal(s, ns['stuff'][::2]) nt.assert_raises(SyntaxError, f.format, "{n:x}", **ns) 
def eval_formatter_no_slicing_check(f): ns = dict(n=12, pi=math.pi, stuff='hello there', os=os) s = f.format('{n:x} {pi**2:+f}', **ns) nt.assert_equal(s, "c +9.869604") s = f.format('{stuff[slice(1,4)]}', **ns) nt.assert_equal(s, 'ell') nt.assert_raises(SyntaxError, f.format, "{a[:]}") def test_long_substr(): data = ['hi'] nt.assert_equal(text.long_substr(data), 'hi') def test_long_substr2(): data = ['abc', 'abd', 'abf', 'ab'] nt.assert_equal(text.long_substr(data), 'ab') def test_long_substr_empty(): data = [] nt.assert_equal(text.long_substr(data), '') def test_strip_email(): src = """\ >> >>> def f(x): >> ... return x+1 >> ... >> >>> zz = f(2.5)""" cln = """\ >>> def f(x): ... return x+1 ... >>> zz = f(2.5)""" nt.assert_equal(text.strip_email_quotes(src), cln) def test_strip_email2(): src = '> > > list()' cln = 'list()' nt.assert_equal(text.strip_email_quotes(src), cln)
UTF-8
Python
false
false
2,013
17,927,193,525,913
d3a72d0161d2b8ca9212da7526d14eb8311bec8c
a117d85fbc7de4d1416b6c5d903c1ca7d1850f07
/python/generator/generator.py
76ca62b722b4f99fc4b78af819f660b8c0a05bef
[]
no_license
moutard/learn
https://github.com/moutard/learn
2a10d88a4d7a61af96915d75d4790858eb405c6c
5ee94b09a2ca2b30eacedd51a2885c29b454668c
refs/heads/master
2021-05-01T00:20:03.925763
2014-06-01T15:25:56
2014-06-01T15:25:56
9,283,361
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def g(): """This is a generator, it has 3 methods : - next - send - throw """ yield 1 yield 2 yield 3 for x in g(): print x gi = g() print next(gi) for x in g(): print next(gi)
UTF-8
Python
false
false
2,014
19,146,964,210,208
b39b8513f71ac6a46716e401faeb69495d25761a
6973bab2b792498cfff04b313144c3a2738af270
/pavement.py
be043a038c77804edb5c6600a623d472d12a97e0
[]
no_license
xdissent/skel
https://github.com/xdissent/skel
6db2e21c616efa2fb5c7c048574483e23e45de4f
c5501946ab5c542cabf054cf69492e5d5c0e828c
refs/heads/master
2020-05-18T12:14:12.098539
2009-07-19T23:04:51
2009-07-19T23:04:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from paver.easy import * from paver.tasks import consume_args import paver.doctools import paver.setuputils import paver.misctasks paver.setuputils.install_distutils_tasks() import os from setuptools import find_packages PROJECT_FILES = [ '__init__.py', 'local_settings.py', 'manage.py', 'settings_dev.py', 'settings.py', 'static', 'templates', # 'initial_data.json', 'urls.py', ] SKEL_SVN_URL = 'https://code.hartzogcreative.com/svn/hartzog_skel/trunk' # Local Settings PROJECTS_DIR = os.environ.get('SKEL_PROJECTS_DIR', '~/Sites/') SVN_URL_ROOT = os.environ.get('SKEL_SVN_URL_ROOT', 'https://code.hartzogcreative.com/svn') SVN_SSH_HOST = os.environ.get('SKEL_SVN_SSH_HOST', 'code.hartzogcreative.com') SVN_SSH_ROOT = os.environ.get('SKEL_SVN_SSH_ROOT', '/var/svn/code.hartzogcreative.com') ENVIRONMENTS_DIR = os.environ.get('SKEL_ENVIRONMENTS_DIR', False) if not ENVIRONMENTS_DIR: ENVIRONMENTS_DIR = os.environ.get('WORKON_HOME', False) PACKAGE_DATA = paver.setuputils.find_package_data() # print PACKAGE_DATA PACKAGES = sorted(PACKAGE_DATA.keys()) PACKAGES = find_packages() # print PACKAGES DATA_FILES = [ ('skel/core/management', ['pavement.py']), ] options( setup=Bunch( name='Skel', version='0.1', description='Hartzog Creative Skel Framework for Django', author='Greg Thornton', author_email='[email protected]', packages=PACKAGES, package_data={ 'skel.core.management': ['pavement.py'], }, zip_safe=False, entry_points = { 'console_scripts': [ 'skel-admin.py = skel.core.management:launch_paver', ], }, include_package_data=True, data_files=DATA_FILES, ), minilib=Bunch( extra_files=['doctools', 'setuputils'] ), startproject=Bunch( projects_dir=PROJECTS_DIR, svn_url_root=SVN_URL_ROOT, environments_dir=ENVIRONMENTS_DIR, environment=False, svn_ssh_host=SVN_SSH_HOST, svn_ssh_root=SVN_SSH_ROOT, svn_dev_branch='xdissent', no_coda=False, no_svn=False, no_requirements=False, ), deploy=Bunch( targets=Bunch( dev=Bunch( ), ), ), ) @task @cmdopts([ ('no-coda', None, 'Disable Site creation in 
Coda'), ('no-svn', None, 'Disable Subversion repository creation'), ('environment=', 'E', 'Use a virtualenv for the environment'), ('no-upgrade', None, 'Prevent updating Skel in the virtualenv'), ('no-requirements', None, 'Prevent Virtualenv from being populated with requirements') ]) @consume_args def startproject(options): """Starts a new Skel project""" info('pavement %s' % environment.pavement_file) project_path = path(options.args[0]) project_name = project_path.name # Ensure either absolute path or use project_dir if project_name == project_path: projects_dir = path(options.projects_dir).expand() project_path = path.joinpath(projects_dir, project_name) if project_path.exists(): raise BuildFailure('Project already exists at %s' % project_path) try: __import__(project_name) except ImportError: pass else: raise BuildFailure('Python module named %s already exists on your path' % project_path) svn_url_root = path(options.svn_url_root) svn_url = path.joinpath(svn_url_root, project_name) if not options.no_svn: try: info('Checking for project name collision at %s' % svn_url) sh('svn ls %s' % svn_url) except BuildFailure: pass else: raise BuildFailure('Project already in subversion at %s' % svn_url) dry('Creating project directory at %s' % project_path, project_path.mkdir) environments_dir = path(options.environments_dir) venv_path = environments_dir.joinpath(project_name) if options.environment: venv_path = path(options.environment) venv_path = venv_path.expand() if venv_path.exists(): if not venv_path.joinpath('bin/activate').exists(): raise BuildFailure('Folder at %s does not contain a Virtualenv' % venv_path) info('Using existing virtualenv at %s' % venv_path) else: info('Creating virtualenv for %s at %s' % (venv_path, project_name)) sh('virtualenv %s' % venv_path) easy_install_path = venv_path.joinpath('bin/easy_install') if not easy_install_path.exists(): raise BuildFailure('Virtualenv at %s does not contain easy_install' % venv_path) skel_path = 
venv_path.joinpath('src/skel/skel') if not options.no_requirements: info('Easy installing PIP to process requirements.txt') sh('%s pip' % easy_install_path) pip_path = venv_path.joinpath('bin/pip') info('Installing latest copy of Skel') sh('%s install -e svn+%s#egg=Skel' % (pip_path, SKEL_SVN_URL)) # TODO: handle skel upgrade option and set skel_path appropriately # if no upgrade, run paver install task into virtualenv # if upgrade run pip install skel # import skel # TODO: get skel pkg_resources path as skel_path # TODO: get requirements_path from skel pkg_resources: # requirements_path = skel_path.joinpath('requirements.txt') requirements_path = venv_path.joinpath('src/skel/requirements.txt') info('Installing requirements with PIP') sh('%s install -r %s' % (pip_path, requirements_path)) if not options.no_svn: svn_ssh_root = path(options.svn_ssh_root) svn_ssh_path = svn_ssh_root.joinpath(project_name) svnadmin_command = 'svnadmin create %s' % svn_ssh_path ssh_command = "ssh %s '%s'" % (options.svn_ssh_host, svnadmin_command) info('Creating repository using "%s"' % ssh_command) sh(ssh_command) sh('svn mkdir %s -m "creating %s"' % (svn_url.joinpath('trunk'), 'trunk')) sh('svn mkdir %s -m "creating %s"' % (svn_url.joinpath('tags'), 'tags')) sh('svn mkdir %s -m "creating %s"' % (svn_url.joinpath('branches'), 'branches')) svn_dev_branch_url = svn_url.joinpath('branches', options.svn_dev_branch) sh('svn copy %s %s -m "creating development branch (%s)"' % (svn_url.joinpath('trunk'), svn_dev_branch_url, svn_dev_branch_url)) sh('svn co %s %s' % (svn_dev_branch_url, project_path)) # TODO: set svnignores # TODO: fix into pkg_resources instead of skel_path info('Copying default files from %s to %s' % (skel_path, project_path)) for file_path in PROJECT_FILES: src_path = skel_path.joinpath(file_path) dest_path = project_path.joinpath(file_path) if src_path.isdir(): src_path.copytree(dest_path) else: src_path.copy(dest_path) # from pprint import pprint # 
pprint(options.startproject)
UTF-8
Python
false
false
2,009
9,861,244,938,768
2bf0716b58f1946ceffd362ba53015e10e7d9cf5
b9f13232d92c5ee36fcca10ae74b2591f38a2674
/tests/test_pmd.py
714b51ee9d7780a19a9271b54f52c7ea80a1d6c6
[]
no_license
jjst/reviewbot-pmd
https://github.com/jjst/reviewbot-pmd
ad2562b12d06f1126e1b86db856185e285b721e5
477ea74a9556d069dd8bdaf115fd8c8c651207d1
refs/heads/master
2021-01-23T18:12:05.526662
2014-08-13T18:25:31
2014-08-13T18:25:31
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os import subprocess import shutil import tempfile from collections import namedtuple from nose import SkipTest from nose.tools import * from nose.plugins.attrib import attr from reviewbotpmd.pmd import * import xml.etree.ElementTree as ElementTree def setup_module(): global pmd_install_path, pmd_script_path pmd_install_path = os.environ.get('PMD_INSTALL_PATH', '/opt/pmd/') pmd_script_path = os.path.join(pmd_install_path, 'bin/run.sh') if not os.path.exists(pmd_install_path): raise SkipTest("Cannot run run tests as no valid " "$PMD_INSTALL_PATH was provided") java_source_path = os.path.join(os.path.dirname(__file__), 'testdata/HelloWorld.java') js_source_path = os.path.join(os.path.dirname(__file__), 'testdata/hello-http.js') invalid_source_path = os.path.join(os.path.dirname(__file__), 'testdata/IDontExist.java') def test_violation_num_lines(): one_line_violation = Violation(rule='', priority=1, text='', url='', first_line=1, last_line=1) assert one_line_violation.num_lines == 1 def test_violation_is_consecutive(): violation_text = "Text" v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1) v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2) assert v1.is_consecutive(v2) assert v2.is_consecutive(v1) def test_violation_is_consecutive_text_different(): v1 = Violation('', 1, "Text", '', first_line=1, last_line=1) v2 = Violation('', 1, "Different text", '', first_line=2, last_line=2) assert not v1.is_consecutive(v2) assert not v2.is_consecutive(v1) def test_violation_combine(): violation_text = "Text" v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1) v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2) combined = v1.combine(v2) assert_equals(combined.first_line, 1) assert_equals(combined.last_line, 2) assert_equals(combined.text, violation_text) def test_violation_combine_not_consecutive(): v1 = Violation('', 1, "Banana", '', first_line=1, last_line=1) v2 = Violation('', 1, "Strawberry", '', 
first_line=2, last_line=2) assert_raises(ValueError, v1.combine, v2) def test_violation_group_consecutive(): violation_text = "Text" v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1) v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2) v1_v2_combined = v1.combine(v2) assert_equals(Violation.group_consecutive([v1, v2]), [v1_v2_combined]) def test_violation_group_consecutive_empty(): assert_equals(Violation.group_consecutive([]), []) def test_violation_group_consecutive_nothing_consecutive(): violation_text = "Text" v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1) v2 = Violation('', 1, violation_text, '', first_line=3, last_line=3) v3 = Violation('', 1, violation_text, '', first_line=5, last_line=10) assert_equals(Violation.group_consecutive([v1, v2, v3]), [v1, v2, v3]) def test_violation_group_consecutive_2(): violation_text = "Text" v1 = Violation('', 1, violation_text, '', first_line=1, last_line=1) v2 = Violation('', 1, violation_text, '', first_line=2, last_line=2) v3 = Violation('', 1, violation_text, '', first_line=5, last_line=10) v1_v2_combined = v1.combine(v2) assert_equals( Violation.group_consecutive([v1, v2, v3]), [v1_v2_combined, v3]) class TestResult(object): @classmethod def setup_class(cls): cls.testdir = tempfile.mkdtemp() cls.pmd_result_path = os.path.join( cls.testdir, 'HelloWorld_result.xml') with open(os.devnull, 'w') as devnull: subprocess.check_call( [pmd_script_path, 'pmd', '-d', java_source_path, '-R', 'rulesets/internal/all-java.xml', '-f', 'xml', '-r', cls.pmd_result_path], stdout=devnull, stderr=devnull) assert os.path.exists(cls.pmd_result_path) @classmethod def teardown_class(cls): shutil.rmtree(cls.testdir) def test_result_from_xml(self): result = Result.from_xml(self.pmd_result_path, java_source_path) assert len(result.violations) == 6 class TestPMDTool(object): def setup(self): self.pmd = PMDTool() default_settings = { 'markdown': False, 'pmd_install_path': pmd_install_path, 
'rulesets': 'java-comments', 'max_priority_for_issue': 5, } self.num_violations = 2 self.pmd.settings = default_settings self.pmd._setup(default_settings) self.pmd.processed_files = set() self.pmd.ignored_files = set() def is_valid_ruleset_file(self, filepath): if not os.path.exists(filepath): return False try: tree = ElementTree.parse(filepath) except ElementTree.ParseError: return False root = tree.getroot() if root.tag != 'pmd': return False file_elems = root.findall('file') return len(file_elems) == 1 @attr('slow') def test_run_pmd_creates_file(self): results_file_path = self.pmd.run_pmd(java_source_path, rulesets=['java-basic']) assert os.path.exists(results_file_path) @attr('slow') def test_run_pmd_invalid_ruleset(self): assert_raises(PMDError, self.pmd.run_pmd, java_source_path, ['invalid-ruleset-path']) @attr('slow') def test_run_pmd_absolute_path_to_ruleset(self): ruleset_full_path = os.path.join( os.path.dirname(__file__), 'testdata/test_ruleset.xml') results_file_path = self.pmd.run_pmd( java_source_path, rulesets=[ruleset_full_path]) assert self.is_valid_ruleset_file(results_file_path) @attr('slow') def test_run_pmd_relative_path_to_ruleset_in_classpath(self): ruleset_path = 'rulesets/java/comments.xml' results_file_path = self.pmd.run_pmd( java_source_path, rulesets=[ruleset_path]) assert self.is_valid_ruleset_file(results_file_path) @attr('slow') def test_run_pmd_creates_valid_pmd_result(self): results_file_path = self.pmd.run_pmd( java_source_path, rulesets=self.pmd.rulesets) assert self.is_valid_ruleset_file(results_file_path) def test_run_pmd_with_invalid_source_file(self): assert not os.path.exists(invalid_source_path) results_file_path = self.pmd.run_pmd( invalid_source_path, rulesets=['java-basic']) assert_raises( ElementTree.ParseError, ElementTree.parse, results_file_path) @attr('slow') def test_handle_file(self): reviewed_file = FileMock(java_source_path, java_source_path) assert_true(self.pmd.handle_file(reviewed_file)) 
assert_equal(len(reviewed_file.comments), self.num_violations) def test_handle_file_unsupported_file_type(self): reviewed_file = FileMock(dest_file='test.php') assert_false(self.pmd.handle_file(reviewed_file)) def test_handle_file_invalid_file(self): reviewed_file = FileMock(dest_file=invalid_source_path) assert_false(self.pmd.handle_file(reviewed_file)) def test_handle_files(self): reviewed_file = FileMock(java_source_path, java_source_path) self.pmd.handle_files([reviewed_file]) assert self.pmd.processed_files == set([reviewed_file.dest_file]) assert self.pmd.ignored_files == set() assert len(reviewed_file.comments) == self.num_violations def test_handle_files_opens_issues(self): reviewed_file = FileMock( java_source_path, java_source_path, open_issues=True) self.pmd.settings['max_priority_for_issue'] = Priority.MAX self.pmd.handle_files([reviewed_file]) assert self.pmd.processed_files == set([reviewed_file.dest_file]) assert self.pmd.ignored_files == set() assert all(c.issue == True for c in reviewed_file.comments) def test_handle_files_invalid_pmd_install(self): self.pmd.settings['pmd_install_path'] = 'invalid_path' reviewed_file = FileMock(java_source_path, java_source_path) self.pmd.handle_files([reviewed_file]) assert self.pmd.processed_files == set() assert self.pmd.ignored_files == set([reviewed_file.dest_file]) def test_handle_files_invalid_ruleset(self): self.pmd.settings['rulesets'] = 'invalid-ruleset-path' reviewed_file = FileMock(java_source_path, java_source_path) self.pmd.handle_files([reviewed_file]) assert self.pmd.processed_files == set() assert self.pmd.ignored_files == set([reviewed_file.dest_file]) def test_post_comments(self): result = mock_result() reviewed_file = FileMock(java_source_path) self.pmd.post_comments(result, reviewed_file) assert len(reviewed_file.comments) == 2 def test_post_comments_opens_issues(self): self.pmd.max_priority_for_issue = Priority.MAX result = mock_result() reviewed_file = FileMock(java_source_path, 
open_issues=True) self.pmd.post_comments(result, reviewed_file) assert len(reviewed_file.comments) == 2 assert all(c.issue == True for c in reviewed_file.comments) def test_post_comments_custom_priority(self): self.pmd.max_priority_for_issue = 3 result = mock_result() result.violations = [ mock_violation(rule=str(i), priority=i) for i in Priority.values] reviewed_file = FileMock(java_source_path, open_issues=True) self.pmd.post_comments(result, reviewed_file) assert len(reviewed_file.comments) == len(Priority.values) comments_with_issues = [c for c in reviewed_file.comments if c.issue] assert len(comments_with_issues) == 3 def test_post_comments_open_issues_disabled(self): self.pmd.max_priority_for_issue = Priority.MAX result = mock_result() reviewed_file = FileMock(java_source_path, open_issues=False) self.pmd.post_comments(result, reviewed_file) assert len(reviewed_file.comments) == 2 assert all(c.issue == False for c in reviewed_file.comments) def test_post_comments_open_issues_consecutive_violation(self): self.pmd.max_priority_for_issue = Priority.MAX result = mock_result() v = result.violations[-1] consecutive_violation = Violation( v.rule, v.priority, v.text, v.url, v.last_line + 1, v.last_line + 5) result.violations.append(consecutive_violation) reviewed_file = FileMock(java_source_path, open_issues=True) self.pmd.post_comments(result, reviewed_file) assert len(reviewed_file.comments) == 2 combined_violation_comment = next(c for c in reviewed_file.comments if v.text in c.text) assert_equals(combined_violation_comment.first_line, v.first_line) expected_num_lines = consecutive_violation.last_line - v.first_line + 1 assert_equals(combined_violation_comment.num_lines, expected_num_lines) def test_post_comments_consecutive_violations(self): result = mock_result() result.violations = [Violation('', Priority.MAX, '', '', 1, 1,)] reviewed_file = FileMock(java_source_path, open_issues=True) self.pmd.post_comments(result, reviewed_file) assert 
len(reviewed_file.comments) == 1 assert all(c.issue == True for c in reviewed_file.comments) def test_post_comments_comment_plain_text(self): result = mock_result() reviewed_file = FileMock(java_source_path) self.pmd.post_comments(result, reviewed_file, use_markdown=False) violation = result.violations[0] assert_equals( reviewed_file.comments[0].text, "%s: %s\n\nMore info: %s" % (violation.rule, violation.text, violation.url)) def test_post_comments_comment_markdown(self): result = mock_result() reviewed_file = FileMock(java_source_path) self.pmd.post_comments(result, reviewed_file, use_markdown=True) violation = result.violations[0] assert_equals( reviewed_file.comments[0].text, "[%s](%s): %s" % (violation.rule, violation.url, violation.text)) def mock_result(): v1 = Violation('TestRule1', 1, 'A test rule', 'dummy_url', 1, 10) v2 = Violation('TestRule2', 4, 'Another test rule', 'dummy_url', 14, 14) return Result('', [v1, v2]) def mock_violation(**kwargs): return Violation( kwargs.get('rule', 'RuleMock'), kwargs.get('priority', 1), kwargs.get('text', 'A test rule'), kwargs.get('url', 'http://dummy.url/'), kwargs.get('first_line', 1), kwargs.get('last_line', 1)) Comment = namedtuple('Comment', ['text', 'first_line', 'num_lines', 'issue']) class FileMock(object): class Object: pass def __init__(self, patched_file_path=None, dest_file=None, open_issues=False): self.comments = [] self.patched_file_path = patched_file_path self.dest_file = dest_file self.review = FileMock.Object() self.review.settings = {'open_issues': open_issues} def get_patched_file_path(self): return self.patched_file_path def comment(self, text, first_line, num_lines=1, issue=None, original=False): self.comments.append(Comment(text, first_line, num_lines, issue))
UTF-8
Python
false
false
2,014
8,486,855,414,411
0d3cc3aa8f99782a77258a690d8355e3df0ddf22
4fc8b8769b3dbca488f0211a1f249e9b5258530c
/examples/advanalysis.py
8e76a307db892b46f57de1d80cd18e4f2eaec0aa
[]
no_license
majidaldo/psmt
https://github.com/majidaldo/psmt
b977cffe0215d9174cf072b9e1290051698d78f6
aeeab95f40f8b25620fc9e501054d384a4728921
refs/heads/master
2021-01-10T07:51:20.267942
2014-01-31T20:28:18
2014-01-31T20:28:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from analysisbase import analysis from scriptmgt import batchfoldermgt import scipy as sp import numpy as np from scipy import * from scipy import optimize from scipy import stats from matplotlib import mlab import itertools class gkanalysis(analysis): def gethc2(self,params,resetts=True,takeoutfirstone=True ,sumup='all'): fns=self.batchobj.user_getoutputvsinput(params,['*cumdE.avetime']) d={} for afp in fns['*cumdE.avetime']: try: #b/c it maybe blank ld=sp.loadtxt(afp ,unpack=True ,usecols=(0,1,2,3)) for ats,ajal,ajo, ajy in zip(*ld): d.update({int(ats):(ajal,ajo, ajy)}) except: continue tss=sp.reshape(d.keys(),(len(d.keys()),1)) sa=sp.hstack([tss,d.values()]) detype=[('ts',int),('jal',float),('jo',float),('jy',float)] sa=sp.array(map(tuple,sa.tolist()),dtype=detype) #i just had to make it tuples?! #..instead of nested brackets. idk why sa.sort(order='ts') #probably unnecessary here #..so i don't need ts if i'm taking data every ts if takeoutfirstone==True: sa=sa[1:] if resetts==True: sa['ts']=sa['ts']-sa['ts'][0] #if sumup==True: return self.sumhc(sa,sumup=sumup) return self.sumhc(sa,sumup=sumup)[:1001000] #else: return sa def sumhc(self,hc,sumup='all'): if sumup=='all': return sp.sum([hc['jal'],hc['jo'],hc['jy']],axis=0) elif sumup==False: return hc #w/ components intact summed=sp.zeros(len(hc)) for ahc in sumup: summed+=hc[ahc] return summed def gethc(self,*args,**kwargs): try: return self.gethc2(*args,**kwargs) except :multipleparamsets=args[0] def returnhcs(multipleparamsets,**kwargs): for aparamset in multipleparamsets: yield self.gethc2(aparamset,**kwargs) return returnhcs(multipleparamsets,**kwargs) def shortcutac(self,hc,trimend=True): #ac= real(ifft(real(fft(hc))**2)) ffthc=(sp.fft(hc)) ac= sp.real(sp.ifft((ffthc)*sp.conjugate(ffthc)))# i believe this if trimend==True: first90pc=int(len(hc)*.9) return self.divac(ac[:first90pc]) else: return self.divac(ac) #last 10% of the series is ALWAYS meaningless #..isn't the normalized def. 
#supposely im part should be small #avgac=sp.average(ac) #avgac=sp.average(ac) #return ac def divac(self,ac): n=len(ac) #or hc ##div=[((n-k)*avgac**2) for k in xrange(n)] #divby=[(n-k) for k in xrange(n)] ##div=n #return ac/divby #ac[:-100000]/divby[:-100000]#(ac[0])#divby #chk end of corr for extremes return array(ac)/n def hcacintseries(self,hcac): #todo cache try: return self.cache['hcint'][hcac[-10:]] except: self.cache.update({'hcint':{}}) hcint=sp.cumsum( [sp.trapz(hcac[i:i+2]) \ for i in xrange(len(hcac-1))] ) self.cache['hcint'].update({ tuple(hcac[-10:]) : hcint }) return hcint def convertconductivity(self,params,dt=.001e-12): """multiply the integral by this""" #dt in ps; xyz in Ao; T in K #"18592486.74*integral*dt/x/y/z/T^2" params=dict(params) a=(params['Tk']*9.67114349e-05)+12.3918 #lattice const convert=(1/1e-12)*18592486.74*\ dt/params['dx']/params['ly']/params['lz']/params['Tk']**2/a**3 return convert def getkseries(self,params,ac,dt=.001e-12): """hcac integral in W/mK""" #dt in ps; xyz in Ao; T in K #"18592486.74*integral*dt/x/y/z/T^2" params=dict(params) #hc=(self.sumhc(self.gethc(params))) #ac=self.shortcutac(hc) #schcac=self.divac(schcac) #just a div by N for "stochastic" process hcintegral=self.hcacintseries(ac) convert=self.convertconductivity(params) return convert*hcintegral # def findpeaks(self, spectra,win,peaktol=.025): #401 in my case # #need to input below nyquist # """input noisy spectra # 1st filters it b/c too many "needles" on a spike # """ # fs=(savitzky_golay(spectra,win,5)) #play w/ these params # pt=peaktol*max(fs) # return dict(peakdet(fs,pt)[0]) #dict it? #max freq N/(2T) T is sampling time #T=N*dt #=>maxfreq= 1/(2*dt) = 1/(2*.001*1e-12) . /1e12 for THz todo: is there a 2pi factor here? #around 500THz. vibes are around 4thz factor of 2 #note psd chokes at nfft lower than n/2 #works faster for even but few times the even no. takes a while. idkw #eg. 
18000 vs 18002 for a len(tsd)=1e6 def psd(self,tsd,seg=1024*16,dt=.001e-12,sides='onesided',scalebyfreq=True ,divby2=True ,takeeven=True,nsigfigs=3 #performance params ,chopoff=True): if takeeven==True: seg=seg- ( seg % 2) #make even if nsigfigs!=None: bigno=seg numofdigits=int(math.log(bigno,10))+1 divby=10**(numofdigits-nsigfigs) sigfigs=bigno/divby seg=int(sigfigs*divby) #return seg #when nfft is big, the psd calc becomes slow so it's good enough just to #take the nearest (lower?) even #if seg is odd, it will go down by 1 to make it even """a shortcut to get psd at expense of freq fidelity""" #seg default good for length ts ~1e6 ps= mlab.psd(tsd,NFFT=seg ,Fs=1.0/(dt)#/2 #i guess this Fs is what i want ,scale_by_freq=scalebyfreq ,sides=sides#'twosided' #,detrend=mlab.detrend_mean #,noverlap=0 #,pad_to=None ) #return ps[1]*((2*ts)**-1) \ #, ps[0] #freq vs power #return ps[1], ps[0]#/(pi)**.5 #freq in hz vs power ps=list(ps) if divby2==True:ps[0]=ps[0]/2 if chopoff==True: return chopoffspectra([ps[1], ps[0]]) else: return [ps[1], ps[0]] def smoothspectra(self,spectra,winfrac=.001):#useless #todo winfrac per f #useless? """Input spectra w/o mirror image""" #it has to do w/ width of peak #that is rel to size of features win=int(winfrac*len(spectra)) if win%2==0: win+=1 return (savitzky_golay(spectra,11,0)) #201 gets the peaks def findpeaks(self,spectra,peaktol=.025): """peaktol: fraction of highest spike returns {freq:amp}. spectra should be smoothed spectra has x index """ #spectra can be a bit noisy but not so much # that you have 'needles' on the order of the spike pt=max(spectra[1])*peaktol return dict( peakdet(spectra[1],pt,x=spectra[0])[0] ) #zero idexes peaks def returnpeaklocs(self,*args,**kwargs):#just need to put in .psd args pt=kwargs.pop('peaktol') sg=self.psd(*args,**kwargs) peaks=self.findpeaks(sg,peaktol=pt) #cool prog'ing! 
return sorted(peaks.keys()) def findbestnfft(self,tsd,dt=.001e-12 ,peaktol=.03,minpeakfreqdiff=.5e12 #physics params reject anything w/ dfreq smaller than ftol #..so smaller values of these give more peaks..but too low and you get "false" peaks #,guessdf=None,cutfreq=None ,atatime=2,ntol=.98,minn=1024 #optimization params ,returnpeaks=False ,statmsgs=True): #maxfreq=1.0/(dt*2) maxn=len(tsd) #mindf=maxfreq/(maxn/2.0) #eqn. (maxfreq is const) """ physical params peaktol: fraction of max peak to be id.ed as a peak ftol: min freq b/w peaks. used to eliminate noise adjacent to peaks optimization params ntol: frac of possible optimal numbers atatime: random no.s to assess at a time (not important. keep it 2) """ # #a guess # if guessdf==None: guessdf=mindf*2 #ohh let's start somewhere in the middle # #but if you gave a startdf it has to be bigger than min df # if guessdf<mindf: # raise ValueError, 'given start freq rez is smaller than the smallest\ # possible:'#, mindf #n=int(round( 2*maxfreq/guessdf )) #is there a 2 factor? #loop trytofind until not none. wadv=winadvisor(minpeakfreqdiff,maxn/2-2) # wadvid=(tuple(tsd[:10]),peaktol,ftolmin) #using 1st 10 no.s as an id # try: # wadv=self.cache['wadvs'][wadvid] # print 'got from cache' # except: # print 'didn\'t get from cache' # wadv=winadvisor(ftolmin,maxn/2-2) # self.cache.update({'wadvs':{wadvid:wadv}}) # wadv=self.cache['wadvs'][wadvid] #necessary? # #-2 is just a safety buffer # wintry=[n-2,n-1] #funcs as a 1st try (guess) # print wintry # pl1=self.returnpeaklocs(tsd,dt=dt,peaktol=peaktol,seg=wintry[0]) # pl2=self.returnpeaklocs(tsd,dt=dt,peaktol=peaktol,seg=wintry[1]) # result=wadv.trytofindhifi( wintry, [pl1,pl2] ) # del pl1; del pl2 # print result,wadv.stop # return wadv #to accel. 
finding optimum n, get for pwrs of two upto n highestpwrof2=int(math.log(wadv.n,2)) lowestpwrof2=int(math.log(minn,2)); del minn twotothes=[] for apower in xrange(lowestpwrof2,highestpwrof2+1): #assert type(apower)==int twotothes.append(2**apower) datas=[] if statmsgs==True: print '**initial scan (powers of 2)' for apo in twotothes: # if statmsgs==True: # print 'finding peaks for n=', apo plfid=(tuple(tsd[-10:]),dt,peaktol,apo) #id'ed w/ these try: pl=self.cache['plf'][plfid]#;print 'from cache' except:#not in cache pl=self.returnpeaklocs(tsd,dt=dt,peaktol=peaktol,seg=apo) self.cache.setdefault('plf',{}) self.cache['plf'].update({plfid:pl}) if statmsgs==True: aorr=wadv.acceptorreject(pl)#wadv.evaled[apo] if aorr==True: aorr='accepted' else: aorr='rejected' print 'found', len(pl), 'peaks for nfft=', apo,'.',aorr datas.append(pl) result=wadv.trytofindhifi( twotothes, datas ) if statmsgs==True: print '**initial scan complete' #import time #print wadv.n #todo: persist wadv cache wadv result=None while len(wadv.evaled.keys())/float(wadv.n)<ntol or result==None: #print result,len(wadv.evaled.keys())/float(wadv.n) try: #time.sleep(1) rns=wadv.getrandints(atatime,minn=1) datas=[] for arn in rns: #if statmsgs==True: print 'finding peaks for n=', arn pl=self.returnpeaklocs(tsd,dt=dt,peaktol=peaktol,seg=arn) if statmsgs==True: aorr=wadv.acceptorreject(pl)#wadv.evaled[apo] if aorr==True: aorr='accepted' else: aorr='rejected' print 'found', len(pl), 'peaks for nfft=', arn,'.',aorr datas.append(pl) result=wadv.trytofindhifi( rns, datas ) #return wadv if statmsgs==True: print 'stop?', wadv.stop #when the nfft diff =1 print 'accepted nfft\'s', result print 'eval\'ed no.s fraction', len(wadv.evaled.keys())/float(wadv.n) #but if.. if wadv.stop==True or 1==len(wadv.evaled.keys())/float(wadv.n): break #return result #lo pri todo: does this work? 
except:# if the result is unexpected just try again result=None if returnpeaks==False:return result else: return result, self.findpeaks(self.psd(tsd,dt=dt,seg=result),peaktol=peaktol) def solveforpeaksparams(self,spectra,peaks=None,maxfreq=1,tauguess=None):#10e-12 """input smooth spectra notes: - only sure if spectra w/o index - """ if len(sp.shape(spectra))==1:# if no freq index take list index as freqs fi=sp.linspace(0,maxfreq,len(spectra)) spectra=[fi,spectra] else: maxfreq= spectra[0][-1] #possibly use the shortcut psd #ss=self.smoothspectra(spectra[1]) ss=spectra[1] if peaks==None:peaks=self.findpeaks([spectra[0],ss]) peaksfunc=returnsumofpeaksfx(sp.array(peaks.keys()) ,len(spectra[0]),maxfreq=maxfreq) if tauguess==None:#tau based on weighted avg of freqs tauguess=sp.average(spectra[0],weights=spectra[1])**-1*1000 #assume the freq lasts 100-1000 times its cycle time? print 'no. of peaks detected:' , len(peaks) #guess as a vec #the peak val is actually something like a*tau so i put that in as a guess guess=sp.array( list( sp.array(peaks.values())/tauguess)+([tauguess]*len(peaks)) ) #guess=sp.array(([1]*len(peaks))+([tauguess]*len(peaks))) #.01 assumes typical spectra. just input the 'busy' part of the spectra #for some reason the guess needs to be close or else the solver wont find it #input one vec the abs is b/c i don't want the peaks func to see negs minfunc= lambda X: (peaksfunc( *sp.hsplit(sp.absolute(X),2) ) - spectra[1]) #minscalar= lambda X: sum( minfunc(X)**2 )*tauguess**-1*10e6 #to make it a big no. #posbounds=[] #keep it +. #for avar in guess: posbounds.append((0,inf)) #Inf for other than bfgs s=sp.optimize.leastsq( minfunc, guess #couldn't get the below to work #s=sp.optimize.fmin_l_bfgs_b( minscalar, guess,bounds=posbounds #,approx_grad=True,factr=10#? 
#,disp=2 #s=sp.optimize.fmin_slsqp( minscalar, guess,bounds=posbounds,iprint=2 #s=sp.optimize.fmin_tnc( minscalar, guess,bounds=posbounds,approx_grad=True #, xtol=1e-15 ,ftol=1e-15 #,maxfev=0 ) heights,hwhms=sp.absolute(sp.hsplit(s[0],2)) #sp.absolute fit=[spectra[0],peaksfunc( heights, hwhms ) ] peakparams=dict(zip(peaks.keys(),zip(heights,hwhms))) #print peakparams#,max(fit[1]),trapz(fit[1]) #,'solver.o':s return {'peaksparams':peakparams,'fit':fit}#,'spectra':spectra} #,'solver.o':s # def solveforanexpdecayparams(self,*args,**kwargs): #not used # #1. from spectra, get tau guesses and az # #2. normalize az # #3. give it to func fitter # bestn=self.findbestwinsize(*args,**kwargs) #todo..fill # return az,tauz def solveforgkintparams(self,params,**kwargs):#,maxfreq=40e12): hc=kwargs.setdefault('hc',self.gethc(params,sumup='all')) kwargs.pop('hc') """maxfreq to analyze""" # try: # nfft=kwargs['nfft']# confusing! and doesn't make sense # #w/ just a peaktol constraint # self.findpeaks(self.psd(tsd,dt=dt,seg=result),peaktol=peaktol) # peaks=self.findpeaks([freqs,spectra],**kwargs) # except: # kwargs.update({'returnpeaks':True}) # nfft,peaks=self.findbestwinsize(hc,**kwargs) kwargs.update({'returnpeaks':True}) nfft,hzpeaks=self.findbestnfft(hc,**kwargs) #i found no need to include beyond 40(20?!) thz freqs,spectra=self.psd(hc,scalebyfreq=True,seg=nfft,chopoff=True) #spectra should be 1 sided # #somehow the scalebyfreq makes the psd comparable to closed form soln # #find index of maxfreq # for afreq in freqs: # if afreq>maxfreq: # fl=list(freqs) # imf=fl.index(afreq) # break #div spectra to a more manageable no. .. #magscale=(max(spectra)) #..to help the solver #already div by 2 in the psd #spectra=spectra/2 #!??!?! 
#b/c this is what the peakfit expects #*2.566972197*10**(-34)#/magscale #later x by magscale #can't do this..messes up time scale # maxfreq = freqs[imf] #Hz #pps=self.solveforpeaksparams(spectra[:imf],maxfreq=maxfreq*2*pi) #rads #convvert peaks radpeaks={} for ahzpeakloc,itspeakval in hzpeaks.iteritems(): radpeaks.update({ahzpeakloc*2*sp.pi:itspeakval}) del hzpeaks pps=self.solveforpeaksparams(spectra,peaks=radpeaks,maxfreq=freqs[-1]*2*pi) #rads #return pps #dfreq=maxfreq/(imf+1) #how imf cancels? #dfreq=freqs[-1]/(len(spectra)-1) T=dict(params)['Tk'] a=(T*9.67114349e-05)+12.3918 V=dict(params)['dx']*dict(params)['ly']*dict(params)['lz']*(a**3) kinfo={} for apeakloc,pparams in pps['peaksparams'].iteritems(): #, i0 is height, i1 is halfwidh at half max peakfreq=apeakloc#rads #*maxfreq tau=(pparams[1]) #idk why a=pparams[0]#todo magscale factor here prob has dt or ttime #print tau, a, apeakloc/2/pi/1e12 pps['peaksparams'].update({apeakloc: (a ,pparams[1]#*magscale why did i do this? #, (maxfreq*pparams[1])**-1 )}) #use pparams[1] to draw #conductivity="h*.9296243367e43/V/T^2/(1e24+tau^2*wo^2)" #conductivity=a*9.296243367e18/V/(T**2)/(1+(tau**2)*((peakfreq*2*pi)**2))\ #conductivity=a*9.296243367e18/V/(T**2)/(1+(tau**2)*((peakfreq*2*pi)**2)) #conductivity=(7.242963817e52/V/(T**2)) *(2.566972197e-34)*a/(1+(tau**2)*peakfreq**2) \ #/(peakfreq/2/pi) #factor has conversions and conductivity=2*pi*(1.859248674*10**19)*a*tau/V/T**2/(1+(tau**2)*peakfreq**2) if conductivity<0: print 'WARNING: negative conductivity component calculated.' kinfo.update({apeakloc:{'k':conductivity,'tau':tau,'f':peakfreq/2/pi}}) pps.update({'kinfo':kinfo}) fit=pps['fit'] pps['fit']=[fit[0]/2.0/pi,fit[1]] #backto hz return pps def solveforavggkintparams2(self,*args,**kwargs): """a fx to reduce variability in the conductivity calc due to nfft sensitivity""" #only do if nfft not spec ncalcs=kwargs.setdefault('ncalcs',5) #no. 
of procedures ncalcs=kwargs.pop('ncalcs') calcs={} #gather the data for procn in xrange(ncalcs): acalc=self.solveforgkintparams(*args,**kwargs) calcs.update({procn:acalc}) #group same freqs by least diff #use the one w/ the max no. of peaks #stats #eh not gonna worrk. just sum and put a std dev ks=[] for acalc in calcs.iteritems(): ks.append(sumks(acalc[1]['kinfo'])) calcs.update( {'k': ( average(ks),sp.std(ks) )} ) return calcs def solveforavggkintparams(self,*args,**kwargs): """this func is for processing lists of inputs""" params=args[0] #hc=args[1] #"scalar" case: one HC. one paramset try: params[0] #if just one paramset (not in a list) input this should fail except: # if len(shape(hc))==1: return self.solveforavggkintparams2(*args,**kwargs) #else expect a set of set of params paramslist=params; del params #name change #hclist=hc; del hc #a hc list for one paramset makes sense #..but not the other way around #assert len(paramslist)==len(hclist) #todo=dict(zip(paramslist,hclist)) def returncalcgen(*args,**kwargs): for aparams in paramslist: acalc=self.solveforavggkintparams2(aparams,**kwargs) yield acalc # calcs={} # def returncalcgen(*args,**kwargs): # solverlooper=itertools.izip(paramslist),hclist) # for aparams,ahc in solverlooper: # #hcs=todo[aparams] # if len( shape(ahc) )==1: #ie just a vector # acalc=self.solveforavggkintparams2(aparams,**kwargs) # yield acalc # else: raise Exception, 'input HC not a vector' #calcs.update({aparams:acalc}) # else: #list of HCs #subcalcs=[] # for ahc in hcs: # acalc=self.solveforavggkintparams2(ahc,**kwargs) # subcalcs.append(acalc) #calcs.update({aparams:subcalcs}) return returncalcgen(*args,**kwargs)#calcs def savegkcalcs(self,paramslist,gkcalcs,solntype): #solnlooper=itertools.izip(paramslist,gkcalcs) savedataconstsolntype=lambda params,data: self.savedata(params,solntype,data) map(savedataconstsolntype,paramslist,gkcalcs) return # for aparams,acalc in solnlooper: # i=self.batchobj.runsi[aparams] # 
self.data[solntype][i]=acalc;del i # return def solvegktimeint2(self,params,hc=None,sumhcparts='all' ,zerotol=.05,stablepart=.02,taumultiplecutoff=30#or 15 #hc ,dt=.001e-12,minfreq=.01e12#,win=None #100k win (.01thz) seems best #detrending is futile ,nbins=100): if hc==None:hc=self.gethc(params,sumup=sumhcparts) #1#nstable=int(stablepart*len(hc))#it was found that 1st 2% of ts is stable #by 5% #hc=detrendhc(hc,dt=dt,minfreq=minfreq)#win=nstable)#linear detrend #1#ac=self.shortcutac(hc,trimend=False)[:nstable] ac=self.shortcutac(hc,trimend=True) #cutac=chopoffactail(ac,zerotol=zerotol) #if len(cutac)/float(len(hc)) < stablepart: #if it's too short # nstable=int(stablepart*len(hc)) # ac=self.shortcutac(hc,trimend=False)[:nstable] #else: ac=cutac;del cutac pxx,pys=returnpeaksofabsac(ac,peaktol=zerotol) a,tau=fitexpdecay(pxx,pys) #now cutoff at (5 to 10)tau itau=int(tau/dt)#;print itau ac=ac[:taumultiplecutoff*itau] #need 30 for the ones w/ doping, less for others ks=self.getkseries(params,ac,dt=dt) #plot(ks) h,lowest,binsize,useless=sp.stats.histogram(ks#[4*tau:]#doesn't matter #..for my procedure ,numbins=nbins,defaultlimits=(0,max(ks)),printextras=False) del useless;del lowest #b/c i set lowest to 0 h=list(h) maxbini=h.index(max(h)) lowerbound=maxbini*binsize;upperbound=lowerbound+binsize return {'ks':ks,'k':(average([lowerbound,upperbound]),binsize/2.0)\ ,'tau':tau,'a':a} def solvegktimeint(self,*args,**kwargs): paramsorlistofthem=args[0] try: return self.solvegktimeint2(paramsorlistofthem,**kwargs) except:#list of params def returncalcgen(*args,**kwargs): for aparams in paramsorlistofthem: print 'processing ', self.params[aparams] acalc=self.solvegktimeint2(aparams,**kwargs) yield acalc return returncalcgen(*args,**kwargs) def gkproctest1(gkao,params,hc): tauxs=range(5,60);ks=[] for ataux in tauxs: print 'processing tau x', ataux k=gkao.solvegktimeint(params,hc=hc,taumultiplecutoff=ataux)['k'][0] ks.append(k) return tauxs, ks def 
gkproctest(gkao,paramslist,hcs=None): if hcs==None: hcs=gkao.gethc(paramslist) results={} for aparams in paramslist: ahc=hcs.next() r=gkproctest1(gkao,aparams,ahc) results.update({aparams:r}) return results def plotgkproctest(gkao): results=gkao.data['gkproctest'] ys=[] for aparams,xy in results.iteritems(): ys.append(xy[1]) xs=xy[0] #xl=random.choice(xs);yl=dict(zip(*xy))[xl] #print xl,yl #matplotlib.pyplot.text( xl,yl,yl) plot(*xy,label=str(gkao.params[aparams])) plot(xs,np.sum(ys,axis=0)) return def plotcondtrends(gkao,kcalctype,trends='all' ,subset={'ly':[2],'dx':[4,8,16,32,64],'Tk':[300]}): #or 'int'..egral if trends=='all': #else fronzenset(dict) r=gkao.batchobj.user_groupbyconsts(['dx','dseed','vseed'] ,subsetdictofiters=subset)#,'ly','lz']#i1 is for the const throughout trends,consts=r[0],r[1];del r # trendsforseeds=gkao.batchobj.user_groupbyconsts(['dseed','vseed'] # ,subsetdictofiters=subset)[0] # knoseeds={} # for ast, paramlist in trendsforseeds.iteritems(): # ks=[] # for aps in paramlist: # ti=gka.params[aps] # if ti in gkao.data[kcalctype].keys(): # k=gkao.data[kcalctype][ti]['k'] # ks.append(k) # knoseeds.update({ frozenset(ast):(average(ks),sp.std(ks)) }) #keys have dx and dp for atrend, paramlist in trends.iteritems(): kt={}#;xls=[];ks=[];kerrs=[] #atd=dict(atrend) for aparamset in paramlist: psd=dict(aparamset) taskid=gkao.batchobj.runsi[aparamset] T=psd['Tk'] a=(T*9.67114349e-05)+12.3918 xl=a*psd['dx'] k=gkao.data[kcalctype][taskid]['k'];print taskid,k if k=={}: continue #else: k=k,0 for plotting tau kt.setdefault(xl,[]) kt[xl].append(k[0]) #xls.append(xl);ks.append(k[0]);kerrs.append(k[1]) #print xls,ks,kerrs ktreduce={} for alength, ks in kt.iteritems(): ktreduce[alength]=(average(kt[alength]),sp.std(kt[alength])) del kt xls,[ks,kerrs]=ktreduce.keys(), array(ktreduce.values()).transpose() #^coool code! 
kdata=array( zip( array(xls)**-1 , tuple(ks), tuple(kerrs) ) ,dtype=[('x^-1',float),('k',float),('kstdev',float)] ) kdata.sort(order='x^-1') #print consts lbl=frozenset.union(atrend,frozenset([(k,v[0]) for k,v in consts.iteritems()])) #print lbl matplotlib.pyplot.errorbar(kdata['x^-1'],kdata['k'],yerr=kdata['kstdev'] ,marker='o',label= str(dict(lbl)['dp']*100) ) legend(mode='expand',ncol=len(trends)) plt.xlabel(r'$length^{-1}$ ($\AA^{-1}$)') plt.ylabel(r'Conductivity ($W\cdot m^{-1}K^{-1}$)') return #def markcalcedhc(calcedk,tau): # atau=tau/.63 # mpl.scatter(atau,calcedcond) # return #should be given data def plotkintandtau(gkao,params,hc,**kwargs): dt=kwargs.setdefault('dt',.001e-12) kwargs.pop('dt') gks=gkao.solveforavggkintparams(params,hc,**kwargs) calcedcond=gks['k'] gks.pop('k') maxtaus=[] for acalck,info in gks.iteritems(): taus=[] for afreq,infos in info['kinfo'].iteritems(): taus.append((infos['tau'])) maxtaus.append(max(taus)) atau=average(maxtaus)*4 #the pt where conductivity is at the asymptotic value # a tuple hcacint=gkao.getkseries(params,hc,dt=dt) #todo: it's a waste to gen the xs. 
find units in the plot matplotlib.pyplot.plot(sp.linspace(0,len(hcacint)*dt,len(hcacint),endpoint=False),hcacint) matplotlib.pyplot.errorbar(atau,calcedcond[0],yerr=calcedcond[1] ,marker='o',c='r') return import matplotlib from matplotlib import rc rc('text',usetex=False) # r'string' matplotlib.rcParams['mathtext.default']='regular' def plotniceac(gkao,hc,dt=.001e-12,endt=20e-12): dtps=dt/1e-12 plt.xlabel(r'time (ps)'); plt.ylabel(r'$\left\langle S_{x}(t)\cdot S_{x}(0) \right\rangle/\left\langle S_{x}(0)\cdot S_{x}(0) \right\rangle$') ac=gkao.shortcutac(hc)[:int(endt/dt)] xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False) p=plot(xs,array(ac)/ac[0]) return (xs, ac),p def plotniceabsac(gkao,hc,dt=.001e-12,endt=20e-12): dtps=dt/1e-12 plt.xlabel(r'time (ps)'); plt.ylabel(r'$|\left\langle S_{x}(t)\cdot S_{x}(0) \right\rangle/\left\langle S_{x}(0)\cdot S_{x}(0) \right\rangle|$') acr=gkao.shortcutac(hc)[:int(endt/dt)] ac=abs(array(acr))/acr[0] xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False) p=plot(xs,ac) return (xs, acr),p def plotniceint(gkao,params,ac,dt=.001e-12,endt=20e-12): dtps=dt/1e-12 plt.xlabel(r'time (ps)'); plt.ylabel(r'Conductivity ($W\cdot m^{-1}K^{-1}$)') plt.ylim(0,15) ac=ac[:int(endt/dt)] ks=gkao.getkseries(params,ac,dt=dt) xs=sp.linspace(0,len(ac)*dtps,len(ac),endpoint=False) p=plot(xs,ks) return (xs,ks),p def plotniceacproc(gkao,params,hc,dt=.001e-12): gks=gkao.solvegktimeint2(params,hc=hc) n=len(gks['ks']);tau=gks['tau'];a=gks['a'] ac=plotniceabsac(gkao,hc,dt=dt,endt=30*tau)[0] ED=genexpdecay(a/ac[1][0],tau,0,dt=dt,n=n) pxx,pys=returnpeaksofabsac(ac[1]/ac[1][0],peaktol=.05,dt=dt) scatter(array(pxx)/1e-12,pys,color='red') plot(ED[0]/1e-12,ED[1],color='black') return def plotnicepsd(gkao,hc,**kwargs): plt.xlabel(r'frequency (THz)'); plt.ylabel(r'a.u./frequency') psdd=gkao.psd(hc,**kwargs) plot(psdd[0]/1e12,psdd[1]) return def plotnicepsds(gkao,hcs,**kwargs): plt.yscale('log');plt.ylim(1e-9,None) plt.xlim(0,30) for ahc in hcs: 
plotnicepsd(gkao,ahc) plt.legend(['x=0%','x=50%'],ncol=2,loc=4) return #moving avg not as general #useless def runningstd(ac,mu=0): sumsq=0 ninvals=0 stds=[] for anum in ac: if anum>0: ninvals+=1 sumsq+=anum**2 stds.append((sumsq*ninvals**-1)**.5) else: stds.append(stds[:-1]) return stds #exactly as runningstd2 def runningstd2(ac): return [sp.stats.tstd(ac[0:i+2],limits=(0,None))\ for i in xrange(0,len(ac))] #better than using avgs b/c dists are less sensitive to extremes #useless? def findstabilizedi(ac,conf=.95,xstd=.1,skip=0):#,noise=.05,chi2tol=8):nbins=30, sumsq=0 ninvals=0 ninstds=0 stds=[] i=0 for anum in ac: if anum>0: ninvals+=1 sumsq+=anum**2 curstd=xstd*(sumsq*ninvals**-1)**.5 if anum<curstd: ninstds+=1 stds.append(curstd) else: stds.append(stds[:-1]) #if i+1<=skip:print i;pass#?#can be skip past early vals if float(ninstds)/ninvals > conf:return i #print i,float(ninstds)/ninvals i+=1 #for i in xrange(ii+1,len(ac)-1): # #thisstd=sp.stats.tstd(ac[:i+2], #assert 0<conf and conf < 1 #minac=min(ac);maxac=max(ac) #avg should be about 0 #binranges=sp.linspace(minac,maxac,nbins+1)#last bin in hist2 is for # #>than last bin in array #and i will ignore it #95pc in the middle idea # def returnbinrange(binranges,binno):return binranges # def returnbinranges(binranges): # binsd=[] # for i in binranges[:-1:2]: #nmostbins= #def myhist(*args,**kwargs): sp.stats.histogram2(ac, #bins=sp.zeros( len(binranges)-1 ,dtype='int') #bins+=sp.stats.histogram2(ac[i],binranges)[0][:-1]#don't want the last bin #which is for vals from the last range to inf #chi2=sp.stats.normaltest(ac[i],binranges)[0] #i0 is chi2 #if chi2<chi2tol: return ac[0:i] #how many in limits? 
# tstdd=nan # ii=2 #ewwww not pythonic # while tstdd==nan:#being really pedantic here # tstdd=sp.stats.tstd(ac[0:ii],limits=(0,None)) # invals=[] # for i in ac[0:ii]: if i>0:invals.append(i) # ninvals=len(invals) # inavg=average(invals);del invals # # for i in xrange(ii+1,len(ac)-1): # #thisstd=sp.stats.tstd(ac[:i+2], # val=ac[i] # if val > 0: # inavg=(inavg)*ninvals/(ninvals+1)+val/(ninvals+1) #previous sum + new no. # ninvals+=1 # tstdd= from scipy import signal def detrendhc(hc,dt=.001e-12,minfreq=.01e12,win=None): if win==None:win=int(minfreq**-1/dt)# bp= xrange(0,len(hc),win) #pts in b/w detrending dts=sp.signal.detrend(hc,type='linear',bp=bp) #or type 'constant' return dts def genexpdecay(a,tau,w0hz,dt=.001e-12,n=1e6): ts=sp.linspace(0,dt*n,n,endpoint=False) #time series return [ts,a*cos(w0hz*2*pi*ts)*exp(-ts/tau)] def fftofexpdecay(*args,**kwargs): kwargs.setdefault('ang','rads') args=list(args) args[0]=args[0]**.5 #to take it 'back' to x(t) expd=( (genexpdecay(*args)[1]) ) #expd=expd/range(1,len(expd)+1) ##return expd #p=gkad( expd , ts=.001e-12) #ifftexpd=(sp.fftpack.irfft(expd)) #fftexpd=(sp.fftpack.rfft(expd)) #return ifftexpd p=gka.psd( expd , ts=.001e-12 ) if kwargs['ang']=='rads': return 2*sp.pi*p[0],p[1]#,(p[1]**.5) else: return p[0],p[1] def genhc(a,tau,w0hz,dt=.001e-12,n=1e6,seed=None): """params correspond to a*exp(-1/tau)*cos(w) in the A.C.""" #s/n up w/ n tau=float(tau) ts=sp.linspace(0,dt*n,n,endpoint=False) #time series aa=(sp.e**(1.0/(tau/dt)))**-1.0 #tau just from here sigma=(a-a*aa**2)**.5#a from here #an approx if w0hz==0: sinusoid=1 else: sinusoid=cos(2*pi*w0hz*ts)*(2**.5)##by observation! 
i think it works #..for any fnc drunk=array( ar(n,[aa],sigma=sigma,seed=seed) ) return sp.array([ts, drunk*sinusoid ]) def ar(n,alpha,sigma=1,seed=None): #magic #put no.s in even places in the input vec #vector input is linear #"cleaner" spectogram w/ longer vec #neg no.s for oscilations #peaks widths change w/ if seed!=None:sp.random.seed(seed) #then it will assign a seed #sig=1 mu=0 errors = np.random.normal(mu, sigma, n) #sp.random.seed(123) #alpha = np.random.uniform(0,1,p) #sationary <1 values = np.zeros(n) for i in xrange(len(values)): # i changed range to xrange value = 0 n = len(alpha) if len(alpha)<=i else i for j in xrange(n): value = value + alpha[j] * values[i-j-1] values[i] = value + errors[i] return values def rawpsd(d): return (abs(sp.fft(d))**2)/len(d) def sumks(peaksparams): k=0 for apeakloc,pparams in peaksparams.iteritems(): k+=pparams['k'] return k def plotpeaks(peaksparams,maxfreq=1,n=300): for apeakloc,pparams in peaksparams.iteritems(): plot(sp.linspace(0,maxfreq,n) ,returnsumofpeaksfx([apeakloc],n,maxfreq=maxfreq)([pparams[0]],[pparams[1]])) return #from scipy.stats import cauchy def returnsumofpeaksfx(peaklocs,specsize,maxfreq=1): """returns a fnc that takes in dist. params (as an array) """ #kwargs.setdefault('specrange',specsize) #input params[] as vec # def dist(xs,x0,g,h): # pdf= lambda x: h/(1+((x-x0)/g)**2) # return sp.array(map(pdf,xs)) xs=sp.linspace(0 ,maxfreq #?!?!?! ,specsize) #so this shouldn't be regened #w/ each soln iteration xs2=xs**2 #for cauchy height=1/(pi*gamma) #def peakgen(h,f,freqscale): # def peakgen(h,f,hwhm): # #halfwidth at half max is frac relative to freq scale # return h/( ((xs-f)/hwhm)**2 + 1 ) #graphical #return h*g**2/((xs-f)**2+g**2) # return cauchy.pdf(xs*freqscale # ,scale=((pi*h)**-1) # ,loc=f*freqscale) def peakgen(A,w0,tau): #radians #"2*A*tau*(1+w^2*tau^2+w0^2*tau^2) #/(1+w^2*tau^2+2*tau^2*w*w0+w0^2*tau^2) #/(1+w^2*tau^2-2*tau^2*w*w0+w0^2*tau^2)" #tau=abs(tau)#don't want any neg. 
taus #..or As #A=abs(A) w02=(w0)**2; tau2=(tau)**2 return 2*A*tau*(1+xs2*tau2+w02*tau2) \ /(1+xs2*tau2+2*tau2*xs*w0+w02*tau2) \ /(1+xs2*tau2-2*tau2*xs*w0+w02*tau2) #return 2*A*tau/(1+((xs-w0)**2)*tau**2) # #goes w/ below # xs=xs*2*pi # xs2=xs**2 # def peakgen(A,zeta,tau): #Hz # #"8*A*tau*Pi^2*(4*Pi^2+4*tau^2*w^2*Pi^2+tau^2*zeta^2) # #/(4*Pi^2+tau^2*zeta^2-4*tau^2*zeta*w*Pi+4*tau^2*w^2*Pi^2) # #/(4*Pi^2+4*tau^2*w^2*Pi^2+4*tau^2*zeta*w*Pi+tau^2*zeta^2)" # zeta2=(zeta)**2; tau2=(tau)**2; pi2=pi**2 # # return 8*A*tau*pi2*(4*pi2+4*tau2*xs2*pi2+tau2*zeta2) \ # /(4*pi2+tau2*zeta2-4*tau2*zeta*xs*pi+4*tau2*xs2*pi2) \ # /(4*pi2+tau2*zeta2+4*tau2*zeta*xs*pi+4*tau2*xs2*pi2) def peaksgen(hs,peaklocs,hwhms): #return sum(map(peakgen,hs,peaklocs,[freqscale]*len(hs)),axis=0) #return sum(map(peakgen,hs,peaklocs),axis=0) return sum(map(peakgen,hs,peaklocs,hwhms),axis=0) return lambda heights,hwhms: peaksgen(heights,peaklocs,hwhms)#vec #return lambda heights: peaksgen(heights)#vec #(returnsumofpeaksfx([4k,5k,7k,15k,20k....],~1e6/2,~1e6/2) def returnsumofexpdecays(w0hzs,n,dt=1): """returns a fnc that takes in AC params """ xs=sp.linspace(0,dt*n,n,endpoint=False) def acgen(A,w0,tau): return A*sp.e**(-xs/tau)*cos(xs*2*pi*w0) def acsgen(aas,w0s,tawz): #return sum(map(peakgen,hs,peaklocs,[freqscale]*len(hs)),axis=0) #return sum(map(peakgen,hs,peaklocs),axis=0) return sum(map(acgen,aas,w0s,tawz),axis=0) return lambda az,taus: acsgen(az,w0hzs,taus)#vec #return lambda heights: peaksgen(heights)#vec #(returnsumofpeaksfx([4k,5k,7k,15k,20k....],~1e6/2,~1e6/2) def savitzky_golay(y, window_size, order, deriv=0): r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter. The Savitzky-Golay filter removes high frequency noise from data. It has the advantage of preserving the original shape and features of the signal better than other types of filtering approaches, such as moving averages techhniques. 
Parameters ---------- y : array_like, shape (N,) the values of the time history of the signal. window_size : int the length of the window. Must be an odd integer number. order : int the order of the polynomial used in the filtering. Must be less then `window_size` - 1. deriv: int the order of the derivative to compute (default = 0 means only smoothing) Returns ------- ys : ndarray, shape (N) the smoothed signal (or it's n-th derivative). Notes ----- The Savitzky-Golay is a type of low-pass filter, particularly suited for smoothing noisy data. The main idea behind this approach is to make for each point a least-square fit with a polynomial of high order over a odd-sized window centered at the point. Examples -------- t = np.linspace(-4, 4, 500) y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape) ysg = savitzky_golay(y, window_size=31, order=4) import matplotlib.pyplot as plt plt.plot(t, y, label='Noisy signal') plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal') plt.plot(t, ysg, 'r', label='Filtered signal') plt.legend() plt.show() References ---------- .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), pp 1627-1639. .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery Cambridge University Press ISBN-13: 9780521880688 """ try: window_size = np.abs(np.int(window_size)) order = np.abs(np.int(order)) except ValueError, msg: raise ValueError("window_size and order have to be of type int") if window_size % 2 != 1 or window_size < 1: raise TypeError("window_size size must be a positive odd number") if window_size < order + 2: raise TypeError("window_size is too small for the polynomials order") order_range = range(order+1) half_window = (window_size -1) // 2 # precompute coefficients b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)]) m = np.linalg.pinv(b).A[deriv] # pad the signal at the extremes with # values taken from the signal itself firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] ) lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) y = np.concatenate((firstvals, y, lastvals)) return np.convolve( m, y, mode='valid') # #def peaksinsegs(data, step): #breaks it into blocks. useless # data = data.ravel() # length = len(data) # if length % step == 0: # data.shape = (length/step, step) # else: # data.resize((length/step, step)) # max_data = np.maximum.reduce(data,1) # min_data = np.minimum.reduce(data,1) # return np.concatenate((max_data[:,np.NewAxis], min_data[:,np.NewAxis]), 1) #higher n should find more (false) peaks import itertools import random class winadvisor(object): #store bins collection def __init__(self,delta,n,**kwargs):#,data=None):#,stufftobin=None): self.delta=delta self.stop=False self.n=n self.kwargs=kwargs self.kwargs.setdefault('sort',True) self.evaled={} #possibly derive to keep only last few #self.lastresult=self.acceptorreject(data) #bin return # def bin(self,*stufftobin): # for tobin in stufftobin: def acceptorreject(self,data,**kwargs): """better to input ordered data""" if len(data)==1:return True#assert len(data)>=2 sort=kwargs.setdefault('sort',self.kwargs['sort']) #so the sort is just for this fnc call if sort==True: data=sorted(data) 
combogen=itertools.combinations(data,2) for acombo in combogen: if abs(acombo[0]-acombo[1])<self.delta: return False #for reject return True #can be used for a guess def upordownorend(self,twoindices,twodatas): #first data assumed lower index #can go up if lower index #i2-i1>=1 twoindices=tuple(twoindices) assert len(twoindices) and len(twodatas)==2 di=twoindices[1]-twoindices[0] loi=twoindices[0];hii=twoindices[1] assert di>0 # idd=dict( zip(indices,datas) ) # if (twoindices[0]) in self.lastpaidevaled.keys() \ # or (twoindices[1]) in self.lastpaidevaled.keys(): #see if it was already evaluated and get val from there try: lo=self.evaled[twoindices[0]] except: lo=self.acceptorreject(twodatas[0]) self.evaled.update({twoindices[0]:lo}) try: hi=self.evaled[twoindices[1]] except: hi=self.acceptorreject(twodatas[1]) self.evaled.update({twoindices[1]:hi}) if lo == True and hi==True: #try going higher #True anything below i lo for i in xrange(1,hii+1): self.evaled.setdefault(i,True) return 1 if lo == True and hi==False: if di>1: #optimum could be b/w indices for i in xrange(1,loi+1): self.evaled.setdefault(i,True) for i in xrange(hii,self.n+1): self.evaled.setdefault(i,False) return None else: self.stop=True return 0 # ie bingo if lo == False and hi==False: for i in xrange(loi,self.n+1): self.evaled.setdefault(i,False) return -1 #try going lower if lo == False and hi==True: #print loi, hii raise Exception, 'unexpected situation' def trytofindhifi(self,indices,datas): """..but w/o false peaks""" assert len(indices)>=2 assert len(indices)==len(datas) assert sorted(indices)==indices indices=list(indices) mini=indices[0]#min(indices) maxi=indices[-1]#max(indices) ii=iter(indices) idd=dict( zip(indices,datas) ) i2nexti=1 for curi in ii: nexti=indices[i2nexti] #print curi,nexti ude=self.upordownorend( [curi,nexti], [idd[curi],idd[nexti]] ) #print ude if ude==1: if nexti==maxi: if maxi==self.n:self.stop=True return maxi #else keep searching # if ude==None: return curi if ude==0: 
return curi if ude==-1 and curi==mini: return None #it's all crap i2nexti+=1 def getrandints(self,no,minn=1):#no. can get big # maxn=self.n # got=[] # if len(self.evaled.keys())==(self.n-minn+1): return None # for i in xrange(no): # rn=random.randint(minn,maxn) # #this loop becomes slow when the no.s are close to being exhausted # while (rn in got) or (rn in self.evaled.keys()):#conditions to keep looking # rn=random.randint(minn,maxn) # got.append(rn) # if len(self.evaled.keys())==(self.n-minn+1): return got #if exhausted # return sorted(got) maxn=self.n got=[] if len(self.evaled.keys())==(self.n-minn+1): return None choices=xrange(minn,maxn+1) choices=list(frozenset(choices)-frozenset(self.evaled.keys())) #much faster!! for i in xrange(no): rn=random.choice(choices) got.append(rn) choices.remove(rn) if len(self.evaled.keys())==(self.n-minn+1): return got #if exhausted return sorted(got) def returnpeaksofabsac(ac,dt=.001e-12,peaktol=.05): #not that sensitive to peaktol, peak tol .1 gives same ans #can input just 25% of full ac ac=sp.absolute(ac) n=len(ac) xs=sp.linspace(0,dt*n,n,endpoint=False) pt=max(ac)*peaktol pd= dict( peakdet(ac,pt,x=xs)[0] );del xs xf=pd.keys();yf=pd.values() return array(xf),array(yf) def fitexpdecay(xs,ys): xs=array(xs);ys=array(ys); def expdecay(t,a,tau): return a*sp.exp(-t/float(abs(tau))) def mined(a_n_tau): return ( ys - expdecay(xs,a_n_tau[0],a_n_tau[1]) )**2 #def minedjusttau(tau): return mined([max(ys),tau]) #s=sp.optimize.leastsq(minedjusttau, (xs[1]-xs[0])*3000 ) s=sp.optimize.leastsq(mined, [max(ys),(xs[1]-xs[0])*3000] ) #this way #just b/c i like the resulting tau better #print s return abs(s[0][0]),abs(s[0][1]) #a, tau def chopoffspectra(spectra,zerotol=.001): """ if expecting nothing beyond a certain freq""" maxval=zerotol*max(spectra[1]) #is the vals i #start from highest side and go low maxi=len(spectra[1])-1 for ani in xrange(len(spectra[1])): decreasingi=maxi-ani if spectra[1][decreasingi]<maxval:pass else: return 
spectra[0][:decreasingi],spectra[1][:decreasingi] def chopoffactail(ac,zerotol=.03):#,noisefactor=10):#,zerotol=.03):#: """ takes out the converged part of the FULL input ac""" aac=abs(array(ac)) # i don't want to deal with up and down # noise=average( aac[int(len(ac)/2.0):] )*noisefactor # print noise maxval=zerotol*max(aac) #is the vals i #start from highest side and go low maxi=len(aac)-1 for ani in xrange(len(aac)): decreasingi=maxi-ani if aac[decreasingi]<maxval:pass else: return ac[:decreasingi] def peakdet(v, delta, x = None): #looks for peak /shapes/ #tolerates noise in spectra """ Converted from MATLAB script at http://billauer.co.il/peakdet.html Currently returns two lists of tuples, but maybe arrays would be better function [maxtab, mintab]=peakdet(v, delta, x) %PEAKDET Detect peaks in a vector % [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local % maxima and minima ("peaks") in the vector V. % MAXTAB and MINTAB consists of two columns. Column 1 % contains indices in V, and column 2 the found values. % % With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices % in MAXTAB and MINTAB are replaced with the corresponding % X-values. % % A point is considered a maximum peak if it has the maximal % value, and was preceded (to the left) by a value lower by % DELTA. % Eli Billauer, 3.4.05 (Explicitly not copyrighted). % This function is released to the public domain; Any use is allowed. 
""" maxtab = [] mintab = [] if x is None: x = arange(len(v)) v = asarray(v) if len(v) != len(x): sys.exit('Input vectors v and x must have same length') if not isscalar(delta): sys.exit('Input argument delta must be a scalar') if delta <= 0: sys.exit('Input argument delta must be positive') mn, mx = Inf, -Inf mnpos, mxpos = NaN, NaN lookformax = True for i in arange(len(v)): this = v[i] if this > mx: mx = this mxpos = x[i] if this < mn: mn = this mnpos = x[i] if lookformax: if this < mx-delta: maxtab.append((mxpos, mx)) mn = this mnpos = x[i] lookformax = False else: if this > mn+delta: mintab.append((mnpos, mn)) mx = this mxpos = x[i] lookformax = True return maxtab, mintab try: gkb=batchfoldermgt('\\\\129.59.197.166\\aldosams\\research\\yag\\runtypes\\gk') except: gkb=batchfoldermgt('/home/aldosams/research/yag/runtypes/gk') gka=gkanalysis(gkb)
UTF-8
Python
false
false
2,014
19,112,604,497,175
b9a61f5d229274e0161e3465e4cc91602c524f1c
2f89875c097a4aaddd4dca052676f0df887abb46
/tests/simple_demo.py
939e1d0a14b26f8a798139d6b6dcc2534b7afd39
[ "LGPL-3.0-only" ]
non_permissive
pmundkur/libcrm114
https://github.com/pmundkur/libcrm114
71d31be2d38f58e0da191e98f381764b3a09e0e1
fe1580274d01fc2dc9e667d38239f7272b49f0cf
refs/heads/master
2020-05-24T14:02:11.078316
2013-04-07T20:35:10
2013-04-07T20:35:10
2,824,772
6
3
null
false
2013-04-07T20:35:11
2011-11-22T02:16:36
2013-04-07T20:35:10
2013-04-07T20:35:10
116
null
5
1
C
null
null
import string, pprint import pycrm114 as p import texts Alice_frag = \ "So she was considering in her own mind (as well as she could, for the\n" \ "hot day made her feel very sleepy and stupid), whether the pleasure\n" \ "of making a daisy-chain would be worth the trouble of getting up and\n" \ "picking the daisies, when suddenly a White Rabbit with pink eyes ran\n" \ "close by her.\n" Hound_frag = \ "\"Well, Watson, what do you make of it?\"\n" \ "Holmes was sitting with his back to me, and I had given him no\n" \ "sign of my occupation.\n" \ "\"How did you know what I was doing? I believe you have eyes in\n" \ "the back of your head.\"\n" Macbeth_frag = \ " Double, double, toil and trouble;\n" \ " Fire, burn; and cauldron, bubble.\n" \ " \n" \ " SECOND WITCH.\n" \ " Fillet of a fenny snake,\n" \ " In the caldron boil and bake;\n" \ " Eye of newt, and toe of frog,\n" \ " Wool of bat, and tongue of dog,\n" \ " Adder's fork, and blind-worm's sting,\n" \ " Lizard's leg, and howlet's wing,--\n" \ " For a charm of powerful trouble,\n" \ " Like a hell-broth boil and bubble.\n" \ Willows_frag = \ "'This is fine!' he said to himself. 'This is better than whitewashing!'\n" \ "The sunshine struck hot on his fur, soft breezes caressed his heated\n" \ "brow, and after the seclusion of the cellarage he had lived in so long\n" \ "the carol of happy birds fell on his dulled hearing almost like a shout." cb = p.ControlBlock(flags=(p.CRM114_SVM | p.CRM114_STRING), classes=[("Alice", True), ("Macbeth", False)], start_mem = 8000000) cb.dump(file("test_cb_dump.txt", 'w')) cb = p.ControlBlock.load(file("test_cb_dump.txt", 'r')) db = p.DataBlock(cb) print " Starting to learn the 'Alice in Wonderland' text" db.learn_text(0, texts.Alice) print " Starting to learn the 'MacBeth' text" db.learn_text(1, texts.Macbeth) print " Writing our datablock as 'simple_demo_datablock.txt'." db.dump(file("simple_demo_datablock.txt", 'w')) print " Reading text form back in." 
db = p.DataBlock.load(file("simple_demo_datablock.txt", 'r')) print " Classifying the 'Alice' text." s = db.classify_text(Alice_frag) print ("Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d" % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features())) for sc in s.scores(): print ("documents: %d features: %d hits: %d prob: %f pR: %f" % (sc["documents"], sc["features"], sc["hits"], sc["prob"], sc["pR"])) print " Classifying the 'Macbeth' text." s = db.classify_text(Macbeth_frag) print ("Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d" % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features())) for sc in s.scores(): print ("documents: %d features: %d hits: %d prob: %f pR: %f" % (sc["documents"], sc["features"], sc["hits"], sc["prob"], sc["pR"])) print " Classifying the 'Hound' text." s = db.classify_text(Hound_frag) print ("Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d" % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features())) for sc in s.scores(): print ("documents: %d features: %d hits: %d prob: %f pR: %f" % (sc["documents"], sc["features"], sc["hits"], sc["prob"], sc["pR"])) print " Classifying the 'Wind in the Willows' text." s = db.classify_text(Willows_frag) print ("Best match: %s Tot succ prob: %f overall_pR: %f unk_features: %d" % (s.best_match(), s.tsprob(), s.overall_pR(), s.unk_features())) for sc in s.scores(): print ("documents: %d features: %d hits: %d prob: %f pR: %f" % (sc["documents"], sc["features"], sc["hits"], sc["prob"], sc["pR"]))
UTF-8
Python
false
false
2,013
13,228,499,323,168
4f13ecfb5c2883b890629b36336467ba0ee8ab37
8a6cdc50c434eecd30f6ec964518d299901dccfb
/uamobile/factory/softbank.py
b2610f963f17c1c14b24cf6f64f09252d0f02230
[ "MIT" ]
permissive
csakatoku/uamobile
https://github.com/csakatoku/uamobile
ad8fa2663d45386298b14ff8895b160dcdb429c8
7be1f739369bb00b0ca099593d0d9dfaf52fb3b8
refs/heads/master
2021-01-18T09:49:13.364754
2010-06-18T07:30:38
2010-06-18T07:30:38
687,654
1
0
null
false
2014-10-29T10:43:59
2010-05-26T17:18:08
2013-11-14T09:59:47
2010-06-18T07:30:53
300
8
3
2
Python
null
null
# -*- coding: utf-8 -*- from uamobile.factory.base import AbstractUserAgentFactory from uamobile.softbank import SoftBankUserAgent from uamobile.parser import SoftBankUserAgentParser class SoftBankUserAgentFactory(AbstractUserAgentFactory): device_class = SoftBankUserAgent parser = SoftBankUserAgentParser()
UTF-8
Python
false
false
2,010
14,207,751,852,955
848f5cf125ec7d4d75324f9fce32b380ab8075c7
ce8f4fa31e5682b33672d1c348dd5d958da06fbb
/problem10.py
6230d8e3005627bd7c265ffd7119797eb8a2af6f
[]
no_license
djmittens/euler
https://github.com/djmittens/euler
0b5d7e04b07715880ef85ae7bd3426f12199eddb
5b0fa070c147b52c5874e0bad018e4c3d9f5dd06
refs/heads/master
2020-03-29T15:29:02.338032
2014-01-06T06:09:04
2014-01-06T06:09:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from common import * print sum(filter(prime, range(2, 2000000)))
UTF-8
Python
false
false
2,014
14,508,399,556,037
a77a1c38889ec1b7c6c216190e22713dfc7e8c73
40f2c9b0659ed981c2039189ff54466731160fdd
/test/bpl_doctest.py
568d09b739391ccd72dcc90f9dcd7c9ae62310c0
[]
no_license
straszheim/boost-python
https://github.com/straszheim/boost-python
cfe538c088c864d3044859b464a985efad2c73f6
119cb5a08939a6ea412d5892db9ae01296e1d083
refs/heads/master
2017-10-30T21:03:17.513168
2012-08-21T08:58:38
2012-08-21T08:58:38
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright Troy D. Straszheim 2009. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # # A script that runs doctests that works on python 2.2 and up. # The catch: only do portable stuff in the doctests. # import sys python_version = sys.version_info[0] * 100 \ + sys.version_info[1] * 10 \ + sys.version_info[2] if python_version < 300: import py2_helpers as helpers else: import py3_helpers as helpers pythonpath = sys.argv[1] testmodulename = sys.argv[2] sys.argv = sys.argv[2:] sys.path.append(pythonpath) testmodule = __import__(testmodulename) shouldthrow = helpers.shouldthrow globs = {'shouldthrow' : shouldthrow, 'python_version' : python_version } def main(docstring): if python_version < 300: # # exception types are printed fully qualified in py3k # docstring = docstring.replace('Boost.Python.ArgumentError', 'ArgumentError') docstring = docstring.replace('Boost.Python.ArgumentError', 'ArgumentError') from doctest import Tester t = Tester(globs=globs) (failures, tries) = t.runstring(docstring, sys.argv[0]) t.summarize(verbose=1) sys.exit(failures > 0) else: import doctest parser = doctest.DocTestParser() dt = parser.get_doctest(docstring, globs=globs, name=sys.argv[0], filename=None, lineno=None) print(dt) runner = doctest.DocTestRunner(verbose=1) runner.run(dt) (failed, attempted) = runner.summarize(verbose=True) sys.exit(failed > 0) # doctest.testfile(scriptfile, module_relative=False) if s in testmodule: if isinstance(testmodule.s, string): main(string) elif main in testmodule: if isinstance(testmodule.main, function): testmodule.main(sys.argv) else: raise RuntimeError("neither doctest string nor main found in module")
UTF-8
Python
false
false
2,012
9,526,237,464,446
2512b80318f39da2086012cbdbfbc3d16014a962
39ccf37ee3a51763441cf0cbb494908cf05eb4cb
/lexer_fo.py
29c8ef4543f33af3cc3e088a73dae33919368b8d
[]
no_license
cburschka/python-logic
https://github.com/cburschka/python-logic
3c041f455ad24342f30f16f522aa46863a03c2ed
c1da12a0583086316b03b3a61a071ee1338e36ce
refs/heads/master
2021-01-10T19:51:20.777936
2014-02-21T08:10:18
2014-02-21T08:10:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import symbols as SYM import parser.symbol import parser.lexer class Symbol(parser.symbol.Term): pass class Equality(Symbol): pass class Relation(Symbol): pass class Function(Symbol): pass class Constant(Symbol): pass class Operator(parser.symbol.Term): pass class Quantor(Operator): pass class Junctor(Operator): pass class Not(Operator): pass class LeftParen(Operator): pass class RightParen(Operator): pass class Variable(parser.symbol.Term): pass class Comma(Operator): pass lexer = lambda signature: parser.lexer.lexer( meta = { SYM.EXISTS: Quantor, SYM.FORALL: Quantor, SYM.NOT: Not, SYM.AND: Junctor, SYM.OR: Junctor, SYM.IMP: Junctor, SYM.EQ: Junctor, '(': LeftParen, ')': RightParen, '=': Equality, ',': Comma }, names = dict( [(name, sym) for name, (sym, d) in signature['relations'].items()] + [(name, sym) for name, (sym, d) in signature['functions'].items()] + [(name, sym) for name, sym in signature['constants'].items()] ), variable = Variable, char_filter = lambda c: ('1' <= c <= '9' or 'a' <= c <= 'z' or 'A' <= c <= 'Z') )
UTF-8
Python
false
false
2,014
18,751,827,231,650
30d7ec270cdf58ce34ab16d5ef14c6954845a4a1
2a2c0e9e0aef33c43a65b8b0d703c28d86ed8831
/bakery_cli/ttfont.py
8a6d2c531bf6520192ea82fd42a5d0c537e23cc1
[ "Apache-2.0" ]
permissive
anthrotype/fontbakery
https://github.com/anthrotype/fontbakery
31dbf6fc9804a3d046a06a2db3b964d95682d49a
5717c39e9c999b62cacbdfde1fc1ee96ae049e5a
refs/heads/master
2021-01-15T09:08:45.361703
2014-12-04T14:20:41
2014-12-04T14:20:41
27,259,674
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: utf-8 # Copyright 2013 The Font Bakery Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See AUTHORS.txt for the list of Authors and LICENSE.txt for the License. import re import os.path as op from fontTools import ttLib class BaseFont(object): @staticmethod def get_ttfont(path): return Font(path) @staticmethod def get_ttfont_from_metadata(path, font_metadata, is_menu=False): path = op.join(op.dirname(path), font_metadata.filename) if is_menu: path = path.replace('.ttf', '.menu') return Font.get_ttfont(path) class Font(BaseFont): def __init__(self, fontpath): if fontpath[-4:] == '.ttx': self.ttfont = ttLib.TTFont(None) self.ttfont.importXML(fontpath, quiet=True) else: self.ttfont = ttLib.TTFont(fontpath) self.ascents = AscentGroup(self.ttfont) self.descents = DescentGroup(self.ttfont) self.linegaps = LineGapGroup(self.ttfont) def __getitem__(self, key): """ Returns TTFont table with key name >>> font = Font("tests/fixtures/ttf/Font-Bold.ttf") >>> font['name'].tableTag 'name' """ return self.ttfont[key] def get_program_bytecode(self): """ Return binary program code from "prep" table. 
>>> font = Font("tests/fixtures/ttf/Font-Bold.ttf") >>> font.get_program_bytecode() '\\xb8\\x01\\xff\\x85\\xb0\\x04\\x8d' """ try: return self['prep'].program.getBytecode() except KeyError: return "" def get_bounding(self): """ Returns max and min bbox font >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.get_bounding() (-384, 1178) """ if self.ttfont.sfntVersion == 'OTTO': return self['head'].yMin, self['head'].yMax ymax = 0 for g in self['glyf'].glyphs: char = self['glyf'][g] if hasattr(char, 'yMax') and ymax < char.yMax: ymax = char.yMax ymin = 0 for g in self['glyf'].glyphs: char = self['glyf'][g] if hasattr(char, 'yMin') and ymin > char.yMin: ymin = char.yMin return ymin, ymax @property def license_url(self): """ Return LicenseURL from "name" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.license_url u'http://scripts.sil.org/OFL' """ for name in self.names: if name.nameID == 14: return Font.bin2unistring(name) @property def macStyle(self): return self['head'].macStyle @property def italicAngle(self): return self['post'].italicAngle @property def names(self): return self['name'].names @property def glyphs(self): """ Returns list of glyphs names in fonts >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(len(font.glyphs)) 502 """ return self.ttfont.getGlyphOrder() @property def OS2_usWeightClass(self): """ OS/2.usWeightClass property value >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.OS2_usWeightClass) 400 """ return self['OS/2'].usWeightClass @property def OS2_usWidthClass(self): """ Returns OS/2.usWidthClass property value >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.OS2_usWidthClass 5 """ return self['OS/2'].usWidthClass @property def OS2_fsType(self): """ OS/2.fsType property value >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.OS2_fsType) 8 """ return self['OS/2'].fsType def platform_entry(self, entry): if entry.platformID == 1 and entry.langID 
== 0: return Font.bin2unistring(entry) elif entry.platformID == 3 and entry.langID == 0x409: return Font.bin2unistring(entry) @property def fullname(self): """ Returns fullname of fonts >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.fullname u'Monda Regular' """ for entry in self.names: if entry.nameID != 4: continue value = self.platform_entry(entry) if value: return value return '' @property def _style_name(self): for entry in self.names: if entry.nameID != 2: continue value = self.platform_entry(entry) if value: return value return '' @property def stylename(self): """ Returns OpenType specific style name >>> font = Font("tests/fixtures/ttf/Font-Bold.ttf") >>> font.stylename u'Bold' """ return self._style_name @property def _family_name(self): for entry in self.names: if entry.nameID != 1: continue value = self.platform_entry(entry) if value: return value return '' @property def familyname(self): """ Returns fullname of fonts >>> font = Font("tests/fixtures/ttf/Font-Bold.ttf") >>> font.familyname u'Font' """ return self._family_name @property def ot_family_name(self): """ Returns Windows-only Opentype-specific FamilyName """ for entry in self.names: # This value must be only for windows platform as in # mac it addresses some problems with installing fonts with # that ids if entry.nameID != 16 or entry.platformID != 3: continue value = self.platform_entry(entry) if value: return value return '' @property def ot_style_name(self): """ Returns Windows-only Opentype-specific StyleName """ for entry in self.names: # This value must be only for windows platform as in # mac it addresses some problems with installing fonts with # that ids if entry.nameID != 17 or entry.platformID != 3: continue value = self.platform_entry(entry) if value: return value return '' @property def ot_full_name(self): """ Returns Windows-only Opentype-specific FullName """ for entry in self.names: # This value must be only for windows platform as in # mac it addresses some 
problems with installing fonts with # that ids if entry.nameID != 18 or entry.platformID != 3: continue value = self.platform_entry(entry) if value: return value return '' @property def post_script_name(self): """ Returns fullname of fonts >>> font = Font("tests/fixtures/ttf/Font-Bold.ttf") >>> font.post_script_name u'Font-Bold' """ for entry in self.names: if entry.nameID != 6: continue value = self.platform_entry(entry) if value: return value return '' def retrieve_cmap_format_4(self): """ Returns cmap table format 4 >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.retrieve_cmap_format_4().platEncID) 3 """ for cmap in self['cmap'].tables: if cmap.format == 4: return cmap def advance_width(self, glyph_id=None): """ AdvanceWidth of glyph from "hmtx" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.advance_width("a")) 572 """ if not glyph_id: return self['hhea'].advanceWidthMax try: return self['hmtx'].metrics[glyph_id][0] except KeyError: return None @staticmethod def bin2unistring(record): if b'\000' in record.string: return record.string.decode('utf-16-be') elif not isinstance(record.string, unicode): return unicode(record.string, 'unicode_escape') return record.string def get_glyf_length(self): """ Length of "glyf" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.get_glyf_length()) 21804 """ return self.ttfont.reader.tables['glyf'].length def get_loca_length(self): """ Length of "loca" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.get_loca_length()) 1006 """ return self.ttfont.reader.tables['loca'].length def get_loca_glyph_offset(self, num): """ Retrieve offset of glyph in font tables >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.get_loca_glyph_offset(15)) 836 >>> int(font.get_loca_glyph_offset(16)) 904 """ return self['loca'].locations[num] def get_loca_glyph_length(self, num): """ Retrieve length of glyph in font loca table >>> font = 
Font("tests/fixtures/ttf/Font-Regular.ttf") >>> int(font.get_loca_glyph_length(15)) 68 """ return self.get_loca_glyph_offset(num + 1) - self.get_loca_glyph_offset(num) def get_loca_num_glyphs(self): """ Retrieve number of glyph in font loca table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.get_loca_num_glyphs() 503 """ return len(self['loca'].locations) def get_hmtx_max_advanced_width(self): """ AdvanceWidthMax from "hmtx" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.get_hmtx_max_advanced_width() 1409 """ advance_width_max = 0 for g in self['hmtx'].metrics.values(): advance_width_max = max(g[0], advance_width_max) return advance_width_max @property def advance_width_max(self): """ AdvanceWidthMax from "hhea" table >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.advance_width_max 1409 """ return self.advance_width() def get_upm_height(self): return self['head'].unitsPerEm def get_highest_and_lowest(self): high = [] low = [] if self.ttfont.sfntVersion == 'OTTO': return high, low maxval = self.ascents.get_max() minval = self.descents.get_min() for glyph, params in self['glyf'].glyphs.items(): if hasattr(params, 'yMax') and params.yMax > maxval: high.append(glyph) if hasattr(params, 'yMin') and params.yMin < minval: low.append(glyph) return high, low def save(self, fontpath): self.ttfont.save(fontpath) def is_none_protected(func): def f(self, value): if value is None: return func(self, value) return f class AscentGroup(object): def __init__(self, ttfont): self.ttfont = ttfont def set(self, value): self.hhea = value self.os2typo = value self.os2win = value def get_max(self): """ Returns largest value of ascents >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.ascents.get_max() 1178 """ return max(self.hhea, self.os2typo, self.os2win) def hhea(): doc = """Ascent value in 'Horizontal Header' (hhea.ascent) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.ascents.hhea 1178 """ def 
fget(self): return self.ttfont['hhea'].ascent @is_none_protected def fset(self, value): self.ttfont['hhea'].ascent = value return locals() hhea = property(**hhea()) def os2typo(): doc = """Ascent value in 'Horizontal Header' (OS/2.sTypoAscender) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.ascents.os2typo 1178 """ def fget(self): return self.ttfont['OS/2'].sTypoAscender @is_none_protected def fset(self, value): self.ttfont['OS/2'].sTypoAscender = value return locals() os2typo = property(**os2typo()) def os2win(): doc = """Ascent value in 'Horizontal Header' (OS/2.usWinAscent) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.ascents.os2win 1178 """ def fget(self): return self.ttfont['OS/2'].usWinAscent @is_none_protected def fset(self, value): self.ttfont['OS/2'].usWinAscent = value return locals() os2win = property(**os2win()) class DescentGroup(object): def __init__(self, ttfont): self.ttfont = ttfont def set(self, value): self.hhea = value self.os2typo = value self.os2win = value def get_min(self): """ Returns least value of descents. 
>>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.descents.get_min() -384 """ return min(self.hhea, self.os2typo, self.os2win) def hhea(): doc = """ Descent value in 'Horizontal Header' (hhea.descent) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.descents.hhea -384 """ def fget(self): return self.ttfont['hhea'].descent @is_none_protected def fset(self, value): self.ttfont['hhea'].descent = value return locals() hhea = property(**hhea()) def os2typo(): doc = """Descent value in 'Horizontal Header' (OS/2.sTypoDescender) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.descents.os2typo -384 """ def fget(self): return self.ttfont['OS/2'].sTypoDescender @is_none_protected def fset(self, value): self.ttfont['OS/2'].sTypoDescender = value return locals() os2typo = property(**os2typo()) def os2win(): doc = """Descent value in 'Horizontal Header' (OS/2.usWinDescent) >>> font = Font("tests/fixtures/ttf/Font-Regular.ttf") >>> font.descents.os2win 384 """ def fget(self): return self.ttfont['OS/2'].usWinDescent @is_none_protected def fset(self, value): self.ttfont['OS/2'].usWinDescent = abs(value) return locals() os2win = property(**os2win()) class LineGapGroup(object): def __init__(self, ttfont): self.ttfont = ttfont def set(self, value): self.hhea = value self.os2typo = value def hhea(): doc = "The hhea.lineGap property" def fget(self): return self.ttfont['hhea'].lineGap @is_none_protected def fset(self, value): self.ttfont['hhea'].lineGap = value return locals() hhea = property(**hhea()) def os2typo(): doc = "The OS/2.sTypoLineGap property" def fget(self): return self.ttfont['OS/2'].sTypoLineGap @is_none_protected def fset(self, value): self.ttfont['OS/2'].sTypoLineGap = value return locals() os2typo = property(**os2typo()) class FontTool: @staticmethod def get_tables(path): """ Retrieves tables names existing in font >>> FontTool.get_tables("tests/fixtures/ttf/Font-Regular.ttf") ['GDEF', 'gasp', 'loca', 'name', 'post', 'OS/2', 
'maxp', 'head', \ 'kern', 'FFTM', 'GSUB', 'glyf', 'GPOS', 'cmap', 'hhea', 'hmtx', 'DSIG'] """ font = ttLib.TTFont(path) return font.reader.tables.keys() def getName(font, pairs): value = None for pair in pairs: value = font['name'].getName(*pair) if value: break if value.isUnicode(): value = value.string.decode('utf-16-be') else: value = value.string assert value, u'{} seems to be missed in NAME table'.format(pairs) return value def getSuggestedFontNameValues(font): family_name = getName(font, [[1, 3, 1], [1, 1, 0]]) subfamily_name = getName(font, [[2, 3, 1], [2, 1, 0]]) full_name = getName(font, [[4, 3, 1], [4, 1, 0]]) subfamilies = ['Regular', 'Bold', 'Italic', 'Semi Bold Italic', 'Semi Bold', 'Heavy', 'Heavy Italic', 'Extra Light Italic', 'Extra Light', 'Medium', 'Extra Bold', 'Medium Italic', 'Extra Bold Italic', 'Bold Italic', 'Thin Italic', 'Thin', 'Light Italic', 'Light', 'Black', 'Black Italic'] if full_name == family_name: try: family_name, subfamily_name = full_name.split(' ', 1)[:] except ValueError: pass if subfamily_name == 'Normal' or subfamily_name == 'Roman': subfamily_name = 'Regular' elif subfamily_name == 'Heavy': subfamily_name = 'Black' elif subfamily_name == 'Heavy Italic': subfamily_name = 'Black Italic' return {'family': family_name, 'subfamily': subfamily_name}
UTF-8
Python
false
false
2,014
15,427,522,573,947
cfe3d1d9d3659ef969a464715b7757d72f8a7fa5
9b03417874df98ca57ff593649a1ee06056ea8ad
/urls.py
64df2b7b3877df44579970b1bdeb9e5461339646
[]
no_license
mzupan/intake
https://github.com/mzupan/intake
2c115510c0461b09db310ddc6955943161c783b7
a4b5e0e1e5d441ad9624a9eb44bc1ab98ccf3e22
refs/heads/master
2020-04-22T01:46:06.212654
2010-11-23T17:01:50
2010-11-23T17:01:50
1,010,081
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls.defaults import * urlpatterns = patterns('', (r'^api/', include('api.urls')), (r'^server/', include('server.urls')), (r'^group/', include('server.urls_group')), (r'^admin/', include('admin.urls')), (r'^/?$', 'index.views.show_index'), # # login/logout # (r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'index/login.html'}), (r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/login/?logout=1'}), ) # # handling static pages for development only # from django.conf import settings if settings.LOCAL_DEVELOPMENT: urlpatterns += patterns("django.views", url(r"%s(?P<path>.*)/$" % settings.MEDIA_URL[1:], "static.serve", { "document_root": settings.MEDIA_ROOT, }) )
UTF-8
Python
false
false
2,010
13,606,456,403,294
b57fab85dc87f07c1f8e8862cc7687be9165853c
73318ea0fd00b76af332f74b22dd9a8f9f86a29a
/src/create_test_users.py
37d1d0fff21dc0fbd32eb865a670143050ba0038
[]
no_license
ambv/vanityfair
https://github.com/ambv/vanityfair
6edc4695cdf27a69f1b3823456b95e03f1e72ff6
c4106aa7e995873dd8338a8b04dfc8d03d7714c3
refs/heads/master
2016-04-01T20:47:39.114388
2013-06-11T09:51:35
2013-06-11T09:51:35
4,831,987
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2012 by Łukasz Langa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from random import choice from django.contrib.auth.models import User first_names = ['Jan', 'Mateusz', 'Krzysztof', 'Stefan', 'Ambroży', 'Marian', 'Eugeniusz', 'Bonifacy', 'Bożydar', 'Amadeusz', 'Ludwik', 'Kubuś', 'Roman', 'Zygzak'] last_names = ['Pipka', 'Buraczek', 'Nieodpowiedni', 'Idiom', 'Zabawny', 'Pełnopoważny', 'Niemoga', 'Mariola', 'Dung', 'Fubar', 'Osiemnasty', 'Siedemnasty', 'Szesnasty', 'Kwaśny'] nicks = ['lightning', 'thunder', 'storm', 'hard disk', 'ender', 'aragorn', 'mordor', 'gandalf', 'smeagol', 'gollum', 'bilbo', 'frodo', 'legolas', 'hobbit', 'elf', 'krasnolud', 'wiedźmin', 'przyczepa', 'telefon', 'długi', 'mocny', 'szary', 'cichy', 'słabizna', 'wycharowany', 'czarodziej', 'potter', 'voldemort', 'wróżka', 'sieciowiec', 'informatyk', 'poeta', 'pantograf', 'maszynista', 'kurier', 'bohater', 'niemoralny', 'casanova', 'śmiszek', 'clint', 'kenny', 'cartman', 'kucharz', 'klucznik', 'kwazimodo', 'toudi', 'iktorn', 'smerf', 'gargamel', 'klakier', 'puszek', 'żelazko', 'king', 'poland', 'kazik', 'olo'] for i in xrange(100): username = 'gienek{}'.format(i) u = User(username=username, is_active=True, email='{}@allegro.pl'.format(username), first_name=choice(first_names), last_name=choice(last_names)) u.set_password(username) u.save()
UTF-8
Python
false
false
2,013
15,771,119,921,793
3de983d60d77aaebea5a875918fc85ad18bd730b
7596eb26e37621eb7e42988cd75fe651c711b88a
/samba/bin/python/samba/tests/samba3sam.py
fc3348fc44c24ca6080fb5a94bb5d43ff8072b6d
[ "GPL-2.0-only", "GPL-3.0-only", "LicenseRef-scancode-free-unknown", "LGPL-2.1-or-later", "GPL-1.0-or-later", "LGPL-3.0-only" ]
non_permissive
ebrainte/Samba
https://github.com/ebrainte/Samba
7aba47d32e582f235ffd06f8c19ee5746d1c4cf2
8c023bcbee5fa5d071c14ab0cbf38dcab30e4094
refs/heads/master
2021-01-23T20:22:09.461215
2013-11-22T16:48:50
2013-11-22T16:48:50
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
/root/samba-4.0.10/python/samba/tests/samba3sam.py
UTF-8
Python
false
false
2,013
13,726,715,520,691
1a6612015a5a69b2fe0daf68e643c69f9a33a5e9
445beaa2c50d62a837047b3dbd5132ef5470f8db
/Lista-1 Zumbis/exercicio02_masanori_01.py
ff59d1cef36feca1ff761697a1f43095ae4aff4b
[]
no_license
thayannevls/pythonZumbi
https://github.com/thayannevls/pythonZumbi
ecee77d640e6ad7e487348eae1ec2eba5e833f22
9ffd39aea2f2927bdb5d58828dfbc75756d3c197
refs/heads/master
2020-04-09T07:24:28.655236
2014-03-09T20:50:02
2014-03-09T20:50:02
17,462,490
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
N= int(input('Digite o número que você quer que seja convertido:')) print (N*1000, 'milimetro')
UTF-8
Python
false
false
2,014
12,859,132,087,384
d5b3d629daf1f0f2609bfa72d012ad70bd8722dd
1e66682e3fc5d64a5446409aacd887b9b79b9795
/gccutils.py
6b49c1655569c4fc7af4073516358e32c17e6149
[ "GPL-3.0-or-later", "GPL-3.0-only" ]
non_permissive
jasonxmueller/gcc-python-plugin
https://github.com/jasonxmueller/gcc-python-plugin
b61e7ba666a76e2d330e1a15486ceb1963644b67
f77982e513cb04617c8c848dca83751105333890
refs/heads/master
2021-01-02T09:27:17.531410
2012-03-16T18:48:53
2012-03-16T18:48:53
3,734,215
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright 2011 David Malcolm <[email protected]>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see
# <http://www.gnu.org/licenses/>.

import gcc

from six.moves import xrange

def get_src_for_loc(loc):
    # Given a gcc.Location, get the source line as a string
    import linecache
    return linecache.getline(loc.file, loc.line).rstrip()

def get_field_by_name(typeobj, name):
    # Return the gcc field with the given name within a record/union type,
    # or None if there is no such field
    check_isinstance(typeobj,
                     (gcc.RecordType, gcc.UnionType, gcc.QualUnionType))
    for field in typeobj.fields:
        if field.name == name:
            return field

def get_global_typedef(name):
    # Look up a typedef in global scope by name, returning a gcc.TypeDecl,
    # or None if not found
    for u in gcc.get_translation_units():
        if u.language == 'GNU C++':
            gns = gcc.get_global_namespace()
            return gns.lookup(name)
        if u.block:
            for v in u.block.vars:
                if isinstance(v, gcc.TypeDecl):
                    if v.name == name:
                        return v

def get_variables_as_dict():
    # Map the name of each global variable to its gcc.Variable
    result = {}
    for var in gcc.get_variables():
        result[var.decl.name] = var
    return result

def get_global_vardecl_by_name(name):
    # Look up a variable in global scope by name, returning a gcc.VarDecl,
    # or None if not found
    for u in gcc.get_translation_units():
        if u.language == 'GNU C++':
            gns = gcc.get_global_namespace()
            return gns.lookup(name)
        for v in u.block.vars:
            if isinstance(v, gcc.VarDecl):
                if v.name == name:
                    return v

def get_nonnull_arguments(funtype):
    """
    'nonnull' is an attribute on the fun.decl.type
    http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html

    It can either have no arguments (all pointer args are non-NULL),
    or be a list of integers.  These integers are 1-based.

    Return a frozenset of 0-based integers, giving the arguments for
    which we can assume the "nonnull" property.

    (Note the 0-based vs 1-based differences)

    Compare with gcc/tree-vrp.c: nonnull_arg_p
    """
    check_isinstance(funtype, gcc.FunctionType)
    if 'nonnull' in funtype.attributes:
        result = []
        nonnull = funtype.attributes['nonnull']
        if nonnull == []:
            # All pointer args are nonnull:
            for idx, parm in enumerate(funtype.argument_types):
                if isinstance(parm, gcc.PointerType):
                    result.append(idx)
        else:
            # Only the listed args are nonnull:
            for val in nonnull:
                check_isinstance(val, gcc.IntegerCst)
                result.append(val.constant - 1)
        return frozenset(result)
    else:
        # No "nonnull" attribute was given:
        return frozenset()

def invoke_dot(dot):
    # Render the given graphviz source to "test.png" (or "test.svg") and
    # open it with the desktop's default viewer via xdg-open
    from subprocess import Popen, PIPE
    if 1:
        fmt = 'png'
    else:
        # SVG generation seems to work, but am seeing some text-width issues
        # with rendering of the SVG by eog and firefox on this machine (though
        # not chromium).
        #
        # Looks like X coordinates allocated by graphviz don't contain quite
        # enough space for the <text> elements.
        #
        # Presumably a font selection/font metrics issue
        fmt = 'svg'

    p = Popen(['dot', '-T%s' % fmt, '-o', 'test.%s' % fmt],
              stdin=PIPE)
    p.communicate(dot.encode('ascii'))

    p = Popen(['xdg-open', 'test.%s' % fmt])
    p.communicate()

def pprint(obj):
    # Write a textual dump of the given gcc.Tree to stdout
    pp = TextualPrettyPrinter()
    pp.pprint(obj)

def pformat(obj):
    # Return a textual dump of the given gcc.Tree as a string
    pp = TextualPrettyPrinter()
    return pp.pformat(obj)

class PrettyPrinter(object):
    # Base class holding attribute-iteration/formatting logic shared by the
    # textual and graphviz pretty-printers
    def __init__(self):
        self.show_addr = False

    def attr_to_str(self, name, value):
        # Short, single-line rendering of one attribute value
        if name == 'addr':
            return hex(value)
        if isinstance(value, str):
            return repr(value)
        return str(value)

    def iter_tree_attrs(self, obj):
        # Iterate through the interesting attributes of the object:
        for name in dir(obj):
            # Ignore private and "magic" attributes:
            if name.startswith('_'):
                continue
            value = getattr(obj, name)
            # Ignore methods:
            if hasattr(value, '__call__'):
                continue
            if not self.show_addr:
                if name == 'addr':
                    continue
            # Don't follow infinite chains, e.g.
            # ptr to ptr to ... of a type:
            if isinstance(obj, gcc.Type):
                if (name == 'pointer'
                    or name.endswith('equivalent')):
                    continue
            #print 'attr %r  obj.%s: %r' % (name, name, value)
            yield (name, value)

class TextualPrettyPrinter(PrettyPrinter):
    """
    Convert objects to nice textual dumps, loosely based on Python's
    pprint module
    """
    def __init__(self):
        super(TextualPrettyPrinter, self).__init__()
        # Maximum recursion depth when following gcc.Tree attributes:
        self.maxdepth = 5

    def pprint(self, obj):
        # Write the formatted dump of obj to stdout
        import sys
        sys.stdout.write(self.pformat(obj))

    def make_indent(self, indent):
        return indent * ' '

    def pformat(self, obj):
        # Return the formatted dump of obj as a string
        return self._recursive_format_obj(obj, set(), 0)

    def indent(self, prefix, txt):
        # Prepend the given prefix to every line of txt
        return '\n'.join([prefix + line for line in txt.splitlines()])

    def _recursive_format_obj(self, obj, visited, depth):
        # Format one gcc.Tree, recursing into tree-valued attributes up to
        # self.maxdepth; "visited" (a set of addrs) guards against cycles
        def str_for_kv(key, value):
            return '  %s = %s\n' % (key, value)

        check_isinstance(obj, gcc.Tree)
        visited.add(obj.addr)

        result = '<%s\n' % obj.__class__.__name__
        r = repr(obj)
        s = str(obj)
        result += str_for_kv('repr()', r)
        if s != r:
            result += str_for_kv('str()', '%r' % s)
        # Show MRO, stripping off this type from front and "object" from end:
        superclasses = obj.__class__.__mro__[1:-1]
        result += str_for_kv('superclasses', superclasses)
        for name, value in self.iter_tree_attrs(obj):
            if depth < self.maxdepth:
                if isinstance(value, gcc.Tree):
                    if value.addr in visited:
                        # Already dumped; just reference it:
                        result += str_for_kv('.%s' % name,
                                             '... (%s)' % self.attr_to_str(name, repr(value)))
                    else:
                        # Recurse
                        formatted_value = self._recursive_format_obj(value,
                                                                     visited, depth + 1)
                        indented_value = self.indent(' ' * (len(name) + 6),
                                                     formatted_value)
                        result += str_for_kv('.%s' % name,
                                             indented_value.lstrip())
                    continue
            # Otherwise: just print short version of the attribute:
            result += str_for_kv('.%s' % name, self.attr_to_str(name, value))
        result += '>\n'
        return result

class DotPrettyPrinter(PrettyPrinter):
    # Base class for various kinds of data visualizations that use graphviz
    # (aka ".dot" source files)
    def to_html(self, text):
        # Escape text for embedding in graphviz's pseudo-HTML labels
        html_escape_table = {
            "&": "&amp;",
            '"': "&quot;",
            "'": "&apos;",
            ">": "&gt;",
            "<": "&lt;",
            # 'dot' doesn't seem to like these:
            '{': '&#123;',
            '}': '&#125;',
            ']': '&#93;',
        }
        return "".join(html_escape_table.get(c, c) for c in str(text))

    def _dot_td(self, text, align="left", colspan=1, escape=1, bgcolor=None,
                port=None):
        # Build one <td> cell for a graphviz HTML-like label
        if escape:
            text = self.to_html(text)
        attribs = 'align="%s" colspan="%i"' % (align, colspan)
        if bgcolor:
            attribs += ' bgcolor="%s"' % bgcolor
        if port:
            attribs += ' port="%s"' % port
        return ('<td %s>%s</td>' % (attribs, text))

    def _dot_tr(self, td_text):
        # Build a one-cell <tr> row
        return ('<tr>%s</tr>\n' % self._dot_td(td_text))

try:
    from pygments.formatter import Formatter
    from pygments.token import Token
    from pygments.styles import get_style_by_name

    class GraphvizHtmlFormatter(Formatter, DotPrettyPrinter):
        """
        A pygments Formatter to turn source code fragments into graphviz's
        pseudo-HTML format.
        """
        def __init__(self, style):
            Formatter.__init__(self)
            self.style = style

        def style_for_token(self, token):
            # Return a (hexcolor, isbold) pair, where hexcolor could be None
            # Lookup up pygments' color for this token type:
            col = self.style.styles[token]
            isbold = False
            # Extract a pure hex color specifier of the form that graphviz can
            # deal with
            if col:
                if col.startswith('bold '):
                    isbold = True
                    col = col[5:]
            return (col, isbold)

        def format_unencoded(self, tokensource, outfile):
            # Emit each token as pseudo-HTML, honoring color and boldness.
            # (The original imported pprint.pprint here but never used it;
            # the unused local import has been dropped.)
            for t, piece in tokensource:
                # graphviz seems to choke on font elements with no inner text:
                if piece == '':
                    continue
                # pygments seems to add this:
                if piece == '\n':
                    continue
                # avoid croaking on '\n':
                if t == Token.Literal.String.Escape:
                    continue
                color, isbold = self.style_for_token(t)
                if 0:
                    print ('(color, isbold): (%r, %r)' % (color, isbold))
                if isbold:
                    outfile.write('<b>')
                # Avoid empty color="" values:
                if color:
                    outfile.write('<font color="%s">' % color
                                  + self.to_html(piece)
                                  + '</font>')
                else:
                    outfile.write(self.to_html(piece))
                if isbold:
                    outfile.write('</b>')

    from pygments import highlight
    from pygments.lexers import CLexer
    from pygments.formatters import HtmlFormatter

    def code_to_graphviz_html(code):
        # Syntax-highlight C code into graphviz pseudo-HTML
        style = get_style_by_name('default')
        return highlight(code,
                         CLexer(), # FIXME
                         GraphvizHtmlFormatter(style))

    using_pygments = True
except ImportError:
    # pygments is optional; fall back to plain escaping in code_to_html:
    using_pygments = False

class CfgPrettyPrinter(DotPrettyPrinter):
    # Generate graphviz source for this gcc.Cfg instance, as a string
    def __init__(self, cfg, name=None):
        self.cfg = cfg
        if name:
            self.name = name

    def block_id(self, b):
        # Stable dot node identifier for a basic block
        if b is self.cfg.entry:
            return 'entry'
        if b is self.cfg.exit:
            return 'exit'
        return 'block%i' % id(b)

    def block_to_dot_label(self, bb):
        # FIXME: font setting appears to work on my machine, but I invented
        # the attribute value; it may be exercising a failure path
        result = '<font face="monospace"><table cellborder="0" border="0" cellspacing="0">\n'
        result += '<tr> <td>BLOCK %i</td> <td></td> </tr>\n' % bb.index
        curloc = None
        if isinstance(bb.phi_nodes, list):
            for stmtidx, phi in enumerate(bb.phi_nodes):
                result += '<tr><td></td>' + self.stmt_to_html(phi, stmtidx) + '</tr>\n'
        if isinstance(bb.gimple, list) and bb.gimple != []:
            for stmtidx, stmt in enumerate(bb.gimple):
                if curloc != stmt.loc:
                    # New source location: show the source line with a caret
                    # marking the column:
                    curloc = stmt.loc
                    code = get_src_for_loc(stmt.loc).rstrip()
                    pseudohtml = self.code_to_html(code)
                    # print('pseudohtml: %r' % pseudohtml)
                    result += ('<tr><td align="left">'
                               + self.to_html('%4i ' % stmt.loc.line)
                               + pseudohtml
                               + '<br/>'
                               + (' ' * (5 + stmt.loc.column-1)) + '^'
                               + '</td></tr>')
                result += '<tr><td></td>' + self.stmt_to_html(stmt, stmtidx) + '</tr>\n'
        else:
            # (prevent graphviz syntax error for empty blocks):
            result += self._dot_tr(self.block_id(bb))
        result += '</table></font>\n'
        return result

    def code_to_html(self, code):
        # Highlighted HTML when pygments is available, plain escaping otherwise
        if using_pygments:
            return code_to_graphviz_html(code)
        else:
            return self.to_html(code)

    def stmt_to_html(self, stmt, stmtidx):
        text = str(stmt).strip()
        text = self.code_to_html(text)
        bgcolor = None
        # Work towards visualization of CPython refcounting rules.
        # For now, paint assignments to (PyObject*) vars and to ob_refcnt
        # fields, to highlight the areas needing tracking:
        # print 'stmt: %s' % stmt
        if 0: # hasattr(stmt, 'lhs'):
            # print 'stmt.lhs: %s' % stmt.lhs
            # print 'stmt.lhs: %r' % stmt.lhs
            if stmt.lhs:
                # print 'stmt.lhs.type: %s' % stmt.lhs.type
                # Color assignments to (PyObject *) in red:
                if str(stmt.lhs.type) == 'struct PyObject *':
                    bgcolor = 'red'
                # Color assignments to PTR->ob_refcnt in blue:
                if isinstance(stmt.lhs, gcc.ComponentRef):
                    # print(dir(stmt.lhs))
                    # print 'stmt.lhs.target: %s' % stmt.lhs.target
                    # print 'stmt.lhs.target.type: %s' % stmt.lhs.target.type
                    # (presumably we need to filter these to structs that are
                    # PyObject, or subclasses)
                    # print 'stmt.lhs.field: %s' % stmt.lhs.field
                    if stmt.lhs.field.name == 'ob_refcnt':
                        bgcolor = 'blue'
        return self._dot_td(text, escape=0, bgcolor=bgcolor,
                            port='stmt%i' % stmtidx)

    def edge_to_dot(self, e):
        # One dot edge per CFG edge, labeled with the edge kind (if any)
        if e.true_value:
            attrliststr = '[label = true]'
        elif e.false_value:
            attrliststr = '[label = false]'
        elif e.loop_exit:
            attrliststr = '[label = loop_exit]'
        elif e.fallthru:
            attrliststr = '[label = fallthru]'
        elif e.dfs_back:
            attrliststr = '[label = dfs_back]'
        else:
            attrliststr = ''
        return ('   %s -> %s %s;\n'
                % (self.block_id(e.src), self.block_id(e.dest), attrliststr))

    def extra_items(self):
        # Hook for expansion
        return ''

    def to_dot(self):
        if hasattr(self, 'name'):
            name = self.name
        else:
            name = 'G'
        result = 'digraph %s {\n' % name
        result += ' subgraph cluster_cfg {\n'
        #result += '  label="CFG";\n'
        result += '  node [shape=box];\n'
        for block in self.cfg.basic_blocks:
            result += ('  %s [label=<%s>];\n'
                       % (self.block_id(block), self.block_to_dot_label(block)))
            for edge in block.succs:
                result += self.edge_to_dot(edge)
            # FIXME: this will have duplicates:
            #for edge in block.preds:
            #    result += edge_to_dot(edge)
        result += ' }\n'
        # Potentially add extra material:
        result += self.extra_items()
        result += '}\n'
        return result

class TreePrettyPrinter(DotPrettyPrinter):
    # Generate a graphviz visualization of this gcc.Tree and the graphs of
    # nodes it references, as a string
    def __init__(self, root):
        print('root: %s' % root)
        check_isinstance(root, gcc.Tree)
        self.root = root
        self.show_addr = False
        self.maxdepth = 6 # for now

    def tr_for_kv(self, key, value):
        # Two-cell row: key in the first cell, value in the second
        return ('<tr> %s %s</tr>\n'
                % (self._dot_td(key), self._dot_td(value)))

    def label_for_tree(self, obj):
        # Build the HTML-like label summarizing one tree node
        result = '<table cellborder="0" border="0" cellspacing="0">\n'
        r = repr(obj)
        s = str(obj)
        result += self.tr_for_kv('repr()', r)
        if s != r:
            result += self.tr_for_kv('str()', '%r' % s)
        # Show MRO, stripping off this type from front and "object" from end:
        superclasses = obj.__class__.__mro__[1:-1]
        result += self.tr_for_kv('superclasses', superclasses)
        for name, value in self.iter_tree_attrs(obj):
            result += ('<tr> %s %s </tr>\n'
                       % (self._dot_td(name),
                          self._dot_td(self.attr_to_str(name, value))))
        result += '</table>\n'
        return result

    def tree_id(self, obj):
        # Stable dot node identifier for a tree node
        return 'id%s' % id(obj)

    def tree_to_dot(self, obj):
        check_isinstance(obj, gcc.Tree)
        return ('  %s [label=<%s>];\n'
                % (self.tree_id(obj), self.label_for_tree(obj)))

    def recursive_tree_to_dot(self, obj, visited, depth):
        # Emit this node plus edges/nodes for tree-valued attributes, up to
        # self.maxdepth; "visited" (a set of addrs) prevents revisiting nodes
        print('recursive_tree_to_dot(%r, %r)' % (obj, visited))
        check_isinstance(obj, gcc.Tree)
        result = self.tree_to_dot(obj)
        visited.add(obj.addr)
        if depth < self.maxdepth:
            for name, value in self.iter_tree_attrs(obj):
                if isinstance(value, gcc.Tree):
                    if value.addr not in visited:
                        # Recurse
                        result += self.recursive_tree_to_dot(value,
                                                             visited,
                                                             depth + 1)
                    # Add edge:
                    result += ('  %s -> %s [label = %s];\n'
                               % (self.tree_id(obj),
                                  self.tree_id(value),
                                  name))
        return result

    def to_dot(self):
        self.root.debug()
        result = 'digraph G {\n'
        result += '  node [shape=record];\n'
        result += self.recursive_tree_to_dot(self.root, set(), 0)
        result += '}\n'
        return result

def cfg_to_dot(name, cfg):
    # BUGFIX: the original forwarded (name, cfg) directly, but
    # CfgPrettyPrinter.__init__ takes (cfg, name=None) — the arguments were
    # swapped, making self.cfg the name string and breaking to_dot().
    # The public signature is unchanged; only the internal call is reordered.
    pp = CfgPrettyPrinter(cfg, name)
    return pp.to_dot()

def tree_to_dot(tree):
    # Graphviz source for the graph reachable from the given gcc.Tree
    pp = TreePrettyPrinter(tree)
    return pp.to_dot()

class Table(object):
    '''A table of text/numbers that knows how to print itself'''
    def __init__(self, columnheadings=None, rows=[], sepchar='-'):
        # NOTE(review): the "rows" parameter is accepted but ignored (rows are
        # added via add_row); kept for signature compatibility.
        self.numcolumns = len(columnheadings)
        self.columnheadings = columnheadings
        self.rows = []
        self._colsep = '  '
        self._sepchar = sepchar

    def add_row(self, row):
        assert len(row) == self.numcolumns
        self.rows.append(row)

    def write(self, out):
        # Render: separator, headings, separator, data rows, separator
        colwidths = self._calc_col_widths()
        self._write_separator(out, colwidths)
        self._write_row(out, colwidths, self.columnheadings)
        self._write_separator(out, colwidths)
        for row in self.rows:
            self._write_row(out, colwidths, row)
        self._write_separator(out, colwidths)

    def _calc_col_widths(self):
        result = []
        for colIndex in xrange(self.numcolumns):
            result.append(self._calc_col_width(colIndex))
        return result

    def _calc_col_width(self, idx):
        # Width of the widest cell (or the heading) in this column
        cells = [str(row[idx]) for row in self.rows]
        heading = self.columnheadings[idx]
        return max([len(c) for c in (cells + [heading])])

    def _write_row(self, out, colwidths, values):
        for i, (value, width) in enumerate(zip(values, colwidths)):
            if i > 0:
                out.write(self._colsep)
            formatString = "%%-%ds" % width # to generate e.g. "%-20s"
            out.write(formatString % value)
        out.write('\n')

    def _write_separator(self, out, colwidths):
        for i, width in enumerate(colwidths):
            if i > 0:
                out.write(self._colsep)
            out.write(self._sepchar * width)
        out.write('\n')

class CallgraphPrettyPrinter(DotPrettyPrinter):
    # Render the whole gcc callgraph as graphviz source
    def node_id(self, cgn):
        return 'cgn%i' % id(cgn)

    def node_to_dot_label(self, cgn):
        return str(cgn.decl.name)

    def edge_to_dot(self, e):
        attrliststr = ''
        return ('   %s -> %s %s;\n'
                % (self.node_id(e.caller), self.node_id(e.callee), attrliststr))

    def to_dot(self):
        result = 'digraph Callgraph {\n'
        #result += ' subgraph cluster_callgraph {\n'
        result += '  node [shape=box];\n'
        for cgn in gcc.get_callgraph_nodes():
            result += ('  %s [label=<%s>];\n'
                       % (self.node_id(cgn), self.node_to_dot_label(cgn)))
            for edge in cgn.callers:
                result += self.edge_to_dot(edge)
        #result += ' }\n'
        result += '}\n'
        return result

def callgraph_to_dot():
    # Graphviz source for the current callgraph
    pp = CallgraphPrettyPrinter()
    return pp.to_dot()

def check_isinstance(obj, types):
    """
    Like:
       assert isinstance(obj, types)
    but with better error messages
    """
    if not isinstance(obj, types):
        raise TypeError('%s / %r is not an instance of %s' % (obj, obj, types))
UTF-8
Python
false
false
2,012
790,274,022,311
acd4027afe6f8299db74a23b5d143b417779529d
240efd23bff4397d3ac799121632819c43378d70
/bonzo/errors.py
e09a32e3c17e462519355adc3ae5ee722b446b0d
[ "Apache-2.0", "LicenseRef-scancode-public-domain" ]
non_permissive
rwdim/bonzo
https://github.com/rwdim/bonzo
857826beb670cf730bd9feba30b0483cc4c79cd3
9c28fdb27647c82baf880dfaf058fcb95540a625
refs/heads/master
2021-01-19T21:13:29.155635
2014-09-14T04:03:51
2014-09-14T04:03:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""SMTP exceptions for response to the client."""


class SMTPError(Exception):
    """An exception that will turn into an SMTP error response.

    :arg int status_code: SMTP status code. For a status codes list, see:
        http://www.greenend.org.uk/rjk/tech/smtpreplies.html.

    :arg string message: Message to be written to the stream in order to
        response to the client.

    :arg string log_message: Message to be written to the log for this
        error. May contain ``%s``-style placeholders, which will be filled
        in with remaining positional parameters.
    """

    def __init__(self, status_code, message, log_message=None, *args):
        self.status_code = status_code
        self.message = message
        self.log_message = log_message
        # Extra positional parameters are interpolated into log_message:
        self.args = args

    def __str__(self):
        # Base response text; the optional log message is appended in
        # parentheses after %-interpolating the stored args.
        base = 'SMTP %d: %s' % (self.status_code, self.message)
        if self.log_message:
            return base + ' (' + (self.log_message % self.args) + ')'
        return base


class InternalConfusion(SMTPError):
    """Used to return a ``451`` status code."""

    def __init__(self):
        super(InternalConfusion, self).__init__(451, 'Internal confusion')


class UnrecognisedCommand(SMTPError):
    """Used to return a ``500`` status code."""

    def __init__(self):
        super(UnrecognisedCommand, self).__init__(500, 'Error: bad syntax')


class BadArguments(SMTPError):
    """Used to return a ``501`` status code.

    :arg string syntax: Syntax returned to the client.
    """

    def __init__(self, syntax):
        detail = 'Syntax: %s' % syntax
        super(BadArguments, self).__init__(501, detail)


class NotImplementedCommand(SMTPError):
    """Used to return a ``502`` status code.

    :arg string command: Command not implemented for the server.
    """

    def __init__(self, command):
        super(NotImplementedCommand, self).__init__(
            502, 'Error: command "%s" not implemented' % command)


class BadSequence(SMTPError):
    """Used to return a ``503`` status code.

    :arg string message: Message to be written to the stream and to response
        to the client.
    """

    def __init__(self, message):
        super(BadSequence, self).__init__(503, message)
UTF-8
Python
false
false
2,014
13,589,276,547,722
e9c31a1751b2f5e302a1acdd5367a15adfe7c987
944587dc229af167e7bdb9988c6f19e4d00eb664
/resultat.py
754c9d70d57a2b0736aa94b4de00f1c2b64ad682
[]
no_license
guennobzh/vetathlon
https://github.com/guennobzh/vetathlon
bb87ab7563949823097cd4cce71b145502a151ba
430a3f57ae77e65ebe0ecf0a53bdad86ebb0def4
refs/heads/master
2021-01-01T06:05:31.535768
2014-05-11T15:49:32
2014-05-11T15:49:32
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from mod_python import apache, Session from datab import _Datab from tools import _Head def index(req, categorie): #Cré l'objet base de donnée objBdd = _Datab() if categorie == 'seniorsg': #Général sénior titre = 'Classement général sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorsg': #Général junior titre = 'Classement général junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme 
particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorse': #sénior equipe titre = 'Classement par équipe sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 
0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorsi': #sénior individuel titre = 'Classement individuel sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorsp': #sénior pieton titre = 'Classement piéton sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ 
`particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'seniorsv': #sénior vtt titre = 'Classement vtt sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, 
`particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'masculinese': #equipe masculine senior titre = 'Classement par équipe sénior masculine' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_total` 
/*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorshi': #sénior homme individuel titre = 'Classement individuel sénior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorshp': #sénior homme pieton titre = 'Classement piéton sénior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from 
`dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'seniorshv': #sénior homme vtt titre = 'Classement vtt sénior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and 
year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'femininese': #equipe féminine sénior titre = 'Classement par équipe sénior féminine' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur 
est une femme*/ and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorsfi': #sénior femme individuel titre = 'Classement individuel sénior femme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'seniorsfp': #sénior femme pieton titre = 'Classement piéton sénior femme' listDossard = objBdd.execute('''select 
`dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'seniorsfv': #sénior femme vtt titre = 'Classement vtt sénior femme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du 
vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'juniorse': #equipe junior titre = 'Classement par équipe junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` 
!= `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorsi': #junior individuel titre = 'Classement individuel junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorsp': #junior pieton titre = 'Classement piéton junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from 
`dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'juniorsv': #junior vtt titre = 'Classement vtt junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont 
l\'age du vetetiste est < a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'masculinesje': #equipe junior masculine titre = 'Classement par équipe junior masculine' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorshi': #junior 
homme individuel titre = 'Classement individuel junior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorshp': #junior homme pieton titre = 'Classement piéton junior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la 
table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'juniorshv': #junior homme vtt titre = 'Classement vtt junior homme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont 
l\'age du vetetiste est < a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpv`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'femininesje': #equipe junior feminine titre = 'Classement par équipe junior féminine' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ order by `tmp_total` /*Trie 
les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorsfi': #junior femme individuel titre = 'Classement individuel junior femme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` = `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste = pieton donc les individuel*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'juniorsfp': #junior femme pieton titre = 'Classement piéton junior femme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, 
`participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_pieton` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ order by `tmp_pieton` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'juniorsfv': #junior femme vtt titre = 'Classement vtt junior femme' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, 
`particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_vtt` != '00:00:00' /*selectionne uniquement les fiches ayant un temps pieton*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `particpv`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ order by `tmp_vtt` /*Trie les resultat par ordre croisant de temps pieton*/ ;''') elif categorie == 'mixtess': # mixte sénior titre = 'Classement par équipe mixte sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and ((`particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ and `particpv`.`sexe` = 1) /*selectionne 
uniquement les fiches d\'on le vetetiste est une femme*/ or (`particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ and `particpv`.`sexe` = 2)) /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ''') elif categorie == 'mistesj': # mixte junior titre = 'Classement par équipe mixte junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ and ((`particpp`.`sexe` = 2 /*selectionne uniquement les fiches d\'on le coureur est un homme*/ and `particpv`.`sexe` = 1) /*selectionne uniquement les fiches d\'on le vetetiste est une femme*/ or (`particpp`.`sexe` = 1 /*selectionne uniquement les fiches d\'on le coureur est une femme*/ and 
`particpv`.`sexe` = 2)) /*selectionne uniquement les fiches d\'on le vetetiste est un homme*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ''') elif categorie == 'sj': #Sénior/Junior titre = 'Classement par équipe Sénior/Junior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, `participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est >= a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est < a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') elif categorie == 'js': #Junior/Sénior titre = 'Classement par équipe Junior/Sénior' listDossard = objBdd.execute('''select `dossards`.*, /*selectionne tous les champs de dossards*/ `particpp`.`nom`, `particpp`.`prenom`, /*selectionne les nom et prenom du pieton*/ `particpv`.`nom`, `particpv`.`prenom` /*selectionne les nom et prenom du vététiste*/ from `dossards`, 
`participants` as `particpp`,`participants` as `particpv`, `config` /*utilise dossards et 2* la table particimants comme particpp et particpv et la table config*/ where `particpp`.`id` = `dossards`.`pieton` /*fait correspondre l\'id du pieton a sa fiche*/ and `particpv`.`id` = `dossards`.`vtt` /*fait correspondre l\'id du vetetiste a sa fiche*/ and year(from_days(datediff(`config`.`date`, `particpp`.`date_nais`))) < `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du pieton est < a l\'age junior*/ and year(from_days(datediff(`config`.`date`, `particpv`.`date_nais`))) >= `config`.`age_junior` /*selectionne uniquement les fiches d\'ont l\'age du vetetiste est >= a l\'age junior*/ and `tmp_total` != '00:00:00' /*selectionne uniquement les fiches ayant fini la course*/ and `etat` = 0 /*selectionne uniquement les fiches d\'on l\'etat est partant*/ and `dossards`.`pieton` != `dossards`.`vtt` /*selectionne uniquement les fiches d\'on le vététiste != pieton donc les equipe*/ order by `tmp_total` /*Trie les resultat par ordre croisant de temps total*/ ;''') retour = '' #Définie le type mine req.content_type = "text/html;charset=UTF-8" #début du tableau retour = '''<center> <table border="1"> <tbody><h1>%s</h1> <tr class="tete-tableau"> <td style="text-align: center; width: 50px;">Place</td> <td style="text-align: center; width: 100px;">Dossard</td> <td style="text-align: center; width: 200px;">Courreur</td> <td style="text-align: center; width: 200px;">Vététiste</td> <td style="text-align: center; width: 120px;">Temps pieton </td> <td style="text-align: center; width: 120px;">Temps vtt</td> <td style="text-align: center; width: 120px;">Temps total<br></td> </tr> '''%(titre) #crée la vatiable pour la couleur des ligne paire lclass = 0 #crée la vatiable pour le numero de place place = 1 for infodossard in listDossard: #test si il s'agit d'une equipe if infodossard[1] == infodossard[2]: nomc = infodossard[8]+' '+infodossard[9] nomv = '' fusionc = '2' 
else: nomc = infodossard[8]+' '+infodossard[9] nomv = '<td style="text-align: center;">%s %s</td>'%(infodossard[10], infodossard[11]) fusionc = '1' #débinie la class css de la ligne if lclass == 0: classl = 'l-impaire' lclass = 1 else: classl = 'l-paire' lclass = 0 retour += '''<tr class="%s"> <td class="tete-tableau" style="text-align: right;">%s</td> <td style="text-align: right;">%s&nbsp&nbsp&nbsp&nbsp&nbsp&nbsp&nbsp</td> <td colspan="%s" style="text-align: center;">%s</td> %s <td style="text-align: center;">%s</td> <td style="text-align: center;">%s</td> <td style="text-align: center;">%s</td> </tr> '''%(classl, place, infodossard[0], fusionc, nomc, nomv, infodossard[5], infodossard[6], infodossard[7]) place += 1 retour += '</tbody></table>' return _Head(retour)
UTF-8
Python
false
false
2,014
8,091,718,399,451
832a5bff1b880530f7a113aec3ef698f06c99339
4f9faebed856e704cea05d593f03f40a9bded5ca
/dumbo/timelines.py
9f73a24a5dfb68da8c9926484e5de1aabea8cc05
[]
no_license
jazzwang/hadoop-timelines
https://github.com/jazzwang/hadoop-timelines
c86f457d1b2bc2d2cbf7befbc79e5d5fc32f0896
1946ba52b0fd3cf00108d48e088e6d4b7f8d00fd
refs/heads/master
2021-01-12T19:51:20.050417
2009-06-29T05:18:01
2009-06-29T05:18:01
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import re
import urllib, httplib
import dumbo

# NOTE(review): Python 2 code throughout (print statement, cmp-style sort,
# httplib, integer division) -- do not run under Python 3 without porting.

# Matches NAME="value" attribute pairs in a Hadoop job-history log line.
ATTRIBUTES_PATTERN = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
# Attribute names whose values are timestamps; they are parsed as ints.
INT_PROPS = frozenset(('SUBMIT_TIME','START_TIME','FINISH_TIME','SHUFFLE_FINISHED','SORT_FINISHED'))
# Divisor for the INT_PROPS values (presumably ms -> s; Python 2 integer
# division, so sub-second precision is dropped -- TODO confirm units).
scale = 1000

def mapper(key, line):
    # Parse one job-history line into (event, {name: [values]}) and key the
    # output by job name so all events of one job meet in a single reduce.
    event, rest = line.split(" ",1)
    attrs = {}
    for name, value in re.findall(ATTRIBUTES_PATTERN, rest):
        attrs.setdefault(name, [])
        attrs[name].append(int(value)/scale if name in INT_PROPS else value)
    if 'JOBNAME' in attrs:
        if event == 'Job':
            # Job has multiple JOBNAME, taking the longest for now. lame.
            names = sorted(attrs['JOBNAME'],lambda x,y: cmp(len(y), len(x)))
            yield names[0], (event, attrs)
        else:
            yield attrs['JOBNAME'][0], (event, attrs)

def reducer(key, values):
    # Rebuild one job's per-second activity timelines (running maps,
    # shuffling/sorting/running reduces, and "wasted" failed/duplicate
    # attempts), POST them to the hadoop-timelines web service, and yield the
    # URL of the generated chart.
    mapStartTime = {}           # attempt id -> map start time
    mapEndTime = {}             # attempt id -> map finish time
    reduceStartTime = {}        # attempt id -> reduce start time
    reduceShuffleTime = {}      # attempt id -> shuffle-finished time
    reduceSortTime = {}         # attempt id -> sort-finished time
    reduceEndTime = {}          # attempt id -> reduce finish time
    finalAttempt = {}           # task id -> last successful attempt id
    wastedAttempts = []         # failed or superseded attempt ids
    submitTime = None
    finishTime = None
    for event, attrs in values:
        # Each attribute arrives as a single-element list from the mapper;
        # unwrap to plain values.
        attrs = dict((k,v[0]) for k,v in attrs.items())
        if event == 'Job':
            #print >> sys.stderr, 'reduce', key, attrs.keys()
            if "SUBMIT_TIME" in attrs:
                submitTime = attrs["SUBMIT_TIME"]
            if "FINISH_TIME" in attrs:
                finishTime = attrs["FINISH_TIME"]
        elif event == 'MapAttempt':
            attempt = attrs["TASK_ATTEMPT_ID"]
            time = attrs.get("START_TIME", 0)
            # A start record carries START_TIME; a completion record carries
            # FINISH_TIME instead -- the two arrive as separate events.
            if time != 0:
                mapStartTime[attempt] = time
            elif "FINISH_TIME" in attrs:
                mapEndTime[attempt] = attrs["FINISH_TIME"]
                if attrs.get("TASK_STATUS", "") == "SUCCESS":
                    task = attrs["TASKID"]
                    # A later success supersedes an earlier one; the earlier
                    # attempt's work counts as waste.
                    if task in finalAttempt:
                        wastedAttempts.append(finalAttempt[task])
                    finalAttempt[task] = attempt
                else:
                    wastedAttempts.append(attempt)
        elif event == 'ReduceAttempt':
            attempt = attrs["TASK_ATTEMPT_ID"]
            time = attrs.get("START_TIME", 0)
            if time != 0:
                reduceStartTime[attempt] = time
            elif "FINISH_TIME" in attrs:
                task = attrs["TASKID"]
                if attrs.get("TASK_STATUS", "") == "SUCCESS":
                    if task in finalAttempt:
                        wastedAttempts.append(finalAttempt[task])
                    finalAttempt[task] = attempt
                else:
                    wastedAttempts.append(attempt)
                reduceEndTime[attempt] = attrs["FINISH_TIME"]
                if "SHUFFLE_FINISHED" in attrs:
                    reduceShuffleTime[attempt] = attrs["SHUFFLE_FINISHED"]
                if "SORT_FINISHED" in attrs:
                    reduceSortTime[attempt] = attrs["SORT_FINISHED"]
    final = frozenset(finalAttempt.values())
    # One counter slot per second of job wall-clock time.
    runningMaps = []
    shufflingReduces = []
    sortingReduces = []
    runningReduces = []
    waste = []
    if not submitTime or not finishTime:
        # Log was truncated (no Job submit/finish record); count and skip.
        dumbo.core.incrcounter('Timelines', 'Incomplete Jobs', 1)
        return
    for t in range(submitTime, finishTime):
        runningMaps.append(0)
        shufflingReduces.append(0)
        sortingReduces.append(0)
        runningReduces.append(0)
        waste.append(0)
    # Successful map attempts count toward runningMaps; all others are waste.
    for task in mapEndTime.keys():
        if task in mapStartTime:
            for t in range(mapStartTime[task]-submitTime, mapEndTime[task]-submitTime):
                if task in final:
                    runningMaps[t] += 1
                else:
                    waste[t] += 1
    # Successful reduce attempts are split into shuffle/sort/reduce phases.
    # NOTE(review): indexes reduceShuffleTime/reduceSortTime without a guard;
    # a successful attempt missing SHUFFLE_FINISHED or SORT_FINISHED would
    # raise KeyError -- confirm the log format guarantees both.
    for task in reduceEndTime.keys():
        if task in reduceStartTime:
            if task in final:
                for t in range(reduceStartTime[task]-submitTime, reduceShuffleTime[task]-submitTime):
                    shufflingReduces[t] += 1
                for t in range(reduceShuffleTime[task]-submitTime, reduceSortTime[task]-submitTime):
                    sortingReduces[t] += 1
                for t in range(reduceSortTime[task]-submitTime, reduceEndTime[task]-submitTime):
                    runningReduces[t] += 1
            else:
                for t in range(reduceStartTime[task]-submitTime, reduceEndTime[task]-submitTime):
                    waste[t] += 1
    params = {'maps': runningMaps,
              'shuffles': shufflingReduces,
              'merges': sortingReduces,
              'reducers': runningReduces,
              'waste': waste }
    # Serialize each timeline as a comma-separated string for the POST body.
    params = dict([(k,",".join(str(c) for c in v)) for k,v in params.items()])
    params['start'] = submitTime
    params['end'] = finishTime
    params['mapcount'] = len([k for k in mapEndTime.keys() if k in mapStartTime and k in final])
    params['redcount'] = len([k for k in reduceEndTime.keys() if k in reduceStartTime and k in final])
    # Upload the timelines and yield the chart URL returned via the
    # Location header.
    conn = httplib.HTTPConnection("hadoop-timelines.appspot.com:80")
    conn.request("POST", "/timelines", urllib.urlencode(params))
    response = conn.getresponse()
    yield key, response.getheader('location')

if __name__ == "__main__":
    dumbo.run(mapper, reducer)
UTF-8
Python
false
false
2,009
1,168,231,139,682
b8b77c7979011d60dedfa890d543d1134269a7b0
f35013a2907f0527b6408da65d81fbad43bc4270
/foreign/admin.py
d8421a9f8a65c0f620ccae9c1e1e8eef4b9cb570
[]
no_license
hero007asd/million_ads
https://github.com/hero007asd/million_ads
167598f865d5cbeaf07fbeaa5ad65719370e407e
3b6da01554d043f9ea40ffbe79e2bdd96a661fca
refs/heads/master
2016-09-06T03:29:35.823690
2014-05-03T07:13:17
2014-05-03T07:13:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin
from django import forms
from foreign import models
from datetime import date
from django.utils.translation import ugettext_lazy as _
from django.utils.html import format_html_join
from django.utils.safestring import mark_safe

# class OrderForm(forms.ModelForm):
#     class Meta:
#         model = models.Order

class OrderAdmin(admin.ModelAdmin):
    """Admin configuration for Order: list columns, search, filters,
    fieldsets, and one computed upper-cased currency column."""
    list_display = ('user','balance','lots','start_pts','close_pts','start_time','orderProfit','orderPts','uppser_case_name')
    search_fields = ('user',)
    # list_filter = ('start_time','lots','user__ini_money')
    list_filter = ('start_time','lots',)
    date_hierarchy = 'start_time'
    ordering = ('-start_time','lots')
    # fields = ('user','start_pts','lots','balance')
    # filter_horizontal&filter_vertical; for many 2 many keys
    raw_id_fields = ('user',)
    fieldsets=(
        (None,{
            'fields':('user','start_pts','lots','balance')
        }),
        ('Advanced options',{
            # 'classes':('extrapretty','wide'),
            'description':'advanced options test',
            'classes':('collapse','wide'),
            'fields':('currency_type',('stop_profit_pts','stop_loss_pts'),'close_pts')#,'close_time','profit')
        }),
    )
    # formfield_overrides = {
    #     models.TextField: {'widget':RichTextEditorWidget},
    # }
    # exclude = ['close_pts']
    # form = OrderForm
    def uppser_case_name(self,obj):
        # Computed list_display column: the order's currency type, upper-cased.
        # (Name kept as-is -- list_display above references it by this spelling.)
        return ('%s' % obj.currency_type).upper()
    uppser_case_name.short_description = 'CurrencyType1'

# class TypeAdmin(admin.ModelAdmin):
#     formfield_overrides={
#         models.TextField:{'widget':RichTextEditorWidget},
#     }

class DecadeBornListFilter(admin.SimpleListFilter):
    """Sidebar filter narrowing people by birth decade (1980s / 1990s)."""
    title = _('decade born')
    parameter_name='decade'
    def lookups(self,request,model_admin):
        # (value-sent-in-URL, human-readable label) pairs.
        return (
            ('80',_('in the eighties')),
            ('90',_('in the nineties')),
        )
    def queryset(self,request,queryset):
        # BUG FIX: this method used to compare self.value() against '80s' and
        # '90s', but lookups() declares the values as '80' and '90', so the
        # filter never narrowed the queryset. Compare the declared values.
        if self.value() == '80':
            return queryset.filter(birthday__gte=date(1980,1,1),birthday__lte=date(1989,12,31))
        if self.value() == '90':
            return queryset.filter(birthday__gte=date(1990,1,1),birthday__lte=date(1999,12,31))

class PersonAdmin(admin.ModelAdmin):
    """Admin configuration for Person, using the decade-born filter and a
    read-only HTML address report."""
    list_display = ('name','decade_born_in','is_active','born_in_fifities')
    list_display_links = ('name','decade_born_in')
    list_filter = (DecadeBornListFilter,)
    list_per_page = 10
    prepopulated_fields = {'slug':('name',)}
    readonly_fields = ('address_report',)
    def address_report(self,instance):
        # Render each character of the name on its own line, or an error span
        # when the name is empty (format_html_join returns '' then).
        return format_html_join(mark_safe('<br/>'), '{0}', ((line,) for line in instance.name), ) or '<span class="errors">i can\'t determine this address.</span>'
        # return format_html_join(mark_safe('<br/>'), '{0}', ((line,) for line in instance.birthday), ) or '<span class="errors">i can\'t determine this address.</span>'
    address_report.short_description = 'address'
    # NOTE(review): allow_tags is deprecated/removed in modern Django
    # (use format_html/mark_safe instead) -- kept for this codebase's version.
    address_report.allow_tags = True
    #TODO
    # README
    #list_max_show_all = 20
    #list_select_related = ('person','blog')
    #IS_OK
    # list_editable = ('is_active',)

# Register your models here.
admin.site.register(models.UserTemp)
# admin.site.register(models.Order)
admin.site.register(models.Order,OrderAdmin)
admin.site.register(models.CurrencyType)
admin.site.register(models.Person,PersonAdmin)
UTF-8
Python
false
false
2,014
3,874,060,550,232
37a02cff98df8becc540883d8a77140eafdcc624
3be8da1d39bef1e09e4c8e7a6b736d7fc74a3c0f
/webserver/opentrain/ot_api/views.py
a1656d035d88ab0be7336c6c7437f93a893b532a
[ "BSD-3-Clause" ]
permissive
amitzini/OpenTrain
https://github.com/amitzini/OpenTrain
bbe5b2fc1b1b118931f7aac94667083c1b5cf4da
25ff81df668a9eba1c4369f9a789e34c60b44096
refs/heads/master
2020-04-01T22:36:01.131143
2014-10-27T22:07:40
2014-10-27T22:07:40
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
import common.ot_utils
import datetime
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.conf import settings
import urllib
from django.views.generic import View
from django.shortcuts import render

def show_docs(request):
    """Render the API docs page listing every registered ApiView subclass."""
    ctx = dict()
    ctx['apis'] = ApiView.get_api_insts()
    return render(request,'ot_api/docs.html',ctx)

class ApiView(View):
    """Base class for the JSON API: subclasses define `api_url` and get
    auto-registered via get_urls()/__subclasses__()."""
    def _prepare_list_resp(self,req,items,info=None):
        # Wrap a list of items in the standard {objects, meta} envelope,
        # adding a `next` paging link when more items remain.
        info = info or dict()
        count = len(items)
        total_count = info.get('total_count',len(items))
        meta=dict(count=count,total_count=total_count)
        if total_count > count:
            if total_count > info['offset'] + info['limit']:
                d = req.GET.dict()
                d['offset'] = info['offset'] + info['limit']
                meta['next'] = req.path + '?' + urllib.urlencode(d)
        content = dict(objects=items,meta=meta)
        return HttpResponse(content=json.dumps(content),content_type='application/json',status=200)

    def get_json_resp(self,content,status=200):
        # Serialize `content` as a JSON HTTP response.
        return HttpResponse(content=json.dumps(content),content_type='application/json',status=status)

    def get_bool(self,key,defval=None):
        # Read a boolean query parameter; accepts true/false (any case) or an
        # integer, returning `defval` when absent.
        # BUG FIX: was `self.GET` -- a Django View has no GET attribute; the
        # request is stored on self.request by dispatch().
        val = self.request.GET.get(key,None)
        if val is None:
            return defval
        val = val.lower()
        if val == 'false':
            return False
        if val == 'true':
            return True
        return bool(int(val))

    def get_doc(self):
        # Docstring doubles as the API description on the docs page.
        return self.__doc__

    def get_api_url_nice(self):
        # Human-readable URL for the docs page: regex anchors stripped,
        # mounted under /api/1/.
        u = self.api_url
        u = u.replace('$','').replace('^','')
        u = '/api/1/' + u
        return u

    @classmethod
    def get_api_insts(cls):
        return [c() for c in cls.get_api_classes()]

    @classmethod
    def get_api_classes(cls):
        return cls.__subclasses__()

    @classmethod
    def get_urls(cls):
        # Build one Django url() per registered subclass.
        from django.conf.urls import url
        urls = []
        for ac in cls.get_api_classes():
            urls.append(url(ac.api_url,ac.as_view()))
        return urls

class TripIdsForDate(ApiView):
    """
    Return list of trips for given date
    given paramters: one of:
    date : in format dd/mm/yyyy
    today : 0/1
    """
    api_url = r'^trips/trips-for-date/$'
    def get(self,request):
        import timetable.services
        date = request.GET.get('date')
        today = self.get_bool('today',False)
        if not today and not date:
            raise Exception('Must have either today or date')
        if today:
            dt = common.ot_utils.get_localtime_now().date()
        else:
            day,month,year = date.split('/')
            dt = datetime.date(year=int(year),month=int(month),day=int(day))
        trips = timetable.services.get_all_trips_in_date(dt)
        objects=[trip.gtfs_trip_id for trip in trips]
        result = dict(objects=objects, meta=dict(total_count=len(objects)))
        return self.get_json_resp(result)

class TripDetails(ApiView):
    """
    Return details for trip with id trip_id (given in url)
    details include the points in order to draw the trip on map
    """
    api_url = r'^trips/(?P<gtfs_trip_id>\w+)/details/$'
    def get(self,request,gtfs_trip_id):
        import timetable.services
        trip = timetable.services.get_trip(gtfs_trip_id)
        result = trip.to_json_full()
        return self.get_json_resp(result)

# BUG FIX: this class was also named TripDetails, shadowing the class above,
# so the /details/ endpoint was never registered by get_api_classes().
# Renamed to match its /stops/ URL so both endpoints exist.
class TripStops(ApiView):
    """
    Return details for trip with id trip_id (given in url)
    details include the points in order to draw the trip on map
    """
    api_url = r'^trips/(?P<gtfs_trip_id>\w+)/stops/$'
    def get(self,request,gtfs_trip_id):
        import timetable.services
        from analysis.models import RtStop
        device_id = request.GET.get('device_id')
        if device_id is None:
            return HttpResponseBadRequest('Must specify device_id')
        trip = timetable.services.get_trip(gtfs_trip_id)
        rt_stops = RtStop.objects.filter(tracker_id=device_id,trip__gtfs_trip_id=gtfs_trip_id)
        result = trip.to_json_full(with_shapes=False,rt_stops=rt_stops)
        return self.get_json_resp(result)

class CurrentTrips(ApiView):
    """
    Return current trips
    """
    api_url = r'^trips/current/$'
    def get(self,request):
        import analysis.logic
        current_trips = analysis.logic.get_current_trips()
        return self._prepare_list_resp(request, current_trips)

class TripsLocation(ApiView):
    """
    Return location (exp and cur) of trips given in comma separated GET paramter trip_ids
    """
    api_url = r'^trips/cur-location/$'
    def get(self,request):
        import analysis.logic
        trip_ids = request.GET.get('trip_ids',None)
        if not trip_ids:
            return HttpResponseBadRequest('Must specify trip_ids')
        live_trips = analysis.logic.get_trips_location(trip_ids.split(','))
        result = dict(objects=live_trips)
        result['meta'] = dict()
        return self.get_json_resp(result)

class Devices(ApiView):
    """
    Return list of devices
    """
    api_url = r'^devices/$'
    def get(self,request):
        import analysis.logic
        devices = analysis.logic.get_devices_summary()
        return self._prepare_list_resp(request,devices)

class DeviceReports(ApiView):
    """
    Return reports for given device with id device_id
    <br>use <b>stops_only</b>=1 to get only stops
    <br>use <b>full</b>=1 to get also wifis
    """
    api_url = r'^devices/(?P<device_id>[\w ]+)/reports/'
    def get(self,request,device_id):
        import analysis.logic
        # Collect paging/filter options; offset+limit feed the `next` link
        # built by _prepare_list_resp().
        info = dict()
        info['since_id'] = int(request.GET.get('since_id',0))
        info['limit'] = int(request.GET.get('limit',200))
        info['offset'] = int(request.GET.get('offset',0))
        info['stops_only'] = bool(int(request.GET.get('stops_only',0)))
        info['bssid'] = request.GET.get('bssid')
        info['full'] = bool(int(request.GET.get('full',0)))
        reports = analysis.logic.get_device_reports(device_id,info)
        return self._prepare_list_resp(request,reports,info)

class DeviceStatus(ApiView):
    """
    Returns the status of curret device, e.g. its real time location
    <br/>
    Should be used mainly for testing
    """
    api_url = r'^devices/(?P<device_id>[\w ]+)/status/'
    def get(self,request,device_id):
        import algorithm.train_tracker
        result = algorithm.train_tracker.get_device_status(device_id)
        return self.get_json_resp(result)

class BssidsToStopIds(ApiView):
    """
    returns map of bssids to stops
    """
    api_url = r'^stops/bssids/'
    def get(self,request):
        import algorithm.bssid_tracker
        data = algorithm.bssid_tracker.get_bssid_data_for_app()
        return self.get_json_resp(data)

class AllStops(ApiView):
    """
    return lists of stops with bssids
    """
    api_url = r'^stops/$'
    def get(self,request):
        from timetable.models import TtStop
        stops = TtStop.objects.all().order_by('gtfs_stop_id')
        import algorithm.bssid_tracker
        data = algorithm.bssid_tracker.get_bssids_by_stop_ids()
        content = [stop.to_json(bssids=data.get(stop.gtfs_stop_id,[])) for stop in stops]
        return self.get_json_resp(content)

class DistBetweenShapes(ApiView):
    api_url = r'^stops/distance/$'
    def get(self,request):
        import timetable.services
        if 'gtfs_stop_id1' not in request.GET or 'gtfs_stop_id2' not in request.GET:
            return HttpResponse(status=400,content='gtfs_stop_id1 and gtfs_stop_id2 are mandatory')
        content = timetable.services.find_distance_between_gtfs_stops_ids(request.GET['gtfs_stop_id1'],request.GET['gtfs_stop_id2'])
        return self.get_json_resp(content)

class BssidToStop(ApiView):
    """
    Returns stop info for each bssid
    get bssids as paramter
    """
    api_url = r'^analysis/bssid-info/'
    def get(self,request):
        # BUG FIX: was `self.GET` (nonexistent) -- read from request.GET.
        bssids = request.GET.get('bssids').split(',')
        all = self.get_bool('all',False)
        # FIXME: handler is unfinished -- it parses its parameters but builds
        # no response (falls through returning None).
        pass
UTF-8
Python
false
false
2,014
14,362,370,673,035
bfbc9143120fa72c8a09613d9f9085e295b9ed70
da9567a1a19352a18ab3d8318bdf178fea794fb3
/Code/Python/sandbox/pipe/pypipe.py
11b5b02ac1c293f00528e4aa89963a2eedddbd2b
[]
no_license
antroy/Home
https://github.com/antroy/Home
4b2a03360c06ac859563152d51f88c243f443f69
0a08b553601b9828ed4f85536e0e22e8aabf812b
refs/heads/master
2016-09-06T14:50:57.469866
2009-11-09T21:47:30
2009-11-09T21:47:30
366,737
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
"""Copy everything from stdin into the file named on the command line."""

import sys


def suck_input():
    """Read stdin to EOF in 1 KiB chunks and return it all as one string."""
    chunks = []
    # iter() with a '' sentinel stops once read() signals EOF.
    for chunk in iter(lambda: sys.stdin.read(1024), ''):
        chunks.append(chunk)
    return ''.join(chunks)


def main(out_f):
    """Write the full contents of stdin to the file at path `out_f`."""
    with open(out_f, 'w') as out_handle:
        out_handle.write(suck_input())


if __name__ == "__main__":
    out_f = sys.argv[1]
    main(out_f)
UTF-8
Python
false
false
2,009
14,345,190,807,721
180fa949bb3235c600599141acfff4cc51ce86ac
639494a7ac8fcd9f7cd00533669b1888cc626cd2
/src/servlets/closed/Main.py
811691b255376550326f8e4b416ed5ba8a6fbf2d
[ "BSD-2-Clause" ]
permissive
kstaken/Syncato
https://github.com/kstaken/Syncato
17fb859b1e0b7a8373b919f0e1a8d12b916881e3
860822e08b08b88b749961a1d61be902ce3b8ea3
refs/heads/master
2020-04-05T22:48:28.727982
2013-05-19T05:49:27
2013-05-19T05:49:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#
# See the file LICENSE for redistribution information.
#
# Copyright (c) 2003 Kimbro Staken. All rights reserved.
#
# $Id: Main.py,v 1.6 2003/12/15 14:57:48 kstaken Exp $

# NOTE(review): Python 2 code (print statements, `except X, e` syntax).

from string import replace
import libxml2
from XMLFormServlet import XMLFormServlet
from DocumentBuilder import DocumentBuilder
from XMLFragment import XMLFragment
from BaseDatabase import NotFoundError

class Main(XMLFormServlet):
    """Admin servlet: GET serves the edit page or a collection listing;
    POST builds an XML document from form fields, runs it through the
    per-type action transform, and forwards to the resulting stylesheet."""

    def __init__(self):
        XMLFormServlet.__init__(self)

    def respondToGet(self, transaction):
        # Serve the basic editor page at "/" or an HTML-rendered collection
        # listing under "/collection/<type>". Other paths fall through with
        # no response written.
        request = transaction.request()
        response = transaction.response()
        pathInfo = request.extraURLPath()
        if (pathInfo == "" or pathInfo == "/"):
            # We handle gets by just returning the basic edit page
            content = self.weblog.db.runTransform("<dummy/>", "item/editor", "")
            self.sendResponseText(response, content)
        elif (pathInfo.startswith("/collection")):
            # The type of collection is part of the url
            type = replace(pathInfo, "/collection/", "")
            collection = type + "/collection"
            # Locate the collection description
            file = open(self.weblog.db.locateFile(collection, ".xml")).read()
            # Convert it to HTML
            content = self.weblog.db.runTransform(file, "admin/collection", "")
            self.sendResponseText(response, content)

    def respondToPost(self, transaction):
        # Build an entry document from the posted fields, run the
        # type-specific action transform, and forward the user based on the
        # button they clicked. Errors surface via ProcessingError.
        request = transaction.request()
        response = transaction.response()
        entryType = request.field('type', "")
        fields = request.fields()
        try:
            entryDocument = DocumentBuilder(self.weblog, fields)
            content = entryDocument.serialize()
            print content
            # Convert the rest of the form into an XML form spec.
            formDocument = self._buildFormDocument(fields)
            errorText = ""
            try:
                # If there was a problem building the initial document we just
                # want to display an error.
                errorText = entryDocument.getErrorText()
                if (errorText != ""):
                    raise ProcessingError()
                # Otherwise we continue processing the document.
                else:
                    # Add the entry document into the form specification.
                    formDocument.getDocument().addNode("/form/content", entryDocument.getRootElement())
                    # Hand the form spec to the action processor for this entry
                    # type.
                    content = formDocument.serialize()
                    # print content
                    result = self.weblog.db.runTransform(content, entryType + "/action", "", "admin/action")
                    print result
                    actionResult = XMLFragment(result)
                    # If there were any errors in processing we send an error
                    # to the user.
                    errors = actionResult.xpathEval("/results/error-text")
                    if (len(errors) > 0):
                        # NOTE(review): errorText is reassigned (not appended)
                        # each iteration, so only the last error survives --
                        # confirm whether accumulation was intended.
                        for error in errors:
                            errorText = error.content + "<br/>"
                        raise ProcessingError()
                    # Otherwise we figure ou what button was clicked so that we
                    # forward the user to the proper place.
                    else:
                        button = formDocument.xpathEval("/form/button/node()")[0].name
                        #print button
                        style = self.getStyle(request, actionResult, "/results/action/" + button)
                        #print style
                        self.sendResponse(response, entryDocument, style)
            except ProcessingError, e:
                # Make sure the document actually has content and then add the
                # error message to it.
                try:
                    root = entryDocument.getRootElement()
                    entryDocument.getDocument().addNode("/node()/error-text", errorText, 1)
                except:
                    # Document had no root; wrap the error in a dummy node.
                    entryDocument.getDocument().addNode("/dummy/error-text", errorText, 1)
                print entryDocument.serialize()
                style = self.getStyle(request, formDocument, "/form/action/error")
                self.sendResponse(response, entryDocument, style)
        except NotFoundError, e:
            doc = XMLFragment("<error-text>Document not found</error-text>")
            self.sendResponse(response, doc, "admin/error")

    def _buildFormDocument(self, fields):
        # Re-key the posted form fields under /form/<name>, skipping fields
        # that start with '/' (already-pathed) or '#' (internal).
        updatedFields = {}
        for field in fields:
            if (not field.startswith("/") and not field.startswith("#")):
                updatedFields["/form/" + field] = fields[field]
        return DocumentBuilder(self.weblog, updatedFields)

    def getStyle(self, request, document, path, defaultStyle = "admin/error"):
        # Resolve the stylesheet/redirect target stored at `path` in the
        # document; "referer" means "send the user back where they came from".
        style = document.xpathEval(path)
        if (len(style) > 0):
            style = style[0].content
            if (style == "referer"):
                style = request.environ()['HTTP_REFERER']
        else:
            style = defaultStyle
        return style

    def sendResponse(self, response, document, target):
        # if the target is a URL we send a redirect
        if (target.startswith("http://")):
            self.sendRedirect(response, target)
        # Otherwise we're forwarding to a stylesheet
        else:
            content = document.serialize()
            # print target
            print content
            result = self.weblog.db.runTransform(content, target, "")
            self.sendResponseText(response, result)

class ProcessingError (Exception):
    """
    Exception that is thrown when an error occurs during processing.
    """
    pass
UTF-8
Python
false
false
2,013
283,467,879,641
7db779f7c72be69a673fd058dd969ed2c36791bc
bec07c5c10ff0d3aa13a2a21d886215dcfa3757b
/server.py
95baef265784092ab582fc28307fa56d72b12b2b
[]
no_license
nourlcn/icome-pyzmq
https://github.com/nourlcn/icome-pyzmq
869a638902af79792cee8a1b44eae43653dd5d32
39e7d29a9363858a9cd5a6c7bfa9b5e66bafc405
refs/heads/master
2021-01-25T03:27:46.076718
2012-08-09T16:52:33
2012-08-09T16:52:33
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import zmq import time import os import Image images_list = file('/your/file/path','r').readlines() total = len(images_list) index=0 def get_one_filename(): global index return images_list[index][:-1] def simple_main(): global index global total context = zmq.Context() socket = context.socket(zmq.REP) socket.bind("tcp://localhost:5555") while (index < total-1): # Wait for next request from client message = socket.recv() print "Current, No. ", index+1 time.sleep (3) # Do some 'work' filename = get_one_filename() socket.send(filename) index += 1 while(True): msg = socket.recv() socket.send("END") return 0 if __name__ == '__main__': simple_main()
UTF-8
Python
false
false
2,012
16,767,552,346,670
196c2edd4d139bddc084f3d2c6bb66d8a34d2547
19e5118d622ccf89cdcb27d19e4eb8a9c0cca908
/exercices/solution.py
dd8dbf3661b0b83f635e68e2d0a1c971aa188673
[]
no_license
jeanbcaron/course-material
https://github.com/jeanbcaron/course-material
492662989de689b35f49770529828bd484bc6480
810cdf9f71e412bdf5fbb0444064d4b3f61eae67
refs/heads/master
2021-01-14T12:07:33.883350
2014-09-27T09:43:45
2014-09-27T09:43:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def is_alpha(inp):
    """Return True if `inp` is non-empty and consists only of ASCII letters.

    Fixes to the original:
    - syntax error: `for letter in inp` was missing its trailing colon;
    - logic error: the loop body tested `inp in alphab` (whole string as a
      substring of the alphabet) and returned on the first iteration instead
      of checking each character.
    An empty string is not considered alphabetic (matching str.isalpha()).
    """
    import string
    alphab = string.ascii_letters
    if not inp:
        return False
    for letter in inp:
        if letter not in alphab:
            return False
    return True
UTF-8
Python
false
false
2,014
12,945,031,437,825
85f913ed7ad21dfcc850750aa74af470416929a7
c6bfed62f906316569801f199227d90676a739f9
/pulp_rpm/test/unit/test_extension_admin_iso_repo_list.py
feabb18ed50ec6906bb087eeecf15f6bfd08f2d6
[]
no_license
chaobin/rcm-pulp-rpm
https://github.com/chaobin/rcm-pulp-rpm
eb9dfce275d301e8951f9bd60240b385edfa869c
180c4f713e8e7490cf54122af7cf051be1c55ea4
refs/heads/master
2016-08-04T20:21:07.321354
2014-01-02T20:43:48
2014-01-02T20:43:48
16,002,762
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.

from copy import deepcopy
from gettext import gettext as _

import mock

from pulp_rpm.devel import rpm_support_base
from pulp.common.plugins import importer_constants
from pulp_rpm.common import constants
from pulp_rpm.extension.admin.iso import repo_list

# These are some test repos that are returned by the repo_mock() repo object. They were copied from
# real repositories from a working database and then massaged into this form for testing purposes
TEST_REPOS = [
    {'display_name': 'test_iso_repo', 'description': None,
     'distributors': [
         {'repo_id': 'test_iso_repo', '_ns': 'repo_distributors',
          'last_publish': '2013-05-21T12:41:17-04:00', 'auto_publish': True,
          'scheduled_publishes': [], 'distributor_type_id': 'iso_distributor',
          '_id': {'$oid': '519ba0a0b1a8a15a1fcae0b1'}, 'config': {},
          'id': 'iso_distributor'}],
     '_ns': 'repos', 'notes': {'_repo-type': 'iso-repo'},
     'content_unit_counts': {'iso': 3},
     'importers': [
         {'repo_id': 'test_iso_repo', '_ns': 'repo_importers',
          'importer_type_id': 'iso_importer',
          'last_sync': '2013-05-21T12:44:52-04:00', 'scheduled_syncs': [],
          '_id': {'$oid': '519ba0a0b1a8a15a1fcae0b0'},
          'config': {
              importer_constants.KEY_FEED: 'http://pkilambi.fedorapeople.org/test_file_repo/',
              importer_constants.KEY_MAX_DOWNLOADS: 1,
              importer_constants.KEY_MAX_SPEED: 50000},
          'id': 'iso_importer'}],
     '_id': {'$oid': '519ba0a0b1a8a15a1fcae0af'}, 'id': 'test_iso_repo',
     '_href': '/pulp/api/v2/repositories/test_iso_repo/'},
    # This is an ISO repository that uses SSL certificates. This helps us test that the certificates
    # get scrubbed appropriately by the ISORepoListCommand.
    {'display_name': 'cdn', 'description': None,
     'distributors': [
         {'repo_id': 'cdn', '_ns': 'repo_distributors', 'last_publish': None,
          'auto_publish': False, 'scheduled_publishes': [],
          'distributor_type_id': 'iso_distributor',
          '_id': {'$oid': '5163309cb1a8a160d0117efd'},
          'config': {constants.CONFIG_SSL_AUTH_CA_CERT: 'A cert',
                     constants.CONFIG_SERVE_HTTPS: True,
                     constants.CONFIG_SERVE_HTTP: False},
          'id': 'iso_dist'}],
     '_ns': 'repos', 'notes': {'_repo-type': 'iso-repo'},
     'content_unit_counts': {'iso': 5},
     'importers': [
         {'repo_id': 'cdn', '_ns': 'repo_importers',
          'importer_type_id': 'iso_importer',
          'last_sync': '2013-04-08T18:12:20-04:00', 'scheduled_syncs': [],
          '_id': {'$oid': '5163309cb1a8a160d0117ef3'},
          'config': {
              importer_constants.KEY_FEED: 'https://cdn.redhat.com/iso',
              importer_constants.KEY_SSL_CA_CERT: 'CA Certificate',
              importer_constants.KEY_SSL_CLIENT_CERT: 'Client Certificate',
              'id': 'cdn',
              importer_constants.KEY_SSL_CLIENT_KEY: 'Client Key'},
          'id': 'iso_importer'}],
     '_id': {'$oid': '5163309cb1a8a160d0117eea'}, 'id': 'cdn',
     '_href': '/pulp/api/v2/repositories/cdn/'},
    # This is an RPM repository. It should get filtered out by get_repositories(), and it should be
    # shown by get_other_repositories().
    {'display_name': 'zoo', 'description': None,
     'distributors': [
         {'repo_id': 'zoo', '_ns': 'repo_distributors',
          'last_publish': '2013-04-30T10:27:31-04:00', 'auto_publish': True,
          'scheduled_publishes': [], 'distributor_type_id': 'yum_distributor',
          '_id': {'$oid': '517fd4c3b1a8a112da54b1ba'},
          'config': {'http': False,
                     'relative_url': '/repos/pulp/pulp/demo_repos/zoo/',
                     'https': True},
          'id': 'yum_distributor'},
         {'repo_id': 'zoo', '_ns': 'repo_distributors', 'last_publish': None,
          'auto_publish': False, 'scheduled_publishes': [],
          'distributor_type_id': 'export_distributor',
          '_id': {'$oid': '517fd4c3b1a8a112da54b1bb'},
          'config': {'http': False, 'https': True},
          'id': 'export_distributor'}],
     '_ns': 'repos', 'notes': {'_repo-type': 'rpm-repo'},
     'content_unit_counts': {'package_group': 2, 'package_category': 1, 'rpm': 32, 'erratum': 4},
     'importers': [
         {'repo_id': 'zoo', '_ns': 'repo_importers',
          'importer_type_id': 'yum_importer',
          'last_sync': '2013-04-30T10:27:29-04:00', 'scheduled_syncs': [],
          '_id': {'$oid': '517fd4c3b1a8a112da54b1b9'},
          'config': {'feed_url': 'http://repos.fedorapeople.org/repos/pulp/pulp/demo_repos/zoo/'},
          'id': 'yum_importer'}],
     '_id': {'$oid': '517fd4c3b1a8a112da54b1b8'}, 'id': 'zoo',
     '_href': '/pulp/api/v2/repositories/zoo/'}]


def repo_mock():
    # Build a mock of the server's repo API whose repositories() call returns
    # a fresh deep copy of TEST_REPOS, so tests cannot contaminate the fixture.
    repo = mock.MagicMock()
    repo.repositories = mock.MagicMock()
    response = mock.MagicMock()
    response.response_body = deepcopy(TEST_REPOS)
    repo.repositories.return_value = response
    return repo


class TestISORepoListCommand(rpm_support_base.PulpClientTests):
    """
    Test the ISORepoListCommand class.
    """
    @mock.patch('pulp_rpm.extension.admin.iso.repo_list.ListRepositoriesCommand.__init__',
                side_effect=repo_list.ListRepositoriesCommand.__init__, autospec=True)
    def test___init__(self, list_repo_init):
        """
        Test the __init__() method.
        """
        list_command = repo_list.ISORepoListCommand(self.context)
        list_repo_init.assert_called_once_with(list_command, self.context,
                                               repos_title=_('ISO Repositories'))
        self.assertEqual(list_command.all_repos_cache, None)

    def test__all_repos(self):
        """
        Test the _all_repos() method.
        """
        self.context.server.repo = repo_mock()
        list_command = repo_list.ISORepoListCommand(self.context)
        query_params = {}
        all_repos = list_command._all_repos(query_params)
        # The mock should have been called, and all_repos should just be our TEST_REPOS
        # BUG FIX: this used to call assert_call_once_with(), which is not a
        # real Mock assertion method -- on a MagicMock it silently created a
        # child mock and asserted nothing. Use assert_called_once_with().
        self.context.server.repo.repositories.assert_called_once_with(query_params)
        self.assertEqual(all_repos, TEST_REPOS)
        # The cache should be filled now
        self.assertEqual(list_command.all_repos_cache, TEST_REPOS)
        # Calling it again should not increase the mock's call count since the cache should be used
        list_command._all_repos(query_params)
        self.assertEqual(self.context.server.repo.repositories.call_count, 1)

    def test_get_other_repositories(self):
        """
        Test the get_other_repositories() method.
        """
        self.context.server.repo = repo_mock()
        list_command = repo_list.ISORepoListCommand(self.context)
        query_params = {}
        other_repos = list_command.get_other_repositories(query_params)
        # The only "other repo" is the third test one, the "zoo" RPM repo
        self.assertEqual(other_repos, [TEST_REPOS[2]])

    def test_get_repositories(self):
        """
        Test the get_repositories() method.
        """
        self.context.server.repo = repo_mock()
        list_command = repo_list.ISORepoListCommand(self.context)
        query_params = {}
        iso_repos = list_command.get_repositories(query_params)
        # Let's inspect the repos to make sure they have all the correct properties
        # There should be two ISO repos (cdn and iso). zoo was an RPM repo
        self.assertEqual(len(iso_repos), 2)
        # The first repo should be test_iso_repo, unaltered
        self.assertEqual(iso_repos[0], TEST_REPOS[0])
        # The second repo should be cdn, but the SSL certificates should have been removed
        expected_cdn = deepcopy(TEST_REPOS[1])
        expected_cdn['importers'][0]['config']['feed_ssl_configured'] = 'True'
        expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CLIENT_CERT)
        expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CLIENT_KEY)
        expected_cdn['importers'][0]['config'].pop(importer_constants.KEY_SSL_CA_CERT)
        expected_cdn['distributors'][0]['config'].pop(constants.CONFIG_SSL_AUTH_CA_CERT)
        expected_cdn['distributors'][0]['config']['repo_protected'] = 'True'
        self.assertEqual(iso_repos[1], expected_cdn)
UTF-8
Python
false
false
2,014
16,234,976,403,170
cca173a1d79cec7d7eb36298984b4a25fe9e682c
3e2c82189e007b53752e162b4997e1fedd9c5cb1
/inputModule.py
e7e6448ddef9c13d8c3fbbb348834fbdbfdfb85e
[]
no_license
cdepatie/uoguelph
https://github.com/cdepatie/uoguelph
3b1100465547734dd83ca0294b9ca41309032e20
1d2ea0a6d331289d98fd1bdc81595236d91020ef
refs/heads/master
2015-08-14T16:58:22.446020
2014-11-23T22:08:29
2014-11-23T22:08:29
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
"""Input parsing and dispatch for the calculator application.

Validates user-supplied expressions (typed directly or read from a .txt
file) and routes them to the graphing / calculation back ends.
"""

import os
import sys

from output import *
from calculations import *


def parseFunction(function):
    """Validate a graphable function string.

    Accepts an optional "y=" style prefix; the expression right of the
    first '=' is validated.  Returns ["GRAPH", expr] on success or
    ["ERROR", message] on failure.
    """
    parsedArray = function.partition("=")
    if len(function) == 1:
        # A single character cannot contain '='; use it as-is.
        inputString = function
    elif parsedArray[1] == "":
        # No '=' present: the whole string is the expression.
        inputString = parsedArray[0]
    else:
        # Use the right-hand side of the first '='.
        inputString = parsedArray[2]
    # Dictionary of tokens that may legally appear in a function.
    validChars = ['pow', '^', 'sin', 'cos', 'tan', 'csc', 'sec', 'cot',
                  'asin', 'acos', 'atan', 'acsc', 'asec', 'acot', 'e', 'pi',
                  '.', '+', '/', '\\', '*', '%', '-', 'log', 'ln', 'sqrt',
                  '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                  '!', 'x', 'X', ',']
    inputStringTemp = ''
    # Strip newlines/carriage returns and whitespace, convert backslashes
    # to forward slashes and lower-case the expression, in that order.
    inputString = inputString.replace('\n', '').replace('\r', '')
    inputString = inputString.strip()
    inputString = inputString.replace('\\', '/')
    inputString = inputString.replace(' ', '')
    inputString = inputString.lower()
    # Count open/close brackets and require them to balance.
    leftBracket = 0
    rightBracket = 0
    for character in inputString:
        if character == '(':
            leftBracket += 1
        elif character == ')':
            rightBracket += 1
    if rightBracket != leftBracket:
        logger(["ERROR", "The equation you input does not have a matching number of brackets!"])
        return ["ERROR", "The equation you input does not have a matching number of brackets!"]
    # Remove every recognised token; anything left over is invalid input.
    inputStringTemp = inputString
    for element in validChars:
        inputStringTemp = inputStringTemp.replace(element, '')
    if len(inputStringTemp) != 0:
        logger(["ERROR", "The equation you have input contains one or more invalid expressions: " + inputStringTemp])
        return ["ERROR", "The equation you have input contains one or more invalid expressions: " + inputStringTemp]
    # Reject adjacent mathematical operators such as "+*".
    operator = False
    for character in inputString:
        if character == '+' or character == '-' or character == '/' \
                or character == '*' or character == '%':
            if operator:
                logger(["ERROR", "The equation you have input contains one or more instances of adjacent operators. eg. +*"])
                return ["ERROR", "The equation you have input contains one or more instances of adjacent operators. eg. +*"]
            operator = True
            continue
        if operator:
            operator = False
    return ["GRAPH", inputString]


def parseInputString(inputString):
    """Validate a calculator input string or an input file path.

    Returns one of:
      ["FILE", path]     when *inputString* names an existing .txt file,
      ["GOOD", expr]     when the expression passes validation,
      ["ERROR", message] otherwise.
    """
    # Dictionary of tokens that may legally appear in a calculation
    # (no 'x'/'X' variables here, unlike parseFunction).
    validChars = ['pow', '^', 'sin', 'cos', 'tan', 'csc', 'sec', 'cot',
                  'asin', 'acos', 'atan', 'acsc', 'asec', 'acot', 'e', 'pi',
                  '.', '+', '/', '\\', '*', '%', '-', 'log', 'ln', 'sqrt',
                  '(', ')', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                  '!', ',']
    inputStringTemp = ''
    # If the input names a file on disk, only .txt files are accepted.
    inputFile = os.path.isfile(inputString)
    if inputFile:
        isTextFile = inputString.endswith(".txt")
        if isTextFile:
            return ["FILE", inputString]
        else:
            logger(["ERROR", "User input a file other than .txt"])
            return ["ERROR", "Please ensure your input file is a .txt file."]
    # Strip newlines/carriage returns and whitespace, convert backslashes
    # to forward slashes and lower-case the expression, in that order.
    inputString = inputString.replace('\n', '').replace('\r', '')
    inputString = inputString.strip()
    inputString = inputString.replace('\\', '/')
    inputString = inputString.replace(' ', '')
    inputString = inputString.lower()
    # Count open/close brackets and require them to balance.
    leftBracket = 0
    rightBracket = 0
    for character in inputString:
        if character == '(':
            leftBracket += 1
        elif character == ')':
            rightBracket += 1
    if rightBracket != leftBracket:
        logger(["ERROR", "The equation input does not have a matching number of brackets."])
        return ["ERROR", "The equation you input does not have a matching number of brackets!"]
    # Remove every recognised token; anything left over is invalid input.
    inputStringTemp = inputString
    for element in validChars:
        inputStringTemp = inputStringTemp.replace(element, '')
    if len(inputStringTemp) != 0:
        logger(["ERROR", "The equation input contains one or more invalid expressions: " + inputStringTemp])
        return ["ERROR", "The equation you have input contains one or more invalid expressions: " + inputStringTemp]
    # Reject adjacent mathematical operators such as "+*".
    operator = False
    for character in inputString:
        if character == '+' or character == '-' or character == '/' \
                or character == '*' or character == '%':
            if operator:
                logger(["ERROR", "The equation input contains one or more instances of adjacent operators. eg. +*"])
                return ["ERROR", "The equation you have input contains one or more instances of adjacent operators. eg. +*"]
            operator = True
            continue
        if operator:
            operator = False
    return ["GOOD", inputString]


def inputHandler(inputList):
    """Dispatch a tagged [tag, payload] list to the relevant handler.

    Recognised tags: GOOD (calculation), FILE (input file), GRAPH
    (function plot) and ERROR (passed through unchanged).
    """
    if inputList[0] == "GOOD":
        returnList = mathHandler(inputList[1])
        return returnList
    elif inputList[0] == "FILE":
        returnList = readFile(inputList[1])
        return returnList
    elif inputList[0] == "GRAPH":
        returnList = graphHandler(inputList[1])
        return returnList
    elif inputList[0] == "ERROR":
        return inputList
    else:
        logger(["ERROR", "inputHandler received an unexpected input: [" + inputList[0] + "," + inputList[1] + "]"])
        return ["ERROR", "inputHandler received an unexpected input: " + inputList[0] + "," + inputList[1]]


def readFile(filePath):
    """Read a .txt input file and validate each non-empty line.

    Lines look like "GRPH@expr" or "CALC@expr".  Returns ["FILE_R",
    result1, result2, ...] where each result is the output of
    parseFunction/parseInputString, with error messages prefixed by the
    offending line number.
    """
    returnList = ["FILE_R"]
    tempString = ''
    lineCounter = 1
    try:
        inputFile = open(filePath, 'r')
    except IOError:
        logger(["ERROR", "Could not open input file, IOError exception."])
        return ["ERROR", "Could not open input file."]
    try:
        # Read the entire file into a list, one element per line.
        inputList = inputFile.readlines()
    finally:
        # BUG FIX: the file handle was previously never closed.
        inputFile.close()
    # Partition each line around the '@' delimiter; the left part tags the
    # line as a graph (GRPH) or a calculation (CALC).
    for e in inputList:
        element = e.strip()
        if len(element) != 0:
            tempString = element.partition('@')
            if tempString[0] == 'GRPH':
                outputList = parseFunction(tempString[2])
                if outputList[0] == 'ERROR':
                    # Keep the original message and prefix the line number.
                    outputList.append(outputList[1])
                    outputList[1] = "LINE " + str(lineCounter) + ": " + tempString[2]
            elif tempString[0] == 'CALC':
                outputList = parseInputString(tempString[2])
                if outputList[0] == 'ERROR':
                    outputList.append(outputList[1])
                    outputList[1] = "LINE " + str(lineCounter) + ": " + tempString[2]
            else:
                logger(["ERROR", "LINE: " + str(lineCounter), "Failure to import file contents: please compare function format with README."])
                outputList = ["ERROR", "LINE: " + str(lineCounter), "Failure to import file contents: please compare function format with README."]
            if outputList == None:
                # Defensive: the parsers never return None today.
                logger(["ERROR", "parseInputString return a None object when reading the input file."])
                continue
            else:
                returnList.append(outputList)
        # Count every physical line so "LINE n" messages match the file.
        lineCounter += 1
    return returnList
UTF-8
Python
false
false
2,014
8,358,006,405,928
7e48a297512da16ebfcceb8024c2f758896def10
4ce702c1b6c6019be3abc95903d54dffbe7a9ede
/fraction.py
0af394abf47894fbfa58c29adb08b6ae7d5590bb
[]
no_license
fromMars/fraction
https://github.com/fromMars/fraction
6644fbbfd4a38e76f1e43cb6b8d5beb0831d2653
65228a3944959a8ecc5a6fdcc45491b2d1071fff
refs/heads/master
2021-01-15T21:07:28.193393
2014-12-26T04:56:33
2014-12-26T04:56:33
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys def seprate_number(number): int_part = str(number).split(".")[0] dec_part = str(number).split(".")[1] dec_type = int(dec_part[-1:]) print "seprate integer and decimal:", int_part, dec_part, dec_type return (int_part, dec_part, dec_type) def seprate_decimal(decimal_number): base = int("1" + "0"*len(str(decimal_number))) base0 = base decimal_number0 = decimal_number gcd = g_c_d(base, decimal_number, 0, [0]) gcd_number = gcd[-1:][0] print "base:{0}, decimal_number:{1}".format(base, decimal_number) return str(decimal_number/gcd_number) + "/" + str(base/gcd_number) def g_c_d(base, decimal_number, count, tmp_mod): count += 1 mod = base % decimal_number if mod==0: print "Got Result:" + str(tmp_mod) #return tmp_mod else: tmp_mod.append(mod) print str(count) + ": mod: {0}, tmp_mod: {1}".format(str(mod), str(tmp_mod)) g_c_d(decimal_number, mod, count, tmp_mod) return tmp_mod def to_fraction(number): seprated = seprate_number(number) dec_result = seprate_decimal(int(seprated[1])) result = str(seprated[0]) + "|" + dec_result print result if __name__ == "__main__": w = float(sys.argv[1]) h = float(sys.argv[2]) to_fraction(w/h)
UTF-8
Python
false
false
2,014
11,716,670,786,155
b43cecc93635d703ce9b059cd6e28afa7f718e13
0733df4c3fb92762b484c6f4c4600d7a308e3572
/cphct/plugins.py
4e55c4c491391fa4654bc52aced88191e713fdd1
[ "GPL-2.0-only" ]
non_permissive
josh-gree/cphcttoolbox
https://github.com/josh-gree/cphcttoolbox
efed90e063b9c3c1b30a68b42c068b6707e16615
406fc60fe075a9fb0a2a10231bac684a4f88bbaa
refs/heads/master
2016-08-04T15:45:52.524279
2014-12-04T11:15:56
2014-12-04T11:15:56
39,253,012
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# plugins - application plugin framework
# Copyright (C) 2012-2014 The Cph CT Toolbox Project lead by Brian Vinter
#
# This file is part of Cph CT Toolbox.
#
# Cph CT Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cph CT Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
# -- END_HEADER ---
#

"""Plugin framework for optional application extensions that may be provided
by users.

All plugin modules should implement the mandatory init/exit functions:
plugin_init(conf, ..) and plugin_exit(conf, ..), called automatically once
before and after all plugin use.  The actual plugin actions are implemented
in the four functions load_input, preprocess_input, postprocess_output and
save_output, each taking at least an array, a meta data list and a
configuration dictionary.  They should modify the array inline if possible
and always return the resulting array.

Please note that the plugin functions will typically be called on a chunk of
the complete input and output, so they should be flexible enough to handle
variable chunks, or detect and fail if a chunk is incompatible with the
processing.
"""

import os
import sys
import traceback

# Hook names a plugin may implement, and the internal set-up/tear-down
# hooks the framework wires up automatically.
allowed_plugin_hooks = ['load_input', 'preprocess_input',
                        'postprocess_output', 'save_output']
internal_plugin_hooks = ['plugin_init', 'plugin_exit']


def unsupported_handler(*args, **kwargs):
    """Shared helper to mark a plugin unsuitable for particular operations.

    Parameters
    ----------
    *args : positional arguments
        Any positional arguments.
    **kwargs : keyword arguments
        Any keyword arguments.

    Raises
    ------
    ValueError :
        Always, as the operation doesn't make sense for this plugin.
    """
    raise ValueError('unsupported operation for plugin!')


def add_unsupported(plugin_mod, handler):
    """Assign *handler* to every target from allowed_plugin_hooks that
    *plugin_mod* does not implement itself, so plugins only need to
    implement relevant handlers yet still fail gracefully elsewhere.

    Parameters
    ----------
    plugin_mod : module object
        A plugin module previously loaded.
    handler : function
        A function to assign to targets that *plugin_mod* does not implement.
    """
    for target in allowed_plugin_hooks:
        if not hasattr(plugin_mod, target):
            setattr(plugin_mod, target, handler)


def plugin_base_dirs(conf):
    """Return a list of base plugin search directories, in growing priority
    order: the global toolbox installation path, the toolbox dot directory
    in the user home (~/.cphcttoolbox) and the current directory.

    Parameters
    ----------
    conf : dict
        Configuration dictionary.

    Returns
    -------
    output : list of str
        Plugin search directory paths.
    """
    # Parent dir of this module dir is the toolbox base
    global_base = conf['cphcttoolbox_base']
    user_base = os.path.expanduser(os.path.join('~', '.cphcttoolbox'))
    local_base = os.path.abspath('.')
    return [global_base, user_base, local_base]


def app_plugin_dirs(app_names, engine_dir, conf):
    """Return application-specific plugin search directories for the
    application registering with the names in *app_names* and the given
    *engine_dir*, sorted in growing priority order.

    Parameters
    ----------
    app_names : list of str
        List of application names.
    engine_dir : str
        Back end calculation engine sub directory name.
    conf : dict
        Configuration dictionary.

    Returns
    -------
    output : list of str
        Engine-specific plugin directories for the given application.
    """
    plugin_paths = []
    plugin_dirs = plugin_base_dirs(conf)
    # Search and add plugin directories in increasing priority order
    plugin_prefixes = ['cphct', ''] + app_names
    for base_path in plugin_dirs:
        for plugin_dir in ['%splugins' % pre for pre in plugin_prefixes]:
            dir_path = os.path.join(base_path, plugin_dir, engine_dir)
            plugin_paths.append(dir_path)
    return plugin_paths


def app_plugin_paths(app_names, engine_dir, conf):
    """Return available plugins for the application registering with the
    names in *app_names* and the given *engine_dir*, as (directory, name)
    tuples sorted in growing priority order in case a plugin name appears
    more than once.  Both plain .py modules and package directories count.

    Parameters
    ----------
    app_names : list of str
        List of application names.
    engine_dir : str
        Back end calculation engine sub directory name.
    conf : dict
        Configuration dictionary.

    Returns
    -------
    output : list of tuple
        Plugin (directory, name) tuples.
    """
    plugin_paths = []
    plugin_dirs = app_plugin_dirs(app_names, engine_dir, conf)
    # Search and add plugins in increasing priority order
    for dir_path in plugin_dirs:
        if os.path.isdir(dir_path):
            dir_files = os.listdir(dir_path)
            for file_name in dir_files:
                (mod_name, ext) = os.path.splitext(file_name)
                mod_path = os.path.join(dir_path, mod_name)
                if ext == '.py' or os.path.isdir(mod_path):
                    plugin_paths.append((dir_path, mod_name))
    return plugin_paths


def load_plugins(app_names, engine_dir, conf):
    """Load plugins specified in conf using plugin paths based on
    *app_names* and *engine_dir*.  The last matching plugin wins when a
    name appears more than once.  Enabled hooks automatically get their
    optional internal plugin hooks set for init and clean up; plugin call
    arguments from *conf* are copied into the result so subsequent plugin
    actions can rely solely on the returned dictionary.

    Parameters
    ----------
    app_names : list of str
        List of application names.
    engine_dir : str
        Back end calculation engine sub directory name; used as prefix for
        the inspected *conf* entries.
    conf : dict
        Configuration dictionary.

    Returns
    -------
    output : (dict, dict)
        A 2-tuple of a plugin dictionary and a load-error dictionary, both
        mapping hook names to the actual data.
    """
    (plugins, errors) = ({}, {})
    orig_sys_path = sys.path
    internal_targets = ['%s_%s' % (engine_dir, i) for i in
                        internal_plugin_hooks]
    external_targets = ['%s_%s' % (engine_dir, i) for i in
                        allowed_plugin_hooks]

    # For automatic init and clean up of enabled plugins
    for auto_target in internal_targets:
        plugins[auto_target] = []

    # Locate and load plugins in increasing priority order
    plugin_paths = app_plugin_paths(app_names, engine_dir, conf)
    try:
        for target in external_targets:
            (plugins[target], errors[target]) = ([], [])
            # Conf entry is a (name, args, kwargs) tuple
            index = 0
            for (req_mod, args, kwargs) in conf.get(target, []):
                use_plugin = None
                # Search backwards from the end to apply priority
                for (dir_path, mod_name) in plugin_paths[::-1]:
                    if req_mod == mod_name:
                        use_plugin = (dir_path, mod_name)
                        break
                if use_plugin:
                    (plugin_dir, plugin_name) = use_plugin

                    # Load plugin with plugin_dir as first source but with
                    # the original module path appended for external
                    # dependencies.  Init and exit hooks are repeated for
                    # every occurrence of a plugin because each argument set
                    # may require individual init/exit.  The module is
                    # removed from sys.modules right after import to avoid
                    # caching when a module of the same name is loaded for
                    # another engine.
                    sys.path = [plugin_dir] + orig_sys_path
                    try:
                        plugin_mod = __import__(plugin_name)
                        # Prepare the plugin instance __plugin_state__
                        # dictionary for use in individual executions.
                        plugin_mod.__plugin_state__['target'] = target
                        plugin_mod.__plugin_state__['id'] = index
                        add_unsupported(plugin_mod, unsupported_handler)
                        del sys.modules[plugin_name]
                        plugin_tuple = (plugin_name, plugin_mod, args,
                                        kwargs)
                        plugins[target].append(plugin_tuple)
                        for auto_target in internal_targets:
                            plugins[auto_target].append(plugin_tuple)
                    except Exception:
                        # BUG FIX: traceback.format_exc() takes an optional
                        # *limit* argument; previously the exception object
                        # itself was passed as that limit, garbling the
                        # report.
                        err = 'Failed to load %s plugin from %s:\n%s' \
                            % (plugin_name, plugin_dir,
                               traceback.format_exc())
                        errors[target].append((plugin_name, err))
                else:
                    err = 'No such plugin "%s" in plugin directories %s' \
                        % (req_mod, ', '.join(app_plugin_dirs(app_names,
                           engine_dir, conf)))
                    errors[target].append((req_mod, err))
                index += 1
    finally:
        # Always restore the module search path, even if a plugin raises
        # something unexpected (e.g. KeyboardInterrupt).
        sys.path = orig_sys_path
    return (plugins, errors)


def execute_plugin(
    hook,
    name,
    plugin_mod,
    args,
    kwargs,
    ):
    """Execute the *hook* function from *plugin_mod* with the provided
    positional *args* and named *kwargs*.

    Parameters
    ----------
    hook : str
        Name of hook function.
    name : str
        Name of plugin.
    plugin_mod : module
        Plugin module previously loaded and prepared.
    args : list of str
        List of arguments for the plugin.
    kwargs : dict
        Dictionary of keyword/value pair arguments for the plugin.

    Returns
    -------
    output : ndarray or None
        The processed ndarray for main hooks and None for init/exit hooks.

    Raises
    ------
    ValueError
        If *hook* does not match any supported plugin hook.
    """
    if hook.endswith('_plugin_init'):
        return plugin_mod.plugin_init(*args, **kwargs)
    elif hook.endswith('_load_input'):
        return plugin_mod.load_input(*args, **kwargs)
    elif hook.endswith('_preprocess_input'):
        return plugin_mod.preprocess_input(*args, **kwargs)
    elif hook.endswith('_postprocess_output'):
        return plugin_mod.postprocess_output(*args, **kwargs)
    elif hook.endswith('_save_output'):
        return plugin_mod.save_output(*args, **kwargs)
    elif hook.endswith('_plugin_exit'):
        return plugin_mod.plugin_exit(*args, **kwargs)
    else:
        raise ValueError('invalid plugin hook %s for %s (%s)'
                         % (hook, name, plugin_mod))


def set_plugin_var(
    conf,
    key,
    value,
    replace=False,
    ):
    """Set plugin variable *key* to *value* to share state between plugins.

    Parameters
    ----------
    conf : dict
        Configuration dictionary.
    key : str
        Variable name.
    value : object
        Variable value.
    replace : bool, optional
        If True, an existing variable *key* is replaced by *value*.

    Raises
    ------
    ValueError
        If *replace* is False and *key* already exists.
    """
    if replace or key not in conf['plugin_shared_vars']:
        conf['plugin_shared_vars'][key] = value
    else:
        msg = \
            "plugin var: '%s' already exists, use replace=True to overwrite" \
            % key
        raise ValueError(msg)


def get_plugin_var(conf, key):
    """Get the plugin variable value for *key*.

    Parameters
    ----------
    conf : dict
        Configuration dictionary.
    key : str
        Variable name.

    Returns
    -------
    output : object or None
        The value for *key*, or None if *key* was not previously set.
    """
    output = None
    if key in conf['plugin_shared_vars']:
        output = conf['plugin_shared_vars'][key]
    return output
UTF-8
Python
false
false
2,014
5,153,960,790,793
d6bb0b6afb6681b0e90f3a3fe9f8dfb8d5b3da65
b875f61a0aac6c87d6256f8eaef54211a261a7e5
/src/www/tools/connectdb.py
110cc92d39d910e16cf89b2f475a89684407c59d
[]
no_license
jakewan/kestava
https://github.com/jakewan/kestava
f7afc812db1b02861b384b026f0cef957efb4549
d5e5559621baa8eb1d39ba3851589967f6ac53dd
refs/heads/master
2016-05-21T23:45:47.413223
2010-12-11T18:52:18
2010-12-11T18:52:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import cherrypy from model.database import make_connection, close_connection class ConnectDb(cherrypy.Tool): def __init__(self): super(ConnectDb, self).__init__('before_request_body', self.__connect) def __connect(self): make_connection() class DisconnectDb(cherrypy.Tool): def __init__(self): super(DisconnectDb, self).__init__('on_end_request', self.__disconnect) def __disconnect(self): if hasattr(cherrypy.request, 'db'): close_connection() cherrypy.tools.connect_db = ConnectDb() cherrypy.tools.disconnect_db = DisconnectDb()
UTF-8
Python
false
false
2,010
2,388,001,865,196
839bcba20ae6342284bf76349d48f311deaa935b
3f04a8f4ab3f4aaaa288f93c6e8fdaa1fc3dd044
/1.download/ex11.py
1fd4e92e1c2185a876109f8dfff83640dd2fb29c
[]
no_license
gengletao/learn-python
https://github.com/gengletao/learn-python
7f8ae92c22d520312345ebd24969e6c75a834f6e
62b756f8184ed774fe0911e856994afee8c243ba
refs/heads/master
2016-09-01T19:05:34.749283
2011-10-08T16:32:33
2011-10-08T16:32:33
2,416,030
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Author: letao geng <[email protected]> # Copyright (C) alipay.com 2011 ''' ''' import os import sys def main(): ''' main function ''' print "How old are you?", age = raw_input() print "How tall are you?", height = raw_input() print "How much do you weight?", weight = raw_input() print "So, you're %r old, %r tall and %r heavy." % ( age, height, weight) print 'Done' if __name__ == '__main__': main()
UTF-8
Python
false
false
2,011
7,378,753,856,050
703288dd7d7e89e65a01a82cc5eaba103933cc0d
db258fdb98d36eef012f527f05270cb1eab8b5bf
/ListeNumBBis.py
d4d73c84030a4e602de1cc3fa005b4f9bd479a27
[]
no_license
lisalam/Code_VRD
https://github.com/lisalam/Code_VRD
31440e4b4897deb705c578c1c9557b9160ffea4c
bda284754b1095cea07bbe231f53448dcb67d2d7
refs/heads/master
2021-01-10T08:10:12.278550
2013-05-15T13:59:23
2013-05-15T13:59:23
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # -*- coding: iso-8859-15 -*- import os, sys import javax.swing as swing import java.awt as awt from javax.swing import BorderFactory from javax.swing.border import EtchedBorder, TitledBorder from java.awt import Font from java.awt import TextField, Panel, GridLayout, ComponentOrientation, Label, Checkbox, BorderLayout, Button, Color, FileDialog, Frame, Font import sys import os import time import glob import os.path as path import getpass import shutil import random import math username=getpass.getuser() mypath=os.path.expanduser(os.path.join("~","Dropbox","Macros_Lisa","Code_VRD")) sys.path.append(mypath) from org.python.core import codecs codecs.setDefaultEncoding('utf-8') class ListeNumBBis(swing.JFrame): def __init__(self, listnumb): swing.JFrame.__init__(self, title="Numero de Boite") self.setDefaultCloseOperation(swing.JFrame.DISPOSE_ON_CLOSE) self.__listnumb = listnumb self.run() def run(self): self.size = (200, 300) self.contentPane.layout = awt.BorderLayout() line = BorderFactory.createEtchedBorder(EtchedBorder.LOWERED) Panel1=swing.JPanel(awt.FlowLayout(awt.FlowLayout.CENTER)) Panel1.setBorder(line) label=swing.JLabel("") label.setText("Liste des numeros de boites") Panel1.add(label) Panel2=swing.JPanel(awt.FlowLayout(awt.FlowLayout.CENTER)) Panel2.setBorder(line) self.__displistnumb = swing.JList(self.__listnumb) self.__displistnumb.setVisibleRowCount(14) self.__displistnumb.setFixedCellWidth(75) Panel2.add(self.__displistnumb) barre = swing.JScrollPane(self.__displistnumb) Panel2.add(barre) Panel3=swing.JPanel(awt.FlowLayout(awt.FlowLayout.RIGHT)) Panel3.setBorder(line) self.contentPane.add(Panel1, awt.BorderLayout.NORTH) self.contentPane.add(Panel2, awt.BorderLayout.CENTER) self.contentPane.add(Panel3, awt.BorderLayout.SOUTH) if __name__ == "__main__": listnumb=[] num1= ("1") num2 = ("2") num3 = ("3") num4 = ("4") listnumb=[num1,num2,num3,num4] numb = ListeNumBBis(listnumb) numb.show()
UTF-8
Python
false
false
2,013
14,499,809,602,883
005c434784c32d1eeb9e7e9e8d4c3abbad79f782
8494e4c915de65ab270bfb04b53da6ed1b5b18f9
/e43.py
8cc0daded2af6460a777c080a73190d641b5c978
[]
no_license
spanishbear/learnPythonTheHardWay
https://github.com/spanishbear/learnPythonTheHardWay
f2209fb3dd5825052231dc87f35b551bb94e204c
16dc1882c2f45473c2be4b03c7633d07095efaed
refs/heads/master
2021-01-20T11:39:18.811606
2014-02-21T20:48:57
2014-02-21T20:48:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from sys import exit from random import randint class Scene(object): def enter(self): print "This scene is not configured." exit(1) class Engine(object): def __init__(self, scene_map): print "Engine __init_ has scene_map", scene_map self.scene_map = scene_map def play(self): current_scene = self.scene_map.opening_scene() print "Play's first scene", current_scene while True: print "\n-------------" next_scene_name = current_scene.enter() print "next scene", next_scene_name current_scene = self.scene_map.next_scene(next_scene_name) print "map returns new scene", current_scene class Death(Scene): def enter(self): print "Whoops. You died." print "Try again." class CentralCorridor(Scene): def enter(self): print """You are the last surviving member of your ship and your last mission is to get the neutron destruct bomb from the Weapons Armory and blow the ship up after getting into an escape pod. You're running down the central corridor as silent as a mouse. Really, it was so silent that it was deafening. You see, your shoes were designed to be completely soundless. Then, outta nowhere a Gothon jumps out. You are shocked for two reasons: One, how in the hell did this Gothon find you so easily? Second, you expected a terrible monster with sharp, jagged features. A creature straight from the depths of hell, with rows of teeth that one bite feels like a million bites at once, claws so sharp that one slice would behead a person, a stench so foul that would knock the breath out of you. Oddly enough though, these Gothons truly were what their name says. Literally. They were just gothic humans. Here you are with a smooth-faced gothic boy standing before you, blocking the armory door. But you simply do not have the time to think about this conundrum. How was the entire crew defeated by these Gothons? Anyway, what do you do? You have the options of "shooting", "dodging his attack", "running away in confusion", or "telling a joke". 
""" action = raw_input("> ") if action == "shooting": print """Bang! Bang! Bang! You closed your eyes while shooting him because you didn't want to see the truth that you were shooting a young boy. Unfortunately, you emptied your clip and you did not hit him AT ALL. He's terribly upset that you tried to kill him. He goes into an extreme rage and kills you instantly with one punch. """ return 'death' elif action == "running away in confusion": print """After finding out what a Gothon truly is, you immediately run away with the most puzzling look on your face. Suddenly, there are other goths. They're watching you run towards them and they almost laugh at the face you're making. But they shoot you instead. """ return 'death' elif action == "dodging his attack": print """When the Gothon sees you, he tries to shoot with you his Gothic-themed gun. You dodge the gun shot successfully, but you didn't realize that he had a knife in his other hand which he quickly uses to kill you as your dodging. """ elif action == "telling a joke": print """Lucky for you, you had a few gothic friends when you were growing up. You are accustomed to their ways and you know what will make them laugh. After all, they just want to be happy. You say a joke, a pretty lame joke actually, and the Gothon stops, tries not to laugh, then bursts out laughing and can't move. While he's laughing, you shoot him right between his eyes. Then you keep walking towards the armory door and you're in!""" return 'laser_weapon_armory' else: print "Huh? Try again" return 'central_corridor' class LaserWeaponArmory(Scene): def enter(self): print """ You walk in the room and see a box lying in the middle of the room. You walk up to it and see that there's a lock on the box. You have to guess the 3-digit code . You have only 10 times to do so. 
""" code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9)) guess = raw_input("[keypad]> ") guesses = 0 while guess != code and guesses < 10: print "bzzzzzzz" guesses += 1 guess = raw_input("[keypad]> ") if guess == code: print """The box finally opens up and you pick up the neutron bomb. Then you run to the bridge and place it in the right spot. """ return 'the_bridge' else: print "Obviously your will to live has dwindled. Incorrect code. The Gothons blow the ship." return 'death' class TheBridge(Scene): def enter(self): print """You are on the bridge with the bomb. You've planted the bomb so now all you want to do is set detonote the bomb. """ action = raw_input("> ") if action == "throw the bomb": print "Bomb was thrown and you died""" return 'death' elif action == "slowly place the bomb": print "Yay!" else: print "Does not compute. Try again." return 'the_bridge' class EscapePod(Scene): def enter(self): print "You try to escape. You jump into a pod and leave!" return 'finished' class Map(object): scenes = { 'central_corridor': CentralCorridor(), 'laser_weapon_armory': LaserWeaponArmory(), 'the_bridge': TheBridge(), 'escape_pod': EscapePod(), 'death': Death() } def __init__(self, start_scene): self.start_scene = start_scene def next_scene(self, scene_name): self.scene_name = scene_name print "start_scene in next_scene" val = Map.scenes.get(scene_name) print "next_scene returns", val def opening_scene(self): return self.next_scene(self.start_scene) a_map = Map('central_corridor') print "THIS IS THE A_MAP VARIABLE", a_map a_game = Engine(a_map) print "THIS IS THE A_GAME VARIABLE", a_game a_game.play()
UTF-8
Python
false
false
2,014
2,448,131,381,726
7235d20d6dfd7422847c1b25a3b94a9835f4de4e
cb229e438db6f6e713e66edf947d44b2953aaca8
/esrchsmry.py
9810aeb6c8dfca20430a52d79f6f4418b840914d
[]
no_license
oscarpeterjohansson/Pyntrez
https://github.com/oscarpeterjohansson/Pyntrez
873e6738f4afc89f92351213bff88a72d5c14694
84aed747a7818bf5f0b29b68f8c83cf9227e26cb
refs/heads/master
2021-01-10T20:45:05.381364
2014-12-02T09:52:26
2014-12-02T09:52:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
"""
Extract the main elements from the XML Esearch output

The following elements will be parsed out and be written to a new
file in tabular format:

QueryTranslation
Count
RetMax
RetStart
QueryKey
WebEnv
IdList

From the command-line: use -h/--help for information on execution
"""

__author__ = "Johansson, O."
__email__ = "[email protected]"
__contributors__ = ""
__version__ = "1.0"
__licence__ = "GPL-3"

import argparse
import re
import sys

ESPATTERNS = (  # this an appropriate order
    "QueryTranslation",
    "Count",
    "RetMax",
    "RetStart",
    "QueryKey",
    "WebEnv",
    "IdList"
)


def find_text(dta):
    """Find patterns in xml string.

    Scans ``dta`` for each element named in ESPATTERNS and returns a list
    with one string per pattern, in ESPATTERNS order.  A missing element
    yields the empty string.  The IdList entry is flattened into a
    comma-joined string of the individual <Id> values.
    """
    row = []
    for p in ESPATTERNS:
        # ".*?", non-greedy (ie. first match);
        # DOTALL -> . matches all, including newline
        m = re.search("(?P<%s>(?<=<%s>).*?(?=</%s>))" % (p, p, p), dta, re.DOTALL)
        v = ""
        if m is not None:
            v = m.group(p)
        if p == "IdList":
            # BUGFIX: non-greedy ".*?" so that several <Id>..</Id> elements
            # on the same line are captured individually; the previous
            # greedy ".*" swallowed everything up to the last </Id>.
            IdList = re.findall("(?<=<Id>).*?(?=</Id>)", v)
            v = ','.join(IdList)
        row.append(v)
    return row


def esrchsmry(inputfile, outputfile):
    """Extract relevant information from the xml output of the Esearch utils.

    Reads the whole XML document from ``inputfile`` and writes a two-line
    tab-separated summary (header row + value row) to ``outputfile``.
    On I/O failure a short notice is printed and no output file content
    is guaranteed.
    """
    try:
        with open(inputfile, 'r') as fd1, open(outputfile, 'w') as fd2:
            xml = fd1.read()
            t_res = find_text(xml)
            txt = '\r' + '\t'.join(ESPATTERNS) + "\r\n"
            s_res = "\t".join([str(t) for t in t_res]) + "\r\n"
            txt += s_res
            fd2.write(txt)
    except (IOError,) as e:
        ms = "\rIOError: sorry, no output this time\r\n"
        sys.stdout.write(ms)


parser = argparse.ArgumentParser(
    prog = sys.argv[0],
    conflict_handler = "resolve",
    description = """ Extract the main elements from the XML Esearch output
 utils. The following elements will be parsed out and be written to a new
 file in tabular format: QueryTranslation, Count, RetMax, RetStart,
 QueryKey, WebEnv, IdList """,
    add_help = True
)
parser.add_argument(
    "--input",
    dest = "inputfile",
    required = True,
    help = """ Path to xml file with output returned by the Esearch utils """
)
parser.add_argument(
    "--output",
    dest = "outputfile",
    required = True,
    help = """ Path to file to contain output returned by this program """
)


def main(argv):
    """For command-line use: parse argv, run the summary, then exit(0)."""
    n_argv = parser.parse_args(argv)
    d_argv = vars(n_argv)
    esrchsmry(**d_argv)
    parser.exit(status=0, message=None)


if __name__ == "__main__":
    main(sys.argv[1:])
UTF-8
Python
false
false
2,014
18,519,899,011,331
2b6ad86de494e62df725260216805e3586ff01fb
a94f779b762463d80a775db7efc08b47ff60aac1
/days/4/classes/extras/duck-typing/2.py
52747fd8eacd3c9c809012fd85ca9a0be8b0850b
[]
no_license
sivajipr/python-course
https://github.com/sivajipr/python-course
602a99d941dc6df1dabb17dfc284dcffd140e003
176c04426f0cbef1c4beb888300dd911eb708b97
refs/heads/master
2016-09-05T09:29:30.649858
2014-09-19T08:18:36
2014-09-19T08:18:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Duck: def quack(self): print 'Duck: quack quack' class LooksLikeADuck: def quack(self): print 'LooksLikeADuck: quack quack' def run(self): print 'LooksLikeADuck: run' def handle_duck(d): d.quack() d = LooksLikeADuck() handle_duck(d)
UTF-8
Python
false
false
2,014
40,816
068caeacf2bc2a1c76ea182636f5f045b419d493
ea7d84b2cf9f72454e4c5223c4b30b0fc9df6210
/rmse.py
67887c8c644ea48b75ea02ae6e7120220f6586c1
[]
no_license
AlexeyMK/senior_design
https://github.com/AlexeyMK/senior_design
17a415aaa29d15b48826f3db16d3028805b48dbd
91a1941d5af3da4a930287f073a4cea77679a905
refs/heads/master
2020-05-20T00:46:21.788119
2012-04-18T07:43:34
2012-04-18T07:43:34
3,110,578
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# rmse.py # Calculate error for experiments # Test: rmse.py [name of csv file] # !/usr/bin/python import math import sys import util def get_single_rmse(actual_val, data_val): return math.fabs(1 + (float)(actual_val) * 2/5 - (float)(data_val)) def calculate_rmse(actual, data): err_list = [] for i in range(len(actual)): err_list.append(get_single_rmse(actual[i], data[i])) avg_err = sum(err_list) / len(err_list) sq_list = [x**2 for x in err_list] std_err = math.sqrt(sum(sq_list) / len(sq_list)) return (avg_err, std_err) def main(filename): actual, data = util.process_csv_list(filename) print "average error: %f \nstandard error: %f" % (calculate_rmse(actual, data)) if __name__ == '__main__': sys.exit(main(sys.argv[1]))
UTF-8
Python
false
false
2,012
8,400,956,044,577
f78493f605ac3e6e4d0d100ba44950c0b3c59838
231131309cf5e5861b6a3972dab2570ba9107edb
/buybread/src/test.py
6492490b481eecd516a7c860c4bec8c4c2191406
[]
no_license
enzolv/PythonExercises
https://github.com/enzolv/PythonExercises
a950ca189371f1d004afe5b3d9d884de5fe9e51a
17df7ae654fda3cb0d742931e937162aa4d2ab5e
refs/heads/master
2020-12-24T16:59:29.814579
2012-11-30T10:21:39
2012-11-30T10:21:39
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Shopping helpers: each b<item>() function reports whether the store
# currently stocks that item.

# Items bought so far (kept for compatibility; not used in this file).
food = []
# Items the store has in stock.
store = ["bread", "butter", "cheese"]


def bbread():
    """Return "bread" if the store stocks bread, otherwise None."""
    # Cleanup: removed the unreachable `pass` that followed `return None`.
    if "bread" in store:
        return "bread"
    return None


def bsausage():
    """Return "sausage" if the store stocks sausage, otherwise None."""
    if "sausage" in store:
        return "sausage"
    return None


def bbutter():
    """Return "butter" if the store stocks butter, otherwise None."""
    if "butter" in store:
        return "butter"
    return None
UTF-8
Python
false
false
2,012
12,240,656,816,958
d29c9814107e6cc57fa65f419bc5816a8a215074
32cfd6a8df9b24059ed7bee0b7bf99b6c0268f6e
/framework/seocortex/utils/proxied_requests.py
18c36b876880cde6c908516ca23f148d9e2e0d1a
[]
no_license
blorenz/seocortex
https://github.com/blorenz/seocortex
5cd7acb647fbc4908e6045d2a89bdd2ade922434
3f1f7e8ac4a12e24e7f2cb58407ce52babfe5cf8
refs/heads/master
2016-09-05T21:36:01.039128
2012-04-23T13:33:46
2012-04-23T13:33:46
3,951,299
0
3
null
null
null
null
null
null
null
null
null
null
null
null
null
# Python imports
from requests.auth import HTTPProxyAuth
from requests.sessions import session
from random import randint

# NOTE(security): proxy credentials are hard-coded in source control; they
# should be moved to configuration / a secret store.
DEFAULT_PROXIES = [
    {'ip': '50.117.24.226', 'port': '3131', 'u': '31a89a8cbaae43b6', 'p': '56024ab39ee54dcf'},
    {'ip': '50.117.68.212', 'port': '3131', 'u': 'cbf69c8854ba4d04', 'p': 'b7bb6255996749dd'},
    {'ip': '50.117.69.0', 'port': '3131', 'u': 'fb8c9c5ee75e4f57', 'p': '4add5380d904463c'},
    {'ip': '50.117.69.212', 'port': '3131', 'u': '4edb9bc1c2fa4cf2', 'p': '192384efd5e24fa4'},
    {'ip': '50.117.70.1', 'port': '3131', 'u': '9404854294b04337', 'p': '784c84deb85c44f8'},
    {'ip': '50.117.70.212', 'port': '3131', 'u': '4d4fff780c02423d', 'p': 'd918ca12a0ed4a7b'},
    {'ip': '50.117.71.1', 'port': '3131', 'u': '0baf28faba404420', 'p': '189b850e3fe24d09'},
    {'ip': '50.117.71.213', 'port': '3131', 'u': '791b457dd2414762', 'p': '7ed5670febb34e0d'},
    {'ip': '173.208.130.10', 'port': '3131', 'u': 'd947f173e03449aa', 'p': '22853a3d90154a90'},
    {'ip': '173.208.130.249', 'port': '3131', 'u': 'b5ad668b72a84b93', 'p': '8601db9523f84456'},
    {'ip': '173.208.145.164', 'port': '3131', 'u': 'abd0ed0ca4d24913', 'p': 'a9431c8f37ba4509'},
    {'ip': '173.208.145.82', 'port': '3131', 'u': '53b9e92da15247e5', 'p': 'f7250e69cfa845ef'},
    {'ip': '173.208.153.235', 'port': '3131', 'u': 'c0fbf735c6fe4ca9', 'p': 'f2aa81122bda4f6a'},
    {'ip': '173.208.158.167', 'port': '3131', 'u': '82513de71c1248f3', 'p': 'b01bfe42a4ff492c'},
    {'ip': '50.117.64.10', 'port': '3131', 'u': 'e1a213b3c10d47c8', 'p': '67da9da435384bc0'},
    {'ip': '50.117.64.24', 'port': '3131', 'u': '1124e174b2274f35', 'p': '4a4e1bd0c87444eb'},
    {'ip': '50.117.65.15', 'port': '3131', 'u': '2882da2640de4f0a', 'p': '07e47cdbd8714484'},
    {'ip': '50.117.65.74', 'port': '3131', 'u': '37b82961ace9499a', 'p': 'af015f7d99ae4ded'},
    {'ip': '50.117.66.233', 'port': '3131', 'u': 'd7b536dbb5844906', 'p': 'c302b1cf423a49d8'},
    {'ip': '50.117.67.167', 'port': '3131', 'u': 'f0fab0d16a274379', 'p': '9f4f024b325f4201'},
]

# Auth information
DEFAULT_USERNAME = "davindergrover"
DEFAULT_PASSWORD = "uMP4FEvlWGGy"
DEFAULT_AUTH = "%s:%s" % (DEFAULT_USERNAME, DEFAULT_PASSWORD)

FETCH_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.21 (KHTML, like Gecko) Chrome/19.0.1042.0 Safari/535.21',
    'Accept-Encoding': ', '.join(('identity', 'deflate', 'compress', 'gzip')),
    'Accept': '*/*'
}

##
# Just simply wrap all the main requests module's methods
##


def get_proxy(offset=0, username=DEFAULT_USERNAME, password=DEFAULT_PASSWORD,
              proxy_list=DEFAULT_PROXIES):
    """Return a proxy mapping for ``proxy_list[offset]``.

    The mapping is {"http": "http://ip:port", "auth": HTTPProxyAuth} using
    the per-proxy 'u'/'p' credentials.  BUGFIX: returns None when ``offset``
    is out of range instead of raising IndexError -- None is the sentinel
    get_proxies() was already written to stop on.  ``username``/``password``
    are kept for interface compatibility but the per-proxy credentials win.
    """
    if offset < 0 or offset >= len(proxy_list):
        return None
    p = proxy_list[offset]
    proxy = {"http": "http://%s:%s" % (p['ip'], p['port'])}
    proxy['auth'] = HTTPProxyAuth(p['u'], p['p'])
    return proxy


def get_proxies(*args, **kwargs):
    """Yield every proxy in order; stops when get_proxy() returns None."""
    i = 0
    while True:
        proxy = get_proxy(offset=i, *args, **kwargs)
        if proxy is None:
            break
        yield proxy
        i += 1


##
# @return A random proxy url
##
def get_random_proxy(*args, **kwargs):
    # BUGFIX: default to DEFAULT_PROXIES (the list get_proxy() actually
    # uses) rather than [None], which made the random offset always 0.
    p_list = kwargs.get('proxy_list', DEFAULT_PROXIES)
    offset = randint(0, len(p_list) - 1)
    return get_proxy(offset=offset, *args, **kwargs)


##
# Throws in a proxy if none specified
##
def request(method, url, params=None, data=None, headers=FETCH_HEADERS,
            cookies=None, files=None, auth=None, timeout=None,
            allow_redirects=True, proxies=None, hooks=None,
            return_response=True, config=None, use_proxies=True):
    """Forward to requests' session.request(), injecting a random proxy
    (plus its HTTPProxyAuth) when ``proxies`` is not given and
    ``use_proxies`` is True.
    """
    # This is the only thing we do
    if proxies is None and use_proxies:
        proxies = get_random_proxy(proxy_list=DEFAULT_PROXIES)
    if not proxies is None:
        # Work on a copy so a caller-supplied dict is never mutated, and
        # tolerate dicts without an 'auth' entry (the old code KeyError'd).
        proxies = dict(proxies)
        auth = proxies.pop('auth', auth)
    s = session()
    kwargs = {
        'method': method,
        'url': url,
        'params': params,
        'data': data,
        'headers': headers,
        'cookies': cookies,
        'files': files,
        'auth': auth,
        'timeout': timeout,
        'allow_redirects': allow_redirects,
        'proxies': proxies,
        'hooks': hooks,
        'return_response': return_response,
        'config': config,
    }
    return s.request(**kwargs)


def get(url, **kwargs):
    kwargs.setdefault('allow_redirects', True)
    return request('GET', url, **kwargs)


def head(url, **kwargs):
    kwargs.setdefault('allow_redirects', True)
    return request('HEAD', url, **kwargs)


def post(url, data='', **kwargs):
    return request('post', url, data=data, **kwargs)


def put(url, data='', **kwargs):
    return request('put', url, data=data, **kwargs)


def patch(url, data='', **kwargs):
    # BUGFIX: forward the caller's payload; the old code passed data=''
    # and silently dropped whatever the caller supplied.
    return request('patch', url, data=data, **kwargs)


def delete(url, **kwargs):
    return request('delete', url, **kwargs)
UTF-8
Python
false
false
2,012
11,536,282,190,755
03eb412c976881cca60191c6f9c7f02e894234cd
d941938417bab130154c78f732606daa7b107e4a
/testing_runtime/web/job.py
87fc376cb88eaefc9edf4b1d979fae51563b2dc1
[]
no_license
skliarpawlo/ganymede
https://github.com/skliarpawlo/ganymede
abc8c7fac03b51a41cf92efacdf4170dd271d890
3a847635634d383d01dbeb70ef969202b0b7a8c9
refs/heads/master
2016-09-07T18:55:51.680687
2013-11-01T15:18:25
2013-11-01T15:18:25
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: utf-8
"""Django views for adding, listing, updating and removing test Jobs.

Persistence goes through the SQLAlchemy-style sessions exposed by
``core.db`` (``db.session`` for jobs/tasks/tests, ``db.user_session`` for
users).  The ``json_to_*`` helpers turn JSON-encoded id lists posted by the
client into model collections.
"""
from django.shortcuts import render_to_response
from django.http import HttpResponse
from testing_runtime.models import Job, Task, StoredTest, User, EnvScript
from core import db
import json
from testing_runtime.web import tests
from django.utils.translation import ugettext as _
from decorators import html
from django.template import RequestContext

def json_to_envs( js, job_id=None ) :
    """Decode a JSON list of env-script dicts into EnvScript models.

    Each model gets ``job_id`` assigned; returns [] when ``js`` is None.
    """
    if js is None :
        return []
    envs_params = json.loads( js )
    envs = []
    for env_dict in envs_params :
        # Each dict's keys are expanded directly into EnvScript's fields.
        model = EnvScript( **env_dict )
        model.job_id = job_id
        envs.append( model )
    return envs

def json_to_users( js ) :
    """Decode a JSON list of user ids into User models (one query per id)."""
    if js is None :
        return []
    users_ids = json.loads( js )
    users_collect = []
    for user_id in users_ids :
        users_collect.append( db.user_session.query( User ).filter( User.user_id == user_id ).one() )
    return users_collect

def fetch_users_info( job = None ) :
    """Return a list of {id, name, email, checked} dicts for all users.

    ``checked`` is True for users already attached to ``job`` (via the
    job's JSON-encoded ``users`` field); always False when no job is given.
    """
    res = []
    users = db.user_session.query( User ).all()
    if not (job is None) :
        # checked_users is only defined here; the loop below only reads it
        # under the same `job is not None` guard.
        checked_users = json_to_users( job.users )
    for x in users :
        checked = False
        if not (job is None) :
            if x in checked_users :
                checked = True
        res.append( { "id" : x.user_id, "name" : x.username, "email" : x.email, "checked" : checked } )
    return res

def json_to_tests( js ) :
    """Decode a JSON list of test ids into StoredTest models (one query per id)."""
    tests_ids = json.loads( js )
    tests_collect = []
    for test_id in tests_ids :
        tests_collect.append( db.session.query( StoredTest ).filter( StoredTest.test_id == test_id ).one() )
    return tests_collect

def add_job(request) :
    """POST: create a Job (plus its EnvScripts) from form data and answer
    a JSON status.  GET: render the 'add job' form.
    """
    title = html.title([ _('Add job'), _('Jobs'), 'Ganymede' ])
    request.page = "job.add"
    if request.method == 'POST' :
        name = request.POST[ 'name' ]
        repo = request.POST[ 'repo' ]
        branch = request.POST[ 'branch' ]
        users = request.POST[ 'users' ]
        deploy = request.POST[ 'deploy' ]
        github = request.POST[ 'github' ]
        job_tests = json_to_tests(request.POST[ 'tests' ])
        whose = request.user.username
        job = Job( name=name, repo=repo, branch=branch, tests=job_tests, users=users, deploy=deploy, whose=whose, github=github )
        # Envs are created without a job_id here (the job has no id yet).
        job.envs = json_to_envs( request.POST[ 'envs' ] )
        db.session.add(job)
        for env in job.envs :
            db.session.add( env )
        # NOTE(review): no explicit db.session.commit() here -- presumably
        # the session autocommits or middleware commits; verify.
        json_resp = json.dumps( { "status" : "ok" } )
        return HttpResponse(json_resp, mimetype="application/json")
    else :
        tests_data = tests.gather_tests_info()
        users_data = fetch_users_info()
        return render_to_response( 'job/add/add_job.html', { 'title' : title, 'tests' : tests_data, 'users' : users_data, 'repos' : [], 'branches' : ['develop', 't-kz'] }, context_instance=RequestContext(request) )

def list_jobs(request) :
    """Render the job list, each row annotated with its latest task status."""
    title = html.title( [ _('Jobs'), 'Ganymede' ] )
    jobs = []
    for job in db.session.query(Job).all() :
        try :
            # Newest task for this job; any failure (e.g. no tasks yet) is
            # treated best-effort as "no last task".
            last_task = db.session.query(Task).\
                filter(Task.job_id == job.job_id).\
                order_by(Task.add_time.desc()).limit(1).one()
        except :
            last_task = None
        jobs.append( { "job_id" : job.job_id, "name" : job.name, "repo" : job.repo, "branch" : job.branch, "whose" : job.whose if not job.whose is None else "-", "last_status" : _( "Not executed" ) if (last_task is None) else last_task.status.capitalize(), "last_task_id" : None if (last_task is None) else last_task.task_id } )
    return render_to_response( 'job/list/list.html', { 'title' : title, 'jobs' : jobs }, context_instance=RequestContext(request) )

def remove_job(request) :
    """Delete the job named by POST['job_id']; always answers {"status":"ok"}."""
    job_id = request.POST[ 'job_id' ]
    db.session.query(Job).filter(Job.job_id == job_id).delete()
    json_resp = json.dumps( { "status" : "ok" } )
    return HttpResponse(json_resp, mimetype="application/json")

def update_job(request, job_id) :
    """POST: overwrite the job's fields and replace its EnvScripts, answering
    a JSON status (ok / error+message).  GET: render the edit form pre-filled
    with the job's current data.
    """
    title = html.title( [ _('Update job') + " #" + str(job_id), _('Jobs'), 'Ganymede' ] )
    if request.method == 'POST' :
        job = db.session.query(Job).filter( Job.job_id == int(job_id) ).one()
        job.name = request.POST[ 'name' ]
        job.repo = request.POST[ 'repo' ]
        job.branch = request.POST[ 'branch' ]
        # Empty string means "no scheduled time".
        job.exec_time = request.POST[ 'exec_time' ] if request.POST[ 'exec_time' ] != "" else None
        job.tests = json_to_tests( request.POST[ 'tests' ] )
        job.users = request.POST[ 'users' ]
        job.deploy = request.POST[ 'deploy' ] if not request.POST[ 'deploy' ] == u'' else None
        job.github = request.POST[ 'github' ]
        # Replace envs wholesale: delete the old ones, commit, then attach
        # the new set and commit again (the order matters here).
        for env in job.envs :
            db.session.delete( env )
        db.session.commit()
        new_envs = json_to_envs( request.POST[ 'envs' ], job.job_id )
        for env in new_envs :
            job.envs.append( env )
        try :
            db.session.commit()
            json_resp = json.dumps( { "status" : "ok" } )
        except Exception as e :
            db.session.rollback()
            json_resp = json.dumps( { "status" : "error", "content" : str(e) } )
        return HttpResponse(json_resp, mimetype="application/json")
    else :
        job_model = db.session.query( Job ).filter( Job.job_id == job_id ).one()
        job = { "job_id" : job_model.job_id, "name" : job_model.name, "repo" : job_model.repo, "branch" : job_model.branch, "envs" : job_model.envs, "exec_time" : job_model.exec_time.strftime("%H:%M") if not job_model.exec_time is None else "", "tests" : job_model.tests, "deploy" : job_model.deploy, "github" : job_model.github }
        tests_ids = []
        for x in job_model.tests :
            tests_ids.append( x.test_id )
        tests_data = tests.gather_tests_info( tests_ids )
        users_data = fetch_users_info( job_model )
        return render_to_response( 'job/update/update_job.html', { 'title' : title, 'job' : job, 'users' : users_data, 'tests' : tests_data, 'repos' : [], 'branches' : ['develop', 't-kz'] }, context_instance=RequestContext(request) )
UTF-8
Python
false
false
2,013
14,285,061,246,046
e08375f6d23fa98b08bde59e3650754be7820f99
38a7ff3c3c1c0c78473266ab271b27d2d02f383e
/ccorr_mat_test.py
3ddc7556ed92367f8fe88b15b0a8437283422067
[]
no_license
mailletf/brain-parcellater
https://github.com/mailletf/brain-parcellater
7a95e6c88e7cda8265c69aebbe2fb121428c9213
e7d1415192c224712db2c459e167481ffddee1aa
refs/heads/master
2020-05-18T10:57:15.085794
2014-08-01T18:56:05
2014-08-01T18:56:05
10,571,730
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as N
import scipy.spatial.distance as D
import scipy.sparse as S
import scipy.stats as SS
import conn_ccorr_mat as CCM
import unittest

size = 4


class CrossCorrelationTest(unittest.TestCase):
    """Checks CCM's sparse cross-correlation against a dense cosine reference."""

    def setUp(self):
        def reference_ccorr(dense):
            # Dense reference: entry (row, col) is the cosine similarity
            # between rows `row` and `col` of the input matrix.
            out = N.zeros(dense.shape)
            for row in xrange(size):
                for col in xrange(size):
                    out[row, col] = 1 - D.cosine(dense[row], dense[col])
            return out

        self.mat = N.random.random((size, size))
        self.cc_mat = reference_ccorr(self.mat)
        self.bin_mat = N.round(self.mat)
        self.cc_bin_mat = reference_ccorr(self.bin_mat)

    def test_nonBinaryTest(self):
        # Copy the dense random matrix into a list of 1-row sparse matrices.
        rows = [S.dok_matrix((1, size), int) for _ in xrange(size)]
        for row in xrange(size):
            for col in xrange(size):
                rows[row][0, col] = self.mat[row, col]
        result = CCM.cross_correlate_matrix_nonbinary(rows)
        N.testing.assert_almost_equal(result, self.cc_mat)

    def test_binaryTest(self):
        # Same, but binarised: only strictly-positive cells are written.
        rows = [S.dok_matrix((1, size), int) for _ in xrange(size)]
        for row in xrange(size):
            for col in xrange(size):
                if self.mat[row, col] > 0:
                    rows[row][0, col] = self.bin_mat[row, col]
        result = CCM.cross_correlate_matrix_binary(rows)
        N.testing.assert_almost_equal(result, self.cc_bin_mat)


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
2,014
7,344,394,095,729
acf6ba39039944ffc94d45306620a0506e2be5d3
d7ee1fd4ec2d1a4704c7eaed798962868aebd290
/myproject/app/announcement/views.py
3b0b0fd15c2482db93bef2de3318ef7bdcf7c55f
[]
no_license
wwj718/sklok
https://github.com/wwj718/sklok
04146259fd102390875ed8ec4fa2840df2644f9c
fbd5cbf3826d7d01cee1a68493d9c4e6df75491f
refs/heads/master
2021-01-22T14:39:11.142418
2013-11-07T13:19:29
2013-11-07T13:19:29
13,374,341
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding=utf-8 from django.shortcuts import render_to_response from models import Announcement from django.template import RequestContext from django.shortcuts import get_object_or_404 ''' def show_all_news(request): #包含分页功能 list_items = News.objects.filter(categories='N') variables = RequestContext(request,{'list_items':list_items}) return render_to_response("Introduction.html",variables) ''' #分类取: def get_by_id(request,id): #包含分页功能 id=int(id) announcement = get_object_or_404(Announcement, pk=id) variables = RequestContext(request,{'announcement':announcement}) return render_to_response("announcement_detail.html",variables)
UTF-8
Python
false
false
2,013
11,038,065,998,556
3d80989ef44e4cb6b0397cc90f62c96987a6e1b4
a704892d86252dde1bc0ff885ea5e7d23b45ce84
/addons-community/partner_category_view/__terp__.py
9942153504e544ddb60d15affbcf8f28038b5106
[]
no_license
oneyoung/openerp
https://github.com/oneyoung/openerp
5685bf8cce09131afe9b9b270f6cfadf2e66015e
7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5
refs/heads/master
2016-03-31T18:22:41.917881
2013-05-24T06:10:53
2013-05-24T06:10:53
9,902,716
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
{ 'name': 'Partner Category View', 'version': '1.0', 'category': 'Generic Modules/Others', 'description': """This module add list of partner to the partner category view. Warning current version of OpenERP server (22nd of April 2010) have a bug (the view is mixed up). https://bugs.launchpad.net/openobject-server/+bug/455547 To quick fix it: go into you server directory and then if server version is 5.0 do: wget http://launchpadlibrarian.net/45016785/patch patch -p0 < patch if server version is trunk do: wget http://launchpadlibrarian.net/45017686/trunk.patch patch -p0 < trunk.patch """, 'author': 'Nicolas De Smet', 'website': 'http://ndesmet.be', 'depends': ['base'], 'update_xml': ['view.xml'], 'installable': True, 'active': False, }
UTF-8
Python
false
false
2,013
10,101,763,126,357
6538982a64732d353dce9fbbfa54266cc671ea0c
118c3d42606b5c582485bfd437fc514dee78b226
/vt.py
4327981d78b2b981cd8a7b6c172921d6639d5175
[]
no_license
investlab/virustotal-scanner-python
https://github.com/investlab/virustotal-scanner-python
0e00f816bb246eb4a7d55fd58e8f2d62e4ea1e63
fd6f3cda45d1f4a631040e9a7a5ed223396ea5c9
refs/heads/master
2022-08-12T15:56:28.974684
2014-12-07T21:34:21
2014-12-07T21:34:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import json
import urllib
import urllib2
import hashlib
import postfile
import time
import sys

##########################################################################
# Error codes shared by the classes below.
IS_BATCH_ERROR = 2
INVALID_TYPE_ERROR = 3
BAD_RESPONSE_ERROR = 4
STAT_FAILED = 5
INVALID_RESULTS_ERROR = 6


class fileinfo():
    """
    fileinfo class contains static methods to return hash values
    to the Scan and VirusTotal classes below.
    """

    @staticmethod
    def md5hash(path):
        """Return the hex md5 digest of the file at `path`, read in 128-byte blocks."""
        block_size = 128
        md5 = hashlib.md5()
        f = open(path, "rb")
        try:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                md5.update(data)
        finally:
            # Close the handle even if read() raises (the original leaked it on error).
            f.close()
        return md5.hexdigest()


class VirusTotal(object):

    def __init__(self, api_key, resource="NA", path="NA"):
        """
        Interacts directly with the VirusTotal public API.  Should not be
        used directly: use the Scan object below to work with
        files/urls/reports.

        attributes:
            resource: scan_id or file hash (optional)
            path: full path to a single file (optional)
            api_key: VirusTotal API key (required)
        """
        self.resource = resource
        self.path = path
        self.api_key = api_key

    def _submit_resource(self, method):
        """
        POST self.resource to VirusTotal; `method` is 'report' or 'rescan'.
        Returns the decoded JSON, or BAD_RESPONSE_ERROR on a non-200 reply.
        Sends one web request per call.
        """
        url = "https://www.virustotal.com/vtapi/v2/file/" + method
        parameters = {"resource": self.resource, "apikey": self.api_key}
        data = urllib.urlencode(parameters)
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        if response.getcode() != 200:
            return BAD_RESPONSE_ERROR
        return json.loads(response.read())

    @staticmethod
    def _isbatch(json_output):
        # Batch results decode to a JSON list; a single result is a dict.
        return type(json_output) == list

    @staticmethod
    def _isnotbatch(json_output):
        # Single (non-batch) results decode to a dict.
        return type(json_output) == dict

    def has_file(self):
        """
        Return True/False depending on whether VirusTotal already knows
        self.resource, or an error code on unexpected output.
        Sends one web request per call.
        """
        json_out = self._submit_resource("report")
        if self._isbatch(json_out):
            return IS_BATCH_ERROR
        if self._isnotbatch(json_out):
            return json_out["response_code"] == 1
        return INVALID_TYPE_ERROR

    def submit_file(self):
        """
        Upload self.path for analysis.  The scan_id comes back immediately
        but the analysis itself takes minutes -- see query_status().

        Returns {'code': 0, 'val': msg} on failure, or
                {'code': 1, 'val': msg, 'scan_id': ...} on success.
        Sends one web request per call.
        """
        host = "www.virustotal.com"
        selector = "http://www.virustotal.com/vtapi/v2/file/scan"
        fields = [("apikey", self.api_key)]
        f = open(self.path, "rb")
        try:
            file_to_send = f.read()
        finally:
            f.close()
        files = [("file", self.path, file_to_send)]
        json_out = json.loads(postfile.post_multipart(host, selector, fields, files))
        response = json_out["response_code"]
        msg = json_out["verbose_msg"]
        if response != 1:
            return {"code": 0, "val": msg}
        return {"code": 1, "val": msg, "scan_id": json_out["scan_id"]}

    def rescan_file(self):
        """
        Rescan a file already in the VT database, identified by
        self.resource (scan_id or hash).  Returns the new scan_id, or
        BAD_RESPONSE_ERROR.  Sends one web request per call.
        """
        json_output = self._submit_resource("rescan")
        if json_output["response_code"] == 1:
            return json_output["scan_id"]
        return BAD_RESPONSE_ERROR

    def query_status(self):
        """
        Poll VirusTotal (once a minute, at most 6 times) until the scan of
        self.resource has finished.  Returns 1 on success, STAT_FAILED on
        timeout.  Can block for several minutes.
        """
        count = 0
        while True:
            count += 1
            time.sleep(60)
            json_out = self._submit_resource("report")
            if count > 6:
                return STAT_FAILED
            if (json_out["response_code"] == 1 and
                    json_out["verbose_msg"] != "Scan request successfully queued, come back later for the report"):
                return 1

    def get_report(self):
        """
        Fetch the raw JSON report for self.resource.  Returns the report
        dict, or BAD_RESPONSE_ERROR / INVALID_RESULTS_ERROR on failure.
        Sends one web request per call.
        """
        report_json = self._submit_resource("report")
        if report_json == BAD_RESPONSE_ERROR:
            print("failed at get_report() - bad response from VT")
            # BUG FIX: propagate the error code; the original fell through
            # and returned None, which made make_awesome_report() crash
            # when subscripting the result.
            return BAD_RESPONSE_ERROR
        elif not report_json:
            return INVALID_RESULTS_ERROR
        else:
            return report_json

    @staticmethod
    def _gather_report_details(report_json):
        """
        Collect per-vendor detection details (malware name, engine version,
        update date) from a raw report.

        Returns a dict of details, 1 if no vendor flagged the resource,
        or 0 if the report's response_code is not 1.
        (The parameter was renamed from `json`, which shadowed the module.)
        """
        if report_json['response_code'] != 1:
            return 0
        scans = report_json['scans']
        detect_list = []
        not_detect_list = []
        output = {}
        for vendor in scans:
            if scans[vendor]["detected"]:
                detect_list.append(vendor)
            else:
                not_detect_list.append(vendor)
        if not detect_list:
            return 1
        for vendor in detect_list:
            output[vendor] = {"version": scans[vendor]['version'],
                              "result": scans[vendor]['result'],
                              "update": scans[vendor]['update']}
        # BUG FIX: always include 'detect_list'; the original omitted it
        # when every vendor detected the sample, which made
        # make_awesome_report() raise KeyError on its best-case path.
        output['detect_list'] = detect_list
        if not_detect_list:
            output['failed'] = 1
            output['failedlist_key'] = {"failed_list": not_detect_list}
        else:
            output['failed'] = 0
        return output

    def make_awesome_report(self):
        """Print a full, human-readable scan report (uses _gather_report_details)."""
        raw_json = self.get_report()
        if raw_json == BAD_RESPONSE_ERROR:
            # get_report() already printed the failure.
            return
        if raw_json == INVALID_RESULTS_ERROR:
            # Happens when an invalid hash is submitted.
            print("Resouce not found - invalid hash probably")
        elif raw_json['response_code'] == 1:
            # Status information.
            print("Scan Date " + str(raw_json['scan_date']))
            print("sha1: " + str(raw_json['sha1']) + " \n" + "md5: " + str(raw_json['md5']))
            ratio = float(raw_json['positives']) / float(raw_json['total']) * float(100)
            print("Positive results: " + repr(raw_json['positives']) + '\n' + "Total Results: " + repr(raw_json['total']))
            print("Detection rate: {0:.0f}%".format(ratio))
            # Detailed per-vendor report.
            output = self._gather_report_details(raw_json)
            if output == 1:
                print("No vendors flagged this resource as malicious")
            elif output != 0:
                for val in output['detect_list']:
                    print("\nVendor: %s \n Result: %s Version: %s Update: %s \n" % (val, str(output[val]['result']), str(output[val]['version']), str(output[val]['update'])))
                if output['failed']:
                    print("The following vendors did not flag this as malicious: \n %s" % (str(" -- ".join(output['failedlist_key']['failed_list']))))
            else:
                print("ERROR: Not found.")
        elif raw_json['response_code'] == 0:
            # Resource not found in the VirusTotal database.
            print(" Resource not found in Virus Total database ")
            print(" resource: " + raw_json['resource'])


class Scan(object):

    def __init__(self, api_key, path="NA"):
        """
        Wrapper for the VirusTotal object above, which defines most of the
        scanning logic.  Call this object directly.

        available methods:
            scan_file(verbose_flag)
            scan_report(hash, verbose_flag)

        attributes:
            path: path to a single file (optional)
            api_key: VirusTotal API key (required)
        """
        self.path = path
        self.api_key = api_key

    def _get_filehash(self):
        """Return the md5 hash of self.path."""
        return fileinfo.md5hash(self.path)

    def scan_file(self, verbose_flag=0):
        """
        Submit self.path for analysis and print the report.  If the file is
        already in the VT database it is neither resubmitted nor rescanned.
        Set verbose_flag=1 for progress messages.
        """
        md5hash = self._get_filehash()
        if verbose_flag:
            print("md5 hash of file: " + md5hash)
        vt = VirusTotal(self.api_key, md5hash, self.path)
        has_file = vt.has_file()
        if has_file == IS_BATCH_ERROR or has_file == INVALID_TYPE_ERROR:
            print("scan_file() failed at has_file()")
        elif has_file == True:
            if verbose_flag:
                print("VT already has file -- querying report:")
            vt.make_awesome_report()
        else:
            if verbose_flag:
                print("submitting file - this could take several minutes")
            return_dict = vt.submit_file()
            if return_dict['code'] == 1:
                scan_id = return_dict['scan_id']
                if verbose_flag:
                    print("scan_id: " + scan_id)
                if vt.query_status() != STAT_FAILED:
                    vt.make_awesome_report()
                else:
                    print("scan_file() failed because we were unable to query the status of the file sent (note that file was sent)")
            else:
                # BUG FIX: submit_file() stores its message under 'val',
                # not 'msg'; the original raised KeyError on this path.
                print("scan_file() failed because we were unable to send file -- " + return_dict['val'])

    def scan_report(self, hash, verbose_flag=0):
        """
        Print the report for `hash` (file hash or scan_id).  Exits with an
        error message if the hash is not in the VT database.
        Set verbose_flag=1 for progress messages.
        """
        if verbose_flag:
            print("resource to search: " + hash + '\n')
        vt = VirusTotal(self.api_key, hash)
        vt.make_awesome_report()


##########################################################################

def example():
    api = "<API kEY>"
    resource = "d74b1df3ab16b36d48850f5d57b346b0"  # dexter malware hash
    path = "testfile.txt"
    # upload/scan file
    scanner = Scan(api, path)
    scanner.scan_file(verbose_flag=1)
    # check report based on file hash or scanid
    scanner2 = Scan(api)
    scanner2.scan_report(resource, verbose_flag=1)


def main():
    print("This script contains the class implementations to interact with VirusTotal - do not call directly")


if __name__ == "__main__":
    main()
UTF-8
Python
false
false
2,014
13,812,614,865,636
e999c6117b005784087bf3ab01f62aae28d911cb
2dc33f2fd71c1a0063183f26751a8ef4a2f2cfe9
/backend/documents/clustering.py
c47dd661b2c3012042cbfedb895cafbc464ae36a
[]
no_license
ruiaf/sumnews
https://github.com/ruiaf/sumnews
40c6ab773738ec3b75474372d9a8bdab85022a4b
a93e0757046015b5fa785c6fcf95467b505a6912
refs/heads/master
2016-09-08T02:35:21.091167
2014-04-05T17:17:30
2014-04-05T17:17:30
17,527,511
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging
import threading
import time

import settings
import utils


class ClusterMaker(threading.Thread):
    """Background thread clustering documents via affinity propagation."""

    def __init__(self, index, *args, **kwargs):
        self.index = index
        self.comparator = DocComparator(self.index)
        self.objects = []
        self.responsibility = {}
        self.availability = {}
        # Protects objects / responsibility / availability.
        self.lock = threading.Lock()
        # Staging area for documents added while an iteration runs.
        self.add_list = []
        self.add_list_lock = threading.Lock()
        threading.Thread.__init__(self, *args, **kwargs)

    def add(self, doc):
        """Queue a document; it joins the cluster state at the next process_add_list()."""
        self.add_list_lock.acquire()
        self.add_list.append(doc)
        self.add_list_lock.release()

    def process_add_list(self):
        """Merge queued documents into the affinity-propagation state."""
        # Lock order: add_list_lock before lock (clear() must match).
        self.add_list_lock.acquire()
        self.lock.acquire()
        ts = time.time()
        for obj in self.add_list:
            self.objects.append(obj)
            self.responsibility[obj] = {}
            self.availability[obj] = {}
            self.responsibility[obj][obj] = 0.0
            self.availability[obj][obj] = 0.0
            # Only keep message slots for sufficiently-similar pairs.
            for other_doc in self.objects:
                if self.comparator.similarity(obj, other_doc) >= settings.CLUSTERING_MINIMUM_SIMILARITY:
                    self.responsibility[obj][other_doc] = 0.0
                    self.availability[obj][other_doc] = 0.0
                    self.responsibility[other_doc][obj] = 0.0
                    self.availability[other_doc][obj] = 0.0
        te = time.time()
        if len(self.add_list):
            logging.info("Finished adding %d objects to clustering in %2.2f seconds",
                         len(self.add_list), te - ts)
        self.add_list = []
        self.add_list_lock.release()
        self.lock.release()

    def clear(self):
        """Drop all clustering state."""
        logging.info("Clearing clusters")
        # BUG FIX: acquire in the same order as process_add_list()
        # (add_list_lock, then lock).  The original took them in the
        # opposite order, which could deadlock against a concurrent
        # process_add_list().
        self.add_list_lock.acquire()
        self.lock.acquire()
        self.comparator = DocComparator(self.index)
        self.objects = []
        self.responsibility = {}
        self.availability = {}
        self.add_list = []
        self.lock.release()
        self.add_list_lock.release()

    def run(self):
        """Thread main loop: iterate forever, merging queued docs between rounds."""
        while True:
            ts = time.time()
            self.iterate_affinity()
            te = time.time()
            logging.info("Finished affinity iteration for: %d documents in %2.2f seconds",
                         len(self.objects), te - ts)
            self.process_add_list()
            time.sleep(settings.CLUSTERING_INTERVAL)

    def run_for_unittest(self):
        """Run a fixed number of iterations (10) for deterministic tests."""
        for i in range(10):
            ts = time.time()
            self.iterate_affinity()
            te = time.time()
            logging.info("Finished affinity iteration for: %d documents in %2.2f seconds",
                         len(self.objects), te - ts)
            self.process_add_list()

    def iterate_affinity(self):
        """One round of affinity-propagation message passing + exemplar update."""
        self.lock.acquire()
        # Responsibility updates.
        for i in self.objects:
            values = utils.max2((self.availability[i][k_prime] +
                                 self.comparator.similarity(i, k_prime), k_prime)
                                for k_prime in self.responsibility[i].keys())
            for k in self.responsibility[i].keys():
                sim = self.comparator.similarity(i, k)
                max_value = values[0][0]
                if values[0][1] is k:
                    # NOTE(review): if utils.max2 returns exactly the top two
                    # candidates, `len(values) > 2` can never be true and
                    # max_value is always reset to 0.0 here -- confirm
                    # whether `>= 2` was intended.
                    if len(values) > 2:
                        max_value = values[1][0]
                    else:
                        max_value = 0.0
                self.responsibility[i][k] = ((settings.CLUSTERING_DUMPING_FACTOR * self.responsibility[i][k]) +
                                             (1 - settings.CLUSTERING_DUMPING_FACTOR) * (sim - max_value))
        # Availability updates.
        for k in self.objects:
            sum_value = 0.0
            for i_prime in self.responsibility[k].keys():
                sum_value += max(0.0, self.responsibility[i_prime][k])
            for i in self.responsibility[k].keys():
                self.availability[i][k] = (settings.CLUSTERING_DUMPING_FACTOR * self.availability[i][k] +
                                           (1 - settings.CLUSTERING_DUMPING_FACTOR) *
                                           min(0.0, (self.responsibility[k][k] + sum_value -
                                                     max(0.0, self.responsibility[i][k]) -
                                                     max(0.0, self.responsibility[k][k]))))
            self.availability[k][k] = ((settings.CLUSTERING_DUMPING_FACTOR * self.availability[k][k]) +
                                       ((1 - settings.CLUSTERING_DUMPING_FACTOR) *
                                        (sum_value - max(0.0, self.responsibility[k][k]))))
        # Exemplar (cluster-head) assignment.
        for i in self.objects:
            if not i.exemplar is i and i in i.exemplar.children:
                i.exemplar.children.remove(i)
            exemplar = max((self.availability[i][k_prime] + self.responsibility[i][k_prime], k_prime)
                           for k_prime in self.availability[i].keys())
            i.exemplar = exemplar[1]
            i.responsibility_parent = self.responsibility[i][exemplar[1]]
            i.availability_parent = self.availability[i][exemplar[1]]
            i.similarity_parent = self.comparator.similarity(i, exemplar[1])
            if not i.exemplar is i:
                i.exemplar.children.append(i)
        self.lock.release()


class DocComparator(object):
    """Computes (and caches) tf-idf-based similarity between two documents."""

    def __init__(self, inverted_index):
        self.index = inverted_index
        self.cache = {}

    def similarity(self, doc1, doc2):
        """Return the pair's similarity; identical docs get the preference constant."""
        if doc1 is doc2:
            return settings.CLUSTERING_DEFAULT_PREFERENCE
        if (doc1, doc2) not in self.cache:
            intersection_words = doc1.words() & doc2.words()
            other_words = doc1.words() ^ doc2.words()
            intersection_word_weight = sum(self.index.tf_idf(word) for word in intersection_words)
            other_words_weight = sum(self.index.tf_idf(word) for word in other_words)
            # +0.01 guards against division by zero for two empty documents.
            sim = intersection_word_weight / (intersection_word_weight + other_words_weight + 0.01)
            # Only cache pairs above the threshold to bound memory use.
            if sim >= settings.CLUSTERING_MINIMUM_SIMILARITY:
                self.cache[(doc1, doc2)] = sim
                self.cache[(doc2, doc1)] = sim
            return sim
        return self.cache[(doc1, doc2)]
UTF-8
Python
false
false
2,014
15,513,421,922,416
feb6a6af974cc8e469aeffb0f0b4282fb68531d0
a555236d3ae5725e37aa6b5c5d8b50593f74ed69
/python/while.py
1fdd940f65af43d8ea944af988a0b56cfb9e5bd0
[]
no_license
xc145214/practise
https://github.com/xc145214/practise
82b2e2adeafbf3aa29dcdef10613bec82e52a0b8
959f6882ae7d367353ea172972298d29ec713219
refs/heads/master
2016-09-06T12:08:23.951316
2014-12-15T09:42:58
2014-12-15T09:42:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" usage:while 循环demo """ number = 23 running = True #true大写 while running: guess = int(input("please input a int number:")) if guess == number: print('Congradulations, you guessed it') running = False #stop the loop elif guess < number: print('No, it is a little higher than you guess') else: print("No, it is a little lower than you guess") else: print("the while loop is over") print("Down")
UTF-8
Python
false
false
2,014
5,403,068,860,103
09f820e7af3095637395471af50fea7d7cee25e8
db2b114efae7a1b75a42680d19429b71f8240436
/directory_app.py
b040481487908e7dfd54a83078089b7532e2190d
[]
no_license
ikon42/reddit-unite
https://github.com/ikon42/reddit-unite
2b036f3f904977b4a4b99b7c6f17c1ed8eb436e2
1fc42de11eeb0180e734bbc822288ccaa62a15a2
refs/heads/master
2020-04-13T21:07:19.713565
2010-11-11T03:24:27
2010-11-11T03:24:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
# web.py application for the member directory: user lists, per-user maps,
# and search over public profile data.  (Python 2 -- note the
# tuple-parameter lambda in index.GET.)
import web
import template
import util
from models import User
from forms import search_form

# URL routing table: (pattern, handler-class-name) pairs.
urls = (
    '/search/?', 'search',
    '', 'redis',
    '/([a-zA-Z0-9_]+)?/?', 'index',
    '/map/(.*)/?', 'user_map',
)

class index:
    '''Displays a user list or a list of user lists'''
    def GET(self, name):
        t = template.env.get_template('user_list.html')
        # Flattened [name, scope, name, scope, ...] pairs.
        list_list = [ # list_list should be generated somehow
            'global',
            'Lists all users from all locales.',
        ]
        user_list = []
        if (name is None):
            # No list requested: show the index of available lists.
            return t.render(util.data(
                title='User Lists',
                instructions='''Users are grouped into "user lists" that group them geographically. These lists are automatically generated and will change based upon the relative size of various user populations.''',
                # zip(*[list_list[i::2] for i in range(2)]) re-pairs the
                # flattened name/scope list into (name, scope) tuples.
                list_list=map(
                    lambda (li): {'name': li[0], 'scope': li[1]},
                    zip(*[list_list[i::2] for i in range(2)]),
                ),
            ))
        elif (name.lower() in list_list[::2]):
            # Known list name: collect every user whose profile is public
            # (strip_private_data returns None for private profiles).
            for i in User.all():
                x = util.strip_private_data(i)
                if x is not None:
                    user_list.append(x)
        else:
            raise web.notfound()
        return t.render(util.data(
            title='Display all members',
            instructions='''Public member listing''',
            users=user_list,
        ))

class user_map:
    '''Generates all different kinds of maps!'''
    def GET(self, name):
        # NOTE(review): `name` is currently unused and the template gets an
        # empty context -- presumably a stub.
        t = template.env.get_template('user_map.html')
        return t.render(util.data())

class search:
    '''Allows users to search for other users based on public information'''
    def GET(self):
        q = web.input()
        t = template.env.get_template('search.html')
        f = search_form()
        try:
            if q.query:
                results = []
                user_list = []
                query = q.query.split(' ')
                for i in User.all():
                    x = util.strip_private_data(i)
                    if x is not None:
                        user_list.append(x)
                # A user matches when any search term equals any public
                # profile value (exact match, not substring).
                for p in user_list:
                    for i in query:
                        if i in dict(p).values():
                            results.append(p)
                return t.render(util.data(
                    title='Find who you\'re looking for!',
                    form=f,
                    results=results if results else None,
                ))
            else:
                web.debug('q.query doesn\'t exist and it didn\'t thow an exception!')
                raise Warning('Odd, huh?')
        except:
            # Deliberate catch-all control flow: a missing `query` attribute
            # (AttributeError from web.input()) or the Warning above both
            # fall back to rendering the empty search form.
            return t.render(util.data(
                title='Find who you\'re looking for!',
                form=f,
            ))

class redis:
    def GET(self):
        # Bare root URL: redirect to '/'.
        raise web.seeother('/')

app = web.application(urls, locals())
UTF-8
Python
false
false
2,010
6,064,493,859,900
33fa695d11439397d0c5a2ff77827a728c6c0f80
c9d677cfd20117259680dc4cf3420274519be768
/buildUT2004Mod.py
67e7b74d93d9b10941b88d3071b32d07b7d7e341
[ "GPL-3.0-only" ]
non_permissive
brobinson9999/pyun-build
https://github.com/brobinson9999/pyun-build
c143f092a6d175c583e3f1f2768fedd8c0a075d2
70e226da546a6f2b650bfa31582c6bfbd7d35b35
refs/heads/master
2021-01-01T16:21:04.753731
2011-09-11T17:37:20
2011-09-11T17:37:20
645,196
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
from buildUTMod import *


def buildUT2k4modINI(filePath, modName, modTitle="DummyBuild", modLogo="DummyBuildLogo", modDesc="DummyBuildDesc", modCmdLine="DummyBuildCmdLine", modURL="DummyURL"):
    """Write a UT2k4mod.ini at filePath describing the mod.

    modName is accepted for call-site symmetry but does not appear in the
    generated INI (only title/logo/desc/cmdline/url are written).
    """
    fileData = "[MOD]\nModTitle=" + modTitle + "\nModLogo=" + modLogo + "\nModDesc=" + modDesc + "\nModCmdLine=" + modCmdLine + "\nModURL=" + modURL + "\n"
    bruteIO.removeFile(filePath)
    bruteIO.writeFile(filePath, fileData)


def buildUT2k4modSystemINI(baseINIPath, filePath, modName, dependencies, nondependencies):
    """Derive the mod's System INI from the base UT2004.ini.

    Inserts an EditPackages= line per dependency plus the mod itself just
    before the "CutdownPackages=Core" marker, then strips the EditPackages
    lines of every package listed in nondependencies.
    """
    fileData = bruteIO.readFile(baseINIPath)
    editPackagesString = ""
    for dependency in dependencies:
        editPackagesString = editPackagesString + "EditPackages=" + dependency + "\n"
    editPackagesString = editPackagesString + "EditPackages=" + modName + "\n"
    searchString = "CutdownPackages=Core"
    modifiedFileData = fileData.replace(searchString, editPackagesString + searchString)
    for nondependency in nondependencies:
        modifiedFileData = modifiedFileData.replace("EditPackages=" + nondependency, "")
    bruteIO.removeFile(filePath)
    bruteIO.writeFile(filePath, modifiedFileData)


def buildUT2k4mod(ut2004baseDir, modName, sourceDirectories, dependencies, nondependencies, exportcache=True, deleteAfterBuild=True, modTitle="DummyBuild", modLogo="DummyBuildLogo", modDesc="DummyBuildDesc", modCmdLine="DummyBuildCmdLine", modURL="DummyURL"):
    """Build, test, deploy and optionally cache a UT2004 mod.

    Returns True on success, False when the UCC compile fails.
    """
    systemDirectory = os.path.join(ut2004baseDir, "System")
    modSystemDirectory = os.path.join(ut2004baseDir, modName, "System")

    # Clear old files and directories.
    print("Clearing old files and directories...")
    baseModPath = os.path.join(ut2004baseDir, modName)
    bruteIO.removeFile(os.path.join(systemDirectory, modName + ".u"))
    bruteIO.removeDirectory(baseModPath)

    # Create needed directories and files.
    print("Creating environment for " + modName + "...")
    sourceCodeDestinationDirectory = os.path.join(baseModPath, modName, "classes")
    os.makedirs(sourceCodeDestinationDirectory)
    os.makedirs(modSystemDirectory)
    # BUG FIX: the original omitted modName here, so every metadata argument
    # shifted one position left (ModTitle received the logo, ModLogo the
    # description, ... and ModURL fell back to its default).
    buildUT2k4modINI(os.path.join(ut2004baseDir, modName, "UT2k4mod.ini"), modName, modTitle, modLogo, modDesc, modCmdLine, modURL)
    buildUT2k4modSystemINI(os.path.join(systemDirectory, "UT2004.ini"), os.path.join(modSystemDirectory, modName + ".ini"), modName, dependencies, nondependencies)

    # Copy source code.
    for sourceDirectory in sourceDirectories:
        print("Copying files from " + sourceDirectory + "...")
        copyAll.copyAll(sourceDirectory, sourceCodeDestinationDirectory, makeLinksInsteadOfCopying=False)

    # Do the actual compile.
    # We don't need to pipe stdin for any reason - it is a workaround.  If it
    # isn't specified as a pipe, Python attempts to duplicate the input
    # handle and in some circumstances that can fail.
    print("Compiling " + modName + "...")
    uccPath = os.path.join(systemDirectory, "UCC.exe")
    p = subprocess.Popen([uccPath + " make -mod=" + modName], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    outputTuple = p.communicate()
    outputText = outputTuple[0] + outputTuple[1]

    # Strip UCC boilerplate from the compiler output before printing it.
    import re
    outputText = re.sub(r'\r\n', "\n", outputText)
    outputText = re.sub(r'Analyzing...\n', "", outputText)
    outputText = re.sub(r'-*[a-zA-Z0-9_]* - Release-*\n', "", outputText)
    outputText = re.sub(r'Parsing [a-zA-Z0-9_]*\n', "", outputText)
    outputText = re.sub(r'Compiling [a-zA-Z0-9_]*\n', "", outputText)
    outputText = re.sub(r'Importing Defaults for [a-zA-Z0-9_]*\n', "", outputText)
    print(outputText)

    compileReturnCode = p.returncode
    if (compileReturnCode != 0):
        return False

    print("Deploying " + modName + "...")
    shutil.copyfile(os.path.join(modSystemDirectory, modName + ".u"), os.path.join(systemDirectory, modName + ".u"))

    print("Running Tests...")
    executeUnrealscriptTestCommandlets(sourceCodeDestinationDirectory, modName, systemDirectory, uccPath)

    if (exportcache):
        print("Generating Cache...")
        subprocess.call([uccPath, "dumpint", modName + ".u"])
        subprocess.call([uccPath, "exportcache", modName + ".u"])

    print("Cleaning up...")
    if (deleteAfterBuild):
        bruteIO.removeDirectory(baseModPath)

    print("Finished building " + modName + ".")
    return True
UTF-8
Python
false
false
2,011
6,150,393,169,216
c2bbd1f65c1e4824c48f54c4f2c36064c27d8863
07ae5f3577aa53b19a31cd648ecb7ad4c868943c
/time-measurer.py
4034b7167c8cdaf9fb69ff8aa78c0297769062f0
[]
no_license
anatolyburtsev/script_time_measurer
https://github.com/anatolyburtsev/script_time_measurer
c302a3f29ce5533cacc20b36b30864f5eeefc98c
ba0ef603fd24d15e21cce6bbb4fe04a6f774bd98
refs/heads/master
2021-01-13T02:16:24.675626
2014-10-20T12:31:23
2014-10-20T12:31:23
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Runs the given program with all its arguments, measures its wall-clock
# run time, and sends the timing to Graphite.
# !!!important: '|' (pipelines) are not handled
# 12/10/2014 Anatoly Burtsev [email protected]

# TODO read GRAPHITE_HOST and PORT from config

import os
import sys
import time
import subprocess as sb
import socket

GRAPHITE_HOST = "localhost"
GRAPHITE_PORT = 2003
HOST = socket.gethostname().split('.')[0]

# Nothing to run.
if len(sys.argv) == 1:
    exit(0)

command = sys.argv[1:]

# Skip a "/usr/bin/flock -w 0 /tmp/.flock_tmp" prefix, if present, so the
# metric is named after the real script.  (May need improvement.)
script_name_position = 1
if sys.argv[script_name_position] == "/usr/bin/flock":
    script_name_position = 5

SCRIPT = sys.argv[script_name_position]
# /usr/bin/gdb -> gdb
SCRIPT = SCRIPT.split('/')[-1]

# Distinguish "get_smth.py thing1" from "get_smth.py thing2" by appending
# the first argument when it is not numeric and not a flag.
if len(sys.argv[script_name_position:]) >= 2 and sys.argv[script_name_position + 1][0] not in '0123456789-':
    SCRIPT = SCRIPT + '_' + sys.argv[script_name_position + 1]

# Drop the extension from the metric name.
SCRIPT = SCRIPT.replace('.sh', '').replace('.py', '')

P = sb.Popen(command, stdout=sb.PIPE, stderr=sb.PIPE)
T0 = time.time()
out, err = P.communicate()
dT = time.time() - T0

# Pass the child's output through unchanged.
sys.stdout.write(out)
sys.stderr.write(err)

# Send the measurement to Graphite; close the socket even if sendall fails.
MESSAGE = 'stats.timemeasurer.%s.%s %d %d\n' % (HOST, SCRIPT, int(dT), int(time.time()))
sock = socket.create_connection((GRAPHITE_HOST, GRAPHITE_PORT))
try:
    sock.sendall(MESSAGE)
finally:
    sock.close()

# BUG FIX: propagate the measured command's exit status so this wrapper is
# transparent to its caller (the original always exited 0).
sys.exit(P.returncode)
UTF-8
Python
false
false
2,014
12,343,736,014,204
6b54b1dea646098d46efe9c760d00b2838d356c5
2b616c1d43329e30bf4b70e9b42ff6d9ab26ab45
/apk_exporter_tool.py
12e0be12d3c7657f82cc474c02ffe3cf6966d237
[ "GPL-1.0-or-later", "LGPL-2.1-or-later", "GPL-3.0-or-later", "GPL-3.0-only", "LGPL-2.0-or-later", "LicenseRef-scancode-warranty-disclaimer" ]
non_permissive
coskundeniz/apk-exporter
https://github.com/coskundeniz/apk-exporter
8b8fd6624ec701cdcc0b22aa3dcb3055bb63dd86
3529f3bee20dc503323e532dec66eb1ffc032bbc
refs/heads/master
2020-04-17T23:09:15.835138
2013-09-20T13:11:28
2013-09-20T13:11:28
12,931,584
2
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from Tkinter import *
from tkFileDialog import askdirectory

from apk_exporter import ApkExporter


class ApkExporterGui(Frame):
    """Tkinter front-end for ApkExporter: browse an Android project, show or
    override its manifest version info, and export a (signed or unsigned) apk.
    """

    def __init__(self, parent):
        Frame.__init__(self, parent, background="white")
        self.parent = parent
        self.dirname = None
        # Status line shown at the bottom of the window.
        self.info = StringVar()
        # Values extracted from the project manifest.
        self.package_name_var = StringVar()
        self.version_code_var = StringVar()
        self.version_name_var = StringVar()
        # User-editable overrides; empty string means "keep manifest value".
        self.version_code_entry = StringVar()
        self.version_code_entry.set("")
        self.version_name_entry = StringVar()
        self.version_name_entry.set("")
        self.selected_unsigned = IntVar()
        self.init_ui()
        self.app = ApkExporter()

    def init_ui(self):
        """ initialize gui """
        self.parent.title("Apk Exporter")
        self.pack(fill=BOTH, expand=1)
        # place widgets
        self.place_browse_button()
        self.place_package_name_lbl()
        self.place_version_code_lbl()
        self.place_version_name_lbl()
        self.place_package_name_var()
        self.place_version_code_var()
        self.place_version_name_var()
        self.place_version_code_entry()
        self.place_version_name_entry()
        self.place_sign_checkbox()
        self.place_export_button()
        self.place_exit_button()
        self.place_info()
        # add horizontal line
        frame = Frame(self, relief=RAISED, bd=1)
        frame.grid(row=6, columnspan=3, sticky=W+E)

    def place_browse_button(self):
        browse_button = Button(self, text="BROWSE PROJECT DIRECTORY", command=self.ask_directory)
        browse_button.grid(row=0, columnspan=3, padx=120, pady=10)

    def place_package_name_lbl(self):
        package_name_lbl = Label(self, text="Package Name: ", bg="white")
        package_name_lbl.grid(row=1, column=0, pady=5, padx=25, ipady=5, sticky=E)

    def place_version_code_lbl(self):
        version_code_lbl = Label(self, text="Version Code: ", bg="white")
        version_code_lbl.grid(row=2, column=0, pady=5, padx=25, ipady=5, sticky=E)

    def place_version_name_lbl(self):
        version_name_lbl = Label(self, text="Version Name: ", bg="white")
        version_name_lbl.grid(row=3, column=0, pady=5, padx=25, ipady=5, sticky=E)

    def place_package_name_var(self):
        package_name_var = Label(self, textvariable=self.package_name_var, bg="white")
        package_name_var.grid(row=1, column=1, pady=5, sticky=W)

    def place_version_code_var(self):
        version_code_var = Label(self, textvariable=self.version_code_var, bg="white")
        version_code_var.grid(row=2, column=1, pady=5, sticky=W)

    def place_version_name_var(self):
        version_name_var = Label(self, textvariable=self.version_name_var, bg="white")
        version_name_var.grid(row=3, column=1, pady=5, sticky=W)

    def place_version_code_entry(self):
        version_code_entry = Entry(self, textvariable=self.version_code_entry, bg="white", justify=CENTER)
        version_code_entry.grid(row=2, column=2, pady=5, sticky=W)

    def place_version_name_entry(self):
        version_code_entry = Entry(self, textvariable=self.version_name_entry, bg="white", justify=CENTER)
        version_code_entry.grid(row=3, column=2, pady=5, sticky=W)

    def place_sign_checkbox(self):
        sign_checkbox = Checkbutton(self, text="Unsigned Release", variable=self.selected_unsigned, bg="white")
        sign_checkbox.grid(row=4, columnspan=2, padx=25, pady=5, sticky=W)

    def place_export_button(self):
        self.export_button = Button(self, text="Export Apk", command=self.export_apk)
        self.export_button.grid(row=5, column=1, pady=20, sticky=E)

    def place_exit_button(self):
        exit_button = Button(self, text="Exit", width=9, command=self.parent.quit)
        exit_button.grid(row=5, column=2, pady=20)

    def place_info(self):
        info = Label(self, textvariable=self.info, bg="white")
        info.grid(row=7, columnspan=3, padx=15, pady=5, sticky=W)

    def ask_directory(self):
        """Prompt for the project directory, then read and display its manifest."""
        # Pick a sensible starting directory per platform.
        if(os.name == "nt"):
            self.dirname = askdirectory(initialdir='C:\\', title='Select Project Directory')
        elif(os.name == "mac"):
            self.dirname = askdirectory(initialdir='/Users/%s' % os.getlogin(), title='Select Project Directory')
        else:
            self.dirname = askdirectory(initialdir='/home/%s' % os.environ['USER'], title='Select Project Directory')
        self.app.dirname = self.dirname
        # extract package name, version code and name
        self.app.extract_info()
        self.info.set("Extracted manifest info")
        # update screen
        self.package_name_var.set(self.app.package)
        self.version_code_var.set(self.app.version_code)
        self.version_name_var.set(self.app.version_name)

    def export_apk(self):
        """Apply any version overrides from the entry fields, then export the apk."""
        # BUG FIX: compare the entry *values*, not the StringVar objects.
        # A StringVar instance never equals "", so the original conditions
        # were always true and the manifest versions were overwritten with
        # an empty string whenever nothing was typed.
        if(self.version_code_entry.get() != ""):
            self.app.version_code = self.version_code_entry.get()
            self.app.change_version_code(self.app.get_root())
        if(self.version_name_entry.get() != ""):
            self.app.version_name = self.version_name_entry.get()
            self.app.change_version_name(self.app.get_root())
        # Persist the manifest only when something actually changed.
        if(self.version_code_entry.get() != "" or self.version_name_entry.get() != ""):
            self.app.write_changes()
        # handle signing changes and export apk
        self.app.unsigned = self.selected_unsigned.get()
        self.app.export_apk()
        self.info.set("Finished exporting apk")


def run():
    """Create the root window, center it on screen, and start the GUI loop."""
    root = Tk()
    width = 450
    height = 300
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    x = (screen_width - width) / 2
    y = (screen_height - height) / 2
    root.geometry("%dx%d+%d+%d" % (width, height, x, y))
    root.resizable(width=FALSE, height=FALSE)
    app = ApkExporterGui(root)
    root.mainloop()


if __name__ == '__main__':
    run()
UTF-8
Python
false
false
2,013
12,996,571,070,828
9c9f5a295e3d157192c35f0e92a7f7ba347ca2cb
0e2f7f9f51fd066f5dd7c7ac334e7cbbccb337d8
/hinstagram.py
9695ab00f0e06f4d322bcf55e299797e41193807
[]
no_license
jayhack/Hinstagram
https://github.com/jayhack/Hinstagram
ab73a3487bc9fa827f070161f84b97c495795a48
09e1641d17c36f7e7b25d945fc2a2a920cf559d7
refs/heads/master
2020-05-18T02:49:14.638736
2014-05-04T14:30:20
2014-05-04T14:30:20
19,409,348
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Hinstagram: Visualizing Images as Histograms
# --------------------------------------------
# by Jay Hack ([email protected]), Spring 2014
from collections import Counter
import numpy as np


def make_gaussian_img(height, width, sample_size=(1000000,)):
    """Build a (height, width) array whose [i][j] entry counts how many 2-D
    gaussian samples landed on pixel (i, j).

    The gaussian is centered on the image, with a standard deviation of a
    quarter of each dimension per axis; samples falling outside the image
    are discarded.

    Args:
        height: output image height in pixels.
        width: output image width in pixels.
        sample_size: shape tuple passed to np.random.normal, i.e. the
            number of samples drawn per axis.

    Returns:
        np.ndarray of shape (height, width) with per-pixel sample counts.
    """
    # .astype(int): the old `np.int` alias was removed in NumPy 1.20.
    gx = np.random.normal(loc=width / 2, scale=width / 4, size=sample_size).astype(int)
    gy = np.random.normal(loc=height / 2, scale=height / 4, size=sample_size).astype(int)

    def in_range(x, dim):
        # Exclusive upper bound: valid indices are 0..dim-1.  (The original
        # used `x <= dim`, which wrongly admitted the out-of-range index
        # `dim`; those keys were never read back, so output is unchanged.)
        return 0 <= x < dim

    intensities = Counter(
        (y, x) for y, x in zip(gy, gx)
        if in_range(x, width) and in_range(y, height)
    )
    gaussian_img = np.zeros((height, width))
    for i in range(height):
        for j in range(width):
            gaussian_img[i][j] = intensities[(i, j)]
    return gaussian_img


if __name__ == '__main__':
    # Script-only imports live here so that importing this module (e.g. to
    # reuse make_gaussian_img) does not require PIL or the plotting stack.
    import Image  # legacy PIL top-level import
    import matplotlib.pyplot as plt
    import seaborn as sns

    #=====[ Step 1: load image as grayscale ]=====
    image_name = 'IMG_0519.jpg'
    raw_img = 255 - np.array(Image.open(image_name).convert('L').resize((240, 360)))
    height, width = raw_img.shape

    #=====[ Step 2: *sample* a gaussian distribution ]=====
    gaussian_img = make_gaussian_img(height, width)

    #=====[ Step 3: convolve image with gaussian, normalize to 0..255 ]=====
    convolved_img = np.multiply(raw_img, gaussian_img).astype(float)
    convolved_img = convolved_img / (np.max(np.max(convolved_img)) / 255)
    convolved_img = convolved_img.astype(np.uint8)

    #=====[ Step 4: unpack into points - apparently this is the only way seaborn accepts... ]=====
    X, Y = [], []
    for i in range(height):
        for j in range(width):
            Y += [height - i] * convolved_img[i][j]
            X += [j] * convolved_img[i][j]

    #=====[ Step 5: make and display histogram ]=====
    sns.jointplot(np.array(X), np.array(Y), color='#219f85')
    plt.axis('off')
    plt.show()
UTF-8
Python
false
false
2,014
9,242,769,659,558
b11f60373a6b9bd8f14db3db3cf0a3dfb4515c4c
c81c47b78a7ee94b745f90974ab34d9e31e13855
/apps/imageproxy/proxy.py
666e7d2476bcb2045c99d329b390de77811dcd94
[]
no_license
brstgt/cream-testsite
https://github.com/brstgt/cream-testsite
9a0b2972b3aa0c1bf42747c42ce209586cc8f6e5
f2712b28ec12e737175c1898890ecc7ce49a9e00
refs/heads/master
2016-08-08T03:48:49.673692
2012-03-22T13:02:44
2012-03-22T13:02:44
3,604,724
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Image proxy: fetches/uploads images, converts them into a fixed set of
# sizes, and stores the result as an Image model row.  (Python 2 / Django.)
import re, os, urllib2, tempfile
from django.core.files.base import File
from PIL import Image as PilImage
from models import Image
from settings import SIZES, OWN, EXCLUDE, URL_RETRIEVE_TIMEOUT


class ProxyError(Exception):
    # Base class for all proxy failures.
    pass


class FormatError(ProxyError):
    # The source bytes are not a supported image format.
    pass


class RetrieveError(ProxyError):
    # A remote URL could not be downloaded.
    pass


class ExcludeError(ProxyError):
    # The URL matched an EXCLUDE pattern and must not be proxied.
    pass


class Proxy:
    # Error classes re-exported as attributes so callers can catch
    # e.g. `proxy.FormatError` without importing this module's names.
    ProxyError = ProxyError
    FormatError = FormatError
    RetrieveError = RetrieveError
    ExcludeError = ExcludeError

    def __init__(self, user):
        self.user = user

    def store_set(self, set):
        """Persist a converted fileset as an Image row with three size variants.

        `set` is the dict produced by Converter.process_*: keys 'url',
        'format', and 'sizes' (open temp-file objects for full/medium/thumb).
        Returns the saved Image instance.
        """
        img = Image()
        img.userId = self.user
        img.url = set['url']
        # First save assigns img.id, which the stored filename is based on.
        img.save()
        name = "%d.%s" % (img.id, set['format'].lower())
        size = set['sizes']
        img.full.save(name, File(size['full']), save=False)
        img.medium.save(name, File(size['medium']), save=False)
        img.thumb.save(name, File(size['thumb']), save=False)
        img.save()
        return img

    def upload(self, upload):
        """Convert and store an uploaded file; returns the Image row."""
        converter = Converter(SIZES)
        fileset = converter.process_upload(upload)
        set = self.store_set(fileset)
        # Temp files are no longer needed once Django has copied them.
        converter.clean_set(fileset)
        return set

    def resolve(self, url):
        """Return the Image row for `url`, fetching and converting it on a miss."""
        resolver = Resolver()
        try:
            set = resolver.resolve(url)
        except Image.DoesNotExist:
            converter = Converter(SIZES)
            fileset = converter.process_url(url)
            set = self.store_set(fileset)
            converter.clean_set(fileset)
        return set

    @staticmethod
    def elect_size(set, geometry):
        # Pick the smallest stored variant that still fits inside `geometry`
        # (a (width, height) pair).  NOTE(review): the comparisons look
        # inverted relative to the names (returns `full` when the full image
        # is *smaller* than the requested geometry) — confirm intended.
        if set.fullWidth < geometry[0] and set.fullHeight < geometry[1]:
            return set.full
        if set.mediumWidth < geometry[0] and set.mediumHeight < geometry[1]:
            return set.medium
        return set.thumb


class Resolver:
    """Maps a URL to an existing Image row, honoring OWN/EXCLUDE patterns."""

    def find_set_by_id(self, id):
        return Image.objects.get(pk=id)

    def find_set_by_url(self, url):
        return Image.objects.get(url=url)

    def resolve(self, url):
        # URLs matching an OWN pattern embed our own image id in group(1).
        for match in OWN:
            matches = re.search(match, url)
            if matches:
                id = matches.group(1)
                return self.find_set_by_id(id)
        # Blacklisted URLs are refused outright.
        for match in EXCLUDE:
            matches = re.search(match, url)
            if matches:
                raise ExcludeError("This URL may not be proxied")
        # May raise Image.DoesNotExist, which Proxy.resolve treats as a miss.
        return self.find_set_by_url(url)


class Converter:
    """Converts a source image into the configured set of sizes via temp files."""

    def __init__(self, sizes):
        self.sizes = sizes

    def process_gif(self, source, target, size):
        # Animated GIFs are scaled frame-by-frame with pgmagick (imported
        # lazily since it is only needed for this path).
        from pgmagick import ImageList, Geometry
        list = ImageList()
        list.readImages(source)
        list.scaleImages(Geometry(size['size'][0], size['size'][1]))
        list.writeImages(target.name)
        pass

    def process(self, source, target, size):
        """Scale `source` into `target` for one size spec; returns the extension.

        Raises FormatError when PIL cannot decode the source.
        NOTE(review): the GIF-animation branch returns `image.format`
        ('GIF') while the normal path returns the `extension` variable
        ('JPG'/'GIF') — the two spellings differ for JPEG; confirm callers
        only lower() this value.
        """
        try:
            image = PilImage.open(source)
        except Exception:
            raise FormatError("Format not supported")
        if image.format == 'GIF' and size['allowAnimation']:
            self.process_gif(source, target, size)
            return image.format
        image.thumbnail(size['size'], PilImage.ANTIALIAS)
        # Everything is re-encoded as JPEG except (non-animated) GIFs.
        format = 'JPEG'
        extension = 'JPG'
        if image.format == 'GIF':
            format = 'GIF'
            extension = 'GIF'
        image.save(target.name, format)
        return extension

    def get_temp_file(self):
        # Use tempfile.mkstemp, since it will actually create the file on disk.
        (filedescriptor, filepath) = tempfile.mkstemp()
        # Close the open file using the file descriptor, since file objects
        # returned by os.fdopen don't work, either
        os.close(filedescriptor)
        # Open the file on disk
        return open(filepath, "w+b")

    def process_set(self, source):
        """Produce one scaled temp file per configured size; returns the set dict."""
        set = {
            'sizes': {}
        }
        for key, size in self.sizes.items():
            target = self.get_temp_file()
            set['format'] = self.process(source, target, size)
            set['sizes'][key] = target
        return set

    def process_upload(self, upload):
        # Spool the upload to disk first so PIL/pgmagick can re-read it.
        original = self.get_temp_file()
        original.write(upload.read())
        original.close()
        set = self.process_set(original.name)
        set['url'] = ''
        os.remove(original.name)
        return set

    def process_url(self, url):
        """Download `url` to a temp file and convert it; raises RetrieveError."""
        original = self.get_temp_file()
        try:
            remote = urllib2.urlopen(url, None, URL_RETRIEVE_TIMEOUT)
            original.write(remote.read())
            original.close()
        except Exception:
            # NOTE(review): the spool temp file is not removed on this
            # failure path — confirm whether that leak is acceptable.
            raise RetrieveError("URL " + url + " could not be retrieved")
        set = self.process_set(original.name)
        os.remove(original.name)
        set['url'] = url
        return set

    def clean_set(self, set):
        # Remove the per-size temp files created by process_set().
        for key, file in set['sizes'].items():
            os.remove(file.name)
UTF-8
Python
false
false
2,012
18,597,208,415,605
212d40b810f42b2b398ccf53524ecf192225f5ec
243e2a25fe12f9f7e0fda524af4968ec0500f513
/test_scripts/test_sep_method.py
f2a1ffcb6bbaed92dd347924885edac8664a9225
[ "GPL-2.0-only", "GPL-1.0-or-later", "LGPL-2.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-generic-exception", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-proprietary-license", "GPL-2.0-or-later" ]
non_permissive
pez2001/sVimPy
https://github.com/pez2001/sVimPy
e6e515167ce510c1f9150d963af381fa826da285
01adfffcaf80ed5deb33c24fb31d6de105aef834
refs/heads/master
2021-01-01T05:33:35.417567
2013-10-29T21:10:14
2013-10-29T21:10:14
3,014,874
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class test():
    """Tiny recursion demo: depth 2 triggers exactly one extra call."""

    def main(self, x, l):
        # Any level other than 2 just reports its arguments.
        if l != 2:
            print("main:", x, ",l:", l)
            return None
        # Level 2 re-enters once with a new label and level 3.
        return self.main("2nd", l + 1)


t = test()
t.main("1st", 0)
UTF-8
Python
false
false
2,013
377,957,170,924
eb9bd953f9d3aa0c985bb991c82071df54858c74
92b68c1c0f671f2435a8579e8115c9cc685f1def
/gnutrition-0.31/src/gnutr_stock.py
4f9e89e6f6a2a0cc9504a7127449c253f5efa743
[ "GPL-3.0-only" ]
non_permissive
ckoch786/GNUstuff
https://github.com/ckoch786/GNUstuff
714db3772fa16831e07ad1c4e0330d7d23998d78
4b63a2755c52fcb44413a2977de26b399db967f6
refs/heads/master
2021-01-15T23:45:50.350764
2011-12-28T14:54:17
2011-12-28T14:54:17
3,063,133
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# gnutrition - a nutrition and diet analysis program.
# Copyright( C) 2000-2002 Edgar Denny ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import gtk
import install

def create_stock():
    """Register gnutrition's custom stock icons and stock items with GTK."""
    pixmap_dir = install.dir + '/pixmaps/'
    icon_factory = gtk.IconFactory()
    # (stock id, icon file) pairs for the three custom stock icons.
    icon_files = (
        ('gnutr-recipe', 'cake.png'),
        ('gnutr-plan', 'plan.png'),
        ('gnutr-food', 'banana.png'),
    )
    for stock_id, filename in icon_files:
        pixbuf = gtk.gdk.pixbuf_new_from_file(pixmap_dir + filename)
        icon_factory.add(stock_id, gtk.IconSet(pixbuf))
    icon_factory.add_default()
    # Stock items with Alt+key accelerators.
    gtk.stock_add((
        ('gnutr-recipe', '_Recipe', gtk.gdk.MOD1_MASK, ord("r"), "uk"),
        ('gnutr-plan', '_Plan', gtk.gdk.MOD1_MASK, ord("p"), "uk"),
        ('gnutr-food', '_Food', gtk.gdk.MOD1_MASK, ord("f"), "uk")))

create_stock()
UTF-8
Python
false
false
2,011
1,640,677,549,939
4d320d2e36f0f17b6fff5a932562e60c5b63ebc3
7c141cca004e389a4249c79bc1aa95217a2ed1fe
/30-Cms/arab/arabpy/arab/channel.py
88f1df4b70add8d9c2b83072c8a98b69a66e9fa9
[]
no_license
EbenZhang/arab
https://github.com/EbenZhang/arab
bd2d29e3f4cf618d710dbec4716cb8928e92b562
986be1018e2cab6b1c7fd9d4242dfc1cda489c0a
refs/heads/master
2020-03-30T08:29:28.686074
2011-10-15T15:06:16
2011-10-15T15:06:16
2,561,638
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python #coding=utf-8 from google.protobuf.service import * import zmq import error import types class Channel(RpcChannel): def __init__(self,server_address,timeout = 10000): '''server_address: base on zmq protocol, eg.tcp://127.0.0.1:5555 timeout: in milliseconds ''' self.zmq_ctx = zmq.Context() self.rpc_socket = self.zmq_ctx.socket(zmq.REQ) self.rpc_socket.connect(server_address) self.timeout = timeout def CallMethod(self, method_descriptor, rpc_controller, request, response_class, done): self.rpc_socket.send(method_descriptor.full_name + '\0' + request.SerializeToString()) poller = zmq.Poller() poller.register(self.rpc_socket,zmq.POLLIN) sockets = dict(poller.poll(self.timeout)) if self.rpc_socket in sockets: data = self.rpc_socket.recv() dummyResp = types.ErrResp() dummyResp.ParseFromString(data) if dummyResp.error.error_code != types.ErrorCode.OK: return dummyResp response = response_class() response.ParseFromString(data) else: #time for msg stay in the queue self.rpc_socket.setsockopt(zmq.LINGER, 0) self.rpc_socket.close() dummyResp = types.ErrResp() dummyResp.error.error_code = types.ErrorCode.TIME_OUT return dummyResp response.error.error_code = types.ErrorCode.OK return response
UTF-8
Python
false
false
2,011
9,990,093,953,289
f59c1d59f2c57dd2072c62983dc80a50944b8403
480754b93cf677a218c5ef0650a30febecd1d8b6
/src/205.py
2d8e38f026af5c6b3340e057d1e9f29569bfe1cd
[]
no_license
kaizensoze/project-euler
https://github.com/kaizensoze/project-euler
5e90c576fa24b27bacbf30dd21a611c0e8f080fb
7cbd0ba10f938a85d4d3fc7550942293a644f054
refs/heads/master
2020-04-14T13:50:54.601979
2014-06-08T04:54:15
2014-06-08T04:54:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import decimal pCount = 0 pt = {} cCount = 0 ct = {} for a in range(1,5): for b in range(1,5): for c in range(1,5): for d in range(1,5): for e in range(1,5): for f in range(1,5): for g in range(1,5): for h in range(1,5): for i in range(1,5): sum = a+b+c+d+e+f+g+h+i if sum in pt.keys(): pt[sum] += 1 else: pt[sum] = 1 pCount += 1 for a in range(1,7): for b in range(1,7): for c in range(1,7): for d in range(1,7): for e in range(1,7): for f in range(1,7): sum = a+b+c+d+e+f if sum in ct.keys(): ct[sum] += 1 else: ct[sum] = 1 cCount += 1 prob = 0 decimal.getcontext().prec = 15 c = decimal.Decimal(cCount) p = decimal.Decimal(pCount) for pk in pt.keys(): for ck in ct.keys(): if pk > ck: pResult = decimal.Decimal(pt[pk]) cResult = decimal.Decimal(ct[ck]) lProb = (pResult*cResult) / (c*p) prob += lProb print(prob)
UTF-8
Python
false
false
2,014
15,281,493,651,547
864231f0797ad8b34ca0204cc9fa5baebaec5378
d732556164a432e22472ea3aa43c48dd611ffb74
/scripts/create_genome_index.py
bdb365ff1cc0839751df62af40f5261ad6001a38
[ "MIT" ]
permissive
verdurin/gimmemotifs
https://github.com/verdurin/gimmemotifs
30c94ccdda33bcfd4701a8cff5f514619e37aeb1
83c99ddb60d1f7414b2ffe0f23e6690a245f57cd
refs/heads/master
2021-01-18T07:49:29.020055
2011-03-17T09:52:48
2011-03-17T09:52:48
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# Copyright (c) 2009-2010 Simon van Heeringen <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.

# Command-line tool: build a gimmemotifs genome index from a directory of
# FASTA files.
from gimmemotifs.genome_index import *
from gimmemotifs.config import *
from optparse import OptionParser
import os  # BUG FIX: os.path is used below but `os` was never imported
           # explicitly (it only worked via a wildcard import above).
import sys

default_index = "/usr/share/gimmemotifs/genome_index/"
try:
    # Prefer the configured index directory when a config is available;
    # fall back to the hard-coded default on any failure (best-effort).
    config = MotifConfig()
    default_index = config.get_index_dir()
except:
    pass

parser = OptionParser()
parser.add_option("-i", "--indexdir", dest="indexdir",
                  help="Index dir (default %s)" % default_index,
                  metavar="DIR", default=default_index)
parser.add_option("-f", "--fastadir", dest="fastadir",
                  help="Directory containing fastafiles", metavar="DIR")
parser.add_option("-n", "--indexname", dest="indexname",
                  help="Name of index", metavar="NAME")

(options, args) = parser.parse_args()

# Both the FASTA directory and the index name are mandatory.
if not options.fastadir or not options.indexname:
    parser.print_help()
    sys.exit(1)

if not os.path.exists(options.indexdir):
    # Parenthesized print: valid and identical in Python 2 and 3.
    print("Index_dir %s does not exist!" % (options.indexdir))
    sys.exit(1)

fasta_dir = options.fastadir
index_dir = os.path.join(options.indexdir, options.indexname)

g = GenomeIndex()
g = g.create_index(fasta_dir, index_dir)
UTF-8
Python
false
false
2,011
10,385,230,971,350
51f45833ff4804a3a0235d336b85db5208cc4b65
8e630ecff5c9022fd1ac80af5b23271a6ce2ad59
/ecolect/items.py
9f79757b043a7d7cca01cfbbe977a299bf2a0d8f
[ "Apache-2.0" ]
permissive
treejames/ecolect
https://github.com/treejames/ecolect
a2bc5f6c5e338afaac829b360f2d1ce0072a6432
decf311016bb94f589d673459079cecef8c62962
refs/heads/master
2018-01-15T12:30:10.802382
2014-04-30T07:09:29
2014-04-30T07:11:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy.item import Item, Field class PostItem(Item): # define the fields for your item here like: # name = Field() title = Field() url = Field() project_name = Field() project_address = Field() project_investment= Field() builder_name = Field() builder_address = Field() eia_name = Field() eia_address = Field() page_content = Field() pollutions = Field() start_date = Field() post_start_date = Field() post_end_date = Field() crawled = Field() spider = Field()
UTF-8
Python
false
false
2,014
8,151,847,953,971
55e9d612ec8333a98ccdfe1c7115dc510c2d277c
f30f510abec73c5270325d7e0e0891270269e1ec
/modules/viewer/viewerimage.py
1de589358c7479035425634a1ad278cfa0cf5f6b
[ "LGPL-2.0-or-later", "GPL-2.0-only", "GPL-2.0-or-later" ]
non_permissive
pombredanne/dff
https://github.com/pombredanne/dff
46b94ca4ab1ff363934cc3624a99f8b334783155
a0453b38b79c96a68b3ee241a490488092fb5a8d
HEAD
2017-05-26T12:01:00.985089
2010-03-18T23:18:19
2010-03-18T23:18:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
#  Solal Jacob <[email protected]>
#

from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt, QSize, QString, SIGNAL, QThread
from PyQt4.QtGui import QPixmap, QImage, QPushButton, QLabel, QWidget, QHBoxLayout, QVBoxLayout, QScrollArea, QIcon, QMatrix

from api.vfs import *
from api.module.module import *
from api.module.script import *
from api.magic.filetype import FILETYPE

import sys
import time
import re


class QRotateButton(QPushButton):
    """Toolbar button that emits its fixed rotation angle when pressed."""

    def __init__(self, angle, icon):
        QPushButton.__init__(self, QIcon(QString(icon)), "")
        self.angle = angle

    def mousePressEvent(self, event):
        self.animateClick()
        # Old-style signal carrying the angle payload.
        self.emit(SIGNAL("clicked"), self.angle)


class QZoomButton(QPushButton):
    """Toolbar button that emits its fixed zoom factor when pressed."""

    def __init__(self, zoom, icon):
        QPushButton.__init__(self, QIcon(QString(icon)), "")
        self.zoom = zoom

    def mousePressEvent(self, event):
        self.animateClick()
        self.emit(SIGNAL("zoomed"), self.zoom)


class LoadedImage(QLabel):
    """Label holding the displayed image plus its rotation/zoom state."""

    def __init__(self):
        QLabel.__init__(self)
        self.node = None           # VFS node of the displayed file
        self.angle = 0
        self.factor = 1
        self.imgWidth = 0          # target width used by adjust()/fitbest()
        self.baseImage = QImage()  # unscaled decoded image
        self.cpixmap = QPixmap()
        self.matrix = QMatrix()

    def load(self, node, type):
        """Read the whole file from the VFS and decode it with format hint `type`."""
        self.node = node
        file = self.node.open()
        buff = file.read()
        file.close()
        self.baseImage.loadFromData(buff, type)

    def adjust(self, imgwidth):
        """Scale the base image to fit `imgwidth`, keeping aspect ratio."""
        self.imgWidth = imgwidth
        self.currentImage = self.baseImage.scaled(QSize(self.imgWidth, self.imgWidth), Qt.KeepAspectRatio, Qt.FastTransformation)
        self.setPixmap(QPixmap.fromImage(self.currentImage))
        self.adjustSize()

    def resize(self, zoomer):
        """Rescale from the base image by `zoomer` relative to the current width."""
        w = self.currentImage.width() * zoomer
        self.currentImage = self.baseImage.scaled(QSize(w, w), Qt.KeepAspectRatio, Qt.FastTransformation)
        self.setPixmap(QPixmap.fromImage(self.currentImage))
        self.adjustSize()

    def rotate(self, angle):
        """Rotate by `angle` degrees; base image rotates too so later rescales keep orientation."""
        matrix = QMatrix()
        matrix.rotate(angle)
        self.currentImage = self.currentImage.transformed(matrix)
        self.baseImage = self.baseImage.transformed(matrix)
        self.setPixmap(QPixmap.fromImage(self.currentImage))
        self.adjustSize()

    def fitbest(self):
        """Reset to the best-fit scale for the last width given to adjust()."""
        self.currentImage = self.baseImage.scaled(QSize(self.imgWidth, self.imgWidth), Qt.KeepAspectRatio, Qt.FastTransformation)
        self.setPixmap(QPixmap.fromImage(self.currentImage))
        self.adjustSize()

    def notSupported(self):
        """Show a placeholder message for undecodable formats."""
        self.setText("Format Not Supported")
        self.adjustSize()


class Metadata(QWidget):
    """Placeholder widget (currently empty)."""

    def __init__(self):
        QWidget.__init__(self)


class ImageView(QWidget, Script):
    """DFF viewer module widget: displays an image node with rotate/zoom controls."""

    def __init__(self):
        Script.__init__(self, "viewerimage")
        self.type = "imageview"
        self.icon = None
        self.vfs = vfs.vfs()
        self.ft = FILETYPE()
        # Matches magic/type descriptions of formats Qt can display.
        self.reg_viewer = re.compile(".*(JPEG|JPG|jpg|jpeg|GIF|gif|bmp|BMP|png|PNG|pbm|PBM|pgm|PGM|ppm|PPM|xpm|XPM|xbm|XBM).*", re.IGNORECASE)
        self.loadedImage = LoadedImage()
        self.sceneWidth = 0

    def start(self, args):
        """Script entry point: remember the node to display."""
        self.node = args.get_node("file")
        self.curnode = self.node

    def createMenuItems(self):
        """Build the rotate/zoom/fit buttons and wire their signals."""
        self.l90button = QRotateButton(-90, ":rotate-left.png")
        self.r90button = QRotateButton(90, ":rotate-right.png")
        self.rotate180button = QRotateButton(180, ":rotate-180.png")
        self.zoomin = QZoomButton(float(1.25), ":zoom-in.png")
        self.zoomout = QZoomButton(float(0.8), ":zoom-out.png")
        self.fitbest = QPushButton("fitbest")
        self.connect(self.l90button, SIGNAL("clicked"), self.rotate)
        self.connect(self.r90button, SIGNAL("clicked"), self.rotate)
        self.connect(self.rotate180button, SIGNAL("clicked"), self.rotate)
        self.connect(self.zoomin, SIGNAL("zoomed"), self.zoom)
        self.connect(self.zoomout, SIGNAL("zoomed"), self.zoom)
        self.connect(self.fitbest, SIGNAL("clicked()"), self.fitbestgeom)

    def drawMenu(self):
        """Lay the control buttons out in a horizontal bar under the image."""
        self.hbox = QHBoxLayout()
        # NOTE(review): self.vbox was already set as layout in g_display();
        # this second setLayout() call is a no-op that Qt warns about.
        self.setLayout(self.vbox)
        self.hbox.addWidget(self.l90button)
        self.hbox.addWidget(self.r90button)
        self.hbox.addWidget(self.rotate180button)
        self.hbox.addWidget(self.zoomin)
        self.hbox.addWidget(self.zoomout)
        self.hbox.addWidget(self.fitbest)
        self.vbox.addLayout(self.hbox)

    def setImage(self):
        """Load the current node into the image label if its type is displayable."""
        if self.node.attr.size != 0:
            map = self.node.attr.smap
            try:
                #XXX temporary patch for windows magic
                f = self.node.attr.smap["type"]
            except IndexError:
                #XXX temporary patch for windows magic
                # BUG FIX: was `self.ft.filetype(node)` — `node` is undefined
                # in this scope and raised NameError on this fallback path.
                self.ft.filetype(self.node)
                f = self.node.attr.smap["type"]
            res = self.reg_viewer.match(f)
            if res != None:
                # Format hint is the first word of the magic description.
                type = f[:f.find(" ")]
                self.loadedImage.load(self.node, type)
            else:
                self.loadedImage.notSupported()

    def g_display(self):
        """Build the widget: scroll area with the image plus the control bar."""
        QWidget.__init__(self, None)
        self.factor = 1
        self.vbox = QVBoxLayout()
        self.setLayout(self.vbox)
        self.scrollArea = QScrollArea()
        self.scrollArea.setWidget(self.loadedImage)
        self.scrollArea.setAlignment(Qt.AlignCenter)
        self.vbox.addWidget(self.scrollArea)
        self.createMenuItems()
        self.drawMenu()
        self.setImage()

    def zoom(self, zoomer):
        """Apply a zoom step; clamp further zooming outside [0.33, 3.33]."""
        self.factor *= zoomer
        self.loadedImage.resize(zoomer)
        if self.factor > 3.33:
            self.zoomin.setEnabled(False)
        elif self.factor < 0.33:
            self.zoomout.setEnabled(False)
        else:
            self.zoomin.setEnabled(True)
            self.zoomout.setEnabled(True)

    def fitbestgeom(self):
        """Reset zoom and refit the image to the scroll area width."""
        self.factor = 1
        self.loadedImage.adjust(self.sceneWidth)
        self.zoomin.setEnabled(True)
        self.zoomout.setEnabled(True)

    def rotate(self, angle):
        self.loadedImage.rotate(angle)

    def updateWidget(self):
        self.sceneWidth = self.scrollArea.geometry().width()
        self.loadedImage.adjust(self.sceneWidth)

    def resizeEvent(self, e):
        # Refit the image whenever the viewer is resized.
        self.sceneWidth = self.scrollArea.geometry().width()
        self.loadedImage.adjust(self.sceneWidth)


class viewerimage(Module):
    """Module registration: binds ImageView to image mime-types."""

    def __init__(self):
        Module.__init__(self, "viewerimage", ImageView)
        self.conf.add("file", "node")
        self.conf.add_const("mime-type", "JPEG")
        self.conf.add_const("mime-type", "GIF")
        self.conf.add_const("mime-type", "PNG")
        self.tags = "viewer"
UTF-8
Python
false
false
2,010
1,683,627,200,041
9f472aabe345931f1bc7a1ffcd8fbfbf8954ee29
c1c004b9721f7c16075e289870a35730377543ab
/musicdb/nonclassical/models.py
d132228134e8402dc839eab51ce569979d40c781
[ "AGPL-3.0-only" ]
non_permissive
MechanisM/musicdb
https://github.com/MechanisM/musicdb
acc7411b2a53ba18df39b22aeb1cbfaac0541e14
0c779da9ae67b505fc5ed3722d1cbef32404c18d
refs/heads/master
2021-01-20T01:38:10.204813
2011-12-02T17:56:12
2011-12-02T17:56:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
import urllib

from mutagen import mp3, easyid3, File as MutagenFile

from django.db import models
from django.conf import settings
from django.core.files import File as DjangoFile
from django.db.models.aggregates import Sum

from musicdb.common.models import AbstractArtist, Nationality, MusicFile, File
from musicdb.db.mixins import NextPreviousMixin
from musicdb.db.fields import MySlugField, FirstLetterField, DirNameField
from musicdb.db.std_image.fields import StdImageField

from .managers import AlbumManager, TrackManager

"""
Non-classical models.
"""

__all__ = ('Artist', 'Album', 'CD', 'Track')


class Artist(AbstractArtist, NextPreviousMixin):
    name = models.CharField(max_length=250)
    is_solo_artist = models.BooleanField(
        'Artist represents a single person',
        default=False,
    )
    nationality = models.ForeignKey(
        Nationality,
        blank=True,
        null=True,
        related_name='nonclassical_artists',
    )
    name_first = FirstLetterField('name')
    dir_name = DirNameField('name')

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return ('nonclassical-artist', (self.slug,))

    def long_name(self):
        """Return "First Last" for solo artists stored as "Last, First"."""
        if self.is_solo_artist:
            try:
                last, first = self.name.split(', ', 1)
                return "%s %s" % (first, last)
            except ValueError:
                # Name does not contain ", " — use it verbatim.
                return self.name
        return self.name
    slug_name = long_name


class Album(models.Model, NextPreviousMixin):
    title = models.CharField(max_length=200)
    artist = models.ForeignKey(Artist, related_name='albums')
    year = models.IntegerField(blank=True, null=True)
    cover = StdImageField(upload_to='album_covers', size=(300, 300),
                          thumbnail_size=(125, 125), blank=True)
    slug = MySlugField('title')
    dir_name = DirNameField('get_dir_name')
    objects = AlbumManager()

    class Meta:
        ordering = ('year', 'title')

    def __unicode__(self):
        if self.year:
            return u"%s (%d)" % (self.title, self.year)
        return self.title

    def delete(self, *args, **kwargs):
        # Delete the album's music files before the row itself.
        for track in self.get_tracks():
            track.delete()
        super(Album, self).delete(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        return ('nonclassical-album', (self.artist.slug, self.slug))

    def get_dir_name(self):
        if self.year:
            return "%d %s" % (self.year, self.title)
        return self.title

    def get_tracks(self):
        return MusicFile.objects.filter(track__cd__album=self). \
            order_by('track__cd', 'track')

    def get_nonclassical_tracks(self):
        return Track.objects.filter(cd__album=self). \
            order_by('cd__num', 'track')

    def total_duration(self):
        # Sum of track lengths; 0 when the album has no tracks.
        return self.get_tracks().aggregate(Sum('length')).values()[0] or 0

    def next(self):
        return super(Album, self).next(artist=self.artist)

    def previous(self):
        return super(Album, self).previous(artist=self.artist)

    def set_artwork_from_url(self, url):
        """Download `url` and store it as this album's cover image."""
        tempfile, headers = urllib.urlretrieve(url)
        try:
            self.cover = DjangoFile(open(tempfile))
            self.save()
        except:
            # Roll back the partially-assigned cover, then re-raise.
            self.cover.delete()
            raise
        finally:
            # Always drop the download temp file; best-effort.
            try:
                os.unlink(tempfile)
            except:
                pass


class CD(models.Model):
    album = models.ForeignKey(Album, related_name='cds')
    num = models.IntegerField()

    class Meta:
        ordering = ('num',)
        unique_together = ('album', 'num')
        verbose_name_plural = 'CDs'

    def __unicode__(self):
        return u"CD %d of %d from %s" % \
            (self.num, self.album.cds.count(), self.album)

    def get_tracks(self):
        return MusicFile.objects.filter(track__cd=self).order_by('track')

    def total_duration(self):
        return self.get_tracks().aggregate(Sum('length')).values()[0] or 0


class Track(models.Model):
    title = models.CharField(max_length=250)
    cd = models.ForeignKey(CD, related_name='tracks')
    num = models.IntegerField()
    music_file = models.OneToOneField(MusicFile, related_name='track')
    dir_name = DirNameField('get_dir_name')
    objects = TrackManager()

    class Meta:
        ordering = ('num',)
        unique_together = ('cd', 'num')

    def __unicode__(self):
        return self.title

    def get_dir_name(self):
        return "%02d %s.mp3" % (self.num, self.title)

    def metadata(self):
        """Return the ID3-style tag dict for this track."""
        album = self.cd.album
        return {
            'title': self.title,
            'album': unicode(album.title),
            'artist': unicode(album.artist.long_name()),
            'tracknumber': str(self.num),
            # BUG FIX: was `str(album.year) or ''` — str(None) is the truthy
            # string 'None', so a missing year was tagged as 'None'.
            'date': str(album.year) if album.year else '',
        }

    @classmethod
    def quick_create(cls, abspath, cd, track_title, track_num):
        """Create a Track (and its MusicFile/File rows) from an MP3 on disk.

        Raises ValueError for non-MP3 input.
        """
        audio = MutagenFile(abspath)
        if isinstance(audio, mp3.MP3):
            extension = 'mp3'
        else:
            # BUG FIX: `extension` was previously left unassigned for
            # non-MP3 input, producing a confusing NameError below.
            raise ValueError("Unsupported audio format: %r" % abspath)
        location = os.path.join(
            'albums',
            '%d' % cd.id,
            '%.2d.%s' % (track_num, extension),
        )
        file = File.create_from_path(abspath, location)
        music_file = MusicFile.objects.create(
            file=file,
            rev_model='track',
        )
        return cls.objects.create(
            cd=cd,
            num=track_num,
            title=track_title,
            music_file=music_file,
        )
UTF-8
Python
false
false
2,011
3,040,836,853,491
3a01f9dd22eaf042760cc0d71d562a18ee3213e6
1b98c70426580d6cebf36b6f9ed807fe7a9c0729
/rl-overhead.py
378dcc7ba1e834f8041a900f03f1bf347ce6a4f1
[]
no_license
jvimal/eyeq-tests
https://github.com/jvimal/eyeq-tests
54a1bba50d7019c07b09fdd147b831a5a823b5ba
d99d05d001d5a3d9fce53b66c6587f605245b555
refs/heads/master
2020-06-06T17:31:13.127695
2013-03-21T17:24:46
2013-03-21T17:24:46
8,303,987
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python

# Testbed experiment: measure the CPU/throughput overhead of a rate limiter
# (the custom `newrl` kernel module vs. Linux HTB) under many parallel TCP
# connections driven by iperf.
import sys
import argparse
import multiprocessing
from common import *
import termcolor as T
from expt import Expt
from iperf import Iperf
from time import sleep
from host import *
import os

parser = argparse.ArgumentParser(description="RL overhead.")
parser.add_argument('--rate',
                    dest="rate",
                    action="store",
                    help="Aggregate rate of RL.",
                    required=True)
parser.add_argument('--dir',
                    dest="dir",
                    action="store",
                    help="Directory to store outputs.",
                    required=True)
parser.add_argument('-n',
                    dest="n",
                    action="store",
                    type=int,
                    help="Number of RLs.",
                    default=1)
parser.add_argument('-P',
                    dest="P",
                    action="store",
                    type=int,
                    help="Number of TCP connections.",
                    default=20000)
parser.add_argument('-t',
                    dest="t",
                    action="store",
                    type=int,
                    help="Time to run expt in seconds.",
                    default=120)
parser.add_argument('--rl',
                    dest="rl",
                    choices=["newrl", "htb"],
                    help="Choose rate limiter")
parser.add_argument("--profile",
                    dest="profile",
                    help="Directory to store profile data.  Omit if you don't want to profile",
                    default=None)

args = parser.parse_args()


class RlOverhead(Expt):
    def start(self):
        """Set up both hosts, install the chosen rate limiter on the sender,
        then launch the iperf server/client pair (and optional profiling).

        NOTE(review): host addresses 10.0.1.1/10.0.1.2 are hard-coded for a
        specific testbed.
        """
        h1 = Host("10.0.1.1")
        h2 = Host("10.0.1.2")
        hlist = HostList(h1, h2)
        self.hlist = hlist
        n = self.opts('n')
        # Start from a clean state: no leftover module or qdiscs.
        hlist.cmd("rmmod newrl")
        hlist.remove_qdiscs()
        hlist.configure_rps()
        n = self.opts('n')  # NOTE(review): duplicate of the assignment above; n is otherwise unused here.
        if self.opts("rl") == "newrl":
            dev = h1.get_10g_dev()
            rate = self.opts("rate")
            h1.cmd("insmod ~/vimal/exports/newrl.ko rate=%s dev=%s" % (rate, dev))
        else:
            # insert htb qdiscs in hierarchy
            dev = h1.get_10g_dev()
            # --rate is given in Mbit; HTB ceil is expressed in Gbit.
            ceil = '%sGbit' % (int(self.opts('rate')) / 1000)
            cmd = "tc qdisc add dev %s root handle 1: htb default 1" % dev
            h1.cmd(cmd)
            cmd = "tc class add dev %s classid 1:1 parent 1: htb" % dev
            cmd += " rate %s ceil %s mtu 64000" % (ceil, ceil)
            h1.cmd(cmd)
        h1.start_monitors(self.opts("dir"))
        # Start iperf server
        iperf = Iperf({'-p': 5001})
        self.procs.append(iperf.start_server(h2))
        sleep(1)  # give the server a moment to start listening
        # Start all iperf clients
        iperf = Iperf({'-p': 5001, '-P': self.opts("P"), '-i': '10',
                       '-c': h2.get_10g_ip(),
                       'dir': os.path.join(self.opts("dir"), "iperf"),
                       '-t': self.opts('t')})
        self.procs.append(iperf.start_client(h1))
        if self.opts("profile"):
            self.h1 = h1
            h1.start_profile(dir=self.opts("profile"))
        return

    def stop(self):
        """Tear down profiling, all launched processes, qdiscs and the module."""
        if self.opts("profile"):
            self.h1.stop_profile(dir=self.opts("profile"))
        self.hlist.killall()
        self.hlist.remove_qdiscs()
        self.hlist.cmd("rmmod newrl")


if __name__ == "__main__":
    RlOverhead(vars(args)).run()
UTF-8
Python
false
false
2,013
2,929,167,709,339
02339c95bfaf7ffd198283c6afe0914310c7ae51
87502d05a8fc8408aa50554d170dac06c634c797
/ui/ranklistitemwidget.py
fe7a6c6da5265f6dce9d024b23ba4cf20217da2b
[ "GPL-2.0-only" ]
non_permissive
Jexxar/software-center
https://github.com/Jexxar/software-center
d986f65f17d807e3b413913209171662f5ac9289
d5b73c689a39721d6483f949f085204261c9d86a
refs/heads/master
2018-01-13T18:24:37.774564
2014-09-11T15:55:05
2014-09-11T15:55:05
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- ### BEGIN LICENSE # Copyright (C) 2013 National University of Defense Technology(NUDT) & Kylin Ltd # Author: # Shine Huang<[email protected]> # Maintainer: # Shine Huang<[email protected]> # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3, as published # by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranties of # MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. from PyQt4.QtGui import * from PyQt4.QtCore import * from ui.ukrliw import Ui_RankListWidget class RankListItemWidget(QWidget): def __init__(self, name, rank, parent=None): QWidget.__init__(self,parent) self.ui_init() self.ui.name.setText(name) self.ui.number.setText(str(rank)) self.ui.number.setAlignment(Qt.AlignCenter) # letter spacing # font = QFont() # font.setLetterSpacing(QFont.PercentageSpacing, 95.0) # self.ui.name.setFont(font) self.ui.name.setStyleSheet("QLabel{font-size:13px;color:#666666;}") self.ui.number.setStyleSheet("QLabel{font-size:15px;font-style:italic;color:#999999;}") def ui_init(self): self.ui = Ui_RankListWidget() self.ui.setupUi(self) self.show() def enterEvent(self, event): self.resize(200, 52) def leaveEvent(self, event): self.resize(200, 24)
UTF-8
Python
false
false
2,014
3,272,765,104,823
f66b69d407cfc217f4b330f6f22f8036c3a99f96
67b01421b5058dc3d9df4a7ac52169984938bb16
/Examples/outer()/outer.py # import inner
de5d6b281c36e6ed3087cf1daabbcf4cf0132953
[]
no_license
cancerhermit/outer.py
https://github.com/cancerhermit/outer.py
d2176551554c6f2445816fb665245e2110dd0ad5
c1dad052311d85f2de84ebabaab212e9bdc78eec
refs/heads/master
2015-08-10T16:08:27.262190
2014-07-16T08:05:34
2014-07-16T08:05:34
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python import inner
UTF-8
Python
false
false
2,014
1,906,965,517,628
90b52a098a98da7c6f9b4fceb14a7a6d2f7b290b
07208e00e630949d378c0a97311b7f34024cf8ed
/yts/cryptopage/models.py
7cb4f8476d87194d129d3e5ff330cc6abb833924
[]
no_license
ytsschool/main
https://github.com/ytsschool/main
494d402f7d4cbb73c7f13e0e1a6b3367959b235c
386f66043d348d4acfca8bd4e4718e09044d7fe4
refs/heads/master
2021-01-19T09:44:23.414087
2014-03-20T18:49:02
2014-03-20T18:49:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models from django import forms import random texts = { 1: "Not to know is bad, not to wish to know is worse.", 2: "Success doesn't come to you... you go to it.", 3: "Formal education will make you a living. Self-education will make you a fortune.", 4: "Those who cannot change their minds cannot change anything.", 5: "If anything is worth trying at all, it's worth trying at least 10 times.", 6: "Two things are infinite: the universe and human stupidity; and I'm not sure about the universe.", 7: "Being entirely honest with oneself is a good exercise.", 8: "The possession of unlimited power will make a despot of almost any man. There is a possible Nero in the gentlest human creature that walks.", 9: "Every English poet should master the rules of grammar before he attempts to bend or break them.", 10: "To limit the press is to insult a nation; to prohibit reading of certain books is to declare the inhabitants to be either fools or slaves.", } # Create your models here. class Cryptotext(models.Model): text1 = "let it be let it be speaking words of wisdom let it be" @staticmethod def encrypt(text): encrypted = "" for i in text: encrypted += chr(ord(i) + 3) return encrypted @staticmethod def random_text(): return texts[random.randint(1,10)] class ContactForm(forms.Form): decrypted = forms.CharField(max_length=200)
UTF-8
Python
false
false
2,014
10,462,540,361,492
b86f1509d034c7686fda6434f91e7c1c53e69071
07b8a01fc847348b276a576c57ae38d21114e414
/Revolutionary Technology/Prime Number Generator.py
49707fc5fc74be6fefb9384cd1eb38e60b0cc4ba
[]
no_license
AngryNerds/general-projects
https://github.com/AngryNerds/general-projects
6df27cf26d33a82f9712482e30965727eae01c0c
70a24e94c1c508b03670d5866bd61de99c1b4984
refs/heads/master
2021-01-01T19:41:59.059106
2014-12-10T08:03:04
2014-12-10T08:03:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
num = 2 remainders = [] while True: print num num = num + 1
UTF-8
Python
false
false
2,014
3,178,275,851,248
9dda823777e9931fc23a2fd21cd49f42ccfb4d70
0cbd1b5a91cf0b1c763656d8c90f8ac081efe674
/rules.py
2d389b482361d43404c6b8005423092daa6335ed
[]
no_license
taliesinb/chartreuse
https://github.com/taliesinb/chartreuse
ec3259a59640aa0dce5a9331aa4c91ad025502ea
c8f152f6ef80fdcca2749e5c89c600a0caa4bc97
refs/heads/master
2021-01-01T05:35:43.645149
2011-08-27T23:18:29
2011-08-27T23:18:29
1,395,527
4
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from utils import list_replace, identity def singleton(x): if type(x) not in [list]: return [x] else: return x def rules(symbol, patterns, action=identity): return [rule(symbol, singleton(p), action) for p in patterns] class rule(object): def __init__(self, symbol, pattern, action=identity): self.symbol = symbol if type(pattern) == str: pattern = [pattern] self.pattern = pattern self.action = action def first_symbol(self): return self.pattern[0] def replace(self, reps): if self.symbol in reps: del self # is this okay? else: list_replace(self.pattern, reps) def __repr__(self): return self.symbol.ljust(15) + "->\t" + str(self.pattern).ljust(20)
UTF-8
Python
false
false
2,011
11,776,800,347,765
d85c438f2fb35cd659b0f8163fb539af1bf6fba5
4d3fe4579fd39a8042e87de0132f1824d8627c19
/myapps/forms.py
554abc986814e15448f11efab84b46513df0b28d
[]
no_license
jzxyouok/yunfan
https://github.com/jzxyouok/yunfan
185a061cab6cee5d83a976e3afc86bebbccf49a5
62e99261d4d2d41fb2de65f9c3d8eb7aee80913b
refs/heads/master
2020-06-30T23:47:02.821912
2014-03-21T02:45:41
2014-03-21T02:45:41
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding=utf-8 #todo 从前台表单传来问题id数据 from django import forms from models import Answer,Question class AnswerForm(forms.ModelForm): #ModelForm 使form能save数据到数据库 class Meta: model = Answer#关联起来 description = forms.CharField(label=("问题描述"), max_length=200, widget=forms.Textarea(), error_messages={'invalid': ("")} ) def __init__(self, *args, **kwargs): super(AnswerForm, self).__init__(*args, **kwargs) def clean_description(self): description = self.cleaned_data['description'] if len(description) <=8: raise forms.ValidationError("拜托,多些几个字会死啊") return description def clean_owner(self): owner = self.cleaned_data['owner'] if owner == 'lzj' : raise forms.ValidationError('这个问题让wwj来回答') return owner class QuestionForm(forms.ModelForm): #ModelForm 使form能save数据到数据库 class Meta: model = Question#关联起来 exclude = ('owner') title = forms.CharField(label=(""), max_length=30, widget=forms.TextInput(attrs={'placeholder': '问题名称', }), error_messages={'invalid': ("")} ) description = forms.CharField(label=(""), max_length=200, widget=forms.Textarea(attrs={'placeholder': '问题描述', }), error_messages={'invalid': ("字数不够")} ) def __init__(self, *args, **kwargs): super(QuestionForm, self).__init__(*args, **kwargs) def clean_description(self): description = self.cleaned_data['description'] if len(description) <=8: raise forms.ValidationError("字数不够") return description def clean_owner(self): owner = self.cleaned_data['owner'] if owner == 'lzj' : raise forms.ValidationError('这个问题让wwj来回答') return owner
UTF-8
Python
false
false
2,014
15,994,458,249,751
b760564f14d3520adfe5b299f512dd4868ae43df
9a5b06660494299dfa9711d1ffce43ca3af45e57
/aurora/webcomponents/session.py
4479dd5e895c37657feb1612c5394d1b95ad0c85
[ "BSD-3-Clause" ]
permissive
yeiniel/aurora
https://github.com/yeiniel/aurora
9e32097e70333d0e5034fd48d8020ab0a88ac5f4
77cd9cda2493f5a8c9b6ee759e5c2855c6a892f5
refs/heads/master
2020-05-17T07:59:00.330797
2013-01-21T13:34:17
2013-01-21T13:34:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright (c) 2011, Yeiniel Suarez Sosa.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#
#   * Neither the name of Yeiniel Suarez Sosa. nor the names of its
#     contributors may be used to endorse or promote products derived from
#     this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import collections
import hashlib
import hmac
import random
import time

from aurora.webapp import foundation

__all__ = ['SessionProvider']


class SessionProvider:
    """ Provide state to the HTTP protocol.

    This component has two use cases:

     * In a Web application that use the Aurora Web framework
       infrastructure.
     * In a Web application with a custom infrastructure.

    In the first case you need to provide an implementation for the
    `get_request` optional dependency service and add the `after_handle`
    listener service to the application instance `after_handle` event. Under
    this conditions you can use the `get_session` provided service without
    the Web request object argument. This is known as the higher level
    service.

    In the second case the exposed services are the ones known as the lower
    level services: the `get_session` and `persist_session` services. As long
    as there is no implementation for the optional Web request object
    provisioning service you need to pass explicitly the Web request object
    as argument for the `get_session` service and call the `persist_session`
    service in all you handlers.
    """

    #
    # stubs for component dependencies
    #

    secret = None  # secret string used to create the (id, hash) pair.

    cookie_name = 'aurora-sid'  # name of the cookie used to persist
                                # information on the client browser.

    max_age = 3300  # browser cookie maximum age

    def get_cache(self) -> collections.MutableMapping:
        # NOTE(review): `collections.MutableMapping` is the pre-3.3
        # location; it was removed in Python 3.10 in favour of
        # `collections.abc.MutableMapping` — confirm the target runtime.
        """ Return the session cache.

        The session cache is a mapping that use as key the session id. By
        default this service return an in-memory session cache and this
        implementation is not suitable in the use case of multiple
        application instances behind a load balancing proxy.

        :return: A mapping.
        """
        try:
            return self.__cache
        except AttributeError:
            # Lazily create the in-memory cache on first access.
            self.__cache = {}
            return self.__cache

    def get_request(self) -> foundation.Request:
        """ Web request been handled by the application.

        The Web application can handle one Web request at a time. This
        service return the Web request currently been handled.
        """
        raise NotImplementedError()

    #
    # component implementation
    #

    def __init__(self, secret: str, get_request=None):
        """ Initialize the session provider component.

        If the `get_request` service is provided then the `after_handle`
        service is exposed by the component.

        :param secret: The secret used to create the (id, hash) pair.
        :param get_request: A
            :func:`aurora.webapp.infrastructure.Application.get_request`
            compliant service.
        """
        self.secret = secret

        if get_request:
            self.get_request = get_request

    def generate_id(self) -> str:
        # Derive an 8-hex-char session id from wall clock + PRNG + secret.
        # NOTE(review): time.time()/random.random() are not a
        # cryptographically secure randomness source — consider
        # verifying whether that matters for this deployment.
        rnd = ''.join((str(time.time()), str(random.random()), self.secret))
        return hashlib.sha1(rnd.encode()).hexdigest()[:8]

    def make_hash(self, id: str) -> str:
        # 8-hex-char HMAC-SHA1 of the id, keyed with the secret; used to
        # authenticate the id round-tripped through the client cookie.
        return hmac.new(
            self.secret.encode(), id.encode(), hashlib.sha1).hexdigest()[:8]

    def get_session_info(self, request: foundation.Request) -> (str, str):
        # Return the (id, hash) pair for this request, creating and
        # memoizing it on the request object on first use. The cookie
        # value layout is: first 8 chars = id, next 8 chars = hash.
        if hasattr(request, '_session_id'):
            id = request._session_id
            hash = request._session_hash
        else:
            cn = self.cookie_name
            # Accept the cookie only if its hash matches our HMAC of its id.
            if cn in request.cookies and \
                    request.cookies[cn][8:] == self.make_hash(
                        request.cookies[cn][:8]):
                id = request.cookies[self.cookie_name][:8]
                hash = request.cookies[self.cookie_name][8:]
            else:
                # Missing or tampered cookie: mint a fresh pair.
                id = self.generate_id()
                hash = self.make_hash(id)
            request._session_id = id
            request._session_hash = hash
        return id, hash

    #
    # services provided by the component
    #

    def get_session(self, request=None) -> collections.MutableMapping:
        """ Return the session mapping associated to a Web request object.

        If the Web request object is not given then the one returned by the
        `on_request` optional dependency service will be used as default.

        This method create the session mapping on first access if needed.

        :param request: A Web request object.
        :return: The session mapping.
        """
        if not request:
            request = self.get_request()

        id, hash = self.get_session_info(request)

        return self.get_cache().setdefault(id, {})

    def persist_session(self, request: foundation.Request,
                        response: foundation.Response):
        """ Persist session identification information on the client browser.

        If the session cache mapping is empty then it is destroyed and no
        session information is sent to the browser.

        :param request: A Web request object.
        :param response: A Web response object.
        """
        id, hash = self.get_session_info(request)

        if id in self.get_cache() and len(self.get_cache()[id]) > 0:
            # Non-empty session: refresh the cookie (value is id + hash).
            response.set_cookie(self.cookie_name, ''.join((id, hash)),
                                self.max_age, request.script_name)
        elif id in self.get_cache():
            # Empty session: drop it from the cache, send no cookie.
            del self.get_cache()[id]

    def post_dispatch(self, response: foundation.Response):
        """ Service meant to be used as a application `after_handle` listener.

        It is only available if the `get_request` optional dependency service
        is provided because it use that service to make Web request
        provisioning.

        :param response: The Web response object.
        """
        self.persist_session(self.get_request(), response)
UTF-8
Python
false
false
2,013
5,446,018,554,043
681f7e04284f409ffd65161de2b6e23900424df6
0b5c0259b13821e6a8990929dceee92caf043217
/lib/db/game.py
b8f0693397efb84a845af1d1c2f124bc98090244
[]
no_license
bfellers/game_collector
https://github.com/bfellers/game_collector
91e2aabbb3c345cad1e12ce16a995e64b2fc56bd
bc01af753d69596afe2da941082c56e3d8d07aeb
refs/heads/master
2016-08-05T07:09:55.103148
2012-06-10T23:11:25
2012-06-10T23:11:25
3,294,184
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from lib.util import load_image class Game(): def __init__(self, description=None, developer=None, genre=None, id=None, image=None, isbn=None, location=None, notes=None, platform=None, price=None, publisher=None, rating=None, release_date=None, score=None, theme=None, title=None): self.description = description self.developer = developer self.genre = genre self.id = id self.image = load_image(image) self.isbn = isbn self.location = location self.notes = notes self.platform = platform self.price = price self.publisher = publisher self.rating = rating self.release_date = release_date self.score = score self.theme = theme self.title = title def __repr__(self): ''' What printing the object displays. ''' output = "ID:{id}, Title:{title}, Rating:{rating}, " \ "Developer:{developer}" output = output.format(id=self.id, title=self.title, rating=self.rating, developer=self.developer) return output def get_id(self): ''' Return game id ''' return self.id def get_title(self): ''' Return game title ''' return self.title def get_rating(self): ''' Return game rating ''' return self.rating def get_image(self): ''' Return game image ''' return self.image def set_id(self, id): ''' @param id is new id value Set new game id. ''' self.id = id return self.id def set_title(self, title, is_key=False): ''' @param title is new title value @param is_key denotes whether new value is key or value Set game title return updated title add logic to update db record (maybe make commit function) ''' self.title = title return self.title def set_rating(self, rating, is_key=False): ''' @param rating is new rating value @param is_key denotes whether new value is key or value set game rating return updated rating make so you can pass add logic to update db record (maybe make commit function) ''' self.rating = rating return self.rating def set_image(self, image): ''' @param image is an image file set new image file make sure we add logic here to update db immediately ''' self.image = image return self.image
UTF-8
Python
false
false
2,012
14,147,622,298,474
36cdd7366db7c415caa3f51a6f911bdcd9f21b26
bd827d8135634b637a4b647f5161593136057a00
/trunk/src/lib/python/RepoSuiteComponent.py
a1b5fdcab57ca1ff26e727c4fc29253969db7f35
[ "GPL-1.0-or-later", "GPL-3.0-or-later", "GPL-3.0-only" ]
non_permissive
alvesev/apt-satellites
https://github.com/alvesev/apt-satellites
fb9b4156b451b001001ab9363041cb1a26d74b7b
57477c099119f3532b9c7952c09c6e3f76f3ef7c
refs/heads/master
2021-01-18T13:46:00.586972
2014-03-30T10:52:12
2014-03-30T10:52:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python -B # # Copyright 2013-2014 Alex Vesev # # This file is part of Apt Satellites. # # Apt Satellites is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Apt Satellites is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Apt Satellites. If not, see <http://www.gnu.org/licenses/>. # ## class RepoSuiteComponent: name = 'default' architecturesPool = ['i386', 'amd64', 'all', 'source'] def __init__(self, newName = '', newArchPool = []): if newName: self.name = newName if newArchPool: self.architecturesPool = newArchPool if not type(newName) == type(''): raise Exception("New name must be a string but it is not.") if not type(newArchPool) == type([]): raise Exception("New architectures pool must be a list but it is not.")
UTF-8
Python
false
false
2,014
11,879,879,557,354
56eb2035275721d0acafe1fbf6ed8c7608f02978
694c8f0bb14dabd58516dcbadfa93c99d9dcbb8b
/problem20.py
5c5be4bf2f77dd8b857154a4a322d2ae144b07d4
[]
no_license
JacAbreu/ProjetoEuler
https://github.com/JacAbreu/ProjetoEuler
4484cb2dcb1942e848fdeab68413d1d1abc23a83
8d284446aca2d74eecd14af5276f5b6626d504db
refs/heads/master
2021-01-25T03:19:49.722077
2012-11-04T02:16:08
2012-11-04T02:16:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#Find the sum of the digits in the number 100! from math import * def factorial (n): if n == 0: return 1 else: return n*factorial(n-1) result = factorial(100) sum_digits_factorial_100 = 0 result_aux = result while(result_aux > 0): sum_digits_factorial_100 += result_aux % 10 result_aux /=10 print sum_digits_factorial_100
UTF-8
Python
false
false
2,012
8,160,437,893,980
d9f4394800730f755dacbf2be3b1ba982d8f17b3
abaceea434f17b543688f4d6b1b077fbb9d02789
/CoreNLP/pretraining_code/PretrainSTB.py
d1b16a0dcaba5dab531069f050ab4f1e7edecf74
[ "GPL-2.0-or-later" ]
non_permissive
kkawabat/Pseudo-Ensembles
https://github.com/kkawabat/Pseudo-Ensembles
3150aad8350f51b5b61840456f443af4f642a83e
6fa81df05b64528b92b262e7d7882d0033e75f24
refs/heads/master
2021-01-18T09:55:47.535261
2014-11-06T01:42:17
2014-11-06T01:42:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import numpy.random as npr
import NLMLayers as nlml
import NLModels as nlm
import cPickle as pickle
from HelperFuncs import zeros, ones, randn, rand_word_seqs
import CorpusUtils as cu

#######################
# Test scripting code #
#######################

def some_nearest_words(keys_to_words, sample_count, W1=None, W2=None):
    """Sample `sample_count` random words and find their 10 nearest
    neighbors by cosine similarity over the (row-normalized) vectors.

    keys_to_words: dict mapping integer word keys -> word strings.
    W1 / W2: word vector matrices indexed by word key; if W2 is given it
        is horizontally stacked with W1 before computing similarities.
        (Assumes rows are word vectors — shape (num_keys, dim).)
    Returns [source_keys, neighbor_keys, source_words, neighbor_words].
    """
    assert(not (W1 is None))
    if not (W2 is None):
        W = np.hstack((W1, W2))
    else:
        W = W1
    # Normalize rows to unit length so a dot product is cosine similarity.
    norms = np.sqrt(np.sum(W**2.0,axis=1,keepdims=1))
    W = W / (norms + 1e-5)
    # Restrict to rows that correspond to valid word keys.
    max_valid_key = np.max(keys_to_words.keys())
    W = W[0:(max_valid_key+1),:]
    #
    source_keys = np.zeros((sample_count,)).astype(np.uint32)
    neighbor_keys = np.zeros((sample_count, 10)).astype(np.uint32)
    all_keys = np.asarray(keys_to_words.keys()).astype(np.uint32)
    for s in range(sample_count):
        # Pick a random source word and rank all words by cosine similarity.
        i = npr.randint(0,all_keys.size)
        source_k = all_keys[i]
        neg_cos_sims = -1.0 * np.sum(W * W[source_k], axis=1)
        sorted_k = np.argsort(neg_cos_sims)
        source_keys[s] = source_k
        # Skip index 0 (the word itself) and keep the next 10 neighbors.
        neighbor_keys[s,:] = sorted_k[1:11]
    # Map keys back to word strings for display.
    source_words = []
    neighbor_words = []
    for s in range(sample_count):
        source_words.append(keys_to_words[source_keys[s]])
        neighbor_words.append([keys_to_words[k] for k in neighbor_keys[s]])
    return [source_keys, neighbor_keys, source_words, neighbor_words]

def record_word_vectors(w2k, cam, file_name):
    """Write some trained word vectors to the given file.

    Output format: one line per word, "word v1 v2 ... vD", words sorted.
    Also prints an approximate RMS of the vector entries as a sanity check.
    """
    wv_file = open(file_name, 'w')
    all_words = w2k.keys()
    all_words.sort()
    wv_std_dev = 0.0
    for word in all_words:
        key = w2k[word]
        word_vec = cam.word_layer.params['W'][key]
        wv_std_dev += np.mean(word_vec**2.0)
        word_vals = [word]
        word_vals.extend([str(val) for val in word_vec])
        wv_file.write(" ".join(word_vals))
        wv_file.write("\n")
    wv_file.close()
    wv_std_dev = np.sqrt(wv_std_dev / len(all_words))
    print("approximate word vector std-dev: {0:.4f}".format(wv_std_dev))
    return

def init_biases_with_lups(cam, w2lup, w2k):
    """Init class layer biases in cam with log unigram probabilities."""
    # Clamp at -6.0 so extremely rare words don't get huge negative biases.
    for w in w2lup:
        cam.class_layer.params['b'][w2k[w]] = max(w2lup[w], -6.0)
    return

if __name__=="__main__":
    # select source of phrases to pre-train word vectors for.
    data_dir = './training_text/train_and_dev' # TO USE TRAIN AND DEV SETS
    #data_dir = './training_text/train_only' # TO USE ONLY TRAIN SET
    # set some parameters.
    min_count = 2   # lower-bound on frequency of words in kept vocab
    sg_window = 5   # context size or skip-gram sampling
    ns_count = 15   # number of negative samples for negative sampling
    wv_dim = 70     # dimensionality of vectors to pre-train
    cv_dim = 10     # this won't be used. it's safe to ignore
    lam_l2 = 0.5 * wv_dim**0.5 # will be used to constrain vector norms

    # generate the training vocabulary
    sentences = cu.SentenceFileIterator(data_dir)
    key_dicts = cu.build_vocab(sentences, min_count=2, compute_hs_tree=True, \
            compute_ns_table=True, down_sample=0.0)
    w2k = key_dicts['words_to_keys']
    k2w = key_dicts['keys_to_words']
    w2lups = key_dicts['word_log_probs']
    neg_table = key_dicts['ns_table']
    unk_word = key_dicts['unk_word']
    hsm_code_dict = key_dicts['hs_tree']
    # Re-open the sentence iterator (it was consumed by build_vocab) and
    # sample training phrases from it.
    sentences = cu.SentenceFileIterator(data_dir)
    tr_phrases = cu.sample_phrases(sentences, w2k, unk_word=unk_word, \
            max_phrases=100000)

    # get some important properties of the generated training vocabulary
    max_cv_key = len(tr_phrases) + 1
    max_wv_key = max(w2k.values())
    max_hs_key = key_dicts['hs_tree']['max_code_key']

    # initialize the model to-be-trained
    cam = nlm.CAModel(wv_dim, cv_dim, max_wv_key, max_cv_key, \
                      use_ns=True, max_hs_key=max_hs_key, \
                      lam_wv=lam_l2, lam_cv=lam_l2, lam_cl=lam_l2)
    # init parameters in word, context, and classification layers
    cam.use_tanh = True
    cam.init_params(0.02)
    # set parameters in context layer to 0s, across the board
    cam.context_layer.init_params(0.0)
    # tell the model to train subject to dropout and weight fuzzing
    cam.set_noise(drop_rate=0.5, fuzz_scale=0.02)
    # init prediction layer biases with log unigram probabilities
    init_biases_with_lups(cam, w2lups, w2k)
    # NOTE: given the properties of negative sampling, initializing with the
    # log unigram probabilities is actually kind of silly. But, we'll leave it
    # in there because I didn't know better at the time, and the resulting
    # vectors performed adequately.

    # initialize samplers for drawing positive pairs and negative contrastors
    pos_sampler = cu.PhraseSampler(tr_phrases, sg_window)
    neg_sampler = cu.NegSampler(neg_table=neg_table, neg_count=ns_count)

    # train all parameters using the training set phrases, decaying the
    # learning rate geometrically over 50 outer rounds
    learn_rate = 1e-2
    decay_rate = 0.975
    for i in range(50):
        cam.train(pos_sampler, neg_sampler, 250, 50001, train_ctx=False, \
                  train_lut=True, train_cls=True, learn_rate=learn_rate)
        learn_rate = learn_rate * decay_rate

    # dump the trained word vectors and show a few nearest-neighbor samples
    record_word_vectors(w2k, cam, "wv_d{0:d}_mc{1:d}.txt".format(wv_dim, min_count))

    [s_keys, n_keys, s_words, n_words] = some_nearest_words( k2w, 10, \
            W1=cam.word_layer.params['W'], W2=None)
    for w in range(10):
        print("{0:s}: {1:s}".format(s_words[w],", ".join(n_words[w])))

##############
# EYE BUFFER #
##############
UTF-8
Python
false
false
2,014
3,650,722,201,768
191cc70add324b414a8335c68879f2456047a05f
57fd2fc08771c8707ab567dba66f4a70ba0caec3
/src/manozodynas/views.py
dc24db4c41ea92ea1d3c10ad2fd9306d25b30642
[ "MIT" ]
permissive
mariukz3/manozodynas
https://github.com/mariukz3/manozodynas
d970148ebd9ba8214e19662bb022e839203f3414
907a6751a9ebc13563fbd20a969fbe620f6d4c4b
refs/heads/master
2021-01-16T20:48:52.464467
2014-05-26T16:40:54
2014-05-26T16:40:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse from .forms import LoginForm from django.contrib.auth import login from manozodynas.models import Word from manozodynas.models import Translation from django.views.generic import CreateView def index_view(request): return render(request, 'manozodynas/index.html', {}) def login_view(request): if request.method == 'POST': form = LoginForm(request.POST) if form.is_valid(): user = form.cleaned_data['user'] if user is not None and user.is_active: login(request, user) return HttpResponseRedirect(reverse('index')) else: form = LoginForm() #import ipdb; ipdb.set_trace() return render(request, 'manozodynas/login.html', {'form':form}) def words_view(request): return render(request, 'manozodynas/list.html', {'list':Word.objects.all()}) class TypeWord(CreateView): model = Word template_name = 'manozodynas/type_word.html' success_url = '/list_words/' class TypeTranslation(CreateView): model = Translation template_name = 'manozodynas/type_translation.html' success_url = '/list_words/' def get_context_data(self, **kwargs): sarasas=super(TypeTranslation, self).get_context_data(**kwargs) a=self.kwargs.get("pk") sarasas["pk"]=a zod=Word.objects.get(pk=a) sarasas["var"]=zod return sarasas
UTF-8
Python
false
false
2,014
12,764,642,834,188
0fe86d81dcae1f2102ed4b4ccd9cbe4386e8ee04
7bb2971188326cf78f33f02588f7229798f1a5e8
/ksp_login/templatetags/ksp_login.py
21a758283a83f9ca057d5640ba1dda6c76988d55
[ "BSD-2-Clause" ]
permissive
black3r/ksp_login
https://github.com/black3r/ksp_login
a01ec49b455347737866f98a8e3f29d7ad5aefaa
0e16707c48b79d78e16e43cc83e8fa8ccd6427f6
refs/heads/master
2021-01-24T01:13:54.791000
2014-05-16T12:17:21
2014-05-16T12:17:52
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import template from django.contrib.auth.forms import AuthenticationForm register = template.Library() @register.simple_tag(takes_context=True) def ksp_login_next(context): request = context['request'] if 'next' in context: next_page = context['next'] elif 'next' in request.REQUEST: next_page = request.REQUEST['next'] else: next_page = request.get_full_path() return next_page @register.assignment_tag(takes_context=True) def ksp_login_auth_form(context): return AuthenticationForm
UTF-8
Python
false
false
2,014
18,811,956,787,860
281ce20f43c629cecae7a1dba2c403f19ee14c20
91cef398a5df7d0775bbd88a8af5a8bebc1874b0
/scripts/CalcBDT.py
890ea8653325f1750ee4a8c00446d708fe518a33
[]
no_license
EmmanueleSalvati/RazorCombinedFit_Git
https://github.com/EmmanueleSalvati/RazorCombinedFit_Git
eceed652ef9562752475103ddb75f06b24c8b6e1
a8184aedffedc5a68cf1eb82132db3f09607b1fa
refs/heads/master
2020-05-17T15:35:46.927516
2014-11-19T21:17:47
2014-11-19T21:17:47
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# CalcBDT.py -- dumps per-event razor variables into a flat TTree and
# evaluates a TMVA BDT on them.
# BranchDumper wraps an input ROOT tree: per-event accessors (thetaH*/topMass*/
#   wMass* pick the hemisphere according to tree.bestHemi; jetN* read jet
#   quantities ordered high-to-low by CSV b-tag via tag_jets), an event
#   selection, an MC normalisation weight (lumi * xs * filter / N), and
#   make_tree/set_tree to build and fill the output tree via a generated
#   C struct bound with rt.AddressOf.
# CalcBDT owns a BranchDumper plus a TMVA Reader; bdt() copies the MVA input
#   variables into float arrays and evaluates the booked 'BDT' method;
#   Fill() stores the event with its BDT score.
# NOTE(review): Python 2 code (print statement, xrange); requires ROOT/TMVA.
import ROOT as rt import array class BranchDumper(object): def __init__(self, tree, norm=None, xs=None): self.tree = tree self.norm = norm self.lumi = 19300. self.filter = 1.0 self.weightVal = 1.0 if norm is not None: point = (-1, -1) #point = (200,0) self.weightVal = (self.lumi*float(xs)*self.filter)/(1.0*int(self.norm[point])) print self.weightVal self.vars = None self.jets = range(6) def tag_jets(self): #order by btag output - high to low jets = sorted([(self.tree.jet_csv.at(i), i) for i in xrange(len(self.tree.jet_csv))], reverse=True) self.jets = [i for b, i in jets] def select(self): # TTbar MC return self.tree.metFilter and self.tree.hadBoxFilter and self.tree.hadTriggerFilter and self.tree.nCSVM > 0 and self.tree.MR >= 450 and self.tree.RSQ >= 0.03 and\ self.tree.nMuonTight == 0 and self.tree.nElectronTight == 0 and not self.tree.isolatedTrack10Filter and self.tree.nMuonLoose == 0 and self.tree.nElectronLoose == 0 # def select(self): # QCD CR from data # return self.tree.metFilter and self.tree.hadBoxFilter and self.tree.hadTriggerFilter and self.tree.nCSVL == 0 and self.tree.MR >= 450 and self.tree.RSQ >= 0.03 and\ # self.tree.nMuonTight == 0 and self.tree.nElectronTight == 0 and not self.tree.isolatedTrack10Filter and self.tree.nMuonLoose == 0 and self.tree.nElectronLoose == 0 def weight(self): return self.weightVal #return 1.0 def weightPU(self): return self.tree.pileUpWeightABCD def thetaH1(self): if self.tree.bestHemi == 1: return self.tree.hemi1ThetaH return self.tree.hemi2ThetaH def thetaH2(self): if self.tree.bestHemi == 2: return self.tree.hemi1ThetaH return self.tree.hemi2ThetaH def topMass1(self): if self.tree.bestHemi == 1: return self.tree.hemi1TopMass return self.tree.hemi2TopMass def topMass2(self): if self.tree.bestHemi == 2: return self.tree.hemi1TopMass return self.tree.hemi2TopMass def wMass1(self): if self.tree.bestHemi == 1: return self.tree.hemi1WMass return self.tree.hemi2WMass def wMass2(self): if self.tree.bestHemi == 2: return 
self.tree.hemi1WMass return self.tree.hemi2WMass def jetNpt(self, n): return self.tree.jet_pt.at(self.jets[n]) def jet1pt(self): return self.jetNpt(0) def jet2pt(self): return self.jetNpt(1) def jet3pt(self): return self.jetNpt(2) def jet4pt(self): return self.jetNpt(3) def jet5pt(self): return self.jetNpt(4) def jet6pt(self): return self.jetNpt(5) def jet1mult(self): return self.tree.jet_mult.at(self.jets[0]) def jet2mult(self): return self.tree.jet_mult.at(self.jets[1]) def jet3mult(self): return self.tree.jet_mult.at(self.jets[2]) def jet4mult(self): return self.tree.jet_mult.at(self.jets[3]) def jet5mult(self): return self.tree.jet_mult.at(self.jets[4]) def jet6mult(self): return self.tree.jet_mult.at(self.jets[5]) def jet1girth(self): return self.tree.jet_girth_ch.at(self.jets[0]) def jet2girth(self): return self.tree.jet_girth_ch.at(self.jets[1]) def jet3girth(self): return self.tree.jet_girth_ch.at(self.jets[2]) def jet4girth(self): return self.tree.jet_girth_ch.at(self.jets[3]) def jet5girth(self): return self.tree.jet_girth_ch.at(self.jets[4]) def jet6girth(self): return self.tree.jet_girth_ch.at(self.jets[5]) def nVertex(self): return self.tree.nVertex def headers(self): return ['nVertex', 'weightPU', 'weight', 'thetaH1', 'thetaH2', 'topMass1', 'topMass2', 'wMass1', 'wMass2', 'jet1pt', 'jet2pt', 'jet3pt', 'jet4pt', 'jet5pt', 'jet6pt', 'jet1mult', 'jet2mult', 'jet3mult', 'jet4mult', 'jet5mult', 'jet6mult', 'jet1girth', 'jet2girth', 'jet3girth', 'jet4girth', 'jet5girth', 'jet6girth'] #@staticmethod def headers_for_MVA(self): return ['thetaH1', 'thetaH2', 'topMass1', 'topMass2', 'wMass1', 'wMass2', 'jet1mult', 'jet2mult', 'jet3mult', 'jet4mult', 'jet5mult', 'jet6mult', 'jet1girth', 'jet2girth', 'jet3girth', 'jet4girth', 'jet5girth', 'jet6girth'] def values(self): values = [] for h in self.headers(): values.append(getattr(self, h)()) return values def make_tree(self, clone=False): rt.gROOT.ProcessLine(""" struct BranchDumper{\ Double_t weight;\ Double_t 
weightPU;\ Double_t thetaH1;\ Double_t thetaH2;\ Double_t topMass1;\ Double_t topMass2;\ Double_t wMass1;\ Double_t wMass2;\ Double_t jet1pt;\ Double_t jet2pt;\ Double_t jet3pt;\ Double_t jet4pt;\ Double_t jet5pt;\ Double_t jet6pt;\ Double_t jet1mult;\ Double_t jet2mult;\ Double_t jet3mult;\ Double_t jet4mult;\ Double_t jet5mult;\ Double_t jet6mult;\ Double_t jet1girth;\ Double_t jet2girth;\ Double_t jet3girth;\ Double_t jet4girth;\ Double_t jet5girth;\ Double_t jet6girth;\ Double_t MR;\ Double_t RSQ;\ Double_t nVertex;\ Double_t BDT;\ };""") from ROOT import BranchDumper if not clone: tree = rt.TTree('RMRTree', 'Multijet events') else: tree = self.tree.CloneTree(0) tree.SetDirectory(0) def setAddress(obj, flag): for branch in dir(obj): if branch.startswith('__'): continue tree.Branch(branch, rt.AddressOf(obj, branch), '%s/%s' % (branch, flag)) self.vars = BranchDumper() setAddress(self.vars, 'D') return tree def set_tree(self, tree, fill=True): #self.tag_jets() for h in self.headers(): setattr(self.vars, h, getattr(self, h)()) self.vars.MR = self.tree.MR self.vars.RSQ = self.tree.RSQ if fill: tree.Fill() class CalcBDT(object): def __init__(self, oldTree): self.sel = BranchDumper(oldTree) self.tree = self.sel.make_tree(False) self.reader = rt.TMVA.Reader() self.bdt_vars = {} for h in self.sel.headers_for_MVA(): self.bdt_vars['%s_var' % h] = array.array('f', [0]) self.reader.AddVariable(h, self.bdt_vars['%s_var' % h]) self.mr_var = array.array('f', [0]) self.rsq_var = array.array('f', [0]) self.nvertex_var = array.array('f', [0]) self.reader.AddSpectator('MR', self.mr_var) self.reader.AddSpectator('RSQ', self.rsq_var) self.reader.AddSpectator('nVertex', self.nvertex_var) # self.reader.BookMVA('BDT','/afs/cern.ch/user/w/wreece/public/Razor2012/BDT/Had/TMVAClassification_BDT.weights.xml') self.reader.BookMVA('BDT', 'TMVAClassification_BDT.weights.xml') self.bdt_val = 0.0 def select(self): return self.sel.select() def bdt(self): for h in self.sel.headers_for_MVA(): 
self.bdt_vars['%s_var'%h][0] = getattr(self.sel,h)() self.bdt_val = self.reader.EvaluateMVA('BDT') return self.bdt_val def Fill(self): self.sel.set_tree(self.tree, False) self.sel.vars.BDT = self.bdt() self.tree.Fill()
UTF-8
Python
false
false
2,014
10,986,526,389,569
93f7f9ff28fbdb0303e3c0cc3c47c72f3595bae2
f155cbaade22c2253de6dc475c94ede1a08ee212
/tento/web/app.py
b2f77f548b347abe097af0f2687ef4e610802fd2
[]
no_license
nl-seoultech/tento-server
https://github.com/nl-seoultech/tento-server
78e8d31ceacedca3c9b134e7986f1670c6303627
0c6e62953348c23ab92f15b774e71eb82dc97b8f
refs/heads/master
2016-09-05T23:15:52.894300
2014-09-22T06:38:37
2014-09-22T06:41:32
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
"""Application entry module: builds the Flask app and mounts the blueprints."""
from flask import Flask

from . import user, login, music

app = Flask(__name__)

# Each feature module exposes a blueprint as ``bp``; mount them under their
# own URL namespaces.
app.register_blueprint(user.bp, url_prefix='/users')
app.register_blueprint(login.bp, url_prefix='/login')
app.register_blueprint(music.bp, url_prefix='/musics')
UTF-8
Python
false
false
2,014
6,743,098,677,068
83f1d2360cab9cdae95ebb4a7e945de87a063364
f2cd8e94cd609349f591631fa233b2a62c980e4f
/heightmaps/test.py
413cf2532fb66c8aa2a3efec69a8d9e39d91f7bd
[]
no_license
JoeClacks/Khopesh
https://github.com/JoeClacks/Khopesh
ec3fc44f2a21f1810560066c75098a5838f44c9e
f9d9a9d583e162cfa8def856532b18f18d30ab66
refs/heads/master
2020-06-05T04:46:17.585882
2014-01-16T23:47:09
2014-01-16T23:47:09
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Converts the ETOPO1 16-bit *signed* heightmap PNG into 16-bit *unsigned*
# greyscale by flipping the sign bit of every pixel (values >= 32768 are
# shifted down, values < 32768 shifted up by 32768). Reads
# ETOPO1_Bed_g2.png and writes ETOPO1_Bed_g3.png into a new 'I'-mode image.
# NOTE(review): Python 2 script (print statements, xrange); depends on PIL.
# The per-pixel getpixel/putpixel loop is O(width*height) calls and is slow
# on the full ETOPO1 raster -- presumably acceptable as a one-off converter.
# -*- coding: utf-8 -*- #this file converts the 16-bit signed format into 16-bit unsigned greyscale #the command to get the png file from the orgional tiff is: #convert ETOPO1_Bed_g.tif -depth 16 -type Grayscale ETOPO1_Bed_g2.png import Image im = Image.open('ETOPO1_Bed_g2.png') #im = Image.open('test.png') xSize, ySize = im.size print im.size im2 = Image.new('I', im.size) for x in xrange(xSize): for y in xrange(ySize): pix = im.getpixel((x,y)) if(pix >= 32768): pix = pix - 32768 else: pix = pix + 32768 im2.putpixel((x,y),pix) print x, im2.save('ETOPO1_Bed_g3.png') #
UTF-8
Python
false
false
2,014
1,236,950,601,852
3014aef6b55093549681cdcaa3efc6738ac74cea
626525770a69c9b8c7725bd2baa7b2f85b7c6e88
/declare/__init__.py
e198153a1e60b40a90ad0543961db4b35b58311a
[ "MIT" ]
permissive
madjar/declare
https://github.com/madjar/declare
8bce643eb032c471f28d2d37735144f3d385505a
72a0cd5940fce3f003618ae2979fb6b93660810d
refs/heads/master
2021-01-21T13:11:54.761492
2013-09-30T09:09:39
2013-09-30T09:10:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Public package surface for ``declare``."""
from .core import Item, Field, MapField, prepare  # noqa
from .magic import MagicField

# Module-level singleton instance shared by all users of the magic-field API.
MAGIC = MagicField()
UTF-8
Python
false
false
2,013
8,856,222,591,750
c1c0b61046a3d9c366fe22b117ca44c91a7f6ae3
8ab04c839dc51bcf0a830d45bbebb9f930e57429
/PieClockScreenlet.py
72b027f470f39505c7ad7583bcc672c38cbc965e
[]
no_license
ITikhonov/pieclock
https://github.com/ITikhonov/pieclock
a8aef6bc0726e2b4ad2c2b77c2ef77c5172fc280
56490877f5a4c95c6ed85c35e00b4710dc80d17e
refs/heads/master
2016-09-05T09:12:45.689611
2010-06-21T06:47:00
2010-06-21T06:47:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# PieClockScreenlet -- a screenlets-based desktop clock that draws concentric
# "pie" rings with cairo: year fraction, day of month, weekday, hour, minute,
# and second, each as a segmented annulus.
# circle() draws one ring of ``parts`` segments and then, with
#   OPERATOR_DEST_OUT, punches out the already-elapsed fraction ``v`` so the
#   remaining time stays visible; pie() fills a single annular wedge between
#   two angles/radii. deg() converts degrees to radians (angles are offset by
#   -90 so 0 is at 12 o'clock).
# The screenlet schedules a redraw every 500 ms via gobject.timeout_add.
# NOTE(review): Python 2 (print statement); requires screenlets/cairo/gobject.
#!/usr/bin/env python import screenlets from screenlets import Screenlet from screenlets.options import IntOption, BoolOption, TimeOption, FloatOption from screenlets.options import StringOption, FontOption, ColorOption from screenlets.services import ScreenletService from cairo import OPERATOR_DEST_OUT,OPERATOR_OVER from math import pi from datetime import datetime from gobject import timeout_add from calendar import monthrange,isleap def deg(a): return a*(pi/180) def circle(ctx,parts,r,w,c,v,b=10,d=0.5): ctx.set_operator(OPERATOR_OVER) wl=360.0/parts for i in range(0,parts): dr=1-d*(i%2) pie(ctx,(wl*i,wl*i+wl-b),(r,r+w*dr),c) ctx.set_operator(OPERATOR_DEST_OUT) pie(ctx, (0,wl*int(v) + (wl-b)*(v-int(v)) ), (r-0.01,r+w+0.01),(1.0,1.0,1.0,0.8)) def pie(ctx,a,r,c): a=deg(a[0]-90),deg(a[1]-90) if len(c)==3: ctx.set_source_rgb(*c) else: ctx.set_source_rgba(*c) ctx.arc(0,0,r[0], a[0],a[1]) ctx.arc_negative(0,0,r[1], a[1],a[0]) ctx.fill() class PieClockScreenlet (Screenlet): __name__ = 'PieClockScreenlet' __version__ = '0.0' __author__ = 'Ivan Tikhonov' __desc__ = 'Pie Clocks' def __init__ (self, parent_window=None, **keyword_args): """Create a new ClockScreenlet instance.""" # call super (we define to use our own service here) Screenlet.__init__(self,**keyword_args) self.__timeout = timeout_add(500, self.update) def on_init (self): print "OK - Clock has been initialized." 
self.add_default_menuitems() def update(self): self.redraw_canvas() return True def on_draw (self, ctx): ctx.scale(self.scale,self.scale) ctx.scale(self.width/2,self.height/2) ctx.translate(1,1) self._color_yd = (0.0,0.0,0.0) self._color_md = (0.2,0.2,0.0) self._color_wd = (0.5,0.0,1.0) self._color_hr = (0.0,0.5,1.0) self._color_mn = (0.0,0.5,0.0) self._color_sc = (0.8,0.8,0.8) now=datetime.now() mr=monthrange(now.year,now.month)[1] dr=365+isleap(now.year) circle(ctx, 4, 0.9,0.1, self._color_yd, (now-datetime(now.year,1,1)).days/float(dr/4.0),2,d=0) circle(ctx, mr, 0.775,0.1, self._color_md, now.day+now.hour/24.0+now.minute/(24*60.0), 4, 0.2) circle(ctx, 7, 0.65,0.1, self._color_wd, now.weekday()+now.hour/24.0+now.minute/(24*60.0),d=0) circle(ctx, 24, 0.35,0.15, self._color_hr, now.hour+now.minute/60.0, 4, 0.2) circle(ctx, 3, 0.2,0.1, self._color_mn, now.minute/20.0,d=0) circle(ctx, 2, 0.01,0.1, self._color_sc, (now.second + now.microsecond/1000000.0)/30.0,d=0) if __name__ == "__main__": import screenlets.session screenlets.session.create_session(PieClockScreenlet)
UTF-8
Python
false
false
2,010
13,091,060,352,257
1a0c1c0bd7a9804d8518f8990e8ab913da360484
7543fb0f4ce3a8b2643538d9016156c73ee0a564
/mpm_old/src/materialmodel2d.py
42f8b9a8d183824c25615a2f0d53623d228fc71f
[]
no_license
raulrn/simple-mpm
https://github.com/raulrn/simple-mpm
e213a3e8a2670a602e931f48a52e3abc21639e1e
139d9c5b9c19701da2a92059dbe1bfdca32869f7
refs/heads/master
2020-12-25T23:10:00.350591
2013-10-04T04:18:29
2013-10-04T04:18:29
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np


#===============================================================================
class MaterialModel:
    """Dispatch wrapper over 2-D material models.

    The model is selected by name at construction; ``getStress`` looks the
    name up on the instance/class and calls the matching static method,
    which returns the stress tensor ``S`` and the Jacobian ``Ja`` of the
    deformation gradient ``F``.
    """

    def __init__(self, modelName, props):
        self.modelName = modelName  # name of the static method to dispatch to
        self.props = props          # material parameters, e.g. {'poisson', 'modulus'}

    def getStress(self, F):
        """Evaluate the selected model on deformation gradient ``F``.

        Returns a ``(S, Ja)`` tuple: 2x2 stress tensor and Jacobian.
        """
        model = getattr(self, self.modelName)
        S, Ja = model(self.props, F)
        return (S, Ja)

    def changeProps(self, props):
        """Replace the parameter dict used by subsequent getStress calls."""
        self.props = props

    @staticmethod
    def planeStrainNeoHookean(props, F):
        """Plane-strain neo-Hookean stress.  props: 'poisson', 'modulus'."""
        I2 = F * 0.
        I2[0, 0] = I2[1, 1] = 1.
        v = props['poisson']
        E = props['modulus']
        l = E * v / ((1. + v) * (1. - 2. * v))  # Lame's first parameter
        m = 0.5 * E / (1. + v)                  # shear modulus
        Ja = F[0, 0] * F[1, 1] - F[1, 0] * F[0, 1]  # det(F) for a 2x2 matrix
        S = I2 * l * np.log(Ja) / Ja + m / Ja * (np.dot(F, F.T) - I2)
        return (S, Ja)

    @staticmethod
    def planeStrainNeoHookeanMaxStress(props, F):
        """Neo-Hookean with a von Mises cutoff.

        props: 'poisson', 'modulus', 'maxStress'.  When the von Mises
        stress exceeds 'maxStress' the material is treated as failed and
        (zero stress, unit Jacobian) is returned.
        """
        I2 = np.eye(2)
        v = props['poisson']
        E = props['modulus']
        sMax = props['maxStress']
        l = E * v / ((1. + v) * (1. - 2. * v))
        m = 0.5 * E / (1. + v)
        Ja = np.linalg.det(F)
        S = I2 * l * np.log(Ja) / Ja + m / Ja * (np.dot(F, F.T) - I2)
        # BUG FIX: the original called the bare name ``vonMises``; a static
        # method has no enclosing class scope, so that raised NameError at
        # runtime.  Qualify the call with the class instead.
        if MaterialModel.vonMises(S) > sMax:
            S = I2 * 0.
            Ja = 1.
        return (S, Ja)

    @staticmethod
    def vonMises(S):
        """Von Mises equivalent stress of a 2x2 stress tensor."""
        return np.sqrt(S[0, 0] * S[0, 0] - S[0, 0] * S[1, 1] +
                       S[1, 1] * S[1, 1] + 3. * S[1, 0] * S[0, 1])
UTF-8
Python
false
false
2,013
1,812,476,244,333
d91b4189aa806e844e7aaef70ad124c8086dfb8c
577bbca525cf6d3fdcf9859597e40edbe1a312ac
/Python_learning/str2dict.py
9f9a9a2ad1b8e5c61548607a46d25bb8583186da
[]
no_license
elenmax0607/test
https://github.com/elenmax0607/test
a5e9eb25eaf6531a240c52757d9a07ad423e1205
102beeb78a273142b20bb68d8a7443131d038623
refs/heads/master
2016-08-07T04:33:46.039090
2014-02-15T03:57:36
2014-02-15T03:57:36
16,855,713
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Scratch script: a fully commented-out loop-timing experiment (summing two
# lists a million times), followed by a quick demonstration that slicing a
# string with [:-1] drops its last character ('abcd1234' -> 'abcd123').
# NOTE(review): Python 2 (print statement, xrange in the commented code).
#from time import time #t = time() #lista = [1,2,3,4,5,6,7,8,9,10] #listb =[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.01] #len1=len(lista) #len2=len(listb) #for i in xrange (1000000): # for a in xrange(len1): # temp=lista[a] # for b in xrange(len2): # x=temp+listb[b] #print x #print "total run time:" #print time()-t a = 'abcd1234' print a[:-1]
UTF-8
Python
false
false
2,014
10,170,482,593,879
f2324d2973deddfa6982de14e68968ced0fa5910
20d24dbb49e1e1a62bc1c1a1ad1803681821bf06
/tools/bson_splitter.py
1aa9494b62e389bb53bd282fe9f2282fe3e2f9e7
[ "Apache-2.0" ]
permissive
criteo/mongo-hadoop
https://github.com/criteo/mongo-hadoop
7790dc9c63baf778c43cbbfff3c42e548aaae560
f5426411d06967a10279b07e7bf9de49bda282aa
refs/heads/master
2021-01-21T00:43:50.856829
2013-09-12T11:41:48
2013-09-12T11:41:48
12,541,149
1
0
null
true
2013-09-12T11:41:48
2013-09-02T13:53:23
2013-09-12T11:41:48
2013-09-12T11:41:48
80,231
null
0
0
Java
null
null
# bson_splitter -- walks a .bson dump and writes a hidden
# ".<name>.splits" companion file of BSON documents {s: start_offset,
# l: length}, cutting the input into chunks of at most SPLIT_SIZE (64 MiB)
# without splitting a document across chunks.  Each BSON document's first
# 4 bytes are its own little-endian total size (which includes those 4
# bytes, hence the "- 4" after unpacking).  The final partial chunk is
# flushed when fewer than 4 size bytes remain.
# NOTE(review): Python 2 (long(); text-mode 'r'/'w' open of binary data --
# would corrupt on Windows); depends on pymongo/bson.
import sys import struct import pymongo from bson import BSON import os SPLIT_SIZE = 64 * 1024 * 1024 def main(argv): split_bson(argv[0]) def split_bson(path): bsonfile_path = os.path.abspath(path) splitsfile_out = os.path.join(os.path.dirname(bsonfile_path), "." + os.path.basename(bsonfile_path) + ".splits") bsonfile = open(bsonfile_path,'r') splitsfile = open(splitsfile_out,'w') file_position = 0 cur_split_start = 0 cur_split_size = 0 while True: size_bits = bsonfile.read(4) if len(size_bits) < 4: if cur_split_size > 0: #print {"start":cur_split_start, "length": bsonfile.tell() - cur_split_start} splitsfile.write(BSON.encode({"s":long(cur_split_start), "l": long(bsonfile.tell() - cur_split_start)})) break size = struct.unpack("<i", size_bits)[0] - 4 # BSON size byte includes itself file_position += 4 if cur_split_size + 4 + size > SPLIT_SIZE: #print {"start":cur_split_start, "length": bsonfile.tell() - 4 - cur_split_start} splitsfile.write(BSON.encode({"s":long(cur_split_start), "l": long(bsonfile.tell() - 4 - cur_split_start)})) cur_split_start = bsonfile.tell() - 4 cur_split_size = 0 else: pass bsonfile.seek(file_position + size) file_position += size cur_split_size += 4 + size if __name__ == '__main__': main(sys.argv[1:])
UTF-8
Python
false
false
2,013
10,823,317,635,030
07331370e3d693d643ffbab2080288af21c1ceb8
9d795fb88259782cee7da9a9dfdb1d085634a4a7
/Tests/dna_starts_with.py
988fac882666e94f44ca231b35d3aebf0b11e3c8
[ "CC-BY-SA-3.0" ]
non_permissive
nataliethorne/adelaide_swc
https://github.com/nataliethorne/adelaide_swc
83e485ba512a4874a77424ae776eeccdd5a8c0e8
5560f857119b8c2776a8500ce04b8769c6304030
refs/heads/master
2020-05-03T04:56:45.138802
2013-09-26T06:55:36
2013-09-26T06:55:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def dna_starts_with(input_dna, start_dna):
    """Return True when ``input_dna`` begins with the sequence ``start_dna``.

    Works on any sliceable sequence; an empty ``start_dna`` matches anything.
    """
    prefix = input_dna[:len(start_dna)]
    return prefix == start_dna
UTF-8
Python
false
false
2,013
2,791,728,752,057
b75e298c4e094d3e7cd544d73efcb59e9772d380
495171fa04685c63c61f630531a583ce46bb87d6
/Final_Lab_Python/Commands.py
e4de67522f4821c664be9265c50dd8f09eff7c68
[]
no_license
hlfshell/Robot-Framework-RBE3002
https://github.com/hlfshell/Robot-Framework-RBE3002
6165ba734c4a0ed1ec42739bb0cad1a3532ae4cb
cefebdb5819446c4637c3e0f4bdda274d6147e0d
refs/heads/master
2021-01-23T22:10:32.516888
2012-04-26T01:52:21
2012-04-26T01:52:21
4,143,152
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Commands.py
Author: Keith Chester

This holds all possible commands - should be kept in sync with Commands.h
on robot side.
'''

# Identification / lifecycle
setID = '\x00'
requestID = '\x01'
InitializeRobot = '\x02'
reset = '\x03'

# Sensor queries
requestPinValue = '\xa0'
requestADCchannel = '\xa1'

# Motor and actuator control
enableMotors = '\xc0'
disableMotors = '\xc1'
setPID = '\xc3'
setMotorRight = '\xc4'
setMotorLeft = '\xc5'
sendStepperTo = '\xc6'
setServoTo = '\xd0'
setMotorLeftForward = '\xd1'
setMotorRightForward = '\xd3'
setMotorLeftReverse = '\xd2'
setMotorRightReverse = '\xd4'

# Protocol bytes (note: PING and SUCCESS intentionally share 0xFF)
PING = '\xff'
SUCCESS = '\xff'
FAILURE = '\x00'
UTF-8
Python
false
false
2,012
16,870,631,568,839
82a30ba9cbc1bfffdf71c89d757519610a03fb78
310f654682ed30207994d4eebc61a5d0bfd1ccdf
/example_func.py
21dc62392b6fdf62de2dd2f585692ca6e0e3aecc
[]
no_license
AndyTimmins/adelaide_swc2
https://github.com/AndyTimmins/adelaide_swc2
607b8f1be70264dd2fa672c7c8aae9cdabfe6d24
ab7700bebf1e89ffd37582ee749fbca1b7221c3d
refs/heads/master
2021-01-19T14:59:08.352273
2013-09-25T04:51:23
2013-09-25T04:51:23
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def sum_two_numbers(first_number, second_number):
    """Return the sum of the two arguments."""
    result = first_number + second_number
    return result


# FIX: the bare ``print sum_two_numbers(2, 7)`` statement is a syntax error
# under Python 3; the parenthesized form prints "9" identically on both
# Python 2 and Python 3.
print(sum_two_numbers(2, 7))
UTF-8
Python
false
false
2,013
15,092,515,107,573
6730806b61ad3487068f96f5b71a0703ad2a1052
cd3ef1af3167d3a16f37157156dede151bf99f34
/ranking.py
dfb003b3f09f1570f9fd19211c863e416df140cd
[]
no_license
tclh123/SSSTA-Ranking
https://github.com/tclh123/SSSTA-Ranking
6820ad75d850f21424febd57f38a5aa503d5f2e7
d27c69b253d2e92b38568989f90a993f3c9ebb1d
refs/heads/master
2016-09-06T01:34:29.001982
2012-11-30T20:34:49
2012-11-30T20:34:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# ranking.py -- turns per-problem scores (produced by scoring() from the
# local ``scoring`` module) into an overall ranking: sums each user's
# problem scores, rounds to the nearest integer (+0.5 truncation), and
# returns (username, points) pairs sorted by points, highest first.
# test() exercises ranking() on a captured contest-standings HTML table
# with 7 problems and 13 players.
# NOTE(review): Python 2 (dict.iteritems, print statement).
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'tclh123' from scoring import * def ranking(html, prob_num, player_num, max_pts=1000.0): """ return [('mizuki_tw', 1503), ('brotherroot', 1498), ... """ rank = {} scores = scoring(html, prob_num, player_num, max_pts) for username in scores: tmp = 0.0 for prob_id in scores[username]: tmp += scores[username][prob_id] rank[username] = int(tmp+0.5) return sorted(rank.iteritems(), key = lambda asd:asd[1] ,reverse = True) # return rank def test(): htmltest = "<table><tbody><tr><th>Rank</th><th>Id</th><th>Solve</th><th>Penalty</th><th>A</th><th>B</th><th>C</th><th>D</th><th>E</th><th>F</th><th>G</th></tr><tr><td>1</td><td>mizuki_tw</td><td>7</td><td>23:49:57</td><td>2:24:52(-5)</td><td>2:09:32(-3)</td><td>2:30:49</td><td>3:20:24(-2)</td><td>2:57:19(-1)</td><td>3:17:45</td><td>3:29:16</td><td></td></tr><tr><td>2</td><td>biamgo</td><td>7</td><td>24:10:13</td><td>2:27:22(-1)</td><td>2:46:14</td><td>3:05:14(-3)</td><td>3:14:22</td><td>3:31:13(-1)</td><td>3:39:16</td><td>3:46:32</td><td></td></tr><tr><td>3</td><td>ktboyyy</td><td>7</td><td>25:23:41</td><td>2:57:30(-5)</td><td>2:27:09</td><td>2:38:57</td><td>4:35:03(-2)</td><td>3:16:02</td><td>3:29:38</td><td>3:39:22</td><td></td></tr><tr><td>4</td><td>brotherroot</td><td>7</td><td>27:00:37</td><td>1:34:50(-2)</td><td>2:17:51(-5)</td><td>2:28:18(-1)</td><td>2:38:20</td><td>3:17:40(-7)</td><td>3:07:47(-1)</td><td>3:55:51(-7)</td><td></td></tr><tr><td>5</td><td>cripout</td><td>7</td><td>34:08:04</td><td>2:38:23(-5)</td><td>3:02:46(-1)</td><td>3:25:04(-1)</td><td>4:40:00(-2)</td><td>5:11:09(-5)</td><td>5:22:05</td><td>4:48:37(-1)</td><td></td></tr><tr><td>6</td><td>neveralso</td><td>7</td><td>34:23:34</td><td>2:43:47(-12)</td><td>2:35:28</td><td>2:40:37</td><td>6:01:28(-8)</td><td>3:11:44</td><td>5:32:14(-2)</td><td>3:58:16(-1)</td><td></td></tr><tr><td>7</td><td>gswxw</td><td>7</td><td>43:00:43</td><td>5:54:53(-13)</td><td>2:40:14</td><td>4:15:51(-5)</td><td>5:46:58(-4)</t
d><td>5:23:49</td><td>5:02:10(-1)</td><td>6:16:48</td><td></td></tr><tr><td>8</td><td>h549570564</td><td>7</td><td>45:21:08</td><td>5:56:42(-4)</td><td>3:55:32(-5)</td><td>4:12:00</td><td>6:53:57(-7)</td><td>5:41:32(-5)</td><td>5:05:37</td><td>6:35:48</td><td></td></tr><tr><td>9</td><td>perfect28</td><td>5</td><td>26:03:15</td><td>4:55:46(-5)</td><td>5:19:31(-3)</td><td>5:23:16(-1)</td><td> (-4)</td><td> (-8)</td><td>3:44:30</td><td>3:40:12</td><td></td></tr><tr><td>10</td><td>tclh123</td><td>4</td><td>11:56:44</td><td>2:35:18(-1)</td><td></td><td></td><td></td><td>3:00:14</td><td>3:00:28</td><td>3:00:44</td><td></td></tr><tr><td>11</td><td>lkid</td><td>3</td><td>14:13:10</td><td>3:31:45(-2)</td><td>4:49:11(-1)</td><td>4:52:14</td><td> (-2)</td><td></td><td></td><td></td><td></td></tr><tr><td>12</td><td>z451538473</td><td>1</td><td>3:15:39</td><td>2:55:39(-1)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>13</td><td>frustratingman</td><td>1</td><td>3:45:07</td><td>3:45:07</td><td> (-3)</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>14</td><td>zzs1324</td><td>0</td><td>0:00:00</td><td> (-7)</td><td> (-1)</td><td></td><td></td><td></td><td></td><td></td><td></td></tr></tbody></table>" print ranking(htmltest, 7, 13) if __name__ == "__main__": test()
UTF-8
Python
false
false
2,012
9,036,611,222,138
825663fda964de648ba36260d186e5bc902e6993
aeb910d12562abdbd428fafb43106f15ac2649e6
/homepage/cached_templates/templates/index.html.py
ad9cbde189a1fc24eb10e96b9b407c3a52914d83
[]
no_license
tk4d/MyStuffWebpage
https://github.com/tk4d/MyStuffWebpage
6482a5d02cdd64c21e80ab1842c11e1f438bf1e2
31f696649ad749145b345b8d06267a2a723f6f97
refs/heads/master
2021-01-23T17:30:32.188655
2014-01-21T14:59:57
2014-01-21T14:59:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Mako-generated template cache for homepage/templates/index.html -- machine
# generated; DO NOT EDIT BY HAND, regenerate by modifying the source
# template.  render_body() emits the page body (inheriting from base.htm via
# _mako_inherit) and defers the 'center' block to render_center(), which
# writes the logo image (prefixed with STATIC_URL) and the store blurb.
# -*- coding:ascii -*- from mako import runtime, filters, cache UNDEFINED = runtime.UNDEFINED __M_dict_builtin = dict __M_locals_builtin = locals _magic_number = 9 _modified_time = 1389655070.646056 _enable_loop = True _template_filename = 'C:\\Python33\\mystuff\\homepage\\templates/index.html' _template_uri = 'index.html' _source_encoding = 'ascii' import os, os.path, re _exports = ['center'] def _mako_get_namespace(context, name): try: return context.namespaces[(__name__, name)] except KeyError: _mako_generate_namespaces(context) return context.namespaces[(__name__, name)] def _mako_generate_namespaces(context): pass def _mako_inherit(template, context): _mako_generate_namespaces(context) return runtime._inherit_from(context, 'base.htm', _template_uri) def render_body(context,**pageargs): __M_caller = context.caller_stack._push_frame() try: __M_locals = __M_dict_builtin(pageargs=pageargs) def center(): return render_center(context._locals(__M_locals)) STATIC_URL = context.get('STATIC_URL', UNDEFINED) __M_writer = context.writer() # SOURCE LINE 2 __M_writer('\n\n') if 'parent' not in context._data or not hasattr(context._data['parent'], 'center'): context['self'].center(**pageargs) # SOURCE LINE 9 __M_writer(' \n') return '' finally: context.caller_stack._pop_frame() def render_center(context,**pageargs): __M_caller = context.caller_stack._push_frame() try: def center(): return render_center(context) STATIC_URL = context.get('STATIC_URL', UNDEFINED) __M_writer = context.writer() # SOURCE LINE 4 __M_writer('\n    <img src="') # SOURCE LINE 5 __M_writer(str( STATIC_URL )) __M_writer('homepage/images/mystufflogo.png"/>\n    <h3>The #1 Local Photo Store</h3>\n    At Mystuff we offer the best products and services for your everyday photo needs.<br>\n    Be sure to check out our daily deals for incredible savings!\n') return '' finally: context.caller_stack._pop_frame()
UTF-8
Python
false
false
2,014
16,587,163,713,010
e57a5a76519eae230eacdc0d7896ccedb9cf2262
79b3036b797bbb1b49fe4cca33e9cc752a332cc5
/wallet/model_signup.py
24a8316238665048f08e17c260f97daf7c8268c9
[]
no_license
mohakrajendra/dbmswallet
https://github.com/mohakrajendra/dbmswallet
ffe64f43cf3e0f12c9a03e6df7bba607430c7239
b66f17cd70c7ad0a08e112d4ca3a875af6f9b840
refs/heads/master
2020-05-30T05:37:40.769931
2012-07-12T08:23:40
2012-07-12T08:23:40
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models


class userprofile(models.Model):
    """Sign-up profile captured for a wallet user."""
    name = models.CharField(max_length=100)
    website = models.CharField(max_length=200)
    email = models.EmailField()
    birth_date = models.DateField()
    sex = models.CharField(max_length=50)
    country = models.CharField(max_length=100)
    # NOTE(review): stored as a plain CharField -- confirm the password is
    # hashed before it reaches the model.
    password = models.CharField(max_length=100)
    mailing = models.BooleanField()

    def __unicode__(self):
        return self.name

#admin.site.register(userprofile)
UTF-8
Python
false
false
2,012
7,679,401,554,646
9b2f496554e746965b542d59d158e1e7d3bb6c9f
ba1066b0860a73020eb5c4ee0021f68e3639327c
/Sujet 1/machine.py
dbb27b2dc8f0730571f59bba3d99f69f9ba0fd36
[]
no_license
Hiestaa/TARP-ODNL
https://github.com/Hiestaa/TARP-ODNL
cf51678ce4940d2d84a167317eb70298863cc9b1
3a09054558ddc188f80abfd13ea51e1e99d64d68
refs/heads/master
2021-01-25T07:27:54.313545
2014-01-13T01:14:33
2014-01-13T01:14:33
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# machine.py -- simulates a production line: each Machine works on one
# operation of a Task at a time, and machines are chained via ``next`` so a
# finished operation hands the task to the following machine.
# assignTask() pulls the task's next operation, logs it and appends it to
#   work_history; update(time) advances the current operation by one tick
#   and, on completion, either queues the task in ``waitingtask`` (FIFO)
#   for the downstream machine or -- on the last machine of the chain --
#   fires ``ontaskdone``.  update() also ticks ``next``, so updating the
#   head machine drives the whole chain once per call, and it dispatches a
#   waiting task as soon as the downstream machine is idle.
# getNbMachines() counts the chain length recursively.
# total_working_time / total_waiting_time accumulate per-tick statistics.
import Log from task import Task class Machine: """Represente une machine capable d'executer une operation De plus, assigner une tache a un machine sous la forme d'une liste d'operation permet d'automatiser le passage de l'operation suivante a la machine suivante lorsque l'operation courante est terminee""" def __init__(self, mid, log): self.id = mid self.next = None # prochaine machine de la liste self.currentoptime = 0 # temps total necessaire pour l'operation en cours self.currentopstate = 0 # etat du travail sur l'operation en cours self.working = False # la machine est-elle en train de travailler ? self.currenttask = None self.waitingtask = [] self.onopdone = None # callback a appeler lorsque l'operation est terminee self.ontaskdone = None # callback a appeler lorsque la tache est terminee self.time = 0 self.total_working_time = 0 self.total_waiting_time = 0 self.work_history = [] # list of tupples (taskid, opid, time of work) self.log = log def assignTask(self, task, onopdone, ontaskdone): self.log.log_event(self.time, "MachineEvent", "Machine "+str(self.id)+": assign task "+str(task.id)+" operation "+str(task.opdone)) self.currenttask = task self.assign_operation(self.currenttask.get_next_op()) self.start() self.onopdone = onopdone self.ontaskdone = ontaskdone self.work_history.append((task.id, task.opdone, self.currentoptime)) def assign_operation(self, optime): self.log.log_event(self.time, "MachineEvent", "Machine "+str(self.id)+": assign op time="+str(optime)) self.currentoptime = optime self.currentopstate = optime self.working = False def start(self): self.working = True def on_next_op_done(self): if self.waitingtask: task = self.waitingtask.pop(0) self.next.assignTask(task, self.on_next_op_done, self.ontaskdone) def update(self, time): self.time = time if self.working: self.total_working_time += 1 # if machine is working, decrease current op counter self.currentopstate -= 1 if self.currentopstate == 0: # the machine ended it's work # log that the 
operation has finished self.log.log_event_info(time, 'MachineEvent', "Machine " + str(self.id) + " just finished task " + str(self.currenttask.id) + " operation " + str(self.currenttask.opdone) + " (time: " + str(self.currentoptime)+')') if self.next: # notify task that it is complete self.currenttask.op_done() # add to the fifo of task waiting for the next machine to be free self.waitingtask.append(self.currenttask) else: # if next is None, this is the last machine of the chain # notify that the current task is done self.ontaskdone(self.currenttask) # note that we are not working anymore self.working = False # notify that the operation is done and machine is free self.onopdone() else: self.total_waiting_time += 1 # if there is a machine after this if self.next: # update the machine self.next.update(self.time) if self.waitingtask and self.next.working == False: # assign the first task = self.waitingtask.pop(0) self.next.assignTask(task, self.on_next_op_done, self.ontaskdone) def getNbMachines(self): if not self.next: return 1 else: return 1 + self.next.getNbMachines()
UTF-8
Python
false
false
2,014