{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \r\n\r\n\r\n 
//]]>\r\n"},"src_encoding":{"kind":"string","value":"WINDOWS-1252"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":1811,"cells":{"__id__":{"kind":"number","value":9234179725005,"string":"9,234,179,725,005"},"blob_id":{"kind":"string","value":"c473413ee148802bb77c6e4d321de7bd87fcfbaa"},"directory_id":{"kind":"string","value":"5e27c7f5426c169fd348b26e94b65c35f9cdc459"},"path":{"kind":"string","value":"/dragonfly/convert/trigger_true.py"},"content_id":{"kind":"string","value":"08df27650994e17a7b1fb2fa5e53bc28fdd48e2d"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"agoose77/hivesystem"},"repo_url":{"kind":"string","value":"https://github.com/agoose77/hivesystem"},"snapshot_id":{"kind":"string","value":"e2c9c27408233b5794151ca74f541d2e6063d58a"},"revision_id":{"kind":"string","value":"e1f55c5ea530a989477edb896dcd89f3926a31b8"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-07-21T23:07:37.178856","string":"2020-07-21T23:07:37.178856"},"revision_date":{"kind":"timestamp","value":"2014-08-23T02:13:19","string":"2014-08-23T02:13:19"},"committer_date":{"kind":"timestamp","value":"2014-08-23T02:13:19","string":"2014-08-23T02:13:19"},"github_id":{"kind":"number","value":20776359,"string":"20,776,359"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_c
ount":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# untested\n\nimport bee\nfrom bee.segments import *\n\n\nclass trigger_true(bee.worker):\n inp = antenna(\"push\", \"trigger\")\n outp = output(\"push\", \"bool\")\n b_outp = buffer(\"push\", \"bool\")\n startvalue(b_outp, True)\n connect(b_outp, outp)\n trigger(inp, b_outp)\n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1812,"cells":{"__id__":{"kind":"number","value":15178414462911,"string":"15,178,414,462,911"},"blob_id":{"kind":"string","value":"d66da57458abcd43ce888bee54ecc8104da6607a"},"directory_id":{"kind":"string","value":"2dc81ba0488a5f9ecf01bd83e5b18bd781f617ad"},"path":{"kind":"string","value":"/registration/model/basis.py"},"content_id":{"kind":"string","value":"014ac18d17bcc7037eff1769f19c0028b334ef92"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"demianw/pyMedImReg-public"},"repo_url":{"kind":"string","value":"https://github.com/demianw/pyMedImReg-public"},"snapshot_id":{"kind":"string","value":"857f9c2cbc00a875b4d1cf8118159f828e3a855f"},"revision_id":{"kind":"string","value":"f6532f68d3a00fc982fc79053079f0a71405a5e4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-11-26T16:30:56.264948","string":"2020-11-26T16:30:56.264948"},"revision_date":{"kind":"timestamp","value":"2013-07-02T21:45:07","string":"2013-07-02T21:45:07"},"committer_date":{"kind":"timestamp","value":"2013-07-02T21:45:07","string":"2013-07-02T21:45:07"},"github_id":{"kind":"number","value":10549991,"string":"10,549,991"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"r\"\"\"\nModule with the basic classes defining transformation models\n\"\"\"\n\nimport numpy\nfrom ..util import vectorized_dot_product\n\n__all__ = ['Model']\n\nclass Model(object):\n r\"\"\"Base Class for Transformations\n\n A transformation is defined as a map:\n\n .. 
math::\n \\phi: \\Omega \\mapsto \\Omega\n\n\n where :math:`\\Omega \\subseteq \\Re^N` and the\n transform has a parameter vector :math:`\\theta \\in \\Re^M`\n with :math:`M` the number of parameters\n\n Notes\n ----------\n We define :math:`\\phi(x; \\theta) = (\\phi_1(x;\\theta),\\ldots, \\phi_N(x;\\theta))`, then\n the jacobian of the transform with respect to the parameter :math:`\\theta` as\n\n .. math::\n [D_\\theta\\phi(x; \\theta)]_{ij} = \\frac{\\partial \\phi_i(x; \\theta)}{\\partial \\theta_j},\n i=1\\ldots N, j=1\\ldots M\n\n and the jacobian of the transform with respect to the location :math:`x` as\n\n .. math::\n [D_x\\phi(x; \\theta)]_{ij} = \\frac{\\partial \\phi_i(x; \\theta)}{\\partial x_j},\n i, j =1\\ldots N\n\n\n attributes\n ----------\n `parameter` : array-like, shape (n_parameters)\n Stores the parameter vector :math:`\\theta` of the transform.\n\n `identity` : array-like, shape (n_parameters)\n Stores the parameter value :math:`\\theta_0` such that :math:`\\phi(x; \\theta_0) = x`.\n\n `bounds` : array-like, shape (n_parameters, 2)\n Stores the upper and lower bounds for each component of the parameter vectors\n :math:`\\theta` such that :math:`\\text{bounds}_{i0} \\leq \\theta_i \\leq \\text{bounds}_{i1}`\n\n\n References\n ----------\n \"\"\"\n\n def __init__(self):\n self.parameter = self.identity\n\n @property\n def identity(self):\n r\"\"\"\n Stores the parameter value :math:`\\theta_0` such that :math:`\\phi(x; \\theta_0) = x`.\n \"\"\"\n return None\n\n def transform_points(self, points):\n r\"\"\"Transform a set of points.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Points to be transformed\n\n\n Returns\n -------\n y : array-like, shape (n_points, n_dimensions)\n :math:`y = \\phi(x)`\n\n \"\"\"\n raise NotImplementedError()\n\n def transform_vectors(self, points, vectors):\n r\"\"\"Transform a set of vectors located in space.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, 
n_dimensions)\n Location of the vectors to be transformed\n\n v : array-like, shape (n_points, n_dimensions)\n Vectors to be transformed\n\n Returns\n -------\n w : array-like, shape (n_points, n_dimensions)\n :math:`w = D_x^T\\phi(x) \\cdot w`\n\n where :math:`D_x\\phi(x)` is the Jacobian of :math:`\\phi(x)`\n with respect to the spatial position :math:`x`\n \"\"\"\n\n jacobians = self.jacobian_position(points)\n res = vectorized_dot_product(jacobians, vectors[..., None])[..., 0]\n return numpy.atleast_2d(res)\n\n def transform_tensors(self, points, tensors):\n r\"\"\"Transform a set of tensors located in space.\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the vectors to be transformed\n\n T : array-like, shape (n_points, n_dimensions, n_dimensions)\n Tensors to be transformed\n\n Returns\n -------\n S : array-like, shape (n_points, n_dimensions)\n :math:`S = D^T_x\\phi(x) \\cdot T \\cdot D_x\\phi(x)`\n\n where :math:`D_x\\phi(x)` is the Jacobian of :math:`\\phi(x)`\n with respect to the spatial position :math:`x`\n \"\"\"\n jacobians = self.jacobian_position(points)\n return vectorized_dot_product(\n vectorized_dot_product(jacobians.swapaxes(-1, -2), tensors),\n jacobians\n )\n\n def jacobian(self, points):\n r\"\"\"Transposed Jacobian of the transform with respect to its parameters\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions)\n :math:`J = D^T_\\theta\\phi(x)`\n \"\"\"\n\n raise NotImplementedError()\n\n def jacobian_position(self, points):\n r\"\"\"Transposed Jacobian of the transform with respect to its location\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_dimensions, n_dimensions)\n :math:`J = D^T_x\\phi(x)`\n 
\"\"\"\n\n raise NotImplementedError()\n\n def jacobian_parameter_jacobian_position(self, points):\n r\"\"\"Iterated Transposed Jacobian of the transform with respect to\n its parameter and Location\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J_{ijk} = \\frac{\\partial \\phi_k(x)}{\\partial \\theta_i \\partial x_j}`\n \"\"\"\n raise NotImplementedError()\n\n def jacobian_vector_matrices(self, points, vectors):\n r\"\"\"Transposed Jacobian with respect to the transform parameter\n of the expression :math:`D^T_x \\phi(x) \\cdot v`\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n v : array-like, shape (n_points, n_dimensions)\n Vectors at each point of x\n\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J = D^T_\\theta[D^T_x\\phi(x) \\cdot v]`\n \"\"\"\n jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points)\n\n DjacT_vector = vectorized_dot_product(\n jacobian_parameter_jacobian_position, # .swapaxes(-1, -2),\n vectors[:, None, :, None]\n )[:, :, :, 0]\n\n return DjacT_vector\n\n def jacobian_tensor_matrices(self, points, tensors):\n r\"\"\"Transposed Jacobian with respect to the transform parameter\n of the expression :math:`D_x^T \\phi(x) \\cdot T \\cdot D_x\\phi(x)`\n\n\n Parameters\n ----------\n x : array-like, shape (n_points, n_dimensions)\n Location of the Jacobian to be calculated\n\n T : array-like, shape (n_points, n_dimensions, n_dimensions)\n Tensors at each point of x\n\n\n Returns\n -------\n J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions)\n :math:`J = D^T_\\theta[D^T_x\\phi(x) \\cdot T\\cdot D_x\\phi(x)]`\n \"\"\"\n jacobians = self.jacobian_position(points)\n 
jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points)\n\n tensor_jac = vectorized_dot_product(tensors, jacobians)\n DjacT_tensor_jac = vectorized_dot_product(\n jacobian_parameter_jacobian_position.swapaxes(-1, -2),\n tensor_jac[:, None, :, :]\n )\n\n return DjacT_tensor_jac + DjacT_tensor_jac.swapaxes(-1, -2)\n\n def norm(self, points):\n raise NotImplementedError()\n\n @property\n def bounds(self):\n r\"\"\"\n Stores the upper and lower bounds for each component of the parameter vectors\n :math:`\\theta` such that :math:`\\text{bounds}_{i0} \\leq \\theta_i \\leq \\text{bounds}_{i1}`\n \"\"\"\n return None\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1813,"cells":{"__id__":{"kind":"number","value":7885559996688,"string":"7,885,559,996,688"},"blob_id":{"kind":"string","value":"7922c1c0439c9c6a264718ec7bac604ca754dc37"},"directory_id":{"kind":"string","value":"d08e8a6f0254a1c632ad3fd783cb9547198841c2"},"path":{"kind":"string","value":"/ahr2127_Project_3_Python/gomoku.py"},"content_id":{"kind":"string","value":"18a5e5bfa6659e1536bdd5f0b3e8472ba81d3b90"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"adamreis/AI_projects"},"repo_url":{"kind":"string","value":"https://github.com/adamreis/AI_projects"},"snapshot_id":{"kind":"string","value":"52ebd725d3f1974840b58fa7f2e92ca38f00203e"},"revision_id":{"kind":"string","value":"abe2a4ab2199fbd838ae0dd888e654d245bf00f9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-19T13:39:31.562845","string":"2020-05-19T13:39:31.562845"},"revision_date":{"kind":"timestamp","value":"2013-11-22T16:48:45","string":"2013-11-22T16:48:45"},"committer_date":{"kind":"timestamp","value":"2013-11-22T16:48:45","string":"2013-11-22T16:48:45"},"github_id":{"kind":"number","value":14159265,"string":"14,159,265"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"__date__ =\"Nov 6, 2013\"\n\n\nimport sys\nimport time\n\nfrom gomoku_state import GomokuState\nfrom gomoku_game import GomokuGame\nfrom gomoku_player import HumanPlayer, RandomPlayer, SmartPlayer\n\n\n\ndef usage():\n print \"\"\"\n usage:\n\n python gomoku.py [mode] [board dimension] [winning chain length] [time limit]\n\n ex: python gomoku.py 1 10 5 60\n\n \"\"\"\n\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n usage()\n sys.exit(2)\n\n mode, board_dimension, winning_length, time_limit = \\\n [int(i) for index, i in enumerate(sys.argv) if index]\n\n if mode==1:\n 
gomo = GomokuGame(board_dimension, winning_length, time_limit, HumanPlayer, SmartPlayer)\n elif mode==2:\n gomo = GomokuGame(board_dimension, winning_length, time_limit, HumanPlayer, RandomPlayer)\n elif mode==3:\n gomo = GomokuGame(board_dimension, winning_length, time_limit, SmartPlayer, SmartPlayer)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1814,"cells":{"__id__":{"kind":"number","value":3358664451358,"string":"3,358,664,451,358"},"blob_id":{"kind":"string","value":"364782d188f975a20c41f5064a180b616f5b9716"},"directory_id":{"kind":"string","value":"541e1080ef18536b4ebb1442391010eb861f51f7"},"path":{"kind":"string","value":"/Angle.py"},"content_id":{"kind":"string","value":"d771575b960b37bd20d772e0e20fde486e63864e"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pd0wm/EPO4"},"repo_url":{"kind":"string","value":"https://github.com/pd0wm/EPO4"},"snapshot_id":{"kind":"string","value":"e898218b0452981cd2386249e6348288e61474ba"},"revision_id":{"kind":"string","value":"df24076e05f92a6a8ecf07c701687a2306371437"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-08T06:02:14.255238","string":"2016-08-08T06:02:14.255238"},"revision_date":{"kind":"timestamp","value":"2014-06-11T09:16:36","string":"2014-06-11T09:16:36"},"committer_date":{"kind":"timestamp","value":"2014-06-11T09:16:36","string":"2014-06-11T09:16:36"},"github_id":{"kind":"number","value":20395412,"string":"20,395,412"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import cmath\n\n\nauto_old = (1.0, 1.0)\nauto_new = (0.0, 2.0)\ncoord = (0.0, 1.0)\n\ndef angle_relative(angle_car, angle_dest):\n angle = angle_car - angle_dest\n if angle < -180:\n angle += 360\n elif angle > 180:\n angle -= 360\n return 
angle\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1815,"cells":{"__id__":{"kind":"number","value":8349416428102,"string":"8,349,416,428,102"},"blob_id":{"kind":"string","value":"d92fa5865cfd11b8f42d5fdc1d8ea4421b9fbb9e"},"directory_id":{"kind":"string","value":"564d1352ec876f09d3b048e30353d5c48e3e3479"},"path":{"kind":"string","value":"/app/__init__.py"},"content_id":{"kind":"string","value":"9d9d96bc8fe8bbe143c7534f15eee3f86ab93930"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"configuresystems/jane"},"repo_url":{"kind":"string","value":"https://github.com/configuresystems/jane"},"snapshot_id":{"kind":"string","value":"3a23ad2b9a61036be479064dd9a0c1e6cc301aef"},"revision_id":{"kind":"string","value":"d69b5a4c454ae90cb75858f62f789762b9292f68"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-15T23:39:47.875348","string":"2020-04-15T23:39:47.875348"},"revision_date":{"kind":"timestamp","value":"2014-08-17T19:59:43","string":"2014-08-17T19:59:43"},"committer_date":{"kind":"timestamp","value":"2014-08-17T19:59:43","string":"2014-08-17T19:59:43"},"github_id":{"kind":"number","value":21185752,"string":"21,185,752"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"nul
l"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.debug = True\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n#if not app.debug:\n# import logging\n# from logging.handlers import RotatingFileHandler\n# error = RotatingFileHandler('tmp/error.log', 'a', 1 * 1024 * 1024, 10)\n# error.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n# app.logger.setLevel(logging.INFO)\n# error.setLevel(logging.INFO)\n# app.logger.addHandler(error)\n# app.logger.info('Jane - syncing with SkyNet')\n\nfrom app.core import api_views, web_views, models\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1816,"cells":{"__id__":{"kind":"number","value":4561255313930,"string":"4,561,255,313,930"},"blob_id":{"kind":"string","value":"c161ca351cec8946b9061494c6f570aef1fa661c"},"directory_id":{"kind":"string","value":"7e37f6e4fe873496fd769e47ac926bd8863e2524"},"path":{"kind":"string","value":"/python/maya/site-packages/amTools/rigging/shoulderSetup.py"},"content_id":{"kind":"string","value":"7dd4dbab9672529834a07c5be74d806d07fb2cad"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"0xb1dd1e/PipelineConstructionSet"},"repo_url":{"kind":"string","value":"https://github.com/0xb1dd1e/PipelineConstructionSet"},"snapshot_id":{"kind":"string","value":"4b585881abfbc6c9209334282af8745bbfeb937b"},"revision_id":{"kind":"string","value":"621349da1b6d1437e95d0c9e48ee9f36d59f19fd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T00:53:32.987717","string":"2021-01-18T00:53:32.987717"},"revision_date":{"kind":"timestamp","value":"2014-03-11T22:44:39","string":"2014-03-11T22:44:39"},"committer_date":{"kind":"timestamp","value":"2014-03-11T22:44:39","string":"2014-03-11T22:44:39"},"github_id":{"kind":"number","value":17955574,"string":"17,955,574"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nA GUI to automatically create the twist structure for an upper arm.\n\n\\b Requirements:\nAM_ShoulderConstraint.py\n\nTo use this tool, select one or more elbow joints and enter the desired data\ninto the option fields before pressing either the Create or Apply button.\n\nTo skin the model, the arm mesh should be skinned in segments for each twist\njoint, plus one additional for the shoulder joint, where the segment at the\nbase of the shoulder is skinned to the first twist joint and the final segment\nis skinned to the shoulder joint.\n\n\\par Setup 
Shoulder Options:\n- \\b Suffix \\b of \\b New \\b Twist \\b Joints:\nSpecifies the base naming suffix to apply to the newly created joints. Their\nprefix will match the shoulder on which they are twisting and they will also be\nnumbered from 1 to n.\n- \\b Number \\b of \\b Twist \\b Joints:\nSpecifies the number of twist joints to create for each shoulder. You must\ncreate at least one and the first will always have the shoulder constraint\napplied to it.\n\\par Shoulder Constraint Options:\n- \\b Spine \\b Object:\nSpecifies the name of the object to use for computing the shoulder's elevation\nangle. The shoulder constraint is designed with the expectation that this is\nthe terminal spine node that is the most direct parent of the shoulder joints\n(i.e. the ribcage). Though this will produce perfectly valid values if any\nintermediate joints exist (collar bone, scapula), such an intermediate joint\ncould be used instead, provided that the axes given for the spine node (below)\nare transformed into the intermediate joint\\'s local space.'\n- \\b Raised \\b Angle \\b Offset:\nSpecifies the amount that the first twist joint's up-vector constraint rotates\nback when the shoulder is raised. A value between 0 and 90 is ideal and should\neliminate flipping in a normal human range of motion. 
The default value of 45\nis recommended in most cases.'\n- \\b Shoulder \\b Aim \\b Axis:\nCorresponds to the axis in the upper arm's local space that aims toward the\nelbow joint.\n- \\b Shoulder \\b Front \\b Axis:\nCorresponds to the axis in the upper arm's local space that points toward the\ncharacter's front.\n- \\b Spine \\b Aim \\b Axis:\nCorresponds to the axis in the specified spine joint's local space that aims\ntoward the next vertebra (up).\n- \\b Spine \\b Front \\b Axis:\nCorresponds to the axis in the specified spine joint's local space that aims\ntoward the character's front.\n\n\\b Creation \\b Info:\n\n\\b Donations: http://adammechtley.com/donations/\n\n\\b License: The MIT License\n\nCopyright (c) 2011 Adam Mechtley (http://adammechtley.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the 'Software'), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\\namespace amTools.rigging.shoulderSetup\n\"\"\"\n\nimport sys\nimport maya.cmds as cmds\nimport amTools.utilities as utils\nimport amTools.utilities.ui as amui\n\n# verify requirements\nutils.plugins.verifyPlugin('AM_ShoulderConstraint', __file__)\n\n## options window name\nkSetupOptionsWindow = 'am_setupShoulderOptionsWindow'\n## name of the tool\nkToolName = 'Setup Shoulder'\n## current version of the tool\nkVersionNumber = '1.05'\n## date of current version\nkVersionDate = '2011.03.27'\n\ndef menuItem(*args):\n\t\"\"\"This function calls optionsWindow() from a menu item\"\"\"\n\toptionsWindow()\n\ndef optionsWindow():\n\t\"\"\"This function creates an options window for creating the shoulder twist\n\tstructure. When executing it, select the elbows in the arms you are setting\n\tup, then press Create or Apply.\"\"\"\n\t# create the main interface\n\tif cmds.window(kSetupOptionsWindow, q=True, ex=True):\n\t\tcmds.deleteUI(kSetupOptionsWindow)\n\tmainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))\n\t\n\t# build the menu bar\n\tcmds.menu(label='Help')\n\tamui.helpMenuItem(kToolName, __file__)\n\tamui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)\n\t\n\tmainForm = cmds.formLayout(nd=100)\n\t\n\t# build the section to get information about the new twist joints\n\tif_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')\n\tif_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)\n\t\n\t# position the input fields for the twist joints\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], 
attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])\n\tcmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])\n\t\n\t# build the section to get information for the shoulder constraint\n\tconstraintFrame = eval('cmds.frameLayout(collapsable=True, label=\"Shoulder Constraint Options:\" %s)'%(amui.__frameAlignCenter__))\n\tconstraintForm = cmds.formLayout(nd=100)\n\t\n\t# attempt to guess what the spine is if there is a selection when the GUI is created\n\tspineText = 'CenterSpine'\n\tsel = cmds.ls(sl=True, l=True, type='transform')\n\tif sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list\n\t\ttry:\n\t\t\tshoulder = cmds.listRelatives(sel[0], p=True, f=True) # just use the first elbow in the selection\n\t\t\tcollar = cmds.listRelatives(shoulder[0], p=True, f=True)\n\t\t\tspine = cmds.listRelatives(collar[0], p=True, f=True)\n\t\t\tspineText = spine[0]\n\t\texcept: pass\n\t\t\n\tif_spine = cmds.textFieldGrp(label='Spine Object:', tx=spineText)\n\tif_raisedAngleOffset = cmds.floatSliderGrp(v=45, min=0, max=90, fmn=-180, fmx=180, label='Raised Angle Offset:', field=True)\n\tif_shoulderAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Shoulder Aim Axis:')\n\tif_shoulderFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Shoulder Front Axis:')\n\tif_spineAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Spine Aim Axis:')\n\tif_spineFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Spine Front Axis:')\n\t\n\t# position the input fields for the shoulder constraint\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_spine, 'left', 30), (if_spine, 'top', 5)], attachNone=[(if_spine, 'right'), (if_spine, 'bottom')])\n\tcmds.formLayout(constraintForm, edit=True, 
attachForm=[(if_raisedAngleOffset, 'left', 30)], attachNone=[(if_raisedAngleOffset, 'right'), (if_raisedAngleOffset, 'bottom')], attachControl=[(if_raisedAngleOffset, 'top', 5, if_spine)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_shoulderAimAxis, 'left', 30)], attachNone=[(if_shoulderAimAxis, 'right'), (if_shoulderAimAxis, 'bottom')], attachControl=[(if_shoulderAimAxis, 'top', 5, if_raisedAngleOffset)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_shoulderFrontAxis, 'left', 30)], attachNone=[(if_shoulderFrontAxis, 'right'), (if_shoulderFrontAxis, 'bottom')], attachControl=[(if_shoulderFrontAxis, 'top', 5, if_shoulderAimAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_spineAimAxis, 'left', 30)], attachNone=[(if_spineAimAxis, 'right'), (if_spineAimAxis, 'bottom')], attachControl=[(if_spineAimAxis, 'top', 5, if_shoulderFrontAxis)])\n\tcmds.formLayout(constraintForm, edit=True, attachForm=[(if_spineFrontAxis, 'left', 30)], attachNone=[(if_spineFrontAxis, 'right'), (if_spineFrontAxis, 'bottom')], attachControl=[(if_spineFrontAxis, 'top', 5, if_spineAimAxis)])\n\t\n\tcmds.setParent('..') # go up to constraintForm\n\tcmds.setParent('..') # go up to mainForm\n\t\n\t# position the frame for the shoulder constraint\n\tcmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])\n\t\n\t# create the buttons to execute the script\n\tcmd_create='amTools.rigging.shoulderSetup.doOptions (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")'%(\n\t\tif_suffixName, \n\t\tif_numberTwistJoints, \n\t\tif_spine, \n\t\tif_raisedAngleOffset, \n\t\tif_shoulderAimAxis, \n\t\tif_shoulderFrontAxis, \n\t\tif_spineAimAxis, \n\t\tif_spineFrontAxis)\n\tutils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)\n\t\n\tcmds.showWindow(mainWindow)\n\ndef 
doOptions(input_suffix, input_numberTwistJoints, input_spine, input_raisedAngleOffset, input_shoulderAimAxis, input_shoulderFrontAxis, input_spineAimAxis, input_spineFrontAxis):\n\t\"\"\"This is the function called when the apply or create button is clicked\"\"\"\n\ttry:\n\t\t# validate selection\n\t\tselection = utils.dg.validateSelection(type='transform', name='elbow joint objects', min=1)\n\t\t\n\t\t# validate suffix\n\t\tsuffix = cmds.textFieldGrp(input_suffix, q=True, tx=True)\n\t\tutils.dg.validateAffix(suffix)\n\t\t\n\t\t# validate spine\n\t\tspine = cmds.textFieldGrp(input_spine, q=True, tx=True)\n\t\tutils.dg.verifyNode(spine)\n\t\t\n\t\t# set up the shoulder\n\t\tnumberTwistJoints = cmds.intSliderGrp(input_numberTwistJoints, q=True, v=True)\n\t\tnewSelection = []\n\t\t# perform setup for each elbow in the selection\n\t\tfor elbow in selection:\n\t\t\tshoulder = cmds.listRelatives(elbow, p=True, f=True)\n\t\t\tshoulderShort = cmds.listRelatives(elbow, p=True)\n\t\t\tnewJoints = doSetup(\n\t\t\t\tshoulderShort[0] + suffix, \n\t\t\t\tnumberTwistJoints, \n\t\t\t\telbow, \n\t\t\t\tshoulder[0], \n\t\t\t\tspine, \n\t\t\t\tcmds.floatSliderGrp(input_raisedAngleOffset, q=True, v=True), \n\t\t\t\tcmds.floatFieldGrp(input_shoulderAimAxis, q=True, v=True), \n\t\t\t\tcmds.floatFieldGrp(input_shoulderFrontAxis, q=True, v=True), \n\t\t\t\tcmds.floatFieldGrp(input_spineAimAxis, q=True, v=True), \n\t\t\t\tcmds.floatFieldGrp(input_spineFrontAxis, q=True, v=True))\n\t\t\tnewSelection += newJoints\n\t\t# select the newly created joints for easy editing\n\t\tcmds.select(newSelection)\n\texcept: raise\n\ndef doSetup(baseName, numberTwistJoints, elbow, shoulder, spine, raisedAngleOffset, shoulderAimAxis, shoulderFrontAxis, spineAimAxis, spineFrontAxis):\n\t\"\"\"This function creates the new twist joints and returns a list of their names.\"\"\"\n\ttry:\n\t\t# validate baseName\n\t\tutils.dg.validateNodeName(baseName)\n\t\t\n\t\t# validate incoming object 
names\n\t\tutils.dg.verifyNode(elbow)\n\t\tutils.dg.verifyNode(shoulder)\n\t\tutils.dg.verifyNode(spine)\n\t\t\n\t\t# get the translation value for the elbow\n\t\telbowTranslate = cmds.getAttr('%s.translate'%elbow)[0]\n\t\t\n\t\t# see if there is a side label\n\t\tbodySide = cmds.getAttr('%s.side'%shoulder)\n\t\t\n\t\t# find out what rotate order the shoulder is using\n\t\trotateOrder = cmds.getAttr('%s.rotateOrder'%shoulder)\n\t\t\n\t\t# create the twist joints\n\t\ttwistJoints = []\n\t\t\n\t\tfor i in range(numberTwistJoints):\n\t\t\tcmds.select(cl=True)\n\t\t\tnewJoint = cmds.joint(name='%s%s'%(baseName, i + 1))\n\t\t\t\n\t\t\t# set up the first joint\n\t\t\tif i == 0:\n\t\t\t\tnewJoint = cmds.parent(newJoint, shoulder)[0]\n\t\t\t\tjointRadius = 1.0\n\t\t\t\tjointOrient = []\n\t\t\t\tif cmds.objectType(shoulder, isType='joint'):\n\t\t\t\t\tjointRadius = cmds.getAttr('%s.radius'%shoulder) * 0.5\n\t\t\t\t\n\t\t\t\tcmds.setAttr('%s.radius'%newJoint, jointRadius)\n\t\t\t\tcmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)\n\t\t\t\tcmds.setAttr('%s.translate'%newJoint, 0,0,0)\n\t\t\t\t\n\t\t\t\t# create the shoulder constraint\n\t\t\t\tcmds.am_shoulderConstraint(\n\t\t\t\t\tnewJoint,\n\t\t\t\t\tspineObject=spine,\n\t\t\t\t\tshoulderObject=shoulder,\n\t\t\t\t\trao=raisedAngleOffset, \n\t\t\t\t\tsha=shoulderAimAxis, \n\t\t\t\t\tshf=shoulderFrontAxis, \n\t\t\t\t\tspa=spineAimAxis, \n\t\t\t\t\tspf=spineFrontAxis)\n\t\t\t# set up the rest of the joints\n\t\t\telse:\n\t\t\t\tnewJoint = cmds.parent(newJoint, shoulder)[0]\n\t\t\t\tcmds.setAttr('%s.radius'%newJoint, jointRadius)\n\t\t\t\tcmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)\n\t\t\t\tpct = float(i)/float(numberTwistJoints)\n\t\t\t\tcmds.setAttr('%s.translate'%newJoint, elbowTranslate[0]*pct, elbowTranslate[1]*pct, elbowTranslate[2]*pct)\n\t\t\t\t\n\t\t\t\t# create the orient constraint\n\t\t\t\torientConstraint = cmds.orientConstraint([twistJoints[0], shoulder, newJoint])\n\t\t\t\ttargetWeights = 
cmds.orientConstraint(q=True, weightAliasList=True)\n\t\t\t\tcmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[0]), numberTwistJoints - i)\n\t\t\t\tcmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[1]), i)\n\t\t\t\tcmds.setAttr('%s.interpType'%orientConstraint[0], 1)\n\t\t\t\t\n\t\t\t# set label and rotate order\n\t\t\tcmds.setAttr('%s.side'%newJoint, bodySide)\n\t\t\tcmds.setAttr('%s.type'%newJoint, 18)\n\t\t\tcmds.setAttr('%s.otherType'%newJoint, 'Shoulder Twist %s'%(i + 1), type='string')\n\t\t\tcmds.setAttr('%s.rotateOrder'%newJoint, rotateOrder)\n\t\t\t\n\t\t\t# add the new joint to the list to return\n\t\t\ttwistJoints.append(newJoint)\n\t\t\t\n\t\treturn twistJoints;\n\texcept: raise"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1817,"cells":{"__id__":{"kind":"number","value":10402410804797,"string":"10,402,410,804,797"},"blob_id":{"kind":"string","value":"2b5f72920a2a6ae0476c14d28647f9353725a0bc"},"directory_id":{"kind":"string","value":"f31285f1adf3a0c83120c2a8f91ac5ef6f5f0b8b"},"path":{"kind":"string","value":"/extract_alignments_psl.py"},"content_id":{"kind":"string","value":"f2198dab87e2a2460e88631e98cfd68bc44789ee"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"RobinQi/BioUtils"},"repo_url":{"kind":"string","value":"https://github.com/RobinQi/BioUtils"},"snapshot_id":{"kind":"string","value":"1596cc1cec382567459d13d9e57a97099dbce76d"},"revision_id":{"kind":"string","value":"72693760620b8afb1797fd9f23e1540a194ef929"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-13T06:12:55.111042","string":"2021-01-13T06:12:55.111042"},"revision_date":{"kind":"timestamp","value":"2014-07-21T18:02:45","string":"2014-07-21T18:02:45"},"committer_date":{"kind":"timestamp","value":"2014-07-21T18:02:45","string":"2014-07-21T18:02:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''Extract only alignments of sequences in a list file.'''\n\nimport sys\n\npslfile = sys.argv[1]\nlistfile = sys.argv[2]\ntry:\n seqtype = sys.argv[3]\nexcept IndexError:\n seqtype = 9\nelse:\n if seqtype == 'query':\n seqtype = 9\n elif seqtype == 'target':\n seqtype = 13\n else:\n print >> sys.stderr, 'Unregconized sequence type.'\n raise SystemExit\n\nsequences = set([seq.strip() for seq in open(listfile)])\n\nfor align in open(pslfile):\n query = align.split()[seqtype]\n if query in sequences:\n print 
align,\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1818,"cells":{"__id__":{"kind":"number","value":10960756579767,"string":"10,960,756,579,767"},"blob_id":{"kind":"string","value":"e7dc2ee01c3fb99b4754a679dccf7a5d956c2e1e"},"directory_id":{"kind":"string","value":"de0d7e99d970d75b5356744bacaf7f44fcc731a8"},"path":{"kind":"string","value":"/bin/legacy/tremor_interface.py"},"content_id":{"kind":"string","value":"e5419863191e4d24c0c7b4df957fcb7f04c2f774"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Valvador/NEEShubloader"},"repo_url":{"kind":"string","value":"https://github.com/Valvador/NEEShubloader"},"snapshot_id":{"kind":"string","value":"a3a9e6f072def12423d4516e560974a7f4a744da"},"revision_id":{"kind":"string","value":"a1007cfe37b895d38191d9380ba42b01d45430b3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-20T04:24:30.381117","string":"2020-05-20T04:24:30.381117"},"revision_date":{"kind":"timestamp","value":"2013-03-20T21:58:03","string":"2013-03-20T21:58:03"},"committer_date":{"kind":"timestamp","value":"2013-03-20T21:58:03","string":"2013-03-20T21:58:03"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"
kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#THIS WILL BE USED TO INTERFACE DIRECTLY WITH THE TREMOR FILE SERVER\nimport os\nimport datetime\nimport caching\nimport hub_interface as bhi\nimport ucsbsql_interface as bui\nimport neesftp_interface as bni\nimport report\nimport utils\nimport nees_logging\nimport time\nfrom config import *\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\n# I. HIGH LEVEL FUNCTIONS \n#-------------------------------------------------------------------------------------------------------------------------------------------\n\n#\n# I. A. Trial Placement Procedures\n#\n\ndef check_cache_place_trials(expnum, start_time, end_time):\n '''Executes default trial structuring while at the same time creating a new cache file to make sure that no'''\n nees_logging.log_current_time(neeshub_log_filename)\n caching.create_hub_cache(expnum)\n place_trials_default(expnum, start_time, end_time)\n\n\n\ndef place_trials_default(expnum, start_time, end_time, verbose=False):\n '''This is going to be the primary way of moving processed data from it's proper location\n to the PEN tool's subfolder. 
As long as the data is organized with our standard format where\n the metadata is located on the mysql database, this will handle all the uploading.\n WARNING: Currently this will not realize if you've pointed it to a folder that it already uploaded.''' \n destination = experiment_path[expnum]\n current_trial = utils.find_last_trial(expnum) + 1 \n neeshub = bhi.conn\n mysqldb = bui.conn\n existing_evid_dict = caching.load_evid_dictionary(expnum)\n event_data_dicts = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites = mySQL_sitedef[expnum])\n default_folder = mysqldb.retrieve_data_folder()\n \n # Look at every event in the database between time constraints.\n for event in event_data_dicts:\n site_evt_number = event[cfg_evt_siteEvt]\n site_evt_time = event[cfg_evt_time]\n site_event_id = event[cfg_evt_evid]\n site_event_dist = event[cfg_evt_dist]\n site_event_ml = event[cfg_evt_ml]\n file_data_dicts = mysqldb.retrieve_file_location(site_evt_number,mySQL_stadef[expnum])\n \n # If this event has already been uploaded, report it and skip this event.\n if site_event_id in existing_evid_dict.values():\n nees_logging.log_existing_evid(site_event_id)\n continue\n \n # Don't do anything if there's no data\n if file_data_dicts == []:\n continue\n \n # Generate file structure on NEEShub and local system.\n description = utils.generate_description(event)\n trialtitle = datetime.datetime.utcfromtimestamp(site_evt_time).strftime(default_time_format)\n trial_doc_folder = \"%sTrial-%s/Documentation/\" % (destination, current_trial)\n report_name = 'report.csv'\n caching.update_all_cache_dictionaries(expnum, current_trial, site_event_id, site_event_ml, site_event_dist) \n utils.generate_trial_structure(destination, current_trial)\n report.create_report(trial_doc_folder, event)\n neeshub.post_full_trial(experiment_id[expnum], trialtitle, description, current_trial)\n \n # Find and move every file within an event to the created file structure. 
\n move_datafiles(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder, expnum)\n upload_and_post_report(expnum, current_trial, trial_doc_folder, report_name)\n \n \n # Move on to next trial for further processing.\n nees_logging.log_goto_nextline(neeshub_log_filename)\n current_trial += 1\n \n\ndef move_datafiles(file_data_dicts, \n event, \n destination, \n current_trial, \n trial_doc_folder, \n default_folder, \n expnum):\n '''Moves datafile from mysql-descripted location to file structure.\n file_ = mySQL-created dictionary that hold data location info.\n event = Dictionary containing event information for the event that \"file_\" belongs to.\n destination = Location of Experiment file structure as defined by configuration\n current_trial = Actually is the trial number that is being worked on.\n trial_doc_folder = Location of Documentation Files for the event. '''\n mysqldb = bui.conn\n julian_folder = datetime.datetime.utcfromtimestamp(event['time']).strftime(data_retrieval_time_format)\n\n # Upload every file associated with event.\n for file_ in file_data_dicts:\n filename = file_[cfg_fl_dfile]\n oscommand_source = \"%s%s%s\" % (default_folder, julian_folder, filename)\n oscommand_destination = \"%sTrial-%s/Rep-1/\" % (destination, current_trial)\n pubChan = \"%s_%s_%s\" % (file_[cfg_fl_net], file_[cfg_fl_sta], file_[cfg_fl_chan])\n channel_data_dict = mysqldb.retrieve_channel_position(pubChan, event[cfg_evt_time])\n file_extensions = utils.find_extensions(oscommand_source) \n\n report.append_report_if_valid(trial_doc_folder, \n file_, \n channel_data_dict, \n event[cfg_evt_evid])\n\n utils.copy_file_exts(oscommand_source, \n oscommand_destination, \n file_extensions)\n\n upload_and_post(expnum, \n current_trial, \n oscommand_destination, \n filename, \n file_extensions) \n \n\n#\n#TODO: THE BELOW ARE TOO MUCH DUPLICATE CODE, I need a generic UPLOAD and MULTI-PART POST that has FILE subroutines.\n#\n\n\ndef 
upload_and_post(expnum, \n trial_number, \n source_folder, \n filename, \n extensions, \n selector = http_file_path):\n '''This function is designed to be used in the upload process within the move_datafiles function.\n The filename has to be specified without extension, and the extension has to be specified separately.'''\n for extension in extensions:\n \n full_source_folder = source_folder + cfg_hub_ext_fold[extension] + '/'\n bhi.ftpconn.upload_file(full_source_folder, filename, extension)\n bhi.conn.multipart_post(filename, expnum, trial_number, extension, selector)\n\n \ndef upload_and_post_report(expnum, \n trial_number, \n source_folder, \n filename, \n selector = http_file_path):\n '''DUPLICATE CODE, SHOULD RESOLVE GENERIC MULTIPART GENERATOR REGARDLESS OF FILETYPE'''\n source_path = source_folder + filename\n bhi.ftpconn.upload_to_project(filename, source_path)\n bhi.conn.multipart_post_generic(filename, expnum, trial_number, selector) \n \n\n \n#\n# I. B. Update Report Only \n#\n\ndef place_reports_only(expnum, start_time, end_time):\n '''Used in the case that the log gives warning that individual channel information was missing. 
This allows \n the used to re-create the report.csv files without having to completely re-do the upload process.'''\n destination = experiment_path[expnum] \n mysqldb = bui.conn\n event_data_dicts = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites = mySQL_sitedef[expnum])\n default_folder = mysqldb.retrieve_data_folder() \n for event in event_data_dicts: \n site_evt_number = event[cfg_evt_siteEvt]\n site_event_id = event[cfg_evt_evid]\n file_data_dicts = mysqldb.retrieve_file_location(site_evt_number,mySQL_stadef[expnum])\n current_trial = caching.trial_num_from_evid(expnum, site_event_id)\n trial_doc_folder = \"%sTrial-%s/Documentation/\" % (destination, current_trial)\n report.create_report(trial_doc_folder, event)\n create_filereports(file_data_dicts, \n event, \n destination, \n current_trial, \n trial_doc_folder, \n default_folder) \n \n\ndef create_filereports(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder):\n mysqldb = bui.conn \n for file_ in file_data_dicts:\n pubChan = \"%s_%s_%s\" % (file_[cfg_fl_net], \n file_[cfg_fl_sta], \n file_[cfg_fl_chan])\n channel_data_dict = mysqldb.retrieve_channel_position(pubChan, \n event[cfg_evt_time])\n \n report.append_report_if_valid(trial_doc_folder, \n file_, \n channel_data_dict, \n event[cfg_evt_evid])\n \n \n#\n# TODO: THIS IS A ONE OFF PROCESSING SYSTEM\n# THIS WORKS ON EMILY'S AND TIM'S CODE. WE NEED TO FIND A WAY TO INTEGRATE IT\n# INTO A STANDARD SYSTEM. \"lengthofstuff=10\" IS A BIG NO-NO. THIS IS WAY TO TITLE\n# DEPENDENT.\n#\n\ndef place_trials(filepath, expnum, lengthofstuff=10):\n '''This uses the \"utils.find_last_trial\" function to analyze the the destination folder for it's\n Trial content. Based on that information, it will take the files from the given \"filepath\"\n and place them in proper Trial locations into the destination. 
The third variable,\n 'lengthofstuff' is defaulted at 8, and is used to compare whether the events happened\n on the same day, allowing this function to differential between different Trials.\n WARNING: Currently this will not realize if you've pointed it to a folder that it already uploaded.'''\n destination = experiment_path[expnum]\n previous = '' \n current_trial = utils.find_last_trial(expnum)\n pathlist = sorted(os.listdir(filepath))\n neeshub = bhi.conn \n \n for f in pathlist:\n if previous != f[0:lengthofstuff]:\n current_trial += 1 \n precommand = \"mkdir -p %sTrial-%s/Rep-1/Derived_Data\" % (destination,current_trial)\n os.system(precommand)\n trialtitle = utils.get_trial_title(f, expnum) #Gives Julian Date: Year-Day\n description = experiment_description[expnum]\n neeshub.post_full_trial(experiment_id[expnum], trialtitle, description) \n command = \"cp %s/%s %sTrial-%s/Rep-1/Derived_Data\" % (filepath,\n f,\n destination,\n current_trial)\n os.system(command) #Places next Trial folder.\n previous = f[0:lengthofstuff]\n return current_trial \n\n \n\n#\n# DEBUGGING, REMOVE WHEN FINISHED\n#\n\n\n\n#The following is kept for troubleshooting purposes. This was before using %s formatting in my strings. If those methos fail use the ones below.\ndef place_trials_no_hub(filepath, expnum, lengthofstuff=8): \n '''This uses the \"utils.find_last_trial\" function to analyze the the destination folder for it's\n Trial content. 
Based on that information, it will take the files from the given \"filepath\"\n and place them in proper Trial locations into the destination.\n WARNING: Currently this will not realize if you've pointed it to a folder that it already uploaded.'''\n destination = experiment_path[expnum]\n previous = ''\n trialscreated = 0 \n current_trial = utils.find_last_trial(destination)\n pathlist = sorted(os.listdir(filepath)) \n \n for f in pathlist:\n if previous != f[0:lengthofstuff]:\n precommand = \"mkdir -p \" +destination+\"Trial-\"+str(current_trial)+\"/Rep-1/Derived_Data\"\n os.system(precommand) \n command = \"cp \"+filepath+\"/\"+f+\" \"+destination+\"Trial-\"+str(current_trial)+\"/Rep-1/Derived_Data\"\n os.system(command)\n if previous == f[0:lengthofstuff]: \n current_trial += 1 \n trialscreated += 1 \n previous = f[0:lengthofstuff]\n return trialscreated \n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1819,"cells":{"__id__":{"kind":"number","value":14010183327518,"string":"14,010,183,327,518"},"blob_id":{"kind":"string","value":"bfbb7d365c37fc4f1e397ada0836a81f14649cb4"},"directory_id":{"kind":"string","value":"46a68635e05e3784069e333ef1b1727a7d68f616"},"path":{"kind":"string","value":"/Lab Sheet 3/T5.py"},"content_id":{"kind":"string","value":"81373ef5aa78317dab1f00dd984a6a685c35135b"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"theref/Computing-For-Mathematics"},"repo_url":{"kind":"string","value":"https://github.com/theref/Computing-For-Mathematics"},"snapshot_id":{"kind":"string","value":"4190d178550f6d302eaae928d089a39e0d6ceecb"},"revision_id":{"kind":"string","value":"6bb7d8ec985375f04e6352701b4f8828d7199c6f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-02T02:18:16.245986","string":"2016-09-02T02:18:16.245986"},"revision_date":{"kind":"timestamp","value":"2013-12-12T18:40:38","string":"2013-12-12T18:40:38"},"committer_date":{"kind":"timestamp","value":"2013-12-12T18:40:38","string":"2013-12-12T18:40:38"},"github_id":{"kind":"number","value":13867063,"string":"13,867,063"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2023-08-22T10:07:29","string":"2023-08-22T10:07:29"},"gha_created_at":{"kind":"timestamp","value":"2013-10-25T17:17:43","string":"2013-10-25T17:17:43"},"gha_updated_at":{"kind":"timestamp","value":"2013-12-12T18:39:03","string":"2013-12-12T18:39:03"},"gha_pushed_at":{"kind":"timestamp","value":"2023-08-22T10:07:29","string":"2023-08-22T10:07:29"},"gha_size":{"kind":"number","value":1260,"string":"1,260"},"gha_stargazers_count":{"kind":"number","value":1,"string":"1"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"TeX"},"gha_archived":{"kind":"bool","value":false,"string":"false"},"gha_disabled":{"kind":"bool","value":false,"string":"false"},"content":{"kind":"string","value":"numbers = open('W04_D01.txt', 'r').read().split('\\r\\n')\nnumbers = [ int(x) 
for x in numbers ]\nprint numbers.index(4558)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1820,"cells":{"__id__":{"kind":"number","value":18966575582615,"string":"18,966,575,582,615"},"blob_id":{"kind":"string","value":"2fc90870a0c2d80f89c89bf31d906a62096ca561"},"directory_id":{"kind":"string","value":"e8583f018de761273f64d5d4e202fdfb0d30d4fd"},"path":{"kind":"string","value":"/projectmanager/urls.py"},"content_id":{"kind":"string","value":"2e45608399b28653ca1bfcf75982ec19d47842b9"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"QA-long/django-projectmanager"},"repo_url":{"kind":"string","value":"https://github.com/QA-long/django-projectmanager"},"snapshot_id":{"kind":"string","value":"2d74c4f78fc8337f6f33c303232dd65f84c63144"},"revision_id":{"kind":"string","value":"90f890cc51c4f850528b1e2a1600d49da4ec716f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T23:25:40.793582","string":"2021-01-17T23:25:40.793582"},"revision_date":{"kind":"timestamp","value":"2013-05-09T23:57:54","string":"2013-05-09T23:57:54"},"committer_date":{"kind":"timestamp","value":"2013-05-09T23:57:54","string":"2013-05-09T23:57:54"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issu
es_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls.defaults import *\n\nurlpatterns = patterns('projectmanager.views',\n (r'^calendar/$', 'project_time_calendar'),\n (r'^api/time/list/', 'api_project_time_list'),\n (r'^api/time/add/', 'api_project_time_add'),\n (r'^api/time/edit/', 'api_project_time_edit'),\n (r'^api/time/move/', 'api_project_time_move'),\n\n (r'^tasks/$', 'tasks'),\n (r'^tasks/(\\d+)/$', 'tasks'),\n (r'^tasks/(all)/$', 'tasks'),\n\n (r'^invoice/(\\d+)/$', 'invoice'),\n (r'^invoice/(\\d+)/.+\\.(pdf)$', 'invoice'),\n\n (r'^quote/(\\d+)/$', 'quote'),\n (r'^quote/(\\d+)/.+\\.(pdf)$', 'quote'),\n\n (r'^itemise/(\\d+)/$', 'projecttime_summary'),\n\n (r'^create_invoice_for_project/(\\d+)/$', 'create_invoice_for_project'),\n\n)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1821,"cells":{"__id__":{"kind":"number","value":19069654803701,"string":"19,069,654,803,701"},"blob_id":{"kind":"string","value":"0e4cd6fdc18af9d5e4039f681bdf9ad1a7c6c9b9"},"directory_id":{"kind":"string","value":"35a14aea825e40b6284388827407e17b9e4fd688"},"path":{"kind":"string","value":"/src/Utils.py"},"content_id":{"kind":"string","value":"7a6cf15e5590cce19062fd223dc1eed12d23cf40"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"spatzle/Practice-Battleship"},"repo_url":{"kind":"string","value":"https://github.com/spatzle/Practice-Battleship"},"snapshot_id":{"kind":"string","value":"77411a1ea950982924cfdce193ed07300ae8dd0f"},"revision_id":{"kind":"string","value":"9b062a12b3b1e958154c6089ab847d0632d6d971"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-13T02:03:22.612825","string":"2021-01-13T02:03:22.612825"},"revision_date":{"kind":"timestamp","value":"2011-03-23T14:35:32","string":"2011-03-23T14:35:32"},"committer_date":{"kind":"timestamp","value":"2011-03-23T14:35:32","string":"2011-03-23T14:35:32"},"github_id":{"kind":"number","value":1516553,"string":"1,516,553"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''\nCreated on 2011-03-23\n\n@author: joyce\n'''\nimport re\ndef matched(pattern, string, repl='_._'):\n\n r = re.compile(pattern)\n m = r.search(string)\n\n if m:\n return True\n# i = 0\n# while m:\n# m_start = m.start()\n# m_end = m.end()\n#\n# i += 1\n# print( '%d) start: %d, end: %d, str: %s' %\n# (i, m_start, m_end, string[m_start:m_end]) )\n#\n# if m.groups(): # capturing groups\n# print(' groups: ' + str(m.groups()))\n#\n# if m_end == len(string): # infinite loop if\n# break # m_start == m_end == len(string)\n# elif m_start == m_end: # zero-width match;\n# 
m_end += 1 # keep things moving along\n#\n# m = r.search(string, m_end) \n#\n# print( 'global replace (%s):\\n%s' %\n# (repl, re.sub(pattern, repl, string)) )\n\n else:\n return False"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1822,"cells":{"__id__":{"kind":"number","value":1795296330168,"string":"1,795,296,330,168"},"blob_id":{"kind":"string","value":"bdae7ae6dd6edbc0ff76db670409acf6f98255f8"},"directory_id":{"kind":"string","value":"c1992428b36cd8c5ccd072749e910856f9503fd8"},"path":{"kind":"string","value":"/test_magic_square.py"},"content_id":{"kind":"string","value":"e949c563c9777f5cf4a3379dece332a1c6f994c2"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"croach/magic_squares"},"repo_url":{"kind":"string","value":"https://github.com/croach/magic_squares"},"snapshot_id":{"kind":"string","value":"f7f102974e44f328ff8c06e7405842bb5157f273"},"revision_id":{"kind":"string","value":"759e8e1b7b18912c6958fcd7964614ecc0a47885"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-07-23T18:22:26","string":"2015-07-23T18:22:26"},"revision_date":{"kind":"timestamp","value":"2013-11-11T09:30:45","string":"2013-11-11T09:30:45"},"committer_date":{"kind":"timestamp","value":"2013-11-11T09:30:45","string":"2013-11-11T09:30:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{
"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import random\nimport unittest\n\nfrom magic_square import generate, distance, cost, neighbors, solve\nfrom utils import flatten, key\n\n\nclass MagicSquareTestCase(unittest.TestCase):\n def test_generate(self):\n # Test the lower bounds of the size of a magic square. Squares below the\n # minimum (2x2) should raise a value error, all others shoudl return a\n # square (i.e., a list of lists).\n self.assertEquals(list, type(generate(2))) # the smallest square allowed\n self.assertRaises(ValueError, generate, 1) # 1 less than the smallest\n self.assertRaises(ValueError, generate, 0) # 0 size square\n self.assertRaises(ValueError, generate, -10) # negative size square\n\n # The default should be a 3x3 square\n DEFAULT_SQUARE_DIMENSIONS = 3\n default_square = generate()\n self.assertEquals(DEFAULT_SQUARE_DIMENSIONS, len(default_square))\n for row in default_square:\n self.assertEquals(DEFAULT_SQUARE_DIMENSIONS, len(row))\n\n # Create a pool of randomly sized squares to test\n squares = [generate(random.randint(2, 100)) for _ in xrange(10)]\n\n for square in squares:\n # Make sure all rows in a square are the same length\n self.assertTrue(all(len(row) == len(square[0]) for row in square))\n\n # Squares should be square, i.e., width == height\n width = len(square)\n height = len(square[0])\n self.assertEquals(width, height)\n\n # All cells in each square should be unique, consecutive (when sorted),\n # and have only one empty cell (i.e., None)\n self.assertEquals([None] + range(1, width**2), sorted(flatten(square)))\n\n def test_distance(self):\n square = [\n [8, 5, 6],\n [2, 1, 4],\n [3, None, 7]\n ]\n\n self.assertEquals(2, distance(1, 1, square)) # 1 square (up 1, left 1)\n self.assertEquals(1, distance(2, 1, 
square)) # empty square (right 1)\n self.assertEquals(4, distance(2, 0, square)) # 3 square (up 2, right 2)\n\n def test_cost(self):\n # Perfect square\n perfect_square = [\n [1, 2, 3 ],\n [4, 5, 6 ],\n [7, 8, None]\n ]\n self.assertEquals(0, cost(perfect_square))\n\n # Pretty good\n good_square = [\n [8, 2, 1 ],\n [4, 6, 5 ],\n [3, 7, None]\n ]\n self.assertEquals(12, cost(good_square))\n\n # Pretty bad\n bad_square = [\n [None, 8, 7],\n [6, 2, 4],\n [3, 5, 1]\n ]\n self.assertEquals(24, cost(bad_square))\n\n self.assertTrue(cost(perfect_square) < cost(good_square) < cost(bad_square))\n\n def test_neighbors(self):\n def test_neighbors(square, expected_squares):\n result = set(key(n) for n in neighbors(square))\n expected = set(key(n) for n in expected_squares)\n self.assertEquals(result, expected)\n\n # Square with the empty tile in the center\n center_square = [\n [2, 8, 7],\n [6, None, 4],\n [3, 5, 1]\n ]\n neighboring_squares = [\n [\n [2, None, 7],\n [6, 8, 4],\n [3, 5, 1]\n ],\n [\n [2, 8, 7],\n [6, 5 , 4],\n [3, None, 1]\n ],\n [\n [2, 8, 7],\n [None, 6, 4],\n [3, 5, 1]\n ],\n [\n [2, 8, 7],\n [6, 4, None],\n [3, 5, 1]\n ]\n ]\n test_neighbors(center_square, neighboring_squares)\n\n # Square with the empty tile in a corner\n corner_square = [\n [None, 8, 7],\n [6, 2, 4],\n [3, 5, 1]\n ]\n neighboring_squares = [\n [\n [8, None, 7],\n [6, 2, 4],\n [3, 5, 1]\n ],\n [\n [6, 8, 7],\n [None, 2, 4],\n [3, 5, 1]\n ]\n ]\n test_neighbors(corner_square, neighboring_squares)\n\n # Square with the empty tile on a side\n side_square = [\n [4, 8, 7 ],\n [6, 2, None],\n [3, 5, 1 ]\n ]\n neighboring_squares = [\n [\n [4, 8, None],\n [6, 2, 7 ],\n [3, 5, 1 ]\n ],\n [\n [4, 8, 7 ],\n [6, 2, 1 ],\n [3, 5, None]\n ],\n [\n [4, 8, 7],\n [6, None, 2],\n [3, 5, 1]\n ]\n ]\n test_neighbors(side_square, neighboring_squares)\n\n def test_solve(self):\n solvable_square = [\n [None, 1],\n [3, 2]\n ]\n self.assertIsNotNone(solve(solvable_square, False))\n\n unsolvable_square = [\n 
[None, 1],\n [2, 3]\n ]\n self.assertIsNone(solve(unsolvable_square, False))\n\n\n\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1823,"cells":{"__id__":{"kind":"number","value":506806180402,"string":"506,806,180,402"},"blob_id":{"kind":"string","value":"60a4993e77fad1abe7d5ed1648ebe079ada5d28c"},"directory_id":{"kind":"string","value":"e443a0b5b31f454c7f7f5f96f1bd26d8fa144e26"},"path":{"kind":"string","value":"/World_crisis_data_base/phase_I/WCDB.py"},"content_id":{"kind":"string","value":"7311c869955073d16376ab74642bcb1eed32f844"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"XiaoqinLI/Element-software-engineering-and-design-of-database"},"repo_url":{"kind":"string","value":"https://github.com/XiaoqinLI/Element-software-engineering-and-design-of-database"},"snapshot_id":{"kind":"string","value":"0aeb6aa63fa68b31d7f8fec0b85faac5bef1db1b"},"revision_id":{"kind":"string","value":"c9b3d1ca28cebfba02efcdb98a9282e462fd1dfb"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T05:22:14.536760","string":"2021-01-19T05:22:14.536760"},"revision_date":{"kind":"timestamp","value":"2014-04-22T14:44:16","string":"2014-04-22T14:44:16"},"committer_date":{"kind":"timestamp","value":"2014-04-22T14:44:16","string":"2014-04-22T14:44:16"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"n
ull"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# ---------------------------------------------------------------------------\r\n# projects/WCDB(phase1)/WCDB.py\r\n# Author: Xiaoqin LI\r\n# Description:\r\n #Create an import/export facility from the XML into Element Tree and back.\r\n #The input is guaranteed to have validated XML.\r\n #The import facility must import from a file.\r\n #The export facility must export to a file.\r\n #Import/export the XML on only the ten crises, ten organizations,\r\n #and ten people of the group.\r\n# Date: 03/09/2014\r\n# ----------------------------------------------------------------------------\r\n\r\n# -------\r\n# imports\r\n# -------\r\n\r\nimport sys\r\nimport xml.etree.ElementTree as ET\r\n\r\n# ----------\r\n# wcdb_read\r\n# ----------\r\n\r\ndef wcdb_read (r) :\r\n \"\"\"\r\n reads an input from a file which have a single top tag\r\n creates an element tree from string\r\n \"\"\"\r\n \r\n imported_str_data = \"\" + \"\".join(r.read()) + \"\"\r\n assert(type(imported_str_data) is str)\r\n data_tree = ET.fromstring(imported_str_data)\r\n assert(type(data_tree) is ET.Element)\r\n return data_tree\r\n\r\n# ----------\r\n# wcdb_write\r\n# ----------\r\n\r\ndef wcdb_write (w, data_tree):\r\n \"\"\"\r\n converts an element string to a string data\r\n exports the string data \r\n \"\"\"\r\n data_exported_string = ET.tostring(data_tree,encoding = \"unicode\", method = \"xml\")\r\n data_exported_string = data_exported_string[13:-14]\r\n assert(type(data_exported_string) is str)\r\n w.write(data_exported_string)\r\n\r\n# ----------\r\n# wcdb_solve\r\n# ----------\r\n\r\ndef wcdb_solve (stdin, stdout) :\r\n \"\"\"\r\n stdin is a reader\r\n stdout is a writer\r\n \"\"\"\r\n imported_tree = wcdb_read (stdin)\r\n 
wcdb_write (stdout, imported_tree)\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1824,"cells":{"__id__":{"kind":"number","value":7043746375605,"string":"7,043,746,375,605"},"blob_id":{"kind":"string","value":"4cbd3c90444fffc0391ec96d100ac0b3300771e9"},"directory_id":{"kind":"string","value":"739bb203a7f958a44c0f099bc829149d73d44f77"},"path":{"kind":"string","value":"/tests/settings_tests.py"},"content_id":{"kind":"string","value":"d0d5502d6fb66300ace739093b3ef6778d008ffe"},"detected_licenses":{"kind":"list like","value":["ISC"],"string":"[\n \"ISC\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"trehn/django-installer"},"repo_url":{"kind":"string","value":"https://github.com/trehn/django-installer"},"snapshot_id":{"kind":"string","value":"fbc5fdb16227ad3f7f1722a232f08f0dc90ec105"},"revision_id":{"kind":"string","value":"672c988ae33b311125e78202d2d0b3e298281d2c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-07T09:13:18.155326","string":"2020-06-07T09:13:18.155326"},"revision_date":{"kind":"timestamp","value":"2014-09-14T07:52:11","string":"2014-09-14T07:52:11"},"committer_date":{"kind":"timestamp","value":"2014-11-14T13:48:27","string":"2014-11-14T13:48:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_co
unt":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"try:\n from configparser import SafeConfigParser\nexcept ImportError:\n from ConfigParser import SafeConfigParser\nfrom os import environ\nfrom tempfile import NamedTemporaryFile\nfrom unittest import TestCase\n\n\nIMPORT_MAGIC = \"\"\"\nimport sys\ntry:\n del sys.modules[\"django_installer.settings\"]\nexcept KeyError:\n pass\nfrom django_installer.settings import *\n\"\"\"\nIMPORTED_GLOBALS = ['__builtins__', 'sys']\n\n\nclass BaseURLTest(TestCase):\n def test_base_url(self):\n tmpfile = NamedTemporaryFile()\n\n config = SafeConfigParser()\n config.add_section(\"baseurl\")\n config.set(\"baseurl\", \"url\", \"https://www.example.com/foo\")\n config.write(tmpfile)\n tmpfile.flush()\n\n environ[\"DJANGO_INSTALLER_SETTINGS\"] = tmpfile.name\n env = {}\n exec(IMPORT_MAGIC, env)\n\n self.assertEqual(env.keys(), IMPORTED_GLOBALS + ['ALLOWED_HOSTS'])\n self.assertEqual(env['ALLOWED_HOSTS'], (\"www.example.com\",))\n\n\nclass DatabaseTest(TestCase):\n def test_database(self):\n tmpfile = NamedTemporaryFile()\n\n config = SafeConfigParser()\n config.add_section(\"database\")\n config.set(\"database\", \"engine\", \"django.db.backends.mysql\")\n config.set(\"database\", \"host\", \"db.example.com\")\n config.set(\"database\", \"name\", \"example\")\n config.set(\"database\", \"password\", \"secret\")\n config.set(\"database\", \"port\", \"3306\")\n config.set(\"database\", \"user\", \"jdoe\")\n config.write(tmpfile)\n tmpfile.flush()\n\n environ[\"DJANGO_INSTALLER_SETTINGS\"] = tmpfile.name\n env = {}\n exec(IMPORT_MAGIC, env)\n\n self.assertEqual(env.keys(), IMPORTED_GLOBALS + ['DATABASES'])\n self.assertEqual(env['DATABASES'], {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'example',\n 'HOST': 'db.example.com',\n 'USER': 'jdoe',\n 'PASSWORD': 'secret',\n 'PORT': '3306',\n }\n 
})\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1825,"cells":{"__id__":{"kind":"number","value":16200616672758,"string":"16,200,616,672,758"},"blob_id":{"kind":"string","value":"2417d454f03b44ceb8c310b71967a737b0969db4"},"directory_id":{"kind":"string","value":"fb9116de4f8536fda86397379c2ed64086e3681c"},"path":{"kind":"string","value":"/hmm.py"},"content_id":{"kind":"string","value":"30448a5537fb94966cfc70fa8c6d4b4102b3ad22"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only"],"string":"[\n \"GPL-2.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"pbouda/gesturefollower"},"repo_url":{"kind":"string","value":"https://github.com/pbouda/gesturefollower"},"snapshot_id":{"kind":"string","value":"dfbbc2031b1e4bc234b7da61f469f9db144ffcfe"},"revision_id":{"kind":"string","value":"a43e2b0e8cb983871b5e9a3e3c927b7a4fb82336"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-16T17:49:07.494015","string":"2021-01-16T17:49:07.494015"},"revision_date":{"kind":"timestamp","value":"2014-09-19T12:43:43","string":"2014-09-19T12:43:43"},"committer_date":{"kind":"timestamp","value":"2014-09-19T12:43:43","string":"2014-09-19T12:43:43"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_l
anguage":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import math\n\nimport scipy.stats as st\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nA1 = 1./3.\nA2 = 1./3.\nA3 = 1./3.\nMU = .2\n\nclass HMM:\n # This class converted with modifications from http://kastnerkyle.github.io/blog/2014/05/22/single-speaker-speech-recognition/\n # Implementation of: http://articles.ircam.fr/textes/Bevilacqua09b/index.pdf\n\n def __init__(self, n_states, reference):\n self.n_states = n_states\n self.reference = reference\n #self.random_state = np.random.RandomState(0)\n \n # Initial state\n # left-to-right HMM, we start with state 1\n self.prior = np.zeros(self.n_states)\n self.prior[0] = 1.\n\n self.A = np.zeros((self.n_states, self.n_states))\n #self.A = self._stochasticize(self.random_state.rand(self.n_states, self.n_states))\n for i in range(self.n_states):\n self.A[i, i] = A1\n if (i+1) < self.A.shape[1]:\n self.A[i, i+1] = A2\n if (i+2) < self.A.shape[1]:\n self.A[i, i+2] = A3\n self.A[-1, -1] = 1.\n \n self.mu = np.array([MU]*len(self.reference))\n \n def _forward(self, B):\n log_likelihood = 0.\n T = B.shape[1]\n alpha = np.zeros(B.shape)\n #T = B.shape[1]\n #T = B.shape[0]\n #print(B)\n #alpha = np.zeros((self.n_states, self.n_states, self.reference.shape[1]))\n #for t in range(self.n_states):\n for t in range(T):\n if t == 0:\n #print(B[:, t].shape)\n #print(self.prior.ravel().shape)\n #alpha[t] = (B.transpose(1,0) * self.prior).transpose(1,0)\n alpha[:, t] = B[:, t] * self.prior.ravel()\n else:\n #alpha[t] = B * np.dot(self.A.T, alpha[t-1])\n alpha[:, t] = B[:, t] * np.dot(self.A.T, alpha[:, t - 1])\n\n alpha_sum = np.sum(alpha[:, t])\n alpha[:, t] /= alpha_sum\n #log_likelihood = log_likelihood + np.log(alpha_sum)\n log_likelihood = log_likelihood + alpha_sum\n\n #print(B[:, 3])\n return log_likelihood, alpha\n \n def _state_likelihood(self, obs):\n obs = np.atleast_2d(obs)\n B = 
np.zeros((self.n_states, obs.shape[0]))\n for s in range(self.n_states):\n #B[s, :] = st.multivariate_normal.pdf(obs.T, mean=self.mu)\n b = np.zeros(obs.shape[0])\n for o in range(obs.shape[0]):\n b[o] = 0.\n b[o] = (1./(self.mu[s]*math.sqrt(2*math.pi))) * \\\n math.exp(\n -( (obs[o][0]-self.reference[s][0])**2 / (2*(self.mu[s]**2)) )\n )\n #B[s, :] = self._normalize(b)\n B[s, :] = b\n\n #Needs scipy 0.14\n #B[s, :] = st.multivariate_normal.pdf(obs.T, mean=self.mu[:, s].T, cov=self.covs[:, :, s].T)\n\n #This function can (and will!) return values >> 1\n #See the discussion here for the equivalent matlab function\n #https://groups.google.com/forum/#!topic/comp.soft-sys.matlab/YksWK0T74Ak\n #Key line: \"Probabilities have to be less than 1,\n #Densities can be anything, even infinite (at individual points).\"\n #This is evaluating the density at individual points...\n return B\n \n def _normalize(self, x):\n return (x + (x == 0)) / np.sum(x)\n \n def _stochasticize(self, x):\n return (x + (x == 0)) / np.sum(x, axis=1)\n\nif __name__ == \"__main__\":\n reference_signal = np.concatenate((\n np.zeros(50),\n np.sin(np.linspace(-np.pi, np.pi, 40)),\n np.zeros(50),\n np.sin(np.linspace(-np.pi, np.pi, 40)),\n np.zeros(50)))\n\n noise = np.random.normal(0,.1,230)\n offset = .2\n test_signal = np.concatenate((\n np.random.normal(0,.1,70) + offset,\n noise + reference_signal + reference_signal + offset))\n\n #test_signal = np.concatenate((\n # np.zeros((50,)),\n # reference_signal))\n\n # test signal 2 is just noise\n test_signal2 = np.random.normal(0,1,230)\n\n # plt.plot(reference_signal)\n # plt.plot(test_signal)\n # plt.plot(test_signal2)\n # plt.show()\n\n r = np.reshape(reference_signal, (-1, 1))\n t = np.reshape(test_signal[:150], (-1, 1))\n t2 = np.reshape(test_signal2, (-1, 1))\n\n # Build HMM based on reference data\n\n h = HMM(len(r), r)\n B = h._state_likelihood(t)\n B2 = h._state_likelihood(t2)\n B3 = h._state_likelihood(r)\n lik, alpha = h._forward(B)\n for t in 
range(alpha.shape[0]):\n print(np.argmax(alpha[t, :]))\n print(\"Likelihood for test data: {}\".format(lik))\n print(\"Likelihood for noise data: {}\".format(h._forward(B2)[0]))\n print(\"Likelihood for reference data: {}\".format(h._forward(B3)[0]))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1826,"cells":{"__id__":{"kind":"number","value":15736760213749,"string":"15,736,760,213,749"},"blob_id":{"kind":"string","value":"e0cc7dfa6cf2e4ae121e84ac080fe796ad64a61d"},"directory_id":{"kind":"string","value":"02822dd303104bb36b220a0d8d21275263057812"},"path":{"kind":"string","value":"/cmd/memtest-cmd.py"},"content_id":{"kind":"string","value":"0e3cf0c839d6a86402685a8589a0e5c6acf5b1b9"},"detected_licenses":{"kind":"list like","value":["LGPL-2.0-or-later","BSD-3-Clause","LGPL-2.0-only","Python-2.0"],"string":"[\n \"LGPL-2.0-or-later\",\n \"BSD-3-Clause\",\n \"LGPL-2.0-only\",\n 
\"Python-2.0\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"zoranzaric/bup"},"repo_url":{"kind":"string","value":"https://github.com/zoranzaric/bup"},"snapshot_id":{"kind":"string","value":"21bcd21bebecdeca644bafc248577f05a6634966"},"revision_id":{"kind":"string","value":"53ffc4d336b06b3cecac0d817d192d22cb75a1bd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T23:29:49.384092","string":"2020-12-24T23:29:49.384092"},"revision_date":{"kind":"timestamp","value":"2013-07-31T16:34:29","string":"2013-07-31T16:34:29"},"committer_date":{"kind":"timestamp","value":"2013-07-31T16:34:30","string":"2013-07-31T16:34:30"},"github_id":{"kind":"number","value":826859,"string":"826,859"},"star_events_count":{"kind":"number","value":12,"string":"12"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport sys, re, struct, time, resource\nfrom bup import git, bloom, midx, options, _helpers\nfrom bup.helpers import *\n\nhandle_ctrl_c()\n\n_linux_warned = 0\ndef linux_memstat():\n global _linux_warned\n #fields = ['VmSize', 'VmRSS', 'VmData', 'VmStk', 'ms']\n d = {}\n try:\n f = open('/proc/self/status')\n except IOError, e:\n if not _linux_warned:\n log('Warning: %s\\n' % e)\n _linux_warned = 1\n return {}\n for line in f:\n # Note that on Solaris, this file exists but is binary. If that\n # happens, this split() might not return two elements. 
We don't\n # really need to care about the binary format since this output\n # isn't used for much and report() can deal with missing entries.\n t = re.split(r':\\s*', line.strip(), 1)\n if len(t) == 2:\n k,v = t\n d[k] = v\n return d\n\n\nlast = last_u = last_s = start = 0\ndef report(count):\n global last, last_u, last_s, start\n headers = ['RSS', 'MajFlt', 'user', 'sys', 'ms']\n ru = resource.getrusage(resource.RUSAGE_SELF)\n now = time.time()\n rss = int(ru.ru_maxrss/1024)\n if not rss:\n rss = linux_memstat().get('VmRSS', '??')\n fields = [rss,\n ru.ru_majflt,\n int((ru.ru_utime - last_u) * 1000),\n int((ru.ru_stime - last_s) * 1000),\n int((now - last) * 1000)]\n fmt = '%9s ' + ('%10s ' * len(fields))\n if count >= 0:\n print fmt % tuple([count] + fields)\n else:\n start = now\n print fmt % tuple([''] + headers)\n sys.stdout.flush()\n \n # don't include time to run report() in usage counts\n ru = resource.getrusage(resource.RUSAGE_SELF)\n last_u = ru.ru_utime\n last_s = ru.ru_stime\n last = time.time()\n\n\noptspec = \"\"\"\nbup memtest [-n elements] [-c cycles]\n--\nn,number= number of objects per cycle [10000]\nc,cycles= number of cycles to run [100]\nignore-midx ignore .midx files, use only .idx files\nexisting test with existing objects instead of fake ones\n\"\"\"\no = options.Options(optspec)\n(opt, flags, extra) = o.parse(sys.argv[1:])\n\nif extra:\n o.fatal('no arguments expected')\n\ngit.ignore_midx = opt.ignore_midx\n\ngit.check_repo_or_die()\nm = git.PackIdxList(git.repo('objects/pack'))\n\nreport(-1)\n_helpers.random_sha()\nreport(0)\n\nif opt.existing:\n def foreverit(mi):\n while 1:\n for e in mi:\n yield e\n objit = iter(foreverit(m))\n \nfor c in xrange(opt.cycles):\n for n in xrange(opt.number):\n if opt.existing:\n bin = objit.next()\n assert(m.exists(bin))\n else:\n bin = _helpers.random_sha()\n\n # technically, a randomly generated object id might exist.\n # but the likelihood of that is the likelihood of finding\n # a collision in sha-1 
by accident, which is so unlikely that\n # we don't care.\n assert(not m.exists(bin))\n report((c+1)*opt.number)\n\nif bloom._total_searches:\n print ('bloom: %d objects searched in %d steps: avg %.3f steps/object' \n % (bloom._total_searches, bloom._total_steps,\n bloom._total_steps*1.0/bloom._total_searches))\nif midx._total_searches:\n print ('midx: %d objects searched in %d steps: avg %.3f steps/object' \n % (midx._total_searches, midx._total_steps,\n midx._total_steps*1.0/midx._total_searches))\nif git._total_searches:\n print ('idx: %d objects searched in %d steps: avg %.3f steps/object' \n % (git._total_searches, git._total_steps,\n git._total_steps*1.0/git._total_searches))\nprint 'Total time: %.3fs' % (time.time() - start)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1827,"cells":{"__id__":{"kind":"number","value":824633732765,"string":"824,633,732,765"},"blob_id":{"kind":"string","value":"209aafd1625122c381d277ab4c2204c5bceca463"},"directory_id":{"kind":"string","value":"ce8f9cb01cc533fbba6055c6c6750b320bc1d43e"},"path":{"kind":"string","value":"/poembot/util/PoemImporter.py"},"content_id":{"kind":"string","value":"ff5076e632f40f0f6b67a5b407c6083946e35b33"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"seanxiaoxiao/poembot"},"repo_url":{"kind":"string","value":"https://github.com/seanxiaoxiao/poembot"},"snapshot_id":{"kind":"string","value":"ecec1a3e11aab12e4aa53650484b596a1386b554"},"revision_id":{"kind":"string","value":"61466698720ff44dc1e15437dea18fa77f5bd50d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T00:57:04.776307","string":"2021-01-20T00:57:04.776307"},"revision_date":{"kind":"timestamp","value":"2014-06-07T17:32:24","string":"2014-06-07T17:32:24"},"committer_date":{"kind":"timestamp","value":"2014-06-07T17:32:24","string":"2014-06-07T17:32:24"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nCI_RESOURCE_FIEL = \"../../resource/ci-set-first\"\nCI_TEMPLATE_RESOURCE_FILE = \"../../resource/ci-template\"\n\nfrom pymongo import Connection\n\ndef read_poems(path):\n with open(path, \"r\") as poem_file:\n results = []\n contents = poem_file.readlines()\n poem = {}\n for line in contents:\n line = line.strip()\n if len(line) == 0:\n if poem.get(\"template\") and poem.get(\"author\"):\n results.append(poem)\n poem = {}\n elif not poem.get(\"template\"):\n components = line.split('-')\n poem[\"template\"] = components[0]\n poem[\"author\"] = components[1]\n if len(components) >= 3:\n 
poem[\"title\"] = components[2]\n if len(components) >= 4:\n poem[\"summary\"] = components[3]\n poem[\"contents\"] = []\n else:\n poem[\"contents\"].append(line)\n return results\n\ndef read_template(path):\n with open(path, \"r\") as template_file:\n results = []\n contents = template_file.readlines()\n template = {}\n for line in contents:\n line = line.strip()\n if len(line) == 0:\n if template.get(\"title\"):\n results.append(template)\n template = {}\n elif not template.get(\"title\"):\n template[\"title\"] = line\n template[\"contents\"] = []\n else:\n template[\"contents\"].append(line)\n return results\n\ndef read_character(path):\n _vowel_map = vowel_map()\n _vowel_list = vowel_list()\n _vowel_tune_number = vowel_tune_number()\n\n with open(path, \"r\") as character_file:\n results = []\n contents = character_file.readlines()\n character = {}\n for line in contents:\n line = line.strip()\n components = line.split(\"\\t\")\n character[\"char\"] = components[0]\n character[\"pronunciation\"] = []\n for i in range(1, len(components)):\n pronunciations = components[i].split(',')\n for j in range(0, len(pronunciations)):\n pronunciation = pronunciations[j]\n character[\"pronunciation\"].append(_format_pronunciation(pronunciation, _vowel_map, _vowel_list, _vowel_tune_number))\n results.append(character)\n character = {}\n return results\n\ndef _format_pronunciation(pronunciation, _vowel_map, _vowel_list, _vowel_tune_number):\n formatted = \"\"\n tune = 0\n for c in pronunciation.decode('utf8'):\n if c in _vowel_list:\n formatted += _vowel_map[c]\n tune = _vowel_tune_number[c]\n else:\n formatted += c\n formatted += str(tune)\n return formatted\n\ndef vowel_map():\n result = {}\n result[u'a'] = 'a'\n result[u'ā'] = 'a'\n result[u'á'] = 'a'\n result[u'ǎ'] = 'a'\n result[u'à'] = 'a'\n result[u'e'] = 'e'\n result[u'ē'] = 'e'\n result[u'é'] = 'e'\n result[u'ě'] = 'e'\n result[u'è'] = 'e'\n result[u'i'] = 'i'\n result[u'ī'] = 'i'\n result[u'í'] = 'i'\n result[u'ǐ'] = 
'i'\n result[u'ì'] = 'i'\n result[u'o'] = 'o'\n result[u'ō'] = 'o'\n result[u'ó'] = 'o'\n result[u'ǒ'] = 'o'\n result[u'ò'] = 'o'\n result[u'u'] = 'u'\n result[u'ū'] = 'u'\n result[u'ú'] = 'u'\n result[u'ǔ'] = 'u'\n result[u'ù'] = 'u'\n result[u'v'] = 'v'\n result[u'ǘ'] = 'v'\n result[u'ǚ'] = 'v'\n result[u'ǜ'] = 'v'\n return result\n\ndef vowel_list():\n return [u'ā', u'á', u'ǎ', u'à', u'ē', u'é', u'ě', u'è', u'ī', u'í', u'ǐ', u'ì', u'ō', u'ó', u'ǒ', u'ò', u'ū', u'ú', u'ǔ', u'ù', u'ǘ', u'ǚ', u'ǜ']\n\ndef vowel_tune_number():\n result = {}\n result[u'ā'] = 1\n result[u'á'] = 2\n result[u'ǎ'] = 3\n result[u'à'] = 4\n result[u'ē'] = 1\n result[u'é'] = 2\n result[u'ě'] = 3\n result[u'è'] = 4\n result[u'ī'] = 1\n result[u'í'] = 2\n result[u'ǐ'] = 3\n result[u'ì'] = 4\n result[u'ō'] = 1\n result[u'ó'] = 2\n result[u'ǒ'] = 3\n result[u'ò'] = 4\n result[u'ū'] = 1\n result[u'ú'] = 2\n result[u'ǔ'] = 3\n result[u'ù'] = 4\n result[u'ǘ'] = 2\n result[u'ǚ'] = 3\n result[u'ǜ'] = 4\n return result\n\ndef import_poems(poems):\n db = Connection().poembot\n poems_collection = db.poems\n poems_collection.insert(poems)\n\ndef import_templates(templates):\n db = Connection().poembot\n template_collection = db.templates\n template_collection.insert(templates)\n\ndef import_characters(characters):\n db = Connection().poembot\n character_collection = db.characters\n character_collection.insert(characters)\n\ndef remove_poems():\n db = Connection().poembot\n db.drop_collection(\"poems\")\n\ndef remove_templates():\n db = Connection().poembot\n db.drop_collection(\"templates\")\n\ndef remove_characters():\n db = Connection().poembot\n db.drop_collection(\"characters\")\n\ndef import_tokens(tokens):\n db = Connection().poembot\n token_collection = db.tokens\n token_collection.insert(tokens)\n\ndef remove_tokens():\n db = Connection().poembot\n db.drop_collection(\"tokens\")\n\ndef import_authors(authors):\n db = Connection().poembot\n authors_collection = db.authors\n 
authors_collection.insert(authors)\n\ndef remove_authors():\n db = Connection().poembot\n db.drop_collection(\"authors\")\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1828,"cells":{"__id__":{"kind":"number","value":7662221687682,"string":"7,662,221,687,682"},"blob_id":{"kind":"string","value":"a4f2c27250b02005b06c303ce5ed6cc14de4f17d"},"directory_id":{"kind":"string","value":"24137e4b1c04c43b0ce36f2dfaf067f5127a2f41"},"path":{"kind":"string","value":"/fizzbuzz2.py"},"content_id":{"kind":"string","value":"c396bfe781ce8647c288bfee9b06f7d62a58fa9a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"christopher-s-b/sandbox"},"repo_url":{"kind":"string","value":"https://github.com/christopher-s-b/sandbox"},"snapshot_id":{"kind":"string","value":"ed68756e2d29393dcd22169b7e908ffc98f77651"},"revision_id":{"kind":"string","value":"c8e6b7ba0f6bfd110e99193cf66623dde50f2ea4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T18:46:06.684300","string":"2021-01-17T18:46:06.684300"},"revision_date":{"kind":"timestamp","value":"2014-11-25T22:56:57","string":"2014-11-25T22:56:57"},"committer_date":{"kind":"timestamp","value":"2014-11-25T22:56:57","string":"2014-11-25T22:56:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind
":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\ndef mul_of(a,b):\n return 0 == a % b\n\ntargets = {3:\"fizz\", 5:\"buzz\"}\n\ndef fizzbuzz(n):\n matches = filter(lambda i: mul_of(n, i), targets.keys()) #curried\n return str(n) if len(matches) == 0 else \"\".join(map(lambda i: targets[i], matches))\n\nprint \", \".join(map(fizzbuzz, range(20)))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1829,"cells":{"__id__":{"kind":"number","value":18588618474054,"string":"18,588,618,474,054"},"blob_id":{"kind":"string","value":"346d570bc495b19d067ea0586986a5527f8213ce"},"directory_id":{"kind":"string","value":"e9a3c11ccf90339184edd17562eaf6d99c063cae"},"path":{"kind":"string","value":"/libraries/cakemail/CakeClient.py"},"content_id":{"kind":"string","value":"de37a6ebd24f3ab67cbc1f80cfcd47d0908877e9"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"silent1mezzo/HackMTL"},"repo_url":{"kind":"string","value":"https://github.com/silent1mezzo/HackMTL"},"snapshot_id":{"kind":"string","value":"ef29ece97334ff85bc586349ab3ebf7cbd486634"},"revision_id":{"kind":"string","value":"d945354e96bca728014308bbd1db86f9dfc98937"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-22T14:10:45.478560","string":"2020-05-22T14:10:45.478560"},"revision_date":{"kind":"timestamp","value":"2010-11-27T21:20:25","string":"2010-11-27T21:20:25"},"committer_date":{"kind":"timestamp","value":"2010-11-27T21:20:25","string":"2010-11-27T21:20:25"},"github_id":{"kind":"number","value":1112867,"string":"1,112,867"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import CakeGlobals\r\nimport CakeFunctions\r\n\r\nCLASS_NAME = \"ClassClient\"\r\n\r\n# Activates a client\r\ndef Activate(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"Activate\", parameters, locale)\r\n\r\n\treturn CakeFunctions.ParseXML(methodNode)\r\n\r\n# Adds credits to a client\r\ndef AddCredits(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tCakeFunctions.Proccess(CLASS_NAME, \"AddCredits\", parameters, locale)\r\n\r\n\treturn True\r\n\r\n# Creates a new client\r\ndef Create(parameters, locale = 
CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"Create\", parameters, locale)\r\n\r\n\treturn CakeFunctions.ParseXML(methodNode)\r\n\r\n# Gets the credit balance\r\ndef GetCreditBalance(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"GetCreditBalance\", parameters, locale)\r\n\r\n\treturn CakeFunctions.ParseXML(methodNode)\r\n\r\n# Gets the credit transactions\r\ndef GetCreditTransactions(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"GetCreditTransactions\", parameters, locale)\r\n\r\n\treturn CakeFunctions.ParseXML(methodNode)\r\n\r\n# Retrieves the informations about the client\r\ndef GetInfo(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"GetInfo\", parameters, locale)\r\n\r\n\treturn CakeFunctions.ParseXML(methodNode)\r\n\r\n# Gets the list with a specified status\r\ndef GetList(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"GetList\", parameters, locale)\r\n\r\n\tres = CakeFunctions.ParseXML(methodNode, [\"client\"])\r\n\tCakeFunctions.ChangeKey(res, \"client\", \"clients\")\r\n\r\n\treturn res\r\n\r\n# Gets the timezones\r\ndef GetTimezones(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"GetTimezones\", parameters, locale)\r\n\r\n\tres = CakeFunctions.ParseXML(methodNode, [\"timezone\"])\r\n\tCakeFunctions.ChangeKey(res, \"timezone\", \"timezones\")\r\n\r\n\treturn res\r\n\r\n# Adds or removes credits to a client for the balance to be 0 at the end of the month\r\ndef ResetCredits(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tCakeFunctions.Proccess(CLASS_NAME, \"ResetCredits\", parameters, locale)\r\n\r\n\treturn True\r\n\r\n# Sets the parameters for a user\r\ndef SetInfo(parameters, locale = 
CakeGlobals.DEFAULT_LOCALE):\r\n\tCakeFunctions.Proccess(CLASS_NAME, \"SetInfo\", parameters, locale)\r\n\r\n\treturn True\r\n\r\n# Searchs for clients based on a query string\r\ndef Search(parameters, locale = CakeGlobals.DEFAULT_LOCALE):\r\n\tmethodNode = CakeFunctions.Proccess(CLASS_NAME, \"Search\", parameters, locale)\r\n\r\n\tres = CakeFunctions.ParseXML(methodNode, [\"client\"])\r\n\tCakeFunctions.ChangeKey(res, \"client\", \"clients\")\r\n\r\n\treturn res\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1830,"cells":{"__id__":{"kind":"number","value":11905649346418,"string":"11,905,649,346,418"},"blob_id":{"kind":"string","value":"1441317d07ed1890e330bd605343306a7de076f1"},"directory_id":{"kind":"string","value":"a4b76507c86458c250450e640f93fb76d5cccc9d"},"path":{"kind":"string","value":"/src/models/schema.py"},"content_id":{"kind":"string","value":"cedfdfbea65fc815c603e509a0e0540df8e5b3fb"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"NickolausDS/Uniquity"},"repo_url":{"kind":"string","value":"https://github.com/NickolausDS/Uniquity"},"snapshot_id":{"kind":"string","value":"9bfb6b8761c4c6e1833a7a6166be435eb2c67846"},"revision_id":{"kind":"string","value":"b8d3f31df5db0a628da99652e7a42452995c0c74"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T18:49:11.633354","string":"2021-01-23T18:49:11.633354"},"revision_date":{"kind":"timestamp","value":"2014-06-06T22:49:07","string":"2014-06-06T22:49:07"},"committer_date":{"kind":"timestamp","value":"2014-06-06T22:49:07","string":"2014-06-06T22:49:07"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nThis file shouldn't be used as of 0.4.2. It's still used by some objects. 
When they're \nrefactored, this will go away.\n\"\"\"\n\n\n#Register the types here\nTYPES = [\"FILE\", \"SCANPARENT\"]\n\nFILE = (\n\t(\"filename\", str),\n\t(\"shortname\", str),\n\t(\"basename\", str),\n\t(\"rootParent\", str),\n\t(\"size\", int),\n\t(\"niceSize\", str),\n\t(\"niceSizeAndDesc\", str),\n\t(\"weakHash\", str),\n\t(\"weakHashFunction\", str),\n\t(\"strongHash\", str),\n\t(\"strongHashFunction\", str),\n\t)\n\nSCANPARENT = (\n\t(\"filename\", str),\n\t)\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1831,"cells":{"__id__":{"kind":"number","value":16449724745596,"string":"16,449,724,745,596"},"blob_id":{"kind":"string","value":"8ddcddbfc47e5584416401c415f995a7d1a25e2a"},"directory_id":{"kind":"string","value":"a1a2af1fb3800f698a8b3c431f30d7a147f98791"},"path":{"kind":"string","value":"/gmlStore/models.py"},"content_id":{"kind":"string","value":"c3782b0a466da798ef515efc35c2f8ae66632953"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ashirley/mapbin"},"repo_url":{"kind":"string","value":"https://github.com/ashirley/mapbin"},"snapshot_id":{"kind":"string","value":"a2a045a8f0f8856dbeca80a8d7285a896b6e0612"},"revision_id":{"kind":"string","value":"7d7bfc2aa9fb967017e8126350e5f926ad3ed658"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T04:51:50.712833","string":"2021-01-19T04:51:50.712833"},"revision_date":{"kind":"timestamp","value":"2010-05-06T07:48:56","string":"2010-05-06T07:48:56"},"committer_date":{"kind":"timestamp","value":"2010-05-06T07:48:56","string":"2010-05-06T07:48:56"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.db import models\n\n# Create your models here.\nclass Annotation(models.Model):\n gml = models.XMLField()\n creation_date = models.DateTimeField('date 
created')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1832,"cells":{"__id__":{"kind":"number","value":5901285095027,"string":"5,901,285,095,027"},"blob_id":{"kind":"string","value":"e638675c88871cf5ac8e810e1bb23966126d9a5a"},"directory_id":{"kind":"string","value":"b475797332e9bf23bf90a433efb101b9af3101ed"},"path":{"kind":"string","value":"/partial/templating.py"},"content_id":{"kind":"string","value":"0cdd0b59219f5b02b5a192677d94cb26c4a484aa"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"RentennaDev/partial"},"repo_url":{"kind":"string","value":"https://github.com/RentennaDev/partial"},"snapshot_id":{"kind":"string","value":"2960ef1df958909f11dd76822260854ebda8ee44"},"revision_id":{"kind":"string","value":"f55a7919da4bf0a595854f173eb2380c0138946c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-03-26T06:28:57.679617","string":"2018-03-26T06:28:57.679617"},"revision_date":{"kind":"timestamp","value":"2013-11-21T00:48:58","string":"2013-11-21T00:48:58"},"committer_date":{"kind":"timestamp","value":"2013-11-21T00:48:58","string":"2013-11-21T00:48:58"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language"
:{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from jinja2 import Environment, BaseLoader, TemplateNotFound\n\nfrom partial.bundleComponent import BundleComponent\nfrom partial.scanner import ClassNotFound\n\nclass TemplateBundleComponent(BundleComponent):\n\n type = 'template'\n\ndef renderTemplate(template, context=None):\n if context is None: context = {}\n template = env.get_template(template)\n return template.render(**context)\n\nclass _PartialLoader(BaseLoader):\n\n def get_source(self, environment, template):\n from partial import scanner\n\n try:\n component = scanner.getBundleComponent('template', template)\n return (component, None, lambda: True)\n except ClassNotFound:\n raise TemplateNotFound(template)\n\nenv = Environment(\n loader=_PartialLoader(),\n)\n\nfrom partial import routing\n\nenv.globals['url'] = routing.generate\n\ndef _partial(partialName, **kwargs):\n from partial import render\n return render.partial(partialName, kwargs)\nenv.globals['partial'] = _partial\n\ndef _plural(number, singular, plural=None):\n return \"%s %s\" % (number, _pluralName(number, singular, plural))\nenv.globals['plural'] = _plural\n\ndef _pluralName(number, singular, plural=None):\n if number == 1:\n return singular\n elif plural is None:\n return \"%ss\" % singular\n else:\n return plural\nenv.globals['pluralName'] = _pluralName\n\ndef _nullable(val):\n if val is None:\n return \"\"\n elif isinstance(val, str):\n return unicode(val, encoding='utf-8', errors='replace')\n else:\n return val\n\nenv.finalize = 
_nullable"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1833,"cells":{"__id__":{"kind":"number","value":12945031465532,"string":"12,945,031,465,532"},"blob_id":{"kind":"string","value":"c31dcb4379b1911310710c28fc208747c3ebd80b"},"directory_id":{"kind":"string","value":"278f1f4727cafc582841bd60ed32ddb611acdf27"},"path":{"kind":"string","value":"/src/gui/gui_control.py"},"content_id":{"kind":"string","value":"e79e8d7e09093e704eaff05250a8774be93b1f37"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Ormazz/ESIR3-SR-katch"},"repo_url":{"kind":"string","value":"https://github.com/Ormazz/ESIR3-SR-katch"},"snapshot_id":{"kind":"string","value":"46d8b4106b6e1cb289f7bf8beb41ed2696d6b641"},"revision_id":{"kind":"string","value":"bf98e8af71524df05c3098144be03c3194d12efe"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-15T21:51:28.207234","string":"2016-09-15T21:51:28.207234"},"revision_date":{"kind":"timestamp","value":"2014-01-19T13:18:20","string":"2014-01-19T13:18:20"},"committer_date":{"kind":"timestamp","value":"2014-01-19T13:18:20","string":"2014-01-19T13:18:20"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"nu
ll"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from abc import ABCMeta, abstractmethod\nfrom control import katch\n\nclass GuiControl(metaclass=ABCMeta):\n\n\t_katch = katch.Katch()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1834,"cells":{"__id__":{"kind":"number","value":8881992385437,"string":"8,881,992,385,437"},"blob_id":{"kind":"string","value":"1d0ca17365d8d8127d056a917c053c15065128ae"},"directory_id":{"kind":"string","value":"b122d95ac1f059567ea4753fbd0ad4601caf3752"},"path":{"kind":"string","value":"/processing/commandline.py"},"content_id":{"kind":"string","value":"7450fdbc72e81701dbd24bdc61bbe34ef71afb68"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"JNazare/IntroduceMeTo"},"repo_url":{"kind":"string","value":"https://github.com/JNazare/IntroduceMeTo"},"snapshot_id":{"kind":"string","value":"442ab6f084688286ccda0a419cd082a6029907db"},"revision_id":{"kind":"string","value":"3edd082ed5266eefbafe01a7856aa996e6992166"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-30T18:50:04.003557","string":"2020-05-30T18:50:04.003557"},"revision_date":{"kind":"timestamp","value":"2013-05-06T00:55:17","string":"2013-05-06T00:55:17"},"committer_date":{"kind":"timestamp","value":"2013-05-06T00:55:17","string":"2013-05-06T00:55:17"},"github_id":{"kind":"number","value":9875695,"string":"9,875,695"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"nul
l"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import query\n\nwhile True:\n var = raw_input(\"\\033[92mDear Introducemeto, introduce me to... \\033[0m\")\n try:\n print \">\", [x[0] for x in query.ranked_query(str(\"@IntroduceMeTo \" + var))]\n print \"\"\n except:\n print \"> NO HUMANS FOUND\"\n print \"\""},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1835,"cells":{"__id__":{"kind":"number","value":15882789092271,"string":"15,882,789,092,271"},"blob_id":{"kind":"string","value":"559f10eda7c7ae1c2b3888e9f8a5550b89fd5295"},"directory_id":{"kind":"string","value":"1f4204f903657884d9cccfd44b19ecb531b59ded"},"path":{"kind":"string","value":"/setup.py"},"content_id":{"kind":"string","value":"fd49ab17e6e9821e129493aeb8450171d8b9167b"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"fmcc/StylometricAnalyser"},"repo_url":{"kind":"string","value":"https://github.com/fmcc/StylometricAnalyser"},"snapshot_id":{"kind":"string","value":"795a8e4abe264ee18ab3bcb34bd128bcd06ac5ca"},"revision_id":{"kind":"string","value":"e86305a63c95d8b533cab4a3be0010c2fee0ff14"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T08:38:44.961082","string":"2021-01-23T08:38:44.961082"},"revision_date":{"kind":"timestamp","value":"2013-08-31T20:23:36","string":"2013-08-31T20:23:36"},"committer_date":{"kind":"timestamp","value":"2013-08-31T20:23:36","string":"2013-08-31T20:23:36"},"github_id":{"kind":"number","value":11097508,"string":"11,097,508"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from database import *\nfrom database.models import *\nfrom database.utilities import get_or_create\nfrom settings import DB_PATH, NGRAM_LENGTHS, RESTRICT_VECTOR_SPACE\nfrom collections import Counter\nimport os\n\nif os.path.exists(DB_PATH):\n print('Database already exists')\nelse:\n Base.metadata.create_all(engine)\n session = Session()\n get_or_create(session, VectorSpace, space=set())\n get_or_create(session, GlobalNgrams, counts=Counter())\n get_or_create(session, GlobalVersion)\n session.commit()\n print(str(NGRAM_LENGTHS['MIN']) + ' - ' + 
str(NGRAM_LENGTHS['MAX']) + ' - ' + str(RESTRICT_VECTOR_SPACE))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1836,"cells":{"__id__":{"kind":"number","value":2327872292453,"string":"2,327,872,292,453"},"blob_id":{"kind":"string","value":"b485db0c38ac0f2fcd8c28154b55e0ad7b055539"},"directory_id":{"kind":"string","value":"481f3aa9767dfaeb99315ce3da45040439d2ca7a"},"path":{"kind":"string","value":"/peer.py"},"content_id":{"kind":"string","value":"ac7f1c3e5dc236944342de912be005ea454fc735"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jwilner/torrentPy"},"repo_url":{"kind":"string","value":"https://github.com/jwilner/torrentPy"},"snapshot_id":{"kind":"string","value":"eda61e366624be49e2e9698159dec57f6ab8f78d"},"revision_id":{"kind":"string","value":"a9838312456c681978299b09d636b803692ba3ff"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T01:07:41.270779","string":"2016-09-06T01:07:41.270779"},"revision_date":{"kind":"timestamp","value":"2013-12-17T00:25:38","string":"2013-12-17T00:25:38"},"committer_date":{"kind":"timestamp","value":"2013-12-17T00:25:38","string":"2013-12-17T00:25:38"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"nu
ll"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import messages\nimport config\nimport torrent_exceptions\nimport logging\nimport events\nfrom time import time\nfrom collections import deque\nfrom functools import partial\nfrom utils import four_bytes_to_int, StreamReader\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\nclass Peer(torrent_exceptions.ExceptionManager,\n messages.MessageManager,\n events.EventManager, object):\n '''Class representing peer for specific torrent download and\n providing interface with specific TCP socket'''\n\n def __init__(self, socket):\n logger.info('Instantiating peer %s', str(socket.getpeername()))\n self.socket = socket\n self.active = True\n self.peer_id = None\n\n self.address = socket.getpeername()\n self.ip, self.port = self.address\n\n self.outbox = deque()\n self.sent_folder, self.archive = [], []\n\n self.handshake = {'sent': False, 'received': False}\n\n self.handle_event(events.PeerRegistration(peer=self,\n read=self.handle_incoming,\n write=self.handle_outgoing,\n error=self.handle_exception))\n\n self.last_heard_from = time()\n self.last_spoke_to = 0\n\n self._read_buffer = ''\n self._pending_send = deque()\n\n self.outstanding_requests = set()\n\n self.am_choking, self.am_interested = True, False\n self.choking_me, self.interested_me = True, False\n\n # Am I anal? 
Maybe.\n attr_setter = partial(self.__setattr__)\n choking_me_setter = partial(attr_setter, 'choking_me')\n am_choking_setter = partial(attr_setter, 'am_choking')\n interested_me_setter = partial(attr_setter, 'interested_me')\n am_interested_setter = partial(attr_setter, 'am_interested')\n\n # Don't need to define a handler for KeepAlive, because undefined\n # messages fail silently but still update 'last_heard_from'\n\n self._message_handlers = {\n messages.INCOMING: {\n messages.Handshake: self._process_handshake,\n messages.Choke: lambda _: choking_me_setter(True),\n messages.Unchoke: lambda _: choking_me_setter(False),\n messages.Interested: lambda _: interested_me_setter(True),\n messages.NotInterested: lambda _: interested_me_setter(False),\n messages.Have: self._process_have,\n messages.Bitfield: self._process_bitfield,\n messages.Request: self._process_request,\n messages.Piece:\n lambda m: self.outstanding_requests.discard((m.index,\n m.begin)),\n\n messages.Cancel: self._process_cancel\n },\n messages.OUTGOING: {\n messages.Handshake: self._record_handshake,\n messages.Request:\n lambda m: self.outstanding_requests.add(m.get_triple()[:2]),\n\n messages.Cancel:\n lambda m: self.outstanding_requests.discard(m\n .get_triple()[:2]),\n\n messages.Choke: lambda _: am_choking_setter(True),\n messages.Unchoke: lambda _: am_choking_setter(False),\n messages.Interested: lambda _: am_interested_setter(True),\n messages.NotInterested: lambda _: am_interested_setter(False)\n }\n }\n\n # exception handling\n self._exception_handlers = {}\n\n def __str__(self):\n return ''.format(self.ip, self.port)\n\n def fileno(self):\n return self._socket.fileno()\n\n def handle_incoming(self):\n self.last_heard_from = time()\n\n for msg in self._read_from_socket():\n try:\n self.handle_message(msg)\n except torrent_exceptions.FatallyFlawedIncomingMessage as e:\n self.handle_exception(e)\n\n def handle_outgoing(self):\n sent_msgs = self._send_via_socket()\n\n if sent_msgs:\n 
self.last_spoke_to = time()\n\n for msg in sent_msgs:\n try:\n self.handle_message(msg)\n except torrent_exceptions.FatallyFlawedOutgoingMessage as e:\n self.handle_exception(e)\n\n if not self._pending_send:\n self.handle_event(events.PeerDoneSending(peer=self))\n\n def enqueue_message(self, msg):\n # if outbox is currently empty, then we'll want to tell the client\n notify = not self._pending_send\n self._pending_send.append([msg, len(msg)])\n\n if notify: # tell client\n self.handle_event(events.PeerReadyToSend(peer=self))\n\n def drop(self):\n '''Procedure to disconnect socket'''\n self.active = False\n self.socket.close()\n\n def _read_from_socket(self):\n new_string = self.socket.recv(config.DEFAULT_READ_AMOUNT)\n stream = StreamReader(self._read_buffer + new_string)\n\n try:\n while True:\n yield self._parse_string_to_message(stream)\n except torrent_exceptions.LeftoverException as e:\n self._read_buffer = e.leftover\n\n def _send_via_socket(self):\n '''Attempts to send message via socket. 
Returns a list of\n msgs sent -- potentially empty if sent was incomplete'''\n\n strung = ''.join(str(msg)[-length:]\n for msg, length in self._pending_send)\n amt_sent = self._socket.send(strung)\n\n sent_msgs = []\n\n while amt_sent:\n # loop over lengths of pending msgs, updating their remaining\n # amount or appending them to the response list if they've been\n # completely sent\n if self._pending_send[0][1] > amt_sent:\n self._pending_send[0][1] -= amt_sent\n amt_sent = 0\n else:\n amt_sent -= length\n # appends actual msg to self\n sent_msgs.append(self._pending_send.leftpop()[0])\n\n return sent_msgs\n\n def _parse_string_to_message(self, stream):\n parts = []\n try:\n if not self.handshake['received']: # must be handshake\n try:\n parts.append(ord(stream.read(1)))\n pstrlen = parts[0]\n # protocol string, reserved, info hash, peer_id\n for l in (pstrlen, 8, 20, 20):\n parts.append(stream.read(l))\n info_hash, peer_id = parts[3], parts[4]\n return messages.Handshake(peer_id, info_hash,\n reserved=parts[2],\n pstr=parts[1],\n msg_event=messages.INCOMING)\n except torrent_exceptions.RanDryException as e:\n leftover = ''.join(parts)+e.unused\n raise torrent_exceptions.LeftoverException(value=leftover)\n # normal message\n try:\n parts.append(stream.read(4))\n bytes_length_prefix = parts[0]\n length = four_bytes_to_int(bytes_length_prefix)\n if length == 0:\n return messages.KeepAlive()\n parts.append(stream.read(length))\n msg_body = parts[1]\n msg_id = ord(msg_body[0])\n return messages.lookup[msg_id](msg_body[1:],\n from_string=True,\n msg_event=messages.INCOMING)\n except torrent_exceptions.RanDryException as e:\n leftover = ''.join(parts)+e.unused\n raise torrent_exceptions.LeftoverException(value=leftover)\n except torrent_exceptions.MessageParsingError as e:\n self.handle_exception(e)\n\n def _record_handshake(self, msg):\n '''Fires as callback when handshake is sent. 
This is a method\n because assignment can't happen in lambdas...'''\n self.handshake['sent'] = True\n\n def _process_handshake(self, msg):\n self.handshake['received'] = True\n self.peer_id = msg.peer_id\n\n if msg.pstr != config.PROTOCOL:\n # will be caught by strategy\n raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self,\n msg=msg)\n\n if not self.handshake['sent']: # this is an unknown peer\n # will resolve to client, where it'll be handled\n self.handle_event(events.UnknownPeerHandshake(msg=msg, peer=self))\n\n def _process_have(self, msg):\n self.has[msg.piece_index] = 1\n\n def _process_bitfield(self, msg):\n quotient, remainder = divmod(self.torrent.num_pieces, 8)\n\n # this appropriately rounds up the required length of the bitfield\n req_len = (quotient+1)*8 if remainder != 0 else quotient*8\n\n if len(msg.bitfield) != req_len:\n # gets caught by strategy\n raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self,\n msg=msg)\n\n for i, p in enumerate(msg.bitfield):\n try:\n self.has[i] = p\n except IndexError:\n break\n\n def _process_request(self, msg):\n if self.am_choking:\n # peer is being obnoxious -- do something about it?\n pass\n if msg.length > config.MAX_REQUESTED_PIECE_LENGTH:\n raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self,\n msg=msg)\n self.wants.add((msg.index, msg.begin, msg.length))\n\n def _process_cancel(self, msg):\n if msg.length > config.MAX_REQUESTED_PIECE_LENGTH:\n raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self,\n msg=msg)\n self.wants.discard((msg.index, msg.begin, msg.length))\n\n def _process_piece(self, msg):\n # do something in this context?\n self.outstanding_requests.discard((msg.index, 
msg.begin))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1837,"cells":{"__id__":{"kind":"number","value":10737418264677,"string":"10,737,418,264,677"},"blob_id":{"kind":"string","value":"8071f52385d92ef99483ccbef1d619f19971ee35"},"directory_id":{"kind":"string","value":"41f5fb2b76efe6f7a10c96ff197b0785e247ca12"},"path":{"kind":"string","value":"/gather/gathered/rand_test.py"},"content_id":{"kind":"string","value":"6ad2b910e2535ac920da7050833603cbec016892"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause","BSD-Advertising-Acknowledgement"],"string":"[\n \"BSD-2-Clause\",\n \"BSD-Advertising-Acknowledgement\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"jtwhite79/my_python_junk"},"repo_url":{"kind":"string","value":"https://github.com/jtwhite79/my_python_junk"},"snapshot_id":{"kind":"string","value":"2f33d102e0e2875cf617b11dc31127678e9e9756"},"revision_id":{"kind":"string","value":"2ee0044f9b455d40e3b1967081aa7ac2dbfa64c9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T05:45:21.432421","string":"2021-01-23T05:45:21.432421"},"revision_date":{"kind":"timestamp","value":"2014-07-01T17:30:40","string":"2014-07-01T17:30:40"},"committer_date":{"kind":"timestamp","value":"2014-07-01T17:30:40","string":"2014-07-01T17:30:40"},"github_id":{"kind":"number","value":4587435,"string":"4,587,435"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"}
,"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\n\nrand = np.random.randn(1000000)\n\nnp.savetxt('rand.dat',rand)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1838,"cells":{"__id__":{"kind":"number","value":773094132951,"string":"773,094,132,951"},"blob_id":{"kind":"string","value":"a7b3296cea6db6903fd6117a8d75b5ee00e039b5"},"directory_id":{"kind":"string","value":"c9575089d08f9c1f701c7fdb5305b38142131975"},"path":{"kind":"string","value":"/test/git/test_diff.py"},"content_id":{"kind":"string","value":"9b7e9c73ff3d0f88978636ecd14438b5c590ea68"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"directeur/git-python"},"repo_url":{"kind":"string","value":"https://github.com/directeur/git-python"},"snapshot_id":{"kind":"string","value":"19e7de6863cd841ddd40335a67d3ae87ac868820"},"revision_id":{"kind":"string","value":"b00f3689aa19938c10576580fbfc9243d9f3866c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-04T11:44:22.840081","string":"2016-08-04T11:44:22.840081"},"revision_date":{"kind":"timestamp","value":"2008-09-16T06:08:47","string":"2008-09-16T06:08:47"},"committer_date":{"kind":"timestamp","value":"2008-09-17T06:08:09","string":"2008-09-17T06:08:09"},"github_id":{"kind":"number","value":84097,"string":"84,097"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# test_diff.py\n# Copyright (C) 2008 Michael Trier (mtrier@gmail.com) and contributors\n#\n# This module is part of GitPython and is released under\n# the BSD License: http://www.opensource.org/licenses/bsd-license.php\n\nfrom test.testlib import *\nfrom git import *\n\nclass TestDiff(object):\n def setup(self):\n self.repo = Repo(GIT_REPO)\n \n def test_list_from_string_new_mode(self):\n output = fixture('diff_new_mode')\n diffs = Diff.list_from_string(self.repo, output)\n assert_equal(1, len(diffs))\n assert_equal(10, 
len(diffs[0].diff.splitlines()))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2008,"string":"2,008"}}},{"rowIdx":1839,"cells":{"__id__":{"kind":"number","value":13477607385152,"string":"13,477,607,385,152"},"blob_id":{"kind":"string","value":"38e09e752d1699a42c468f835dc12c0ce717a2be"},"directory_id":{"kind":"string","value":"20ddd4e890bd69e7b4403c684dfd7d1d54f94803"},"path":{"kind":"string","value":"/hw1/analyze.py"},"content_id":{"kind":"string","value":"ff0fc08bccc9f147ddabf516581be1324df0127c"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jasenmh/CS240A-Winter14"},"repo_url":{"kind":"string","value":"https://github.com/jasenmh/CS240A-Winter14"},"snapshot_id":{"kind":"string","value":"50292cf901d408f9384dee0c6b1eca9975f33627"},"revision_id":{"kind":"string","value":"5650a8b9e5c5008b0b8e921751f64da0d879e0bc"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-06T07:15:37.247654","string":"2016-08-06T07:15:37.247654"},"revision_date":{"kind":"timestamp","value":"2014-02-16T21:27:28","string":"2014-02-16T21:27:28"},"committer_date":{"kind":"timestamp","value":"2014-02-16T21:27:28","string":"2014-02-16T21:27:28"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_
language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nimport commands\nimport re\n\nMAXTIME = 600.0\nITERATIONS = 4\nDEBUG = False\n\nf = open(\"hw1data.csv\", \"w\")\n\nfor algo in range(1, 8):\n if DEBUG:\n print('Algorithm ' + str(algo) +':')\n\n matdim = 2\n multtime = 0\n tottime = 0\n\n while multtime < MAXTIME:\n\n itercount = 0\n while itercount < ITERATIONS:\n\n if DEBUG:\n print('\\t' + str(matdim) + 'x' + str(matdim) + 'matrices, run ' + str(itercount + 1))\n\n cmd = \"./matrix_multiply -n\" + str(matdim) + \" -a\" + str(algo)\n o = commands.getoutput(cmd)\n multtime = float(re.search('Time \\= (.*) sec, .*', o).group(1))\n tottime = tottime + multtime\n\n if DEBUG:\n print('\\t\\trun time ' + str(multtime))\n\n itercount = itercount + 1\n\n f.write(str(tottime/float(ITERATIONS)) + \", \")\n matdim = matdim << 1\n\n f.write('\\n')\n f.flush()\n\nif DEBUG:\n print('\\nAnalysis complete.')\n\nf.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1840,"cells":{"__id__":{"kind":"number","value":13477607377447,"string":"13,477,607,377,447"},"blob_id":{"kind":"string","value":"925588902072d5f2b8aedd9da0719c40986147aa"},"directory_id":{"kind":"string","value":"5a4d4d84097dc34bdb7a9a7bf4a07f0510694067"},"path":{"kind":"string","value":"/check.py"},"content_id":{"kind":"string","value":"aaf06d6c3602001811aee4e74e10d101bba3ec9c"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"baharev/apidocfilter"},"repo_url":{"kind":"string","value":"https://github.com/baharev/apidocfilter"},"snapshot_id":{"kind":"string","value":"274e10a85713260ce4fb88c469260e4f71fd7ad8"},"revision_id":{"kind":"string","value":"9387e8d3d04ecb35d6287b5f7d761a15b94a975a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-08-10T19:50:12.837033","string":"2020-08-10T19:50:12.837033"},"revision_date":{"kind":"timestamp","value":"2014-09-28T17:17:45","string":"2014-09-28T17:17:45"},"committer_date":{"kind":"timestamp","value":"2014-09-28T17:17:45","string":"2014-09-28T17:17:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from importlib import import_module\nfrom inspect import getdoc\n\ndef attribs(name):\n mod = import_module(name)\n print name\n print 'Has __all__?', hasattr(mod, '__all__') \n print 'Has __doc__?', hasattr(mod, '__doc__')\n print 'doc: ', getdoc(mod)\n\nif __name__=='__main__':\n attribs('cairo')\n attribs('zope')\n attribs('A.B.C')\n \n import hacked\n class Object(object):\n pass\n \n opt = Object()\n opt.ignore_errors = False\n a, d = hacked.get_all_attr_has_docstr('/home/ali/ws-pydev/apidocfilter/A/B', \n '/home/ali/ws-pydev/apidocfilter/A/B/C',\n opt)\n print(a)\n 
print(d)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1841,"cells":{"__id__":{"kind":"number","value":4346506933482,"string":"4,346,506,933,482"},"blob_id":{"kind":"string","value":"abcf43eedf7809c5dda937841797eaf34253fc96"},"directory_id":{"kind":"string","value":"7fb1fee4bda177bf81a094bcd02d5c7092630630"},"path":{"kind":"string","value":"/midtermproject/hitch/forms.py"},"content_id":{"kind":"string","value":"4ae3a5e421a6a60fccc62a8e624372566d520f94"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"wdesalu/Hitch"},"repo_url":{"kind":"string","value":"https://github.com/wdesalu/Hitch"},"snapshot_id":{"kind":"string","value":"3d7c81996418927017fe7615898d149d86a2b303"},"revision_id":{"kind":"string","value":"1ff38b831dff028f694b8dff4cfe41ba5f61cd99"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-01T13:44:53.345532","string":"2020-06-01T13:44:53.345532"},"revision_date":{"kind":"timestamp","value":"2013-12-11T01:20:37","string":"2013-12-11T01:20:37"},"committer_date":{"kind":"timestamp","value":"2013-12-11T01:20:37","string":"2013-12-11T01:20:37"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_ar
chived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django import forms\n\nfrom django.contrib.auth.models import User\nfrom models import *\n\n# ---------- Driver Registration ----------------\nclass RegistrationForm(forms.Form):\n\tusername = forms.CharField(max_length=200,\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Name','required':'on','autofocus':'on'}))\n\temailGiven = forms.EmailField(max_length=200,\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Email','required':'on','autofocus':'on'}))\n\tclientNumber = forms.IntegerField(\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Client Capacity','required':'on','autofocus':'on'}))\n\tppm = forms.IntegerField(\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Price Per Mile','required':'on','autofocus':'on'}))\n\tbagCapacity = forms.IntegerField(\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Bag Capacity','required':'on','autofocus':'on'}))\n\tdepartureGiven = forms.DateTimeField(\n\t\t\t\t\t\t\t\twidget=forms.DateTimeInput(attrs={'class':'form-control','placeholder':'Departure Date','required':'on','autofocus':'on'}))\n\tdestinationGiven = forms.CharField(max_length=200,\n\t\t\t\t\t\t\t\twidget=forms.TextInput(attrs={'class':'form-control','placeholder':'Destination','required':'on','autofocus':'on'}))\n\n\tdef clean(self):\n\t\tcleaned_data = super(RegistrationForm, self).clean()\n\n\t\tusername = cleaned_data['username']\n\t\temail = cleaned_data['emailGiven']\n\t\tclientNumber = cleaned_data['clientNumber']\n\t\tppm = cleaned_data['ppm']\n\t\tbagCapacity = cleaned_data['bagCapacity']\n\t\tdepartureGiven = cleaned_data['departureGiven']\n\t\tdestinationGiven = cleaned_data['destinationGiven']\n\n\t\tif not (username and email and clientNumber and ppm and bagCapacity and 
departureGiven and destinationGiven):\n\t\t\traise forms.ValidationError(\"Not Enough Information\")\n\n\t\treturn cleaned_data"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1842,"cells":{"__id__":{"kind":"number","value":5454608466489,"string":"5,454,608,466,489"},"blob_id":{"kind":"string","value":"c3ea05c259f64080d2efac0c0de5bf698c0439a8"},"directory_id":{"kind":"string","value":"fcf819a9dcd1bedef3c70a38e0541bd18c1d49ee"},"path":{"kind":"string","value":"/Zhidao_grabber.py"},"content_id":{"kind":"string","value":"5b1b5fc41eb4728c5703bd539ae7580f4e175a48"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"wblyy/Zhidao_grabber"},"repo_url":{"kind":"string","value":"https://github.com/wblyy/Zhidao_grabber"},"snapshot_id":{"kind":"string","value":"821d5800ebd35b25b0f24d3344ae85fa27984dfc"},"revision_id":{"kind":"string","value":"295fc8e32ff404bcd72d0423225c2c89a2e69f0b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T17:03:11.855981","string":"2020-05-18T17:03:11.855981"},"revision_date":{"kind":"timestamp","value":"2014-11-27T03:59:34","string":"2014-11-27T03:59:34"},"committer_date":{"kind":"timestamp","value":"2014-11-27T03:59:34","string":"2014-11-27T03:59:34"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind"
:"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":" # encoding: UTF-8\nimport ConfigParser\nimport re\nimport urllib2\nimport urllib\nimport time\nfrom mydbV2 import MydbV2\nfrom random import choice\nimport random\nimport requests\nfrom IPdb import IPdb\nimport sys\nimport logging\nimport time\nlogging.basicConfig(filename='zhidao_buzhidao.log',level=logging.DEBUG)\n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndbV2 = MydbV2()\nmyIPdb=IPdb()\nproxy_dict=['http://113.11.198.163:2223/',\n\t\t\t#r'http://113.11.198.164:2223/',\n\t\t\t#r'http://113.11.198.165:2223/',\n\t\t\t#r'http://113.11.198.166:2223/',\n\t\t\t'http://113.11.198.167:2223/',\n\t\t\t'http://113.11.198.168:2223/',\n\t\t\t'http://113.11.198.169:2223/',\n\t\t\t]\n\n \n\nfor url_index in xrange(85787890,0,-1):\n try:\n is_answerable=0\n is_used=0\n content_data=''\n title_data=''\n style_data=''\n\n \tpage_url='http://zhidao.baidu.com/question/'+str(url_index)\n \trelated_IP=random.choice(proxy_dict) \n req=requests.get(page_url,proxies={\"http\": related_IP})\n req.encoding='gbk'\n msg=req.text\n\t#msg.encoding ='utf-8'\n title=re.findall('(.*?)'.decode('utf-8').encode('utf-8'), msg, re.DOTALL)\n \tcontent=re.findall('accuse=\"qContent\">(.*?)'.decode('utf-8').encode('utf-8'), msg, re.DOTALL)#accuse=\"qContent\">\n \tused=re.findall('(.*?)'.decode('utf-8').encode('utf-8'), msg, re.DOTALL)\n \tanswerable=re.findall('id=\"answer-bar\">(.*?)'.decode('utf-8').encode('utf-8'), msg, re.DOTALL)\n \tstyle=re.findall(''.decode('utf-8').encode('utf-8'), msg, re.DOTALL)\n\t#\n\t#id=\"answer-bar\"> #\n print 'title:',title[0]\n title_data=title[0]\n if content:\n print 'content:',content[0]\n content_data=content[0]\n if used:\n print 'used:',used[0]\n is_used=1\n if answerable:\n print 'answerable',answerable[0]\n is_answerable=1\n if style:\n 
print style[0]\n style_data=style[0]\n qid=url_index\n if '百度知道 - 信息提示' not in title_data:\n dbV2.insert_data(qid, title_data, content_data, style_data, is_used,is_answerable,related_IP)\n\n except Exception, e:\n systime=time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))\n logging.debug(e) \n #print 'title:',title[0],'content:',content[0],'used:',used[0]\n\t#time.sleep(2)\n\t#\n\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1843,"cells":{"__id__":{"kind":"number","value":17420387382900,"string":"17,420,387,382,900"},"blob_id":{"kind":"string","value":"38e1fd6156f243f9a88aaab2acf7cc41d83d2bff"},"directory_id":{"kind":"string","value":"518651a389f0ee2ed341ca2162c1bcbf480ea3d8"},"path":{"kind":"string","value":"/RNASeq/src/run_pipeline.py"},"content_id":{"kind":"string","value":"63d6752be4c3637c47073f228bc702ac63fdc1d7"},"detected_licenses":{"kind":"list like","value":["CC-BY-NC-3.0","LicenseRef-scancode-proprietary-license"],"string":"[\n \"CC-BY-NC-3.0\",\n 
\"LicenseRef-scancode-proprietary-license\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"singhalg/gsinghal_python_src"},"repo_url":{"kind":"string","value":"https://github.com/singhalg/gsinghal_python_src"},"snapshot_id":{"kind":"string","value":"ee0eb19ca842f7864285ff67bf335126e24ccae0"},"revision_id":{"kind":"string","value":"88c20f5dfbbb63d0c464c706cc9196edb9b8dfd2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T00:02:57.830324","string":"2020-05-18T00:02:57.830324"},"revision_date":{"kind":"timestamp","value":"2013-05-08T18:22:22","string":"2013-05-08T18:22:22"},"committer_date":{"kind":"timestamp","value":"2013-05-08T18:22:43","string":"2013-05-08T18:22:43"},"github_id":{"kind":"number","value":7980456,"string":"7,980,456"},"star_events_count":{"kind":"number","value":6,"string":"6"},"fork_events_count":{"kind":"number","value":4,"string":"4"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import subprocess\nimport sys\n\nbowtie_dir = '/net/artemis/mnt/work2/seqApps/bowtie-0.12.7/'\nshortfuse_dir = '/net/artemis/mnt/work1/projects/gsinghalWork/GSseqApps/ShortFuse/'\noffset = '33'\n\n\nEM_bin = shortfuse_dir + 'EmFusion'\nbowtie_bin = bowtie_dir + 'bowtie'\nbowtie_build_bin = bowtie_dir + 'bowtie-build'\n\nexon_structure_file = shortfuse_dir + 'ref/refseq_exon_structure.txt'\nexon_seq_file = shortfuse_dir + 'ref/exon_seqs_refseq.fa'\n\ninitial_bowtie_flags = '-l 35 -e 150 -n 2 -a -m 150 -p 10 '\ndiscord_bowtie_flags = '-l 22 -e 
350 -n 3 -y -a -m 5000 -p 10 '\npe_bowtie_flags = '-a -p 10 -X 2000 -m 1500 --chunkmbs 1024 '\n\nchrom_regions = shortfuse_dir + 'ref/chrom_regions.txt'\ntranscripts_plus_genome = shortfuse_dir + 'ref/transcripts_plus_genome'\nreference_root = shortfuse_dir + 'ref/refseq_transcripts'\nreference_fasta = shortfuse_dir + 'ref/RefSeqTranscripts_50up_polyA.fasta'\n\ndef main(fastq1, fastq2):\n\n\n p = subprocess.Popen(' '.join([bowtie_bin, initial_bowtie_flags, '--max 1.repeat',\n reference_root, fastq1, fastq1 + '.bowtie']), shell = True)\n p.wait()\n p = subprocess.Popen(' '.join([bowtie_bin, initial_bowtie_flags, '--max 2.repeat',\n reference_root, fastq2, fastq2 + '.bowtie']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'bowtie', fastq1 + '.bowtie']), shell = True)\n p.wait()\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'bowtie', fastq2 + '.bowtie']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'fasta', '1.repeat']), shell = True)\n p.wait()\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'fasta', '2.repeat']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'fastq', fastq1]), shell = True)\n p.wait()\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'fastq', fastq2]), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join([EM_bin, 'sift', fastq1 + '.bowtie', fastq2 + '.bowtie',\n fastq1, fastq2, offset, '1.repeat', '2.repeat']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['sort -k 2,2 -k 3,3', fastq1 + '.discord', '>',\n fastq1 + '.discord.sorted']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join([bowtie_bin, discord_bowtie_flags, '--max',\n fastq1 + '.discord.fastq.bust', transcripts_plus_genome,\n fastq1 + '.discord.fastq', 'concordant.bt']),\n shell = True)\n 
p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'fa2ridlist.py',\n fastq1 + '.discord.fastq.bust', '>',\n 'concordant.reads']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'parseBowtie.py',\n 'concordant.bt', chrom_regions, '100', '>>',\n 'concordant.reads']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'ExtendReference.py',\n exon_structure_file, fastq1 + '.discord.sorted', 'concordant.reads']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['sort -k 2 -u', fastq1 + '.discord.sorted.exons',\n '>', fastq1 + '.discord.sorted.exons.uniq']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'MakeFusionTranscripts.py',\n exon_seq_file, fastq1 + '.discord.sorted.exons.uniq']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['cat', fastq1 + '.discord.sorted.exons.uniq.seqs',\n reference_fasta, '>', 'augmented_ref.fa']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['rm', fastq1 + '.bowtie', fastq2 + '.bowtie']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join([bowtie_build_bin, '-o 0', 'augmented_ref.fa',\n 'augmented_ref']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join([bowtie_bin, pe_bowtie_flags,\n 'augmented_ref', '-1', fastq1, '-2', fastq2,\n fastq1 + '.pe.bowtie']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py',\n 'paired_bowtie', fastq1 + '.pe.bowtie']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['sort -n -k 1,1', fastq1 + '.mapdist', '>',\n fastq1 + '.mapdist.sorted']), shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'getdistprob.py',\n fastq1 + '.mapdist.sorted', 'dist.prob']),\n shell = True)\n p.wait()\n\n p = subprocess.Popen(' '.join([EM_bin, 'EM', fastq1 + '.pe.bowtie', 'dist.prob',\n 'augmented_ref.fa', offset]), shell = True)\n 
p.wait()\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print \"Usage: python run_pipeline.py \"\n sys.exit()\n main(*sys.argv[1:])\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1844,"cells":{"__id__":{"kind":"number","value":13245679158628,"string":"13,245,679,158,628"},"blob_id":{"kind":"string","value":"9052165998312c8f6b8864a8062220fdd0c24e5c"},"directory_id":{"kind":"string","value":"f3dc5339549fff5588d69da0cc81f98aa54f01e0"},"path":{"kind":"string","value":"/tests/hello_test.py"},"content_id":{"kind":"string","value":"b01613c821654024eeaea82948b7dc0e526b4fc1"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dkoepke/hedgie"},"repo_url":{"kind":"string","value":"https://github.com/dkoepke/hedgie"},"snapshot_id":{"kind":"string","value":"7d819496e8aecec100f26c3a7a5f94517d9c6918"},"revision_id":{"kind":"string","value":"104f140868d3c10efae6026ee7b2577afe5879a0"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-06T15:12:23.579061","string":"2016-08-06T15:12:23.579061"},"revision_date":{"kind":"timestamp","value":"2014-11-18T01:10:54","string":"2014-11-18T01:10:54"},"committer_date":{"kind":"timestamp","value":"2014-11-18T01:10:54","string":"2014-11-18T01:10:54"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_
count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import hedgie\n\n\ndef hello_fallback(command):\n return 'fallback'\n\n\n@hedgie.command(fallback=hello_fallback)\ndef hello(name, should_fail):\n \"\"\"Say hello.\"\"\"\n assert should_fail is False\n return 'Hello, {0}'.format(name)\n\n\ndef test_hello_should_pass_naive_introspection():\n assert hello.__module__ == hello_fallback.__module__\n assert hello.__name__ == 'hello'\n assert hello.__doc__ == 'Say hello.'\n\n\ndef test_hello_should_be_a_hedgie_command():\n assert isinstance(hello, hedgie.Command)\n\n\ndef test_hello_should_return_greeting_if_not_should_fail_when_called():\n result = hello('world', False)\n assert result == 'Hello, world'\n\n\ndef test_hello_should_return_fallback_if_should_fail_when_called():\n result = hello('world', True)\n assert result == 'fallback'\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1845,"cells":{"__id__":{"kind":"number","value":1941325242706,"string":"1,941,325,242,706"},"blob_id":{"kind":"string","value":"8625f37e6c64abf035420a37b3baa0158a1279c2"},"directory_id":{"kind":"string","value":"f8c396afc8b51002868a838b0bcddd7b3bdf97a2"},"path":{"kind":"string","value":"/volumina/widgets/multiStepProgressDialog.py"},"content_id":{"kind":"string","value":"4907fafc470775f86c3af3a35c44b886055dabda"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"lfiaschi/volumina"},"repo_url":{"kind":"string","value":"https://github.com/lfiaschi/volumina"},"snapshot_id":{"kind":"string","value":"51ca1e4c55b4f032f0765b4d359f1eb66a6b8454"},"revision_id":{"kind":"string","value":"f05c10a791929d8e52fbdc291ceae0884b061bcc"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T20:56:25.120419","string":"2021-01-23T20:56:25.120419"},"revision_date":{"kind":"timestamp","value":"2013-06-30T15:29:54","string":"2013-06-30T15:29:54"},"committer_date":{"kind":"timestamp","value":"2013-06-30T15:29:54","string":"2013-06-30T15:29:54"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os, time\n\nfrom PyQt4 import uic\nfrom PyQt4.QtGui import QDialog, QDialogButtonBox\n\nclass MultiStepProgressDialog(QDialog):\n def __init__(self, parent=None):\n QDialog.__init__(self, parent)\n self._initUic()\n \n self._numberOfSteps = 1\n self._currentStep = 0\n self._steps = []\n self._update()\n\n def setNumberOfSteps(self, n):\n assert n >= 1\n self._numberOfSteps = n\n self._currentStep = 0\n self._update()\n self.time1 = time.time()\n self.times = []\n \n def setSteps(self, steps):\n self._steps = steps\n self.setNumberOfSteps(len(self._steps))\n \n def finishStep(self):\n self._currentStep = self._currentStep + 1\n 
self._update()\n if self._currentStep == self._numberOfSteps:\n self.buttonBox.button(QDialogButtonBox.Ok).setText(\"Finished!\")\n self.buttonBox.button(QDialogButtonBox.Cancel).hide()\n self.currentStepProgress.setValue(100)\n\n \n def _update(self):\n self.currentStepProgress.setValue(0)\n self.overallProgress.setMinimum(0)\n self.overallProgress.setMaximum(self._numberOfSteps)\n self.overallProgress.setFormat(\"step %d of %d\" % (self._currentStep, self._numberOfSteps))\n\n self.overallProgress.setValue(self._currentStep)\n \n def setStepProgress(self, x):\n oldx = self.currentStepProgress.value()\n self.time2 = time.time()\n self.currentStepProgress.setValue(x)\n if x - oldx > 0:\n timeLeft = (100 - x) * (self.time2 - self.time1) / (x - oldx)\n self._updateCurrentStepLabel( timeLeft)\n self.time1 = self.time2\n \n def _updateCurrentStepLabel(self, singlet):\n self.times.append(singlet)\n t = sum(self.times) / len(self.times)\n if len(self.times) > 5:\n self.times.pop(0)\n if t < 120:\n self.currentStepLabel.setText(\"ETA: %.02f sec\" % (t))\n else:\n self.currentStepLabel.setText(\"ETA: %.02f min\" % (t / 60))\n\n def _initUic(self):\n p = os.path.split(__file__)[0]+'/'\n if p == \"/\": p = \".\"+p\n uic.loadUi(p+\"ui/multiStepProgressDialog.ui\", self)\n \n\nif __name__ == \"__main__\":\n from PyQt4.QtGui import QApplication\n import vigra, numpy\n app = QApplication(list())\n \n d = MultiStepProgressDialog()\n d.setNumberOfSteps(5)\n d.show()\n app.exec_()\n\n 
\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1846,"cells":{"__id__":{"kind":"number","value":12463995124637,"string":"12,463,995,124,637"},"blob_id":{"kind":"string","value":"19ff4e17522ae0ce2b735b63b29c8ca144e28a06"},"directory_id":{"kind":"string","value":"15839868b3a41927b4eda5d0310416cb6f30eb94"},"path":{"kind":"string","value":"/gui/ConfigBox.py"},"content_id":{"kind":"string","value":"406e6ec525502f105e22ebfc45e678c87829059b"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"bergetkorn82/lrg"},"repo_url":{"kind":"string","value":"https://github.com/bergetkorn82/lrg"},"snapshot_id":{"kind":"string","value":"46e79bf023dbc3b96f5b6ac3f3f465111d436a03"},"revision_id":{"kind":"string","value":"5a33d017f66104cfe9bb6c0dbd7436426bf7adba"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-01-17T01:36:14.059721","string":"2018-01-17T01:36:14.059721"},"revision_date":{"kind":"timestamp","value":"2008-10-26T18:37:11","string":"2008-10-26T18:37:11"},"committer_date":{"kind":"timestamp","value":"2008-10-26T18:37:11","string":"2008-10-26T18:37:11"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_langu
age":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import wx\nfrom ConfigUtils import *\n\nID_TMPDIR_BUT = 81001\nID_INDIR_BUT = 81002\n\nID_OK_BUT = 82001\nID_CLOSE_BUT = 82002\n\nPADDING = 10\n\n\n\nclass ConfigBox(wx.Frame):\n\n\tdef __init__(self, parent, id, title):\n\t\twx.Frame.__init__(self, parent, id, title, style = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER | wx.MINIMIZE_BOX | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))\n\t\t\n\t\tself.incomingDirDialog = None\n\t\tself.tmpDirDialog = None\n\n\t\tself.splitter = wx.SplitterWindow(self, wx.ID_ANY)\n\t\tself.splitter.SetBorderSize(0)\n\t\t\n\t\tself.noteBookPanel = wx.Panel(self.splitter, wx.ID_ANY)\n\t\tself.noteBook = wx.Notebook(self.noteBookPanel)\t\n\t\tself.mainSizer = wx.BoxSizer(wx.VERTICAL)\t\t\n\t\t\n\t\tself.generalSettingsPanel = wx.Panel(self.noteBook, wx.ID_ANY)\n\t\t\n\t\tself.generalSettingsSizer = wx.BoxSizer(wx.VERTICAL)\t\t\n\t\t\n\t\tself.rapidAccountBox = wx.StaticBox(self.generalSettingsPanel, label='Account Settings')\n\t\tself.rapidAccountBoxSizer = wx.StaticBoxSizer(self.rapidAccountBox, wx.VERTICAL)\n\t\t\n\t\tself.usernameLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Rapidshare username:')\t\t\n\t\tself.username = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY)\n\t\tself.username.SetValue(Config.settings.rapidshareUsername)\n\t\tself.passwordLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Rapidshare password:')\n\t\tself.password = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY)\n\t\tself.password.SetValue(Config.settings.rapidsharePassword)\t\t\n\t\t\n\t\tself.rapidAccountBoxSizer.Add(self.usernameLbl, 0, wx.ALL, PADDING)\n\t\tself.rapidAccountBoxSizer.Add(self.username, 0, wx.EXPAND)\n\t\tself.rapidAccountBoxSizer.Add(self.passwordLbl, 0, wx.ALL, PADDING)\n\t\tself.rapidAccountBoxSizer.Add(self.password, 0, wx.EXPAND)\n\n\t\tself.generalSettingsSizer.Add(self.rapidAccountBoxSizer, 0, 
wx.EXPAND)\n\t\t\n\t\tself.directoryBox = wx.StaticBox(self.generalSettingsPanel, label='Directory settings')\n\t\tself.directoryBoxSizer = wx.StaticBoxSizer(self.directoryBox, wx.VERTICAL)\n\t\t\n\t\tself.incomingDirLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Incoming directory:')\n\t\tself.incomingDir = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY)\n\t\tself.incomingDir.SetValue(Config.settings.downloadDir)\n\t\tself.incomingBut = wx.Button(self.generalSettingsPanel, ID_INDIR_BUT, 'Browse')\n\t\twx.EVT_BUTTON(self, ID_INDIR_BUT, self.onSelectIncomingDir)\n\t\t\n\t\tself.incomingDirSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.incomingDirSizer.Add(self.incomingDir, 3, wx.EXPAND, PADDING)\n\t\tself.incomingDirSizer.Add(self.incomingBut, 0, wx.EXPAND, PADDING)\n\t\n\t\tself.tmpDirLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Temporary directory:')\n\t\tself.tmpDir = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY)\n\t\tself.tmpDir.SetValue(Config.settings.tmpDir)\n\t\tself.tmpBut = wx.Button(self.generalSettingsPanel, ID_TMPDIR_BUT, 'Browse')\n\t\twx.EVT_BUTTON(self, ID_TMPDIR_BUT, self.onSelectTmpDir)\n\t\t\n\t\tself.tmpDirSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.tmpDirSizer.Add(self.tmpDir, 3, wx.EXPAND, PADDING)\n\t\tself.tmpDirSizer.Add(self.tmpBut, 0, wx.EXPAND, PADDING)\n\t\t\n\t\t\n\t\tself.directoryBoxSizer.Add(self.incomingDirLbl, 0, wx.ALL, PADDING)\n\t\tself.directoryBoxSizer.Add(self.incomingDirSizer, 0, wx.EXPAND)\n\t\tself.directoryBoxSizer.Add(self.tmpDirLbl, 0, wx.ALL, PADDING)\n\t\tself.directoryBoxSizer.Add(self.tmpDirSizer, 0, wx.EXPAND)\n\t\t\n\t\tself.generalSettingsSizer.Add(self.directoryBoxSizer, 0, wx.EXPAND)\n\t\t\t\n\t\tself.generalSettingsPanel.SetSizerAndFit(self.generalSettingsSizer)\t\t\n\t\tself.generalSettingsPanel.SetAutoLayout(True)\n\t\tself.generalSettingsPanel.Layout()\t\n\t\n\t\n\n\t\tself.networkSettingsPanel = wx.Panel(self.noteBook, wx.ID_ANY)\n\t\t\n\t\tself.networkSettingsSizer = 
wx.BoxSizer(wx.VERTICAL)\t\t\n\t\t\n\t\tself.networkSettingsBox = wx.StaticBox(self.networkSettingsPanel, label='General Settings')\n\t\tself.networkSettingsBoxSizer = wx.StaticBoxSizer(self.networkSettingsBox, wx.VERTICAL)\n\t\t\n\t\tself.numberOfConnSizer = wx.BoxSizer(wx.HORIZONTAL)\t\t\n\t\tself.numberOfConnLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Max. Simultaneous Files:')\t\t\n\t\tself.numberOfConn = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.numberOfConn.SetValue(str(Config.settings.maxConcurrentDownload))\n\t\tself.numberOfConnSizer.Add(self.numberOfConnLbl, 0, wx.ALL, PADDING)\n\t\tself.numberOfConnSizer.Add(self.numberOfConn, 0, wx.ALL)\n\t\t\n\t\t\n\t\tself.numberOfConnPerFileSizer = wx.BoxSizer(wx.HORIZONTAL)\t\t\n\t\tself.numberOfConnPerFileLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Connections per File:')\n\t\tself.numberOfConnPerFile = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.numberOfConnPerFile.SetValue(str(Config.settings.maxConnectionPerFile))\n\t\tself.numberOfConnPerFileSizer.Add(self.numberOfConnPerFileLbl, 0, wx.ALL, PADDING)\n\t\tself.numberOfConnPerFileSizer.Add(self.numberOfConnPerFile, 0, wx.ALL)\n\n\t\tself.maxRetrySizer = wx.BoxSizer(wx.HORIZONTAL)\t\t\n\t\tself.maxRetryLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Max. 
Retries:')\n\t\tself.maxRetry = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.maxRetry.SetValue(str(Config.settings.maxRetry))\n\t\tself.maxRetrySizer.Add(self.maxRetryLbl, 0, wx.ALL, PADDING)\n\t\tself.maxRetrySizer.Add(self.maxRetry, 0, wx.ALL)\n\n\n\t\tself.networkSettingsBoxSizer.Add(self.numberOfConnSizer, 0, wx.EXPAND)\n\t\tself.networkSettingsBoxSizer.Add(self.numberOfConnPerFileSizer, 0, wx.EXPAND)\n\t\tself.networkSettingsBoxSizer.Add(self.maxRetrySizer, 0, wx.EXPAND)\n\n\t\tself.networkSettingsSizer.Add(self.networkSettingsBoxSizer, 0, wx.EXPAND, PADDING)\t\n\t\n\n\t\tself.proxyBox = wx.StaticBox(self.networkSettingsPanel, label='Proxy settings')\n\t\tself.proxyBoxSizer = wx.StaticBoxSizer(self.proxyBox, wx.VERTICAL)\n\t\t\n\t\tself.useProxy = wx.CheckBox(self.networkSettingsPanel, -1, 'Use proxy')\n\t\tself.useProxy.SetValue(Config.settings.useProxy)\n\t\t\n\t\tself.proxyTypeSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tself.proxyTypeLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Type:')\n\t\tself.proxyTypeList = wx.ComboBox(self.networkSettingsPanel, wx.ID_ANY, value = proxyTypeList[0], choices = proxyTypeList, style = wx.CB_READONLY)\n\t\tself.proxyTypeList.SetStringSelection(proxyTypeValueList[Config.settings.proxyType])\n\t\tself.proxyTypeSizer.Add(self.proxyTypeLbl, 0, wx.EXPAND)\n\t\tself.proxyTypeSizer.Add(self.proxyTypeList, 0, wx.EXPAND)\n\n\t\tself.proxyAddrSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tself.proxyAddrLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Address:')\n\t\tself.proxyAddr = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.proxyAddr.SetValue(str(Config.settings.proxyAddr))\n\t\tself.proxyAddrSizer.Add(self.proxyAddrLbl, 0, wx.EXPAND)\n\t\tself.proxyAddrSizer.Add(self.proxyAddr, 0, wx.EXPAND)\n\n\t\tself.proxyPortSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tself.proxyPortLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Port:')\n\t\tself.proxyPort = wx.TextCtrl(self.networkSettingsPanel, 
wx.ID_ANY)\n\t\tself.proxyPort.SetValue(str(Config.settings.proxyPort))\n\t\tself.proxyPortSizer.Add(self.proxyPortLbl, 0, wx.EXPAND)\n\t\tself.proxyPortSizer.Add(self.proxyPort, 0, wx.EXPAND)\t\t\n\t\t\n\t\tself.proxySettingsSizer = wx.BoxSizer(wx.HORIZONTAL)\t\n\t\tself.proxySettingsSizer.Add(self.proxyTypeSizer, 2, wx.EXPAND, PADDING)\n\t\t#self.proxySettingsSizer.Add(self.proxyAddrSizer, 2, wx.EXPAND, PADDING)\n\t\tself.proxySettingsSizer.Add(self.proxyPortSizer, 1, wx.EXPAND, PADDING)\n\t\t\t\t\n\t\tself.proxyBoxSizer.Add(self.useProxy, 0, wx.EXPAND)\n\t\tself.proxyBoxSizer.Add(self.proxyAddrSizer, 0, wx.EXPAND)\n\t\tself.proxyBoxSizer.Add(self.proxySettingsSizer, 0, wx.EXPAND)\n\t\t\n\t\tself.proxyUserPassSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n\t\tself.proxyUserSizer = wx.BoxSizer(wx.VERTICAL)\t\t\n\t\tself.proxyUserNameLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Username:')\n\t\tself.proxyUsername = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.proxyUsername.SetValue(str(Config.settings.proxyUsername))\n\t\tself.proxyUserSizer.Add(self.proxyUserNameLbl, 0, wx.EXPAND)\n\t\tself.proxyUserSizer.Add(self.proxyUsername, 0, wx.EXPAND)\n\n\t\tself.proxyPasswordSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tself.proxyPasswordLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Password:')\n\t\tself.proxyPassword = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY)\n\t\tself.proxyPassword.SetValue(str(Config.settings.proxyPassword))\n\t\tself.proxyPasswordSizer.Add(self.proxyPasswordLbl, 0, wx.EXPAND)\n\t\tself.proxyPasswordSizer.Add(self.proxyPassword, 0, wx.EXPAND)\n\t\t\n\t\tself.proxyUserPassSizer.Add(self.proxyUserSizer, 1, wx.EXPAND)\n\t\tself.proxyUserPassSizer.Add(self.proxyPasswordSizer, 1, wx.EXPAND)\n\t\t\n\t\tself.proxyBoxSizer.Add(self.proxyUserPassSizer, 0, wx.EXPAND)\n\t\t\n\t\tself.networkSettingsSizer.Add(self.proxyBoxSizer, 0, 
wx.EXPAND)\t\t\t\n\t\t\n\t\t\n\t\tself.networkSettingsPanel.SetSizerAndFit(self.networkSettingsSizer)\t\t\n\t\tself.networkSettingsPanel.SetAutoLayout(True)\n\t\tself.networkSettingsPanel.Layout()\n\t\t\n\t\t\n\n\t\tself.noteBook.AddPage(self.generalSettingsPanel, 'General Settings')\t\t\n\t\tself.noteBook.AddPage(self.networkSettingsPanel, 'Network Settings')\t\t\n\n\n\n\t\tself.buttonsPanel = wx.Panel(self.splitter, wx.ID_ANY)\n\t\tself.buttonsPanelSizer = wx.BoxSizer(wx.VERTICAL)\n\t\tself.okBut = wx.Button(self.buttonsPanel, ID_OK_BUT, 'Save')\n\t\twx.EVT_BUTTON(self, ID_OK_BUT, self.OnClickSave)\n\t\tself.closeBut = wx.Button(self.buttonsPanel, ID_CLOSE_BUT, 'Close')\n\t\twx.EVT_BUTTON(self, ID_CLOSE_BUT, self.OnClickClose)\n\t\tself.buttonsSizer = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.buttonsSizer.Add(self.okBut, 0, wx.CENTER)\n\t\tself.buttonsSizer.Add(self.closeBut, 0, wx.CENTER)\t\t\n\t\tself.buttonsPanelSizer.Add(self.buttonsSizer, 0, wx.ALIGN_CENTER | wx.ALIGN_BOTTOM)\n\t\tself.buttonsPanel.SetSizerAndFit(self.buttonsPanelSizer)\n\t\t\n\n\t\tself.splitter.SplitHorizontally(self.noteBookPanel, self.buttonsPanel)\t\t\n\t\tself.splitterSizer = wx.BoxSizer(wx.VERTICAL)\t\t\n\t\tself.splitterSizer.Add(self.splitter, 1, wx.EXPAND)\t\n\t\tself.SetSizer(self.splitterSizer)\t\t\n\t\t\n\t\n\t\tself.mainSizer.Add(self.noteBook, 0, wx.EXPAND)\n\t\tself.noteBookPanel.SetSizerAndFit(self.mainSizer)\t\t\n\t\t#self.noteBookPanel.SetAutoLayout(True)\n\t\t#self.noteBookPanel.Layout()\n\t\t\n\t\tself.Center(wx.BOTH)\n\t\tself.Fit()\n\t\tself.Show(True)\n\t\t\n\t\t\n\tdef onSelectIncomingDir(self, event):\t\t\n\t\tif (self.incomingDirDialog):\n\t\t\tself.incomingDirDialog.show()\n\t\telse:\n\t\t\tself.incomingDirDialog = wx.DirDialog(self, 'Please select a directory for your incoming files', Config.settings.downloadDir)\n\t\t\tif self.incomingDirDialog.ShowModal() == wx.ID_OK:\n\t\t\t\tself.incomingDirName = self.incomingDirDialog.GetPath()\n\t\t\t\tif 
(Config.checkExistence(self.incomingDirName, TYPE_DIR) != EXIST_W):\n\t\t\t\t\tself.incomingDirMessageDialog = wx.MessageDialog(self, 'You dont have permission to write to this directory', 'Error', style = wx.OK)\n\t\t\t\t\tself.incomingDirMessageDialog.ShowModal()\n\t\t\t\telse:\n\t\t\t\t\tConfig.settings.downloadDir = self.incomingDirName\n\t\t\t\t\tself.incomingDir.SetValue(Config.settings.downloadDir)\n\t\t\t\t\n\t\t\tself.incomingDirDialog.Destroy()\n\t\t\n\t\t\n\tdef onSelectTmpDir(self, event):\t\t\n\t\tif (self.tmpDirDialog):\n\t\t\tself.tmpDirDialog.show()\n\t\telse:\n\t\t\tself.tmpDirDialog = wx.DirDialog(self, 'Please select a directory for temporary files', Config.settings.tmpDir)\n\t\t\tif self.tmpDirDialog.ShowModal() == wx.ID_OK:\n\t\t\t\tself.tmpDirName = self.tmpDirDialog.GetPath()\n\t\t\t\tif (Config.checkExistence(self.tmpDirName, TYPE_DIR) != EXIST_W):\n\t\t\t\t\tself.tmpDirMessageDialog = wx.MessageDialog(self, 'You dont have permission to write to this directory', 'Error', style = wx.OK)\n\t\t\t\t\tself.tmpDirMessageDialog.ShowModal()\n\t\t\t\telse:\n\t\t\t\t\tConfig.settings.tmpDir = self.tmpDirName\n\t\t\t\t\tself.tmpDir.SetValue(Config.settings.tmpDir)\n\t\t\t\t\n\t\t\tself.tmpDirDialog.Destroy()\t\n\t\t\n\t\t\n\t\t\n\n\tdef OnClickSave(self, event):\n\t\tConfig.settings.rapidshareUsername = self.username.GetValue()\n\t\tConfig.settings.rapidsharePassword = self.password.GetValue()\n\t\tConfig.settings.downloadDir = self.incomingDir.GetValue()\n\t\tConfig.settings.tmpDir = self.tmpDir.GetValue()\n\t\tConfig.settings.maxConnectionPerFile = int(self.numberOfConnPerFile.GetValue())\n\t\tConfig.settings.maxConcurrentDownload = int(self.numberOfConn.GetValue())\n\t\tConfig.settings.maxRetry = int(self.maxRetry.GetValue())\n\t\tConfig.settings.useProxy = self.useProxy.GetValue()\n\t\tConfig.settings.proxyType = proxyTypeCurlList[self.proxyTypeList.GetValue()]\n\t\tConfig.settings.proxyAddr = 
str(self.proxyAddr.GetValue())\n\t\tConfig.settings.proxyPort = int(self.proxyPort.GetValue())\n\t\tConfig.settings.proxyUsername = str(self.proxyUsername.GetValue())\n\t\tConfig.settings.proxyPassword = str(self.proxyPassword.GetValue())\t\t\n\t\t\n\t\tConfig.save()\n\t\tself.Destroy()\n\t\t\n\tdef OnClickClose(self, event):\n\t\tself.Destroy()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2008,"string":"2,008"}}},{"rowIdx":1847,"cells":{"__id__":{"kind":"number","value":7301444437588,"string":"7,301,444,437,588"},"blob_id":{"kind":"string","value":"3e947547625a5dc163c06440333458c536dcd90b"},"directory_id":{"kind":"string","value":"abb9be2a1f6ecad7bb00fec5735002f34b44858e"},"path":{"kind":"string","value":"/application/decorator/auth_decorator.py"},"content_id":{"kind":"string","value":"a1b181a6386f487b3b4268b3c52079cd45bbc4da"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"Danielhhs/Victory"},"repo_url":{"kind":"string","value":"https://github.com/Danielhhs/Victory"},"snapshot_id":{"kind":"string","value":"9b50c21c07145172654204881941beef91d7a184"},"revision_id":{"kind":"string","value":"132f186f18ea6a709afb003e38ed1d6de3c0579f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T14:20:53.181664","string":"2021-01-17T14:20:53.181664"},"revision_date":{"kind":"timestamp","value":"2014-06-11T13:40:03","string":"2014-06-11T13:40:03"},"committer_date":{"kind":"timestamp","value":"2014-06-11T13:40:03","string":"2014-06-11T13:40:03"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# flask\nfrom flask import g, abort\n\n# application\nfrom application.models.datastore.user_model import *\n\n\n\ndef authorization(level):\n \"\"\"\n Authorization decorator.\n :param level: UserLevel\n \"\"\"\n def decorator(f):\n def wraps(*args, **kwargs):\n # root\n if level == UserLevel.root:\n if g.user is None or g.user.level != UserLevel.root:\n return abort(403)\n # normal\n elif level == UserLevel.normal:\n if g.user is None:\n return abort(403)\n\n return f(*args, **kwargs)\n return wraps\n\n return 
decorator"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1848,"cells":{"__id__":{"kind":"number","value":13769665155917,"string":"13,769,665,155,917"},"blob_id":{"kind":"string","value":"f28ff8079be09ed42f018c1169f9c3765d3b7e16"},"directory_id":{"kind":"string","value":"14bf4023ccc3dd95e23c71935fc8e17deecd0175"},"path":{"kind":"string","value":"/baby/urls.py"},"content_id":{"kind":"string","value":"57da525230bdf0bdcedd391879b703b2036bd9e3"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rsadwick/alice_sadwick_website"},"repo_url":{"kind":"string","value":"https://github.com/rsadwick/alice_sadwick_website"},"snapshot_id":{"kind":"string","value":"64ff0d0fe8b6401cdab88ffdbcdf3bb62a08d22f"},"revision_id":{"kind":"string","value":"bb04db2edc8b7526f9f4c04895374257b6a46517"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T21:26:46.347038","string":"2021-01-10T21:26:46.347038"},"revision_date":{"kind":"timestamp","value":"2013-05-26T18:57:27","string":"2013-05-26T18:57:27"},"committer_date":{"kind":"timestamp","value":"2013-05-26T18:57:27","string":"2013-05-26T18:57:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"k
ind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls import patterns, url\nfrom django.views.decorators.cache import cache_page\nfrom views import get_article, index\n\nurlpatterns = patterns('baby.views',\n url(r'^$', cache_page(60 * 5)(index)),\n url(r'^services/rsvp/$', 'rsvp'),\n url(r'^(?P[-\\w]+)/$', cache_page(60 * 30)(get_article)),\n url(r'^services/instagram/$', 'get_instagram'),\n\n)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1849,"cells":{"__id__":{"kind":"number","value":19387482399627,"string":"19,387,482,399,627"},"blob_id":{"kind":"string","value":"a41e563e7e6e3535c049cc74239a7c255c5fd130"},"directory_id":{"kind":"string","value":"d297aab40d81b1724736e4674a104fa85410c6cf"},"path":{"kind":"string","value":"/source/python/3-photos/16-get-emotions.py"},"content_id":{"kind":"string","value":"d42bc95c1b861c83583ed5b0730a0e03900e82b1"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"kthcorp/openapi.pudding.to_samples"},"repo_url":{"kind":"string","value":"https://github.com/kthcorp/openapi.pudding.to_samples"},"snapshot_id":{"kind":"string","value":"c0bb70176f1c19b2b4ef596eb2588f7567292ae3"},"revision_id":{"kind":"string","value":"0d7212eaf7659ae572f48af1dde7ea9bdb664f64"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T22:07:46.960634","string":"2021-01-10T22:07:46.960634"},"revision_date":{"kind":"timestamp","value":"2012-08-08T07:25:52","string":"2012-08-08T07:25:52"},"committer_date":{"kind":"timestamp","value":"2012-08-08T07:25:52","string":"2012-08-08T07:25:52"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf8 -*-\nimport simplejson, urllib\nimport urllib2\n\n\"\"\"\n16 get emotion infomation \n\nformat : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN\nsample : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN\n\"\"\"\n\nACCESS_KEY = \"96474e57-cb16-11e1-91b7-12313f062e84\"\nAPI_BASE = \"http://openapi.pudding.to/api/v1/emotions/\"\n\n\ndef get_emotions(lang_id, **args):\n \"\"\"\n Get emotions \n \"\"\"\n args.update({\n 'access_key': ACCESS_KEY\n })\n\n url = API_BASE + lang_id + \"?\" + 
urllib.urlencode(args)\n\n if('format' in args and args['format'] == 'xml'):\n result = urllib2.urlopen(url).read()\n else:\n result = simplejson.load(urllib.urlopen(url))\n\n return result\n\n\nif __name__ == \"__main__\" :\n \n langid = \"en\" # ko, en, ja\n\n json = get_emotions(langid)\n print json\n\n xml = get_emotions(langid, format='xml')\n print xml\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1850,"cells":{"__id__":{"kind":"number","value":6081673735108,"string":"6,081,673,735,108"},"blob_id":{"kind":"string","value":"0213cfd1d56fadc9493035ab44f7bbd52e5caeab"},"directory_id":{"kind":"string","value":"e7e453268dc74c74a54c85d35a1f9b254298b9a2"},"path":{"kind":"string","value":"/sage/finance/stock.py"},"content_id":{"kind":"string","value":"bbc5f56065ccabf0fbfb7a74f00f8ab7186374ea"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pombredanne/sage-1"},"repo_url":{"kind":"string","value":"https://github.com/pombredanne/sage-1"},"snapshot_id":{"kind":"string","value":"4128172b20099dfcdaa9792a61945e97537501bd"},"revision_id":{"kind":"string","value":"4262d856b92f9e1772d71f993baa6aecbbd87a87"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-03-04T20:07:27.273680","string":"2018-03-04T20:07:27.273680"},"revision_date":{"kind":"timestamp","value":"2013-03-22T02:31:54","string":"2013-03-22T02:31:54"},"committer_date":{"kind":"timestamp","value":"2013-03-22T02:31:54","string":"2013-03-22T02:31:54"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nStock Market Price Series\n\nAUTHORS:\n\n- William Stein, 2008\n\n- Brett Nakayama, 2008\n\n- Chris Swierczewski, 2008\n\nTESTS::\n\n sage: ohlc = sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092)\n sage: loads(dumps(ohlc)) == ohlc\n True\n\"\"\"\n\nimport urllib\nfrom sage.structure.all import Sequence\nfrom datetime import date\n\nclass OHLC:\n def __init__(self, timestamp, open, high, low, close, volume):\n \"\"\"\n Open, high, low, and close information for a stock. 
Also stores\n a timestamp for that data along with the volume.\n\n INPUT:\n\n - ``timestamp`` -- string\n\n - ``open``, ``high``, ``low``, ``close`` -- float\n\n - ``volume`` -- int\n\n EXAMPLES::\n\n sage: sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092)\n 18-Aug-04 100.01 104.06 95.96 100.34 22353092\n \"\"\"\n self.timestamp = timestamp\n self.open=float(open); self.high=float(high); self.low=float(low); self.close=float(close)\n self.volume=int(volume)\n \n def __repr__(self):\n \"\"\"\n Return string representation of stock OHLC data.\n\n EXAMPLES::\n\n sage: sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092).__repr__()\n ' 18-Aug-04 100.01 104.06 95.96 100.34 22353092'\n \"\"\"\n return '%10s %4.2f %4.2f %4.2f %4.2f %10d'%(self.timestamp, self.open, self.high, \n self.low, self.close, self.volume)\n\n def __cmp__(self, other):\n \"\"\"\n Compare ``self`` and ``other``.\n\n EXAMPLES::\n\n sage: ohlc = sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092)\n sage: ohlc2 = sage.finance.stock.OHLC('18-Aug-04', 101.01, 104.06, 95.96, 100.34, 22353092)\n sage: cmp(ohlc, ohlc2)\n -1\n \"\"\"\n if not isinstance(other, OHLC):\n return cmp(type(self), type(other))\n return cmp((self.timestamp, self.open, self.high, self.low, self.close, self.volume),\n (other.timestamp, other.open, other.high, other.low, other.close, other.volume))\n\nclass Stock:\n \"\"\"\n Class for retrieval of stock market information.\n \"\"\"\n def __init__(self, symbol, cid=''):\n \"\"\"\n Create a ``Stock`` object. Optional initialization by ``cid``: an\n identifier for each equity used by Google Finance.\n\n INPUT:\n\n - ``symbol`` -- string, a ticker symbol (with or without market).\n Format: ``\"MARKET:SYMBOL\"`` or ``\"SYMBOL\"``. If you don't\n supply the market, it is assumed to be NYSE or NASDAQ.\n e.g. \"goog\" or \"OTC:NTDOY\"\n\n - ``cid`` -- Integer, a Google contract ID (optional).\n\n\n .. 
NOTE::\n\n Currently, the symbol and cid do not have to match. When using\n ``google()``, the cid will take precedence.\n\n EXAMPLES::\n\n sage: S = finance.Stock('ibm')\n sage: S # random; optional -- internet\n IBM (127.48) \n \"\"\"\n self.symbol = symbol.upper()\n self.cid = cid\n\n def __repr__(self):\n \"\"\"\n Return string representation of this stock.\n\n EXAMPLES::\n\n sage: finance.Stock('ibm').__repr__() # random; optional -- internet\n 'IBM (127.47)'\n \"\"\"\n return \"%s (%s)\"%(self.symbol, self.market_value())\n \n def market_value(self):\n \"\"\"\n Return the current market value of this stock.\n\n OUTPUT:\n\n A Python float.\n\n EXAMPLES::\n\n sage: finance.Stock('goog').market_value() # random; optional - internet\n 575.83000000000004\n \"\"\"\n return float(self.yahoo()['price'])\n\n def yahoo(self):\n \"\"\"\n Get Yahoo current price data for this stock.\n\n OUTPUT:\n\n A dictionary.\n\n EXAMPLES::\n\n sage: finance.Stock('GOOG').yahoo() # random; optional -- internet\n {'stock_exchange': '\"NasdaqNM\"', 'market_cap': '181.1B', '200day_moving_avg': '564.569', '52_week_high': '747.24', 'price_earnings_growth_ratio': '1.04', 'price_sales_ratio': '10.16', 'price': '576.48', 'earnings_per_share': '14.463', '50day_moving_avg': '549.293', 'avg_daily_volume': '6292480', 'volume': '1613507', '52_week_low': '412.11', 'short_ratio': '1.00', 'price_earnings_ratio': '40.50', 'dividend_yield': 'N/A', 'dividend_per_share': '0.00', 'price_book_ratio': '7.55', 'ebitda': '6.513B', 'change': '-9.32', 'book_value': '77.576'}\n \"\"\"\n url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (self.symbol, 'l1c1va2xj1b4j4dyekjm3m4rr5p5p6s7')\n values = urllib.urlopen(url).read().strip().strip('\"').split(',')\n data = {}\n data['price'] = values[0]\n data['change'] = values[1]\n data['volume'] = values[2]\n data['avg_daily_volume'] = values[3]\n data['stock_exchange'] = values[4]\n data['market_cap'] = values[5]\n data['book_value'] = values[6]\n data['ebitda'] 
= values[7]\n data['dividend_per_share'] = values[8]\n data['dividend_yield'] = values[9]\n data['earnings_per_share'] = values[10]\n data['52_week_high'] = values[11]\n data['52_week_low'] = values[12]\n data['50day_moving_avg'] = values[13]\n data['200day_moving_avg'] = values[14]\n data['price_earnings_ratio'] = values[15]\n data['price_earnings_growth_ratio'] = values[16]\n data['price_sales_ratio'] = values[17]\n data['price_book_ratio'] = values[18]\n data['short_ratio'] = values[19]\n return data\n\n def google(self,startdate='Jan+1,+1900',enddate=date.today().strftime(\"%b+%d,+%Y\"), histperiod='daily'):\n \"\"\"\n Return an immutable sequence of historical price data\n for this stock, obtained from Google. OHLC data is stored\n internally as well. By default, returns the past year's daily\n OHLC data.\n\n Dates ``startdate`` and ``enddate`` should be formatted\n ``'Mon+d,+yyyy'``, where ``'Mon'`` is a three character abbreviation\n of the month's name.\n\n .. NOTE::\n\n Google Finance returns the past year's financial data by default\n when ``startdate`` is set too low from the equity's date of going\n public. By default, this function only looks at the NASDAQ and\n NYSE markets. However, if you specified the market during\n initialization of the stock (i.e. 
``finance.Stock(\"OTC:NTDOY\")``),\n ``Stock.google()`` will give correct results.\n\n INPUT:\n\n - ``startdate`` -- string, (default: ``'Jan+1,+1900'``)\n\n - ``enddate`` -- string, (default: current date)\n\n - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``)\n\n OUTPUT:\n\n A sequence.\n\n EXAMPLES:\n\n We get the first five days of VMware's stock history::\n\n sage: finance.Stock('vmw').google()[:5] # optional -- internet\n [\n 28-Nov-07 80.57 88.49 80.57 87.69 7496000,\n 29-Nov-07 90.91 93.20 89.50 90.85 5497600,\n 30-Nov-07 95.39 95.60 89.85 91.37 4750200,\n 3-Dec-07 89.87 96.00 88.70 94.97 4401100,\n 4-Dec-07 92.26 97.10 92.05 95.08 2896600\n ]\n \n sage: finance.Stock('F').google('Jan+3,+1978', 'Jul+7,+2008')[:5] # optional -- internet\n [\n 3-Jan-78 0.00 1.93 1.89 1.89 1618200,\n 4-Jan-78 0.00 1.89 1.87 1.88 2482700,\n 5-Jan-78 0.00 1.89 1.84 1.84 2994900,\n 6-Jan-78 0.00 1.84 1.82 1.83 3042500,\n 9-Jan-78 0.00 1.81 1.79 1.81 3916400\n ]\n \n Note that when ``startdate`` is too far prior to a stock's actual start\n date, Google Finance defaults to a year's worth of stock history\n leading up to the specified end date. 
For example, Apple's (AAPL)\n stock history only dates back to September 7, 1984::\n \n sage: finance.Stock('AAPL').google('Sep+1,+1900', 'Jan+1,+2000')[0:5] # optional -- internet\n [\n 4-Jan-99 0.00 10.56 10.00 10.31 34031600,\n 5-Jan-99 0.00 10.98 10.38 10.83 50360400,\n 6-Jan-99 0.00 11.03 10.25 10.44 48160800,\n 7-Jan-99 0.00 11.27 10.53 11.25 51036400,\n 8-Jan-99 0.00 11.72 11.00 11.25 24240000\n ]\n \n Here is an example where we create and get the history of a stock\n that is not in NASDAQ or NYSE::\n \n sage: finance.Stock(\"OTC:NTDOY\").google(startdate=\"Jan+1,+2007\", enddate=\"Jan+1,+2008\")[:5] # optional -- internet\n [\n 3-Jan-07 32.44 32.75 32.30 32.44 156283,\n 4-Jan-07 31.70 32.40 31.20 31.70 222643,\n 5-Jan-07 30.15 30.50 30.15 30.15 65670,\n 8-Jan-07 30.10 30.50 30.00 30.10 130765,\n 9-Jan-07 29.90 30.05 29.60 29.90 103338\n ]\n\n \n Here, we create a stock by cid, and get historical data.\n Note that when using historical, if a cid is specified,\n it will take precedence over the stock's symbol. So, if\n the symbol and cid do not match, the history based on the\n contract id will be returned. 
::\n \n sage: sage.finance.stock.Stock(\"AAPL\", 22144).google(startdate='Jan+1,+1990')[:5] #optional -- internet\n [\n 2-Jan-90 0.00 9.38 8.75 9.31 6542800,\n 3-Jan-90 0.00 9.50 9.38 9.38 7428400,\n 4-Jan-90 0.00 9.69 9.31 9.41 7911200,\n 5-Jan-90 0.00 9.56 9.25 9.44 4404000,\n 8-Jan-90 0.00 9.50 9.25 9.50 3627600\n ]\n \"\"\"\n cid = self.cid\n symbol = self.symbol\n \n if self.cid=='':\n if ':' in symbol:\n R = self._get_data('', startdate, enddate, histperiod)\n else:\n R = self._get_data('NASDAQ:', startdate, enddate, histperiod)\n if \"Bad Request\" in R:\n R = self._get_data(\"NYSE:\", startdate, enddate, histperiod)\n else:\n R = self._get_data('', startdate, enddate, histperiod)\n if \"Bad Request\" in R:\n raise RuntimeError\n self.__historical = []\n self.__historical = self._load_from_csv(R)\n return self.__historical\n\n def open(self, *args, **kwds):\n r\"\"\"\n Return a time series containing historical opening prices for this\n stock. If no arguments are given, will return last acquired historical\n data. Otherwise, data will be gotten from Google Finance.\n\n INPUT:\n\n - ``startdate`` -- string, (default: ``'Jan+1,+1900'``)\n\n - ``enddate`` -- string, (default: current date)\n\n - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``)\n\n OUTPUT:\n\n A time series -- close price data.\n\n EXAMPLES:\n\n You can directly obtain Open data as so::\n\n sage: finance.Stock('vmw').open(startdate='Jan+1,+2008', enddate='Feb+1,+2008') # optional -- internet\n [83.0500, 85.4900, 84.9000, 82.0000, 81.2500 ... 
82.0000, 58.2700, 54.4900, 55.6000, 56.9800]\n\n Or, you can initialize stock data first and then extract the Open\n data::\n\n sage: c = finance.Stock('vmw')\n sage: c.google(startdate='Feb+1,+2008', enddate='Mar+1,+2008')[:5] # optional -- internet\n [\n 31-Jan-08 55.60 57.35 55.52 56.67 2591100,\n 1-Feb-08 56.98 58.14 55.06 57.85 2473000,\n 4-Feb-08 58.00 60.47 56.91 58.05 1816500,\n 5-Feb-08 57.60 59.30 57.17 59.30 1709000,\n 6-Feb-08 60.32 62.00 59.50 61.52 2191100\n ]\n sage: c.open() # optional -- internet\n [55.6000, 56.9800, 58.0000, 57.6000, 60.3200 ... 56.5500, 59.3000, 60.0000, 59.7900, 59.2600]\n\n Otherwise, ``self.google()`` will be called with the default\n arguments returning a year's worth of data::\n\n sage: finance.Stock('vmw').open() # random; optional -- internet\n [52.1100, 60.9900, 59.0000, 56.0500, 57.2500 ... 83.0500, 85.4900, 84.9000, 82.0000, 81.2500]\n \"\"\"\n \n from time_series import TimeSeries\n \n if len(args) != 0:\n return TimeSeries([x.open for x in self.google(*args, **kwds)])\n \n try:\n return TimeSeries([x.open for x in self.__historical])\n except AttributeError:\n pass\n \n return TimeSeries([x.open for x in self.google(*args, **kwds)])\n \n def close(self, *args, **kwds):\n r\"\"\"\n Return the time series of all historical closing prices for this stock.\n If no arguments are given, will return last acquired historical data.\n Otherwise, data will be gotten from Google Finance.\n\n INPUT:\n\n - ``startdate`` -- string, (default: ``'Jan+1,+1900'``)\n\n - ``enddate`` -- string, (default: current date)\n\n - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``)\n\n OUTPUT:\n\n A time series -- close price data.\n\n EXAMPLES:\n\n You can directly obtain close data as so::\n\n sage: finance.Stock('vmw').close(startdate='Jan+1,+2008', enddate='Feb+1,+2008') # optional -- internet\n [84.9900, 84.6000, 83.9500, 80.4900, 72.9900 ... 
83.0000, 54.8700, 56.4200, 56.6700, 57.8500]\n\n Or, you can initialize stock data first and then extract the Close\n data::\n\n sage: c = finance.Stock('vmw')\n sage: c.google(startdate='Feb+1,+2008', enddate='Mar+1,+2008')[:5] # optional -- internet\n [\n 31-Jan-08 55.60 57.35 55.52 56.67 2591100,\n 1-Feb-08 56.98 58.14 55.06 57.85 2473000,\n 4-Feb-08 58.00 60.47 56.91 58.05 1816500,\n 5-Feb-08 57.60 59.30 57.17 59.30 1709000,\n 6-Feb-08 60.32 62.00 59.50 61.52 2191100\n ]\n sage: c.close() # optional -- internet\n [56.6700, 57.8500, 58.0500, 59.3000, 61.5200 ... 58.2900, 60.1800, 59.8600, 59.9500, 58.6700]\n\n Otherwise, ``self.google()`` will be called with the default\n arguments returning a year's worth of data::\n\n sage: finance.Stock('vmw').close() # random; optional -- internet\n [57.7100, 56.9900, 55.5500, 57.3300, 65.9900 ... 84.9900, 84.6000, 83.9500, 80.4900, 72.9900]\n \"\"\"\n \n from time_series import TimeSeries\n \n if len(args) != 0:\n return TimeSeries([x.close for x in self.google(*args, **kwds)])\n \n try:\n return TimeSeries([x.close for x in self.__historical])\n except AttributeError:\n pass\n \n return TimeSeries([x.close for x in self.google(*args, **kwds)])\n \n def load_from_file(self, file):\n r\"\"\"\n Load historical data from a local csv formatted data file. Note\n that no symbol data is included in Google Finance's csv data.\n The csv file must be formatted in the following way, just as\n on Google Finance::\n\n Timestamp,Open,High,Low,Close,Volume\n\n INPUT:\n\n - ``file`` -- local file with Google Finance formatted OHLC data.\n\n OUTPUT:\n\n A sequence -- OHLC data.\n\n EXAMPLES:\n\n Suppose you have a file in your home directory containing Apple stock\n OHLC data, such as that from Google Finance, called\n ``AAPL-minutely.csv``. One can load this information into a Stock\n object like so. 
Note that the path must be explicit::\n\n sage: filename = tmp_filename(ext='.csv')\n sage: open(filename,'w').write(\"Date,Open,High,Low,Close,Volume\\n1212405780,187.80,187.80,187.80,187.80,100\\n1212407640,187.75,188.00,187.75,188.00,2000\\n1212407700,188.00,188.00,188.00,188.00,1000\\n1212408000,188.00,188.11,188.00,188.00,2877\\n1212408060,188.00,188.00,188.00,188.00,687\")\n sage: finance.Stock('aapl').load_from_file(filename)[:5]\n [\n 1212408060 188.00 188.00 188.00 188.00 687,\n 1212408000 188.00 188.11 188.00 188.00 2877,\n 1212407700 188.00 188.00 188.00 188.00 1000,\n 1212407640 187.75 188.00 187.75 188.00 2000,\n 1212405780 187.80 187.80 187.80 187.80 100\n ]\n\n\n Note that since the source file doesn't contain information on which\n equity the information comes from, the symbol designated at\n initialization of Stock need not match the source of the data. For\n example, we can initialize a Stock object with the symbol ``'goog'``,\n but load data from ``'aapl'`` stock prices::\n\n sage: finance.Stock('goog').load_from_file(filename)[:5]\n [\n 1212408060 188.00 188.00 188.00 188.00 687,\n 1212408000 188.00 188.11 188.00 188.00 2877,\n 1212407700 188.00 188.00 188.00 188.00 1000,\n 1212407640 187.75 188.00 187.75 188.00 2000,\n 1212405780 187.80 187.80 187.80 187.80 100\n ]\n\n This tests a file that doesn't exist::\n\n sage: finance.Stock(\"AAPL\").load_from_file(\"I am not a file\")\n Traceback (most recent call last):\n ...\n IOError: [Errno 2] No such file or directory: 'I am not a file'\n \"\"\"\n file_obj = open(file, 'r')\n R = file_obj.read();\n self.__historical = self._load_from_csv(R)\n file_obj.close()\n return self.__historical\n\n \n def _load_from_csv(self, R):\n r\"\"\"\n EXAMPLES:\n\n This indirectly tests ``_load_from_csv()``::\n\n sage: filename = tmp_filename(ext='.csv')\n sage: 
open(filename,'w').write(\"Date,Open,High,Low,Close,Volume\\n1212405780,187.80,187.80,187.80,187.80,100\\n1212407640,187.75,188.00,187.75,188.00,2000\\n1212407700,188.00,188.00,188.00,188.00,1000\\n1212408000,188.00,188.11,188.00,188.00,2877\\n1212408060,188.00,188.00,188.00,188.00,687\")\n sage: finance.Stock('aapl').load_from_file(filename)\n [\n 1212408060 188.00 188.00 188.00 188.00 687,\n 1212408000 188.00 188.11 188.00 188.00 2877,\n 1212407700 188.00 188.00 188.00 188.00 1000,\n 1212407640 187.75 188.00 187.75 188.00 2000,\n 1212405780 187.80 187.80 187.80 187.80 100\n ]\n \"\"\"\n R = R.splitlines()\n headings = R[0].split(',')\n hist_data = []\n for x in reversed(R[1:]):\n try:\n timestamp, opn, high, low, close, volume = x.split(',')\n ohlc = OHLC(timestamp, opn,high,low,close,volume)\n hist_data.append(ohlc)\n except ValueError:\n pass\n hist_data = Sequence(hist_data,cr=True,universe=lambda x:x, immutable=True)\n return hist_data\n \n def _get_data(self, exchange='', startdate='Jan+1,+1900', enddate=date.today().strftime(\"%b+%d,+%Y\"), histperiod='daily'):\n \"\"\" \n This function is used internally.\n\n EXAMPLES:\n\n This indirectly tests the use of ``_get_data()``::\n\n sage: finance.Stock('aapl').google(startdate='Jan+1,+1990')[:2] # optional -- internet\n [\n 2-Jan-90 0.00 9.38 8.75 9.31 6542800,\n 3-Jan-90 0.00 9.50 9.38 9.38 7428400\n ]\n \"\"\"\n symbol = self.symbol\n cid = self.cid\n if cid == '':\n url = 'http://finance.google.com/finance/historical?q=%s%s&startdate=%s&enddate=%s&histperiod=%s&output=csv'%(exchange, symbol.upper(), startdate, enddate, histperiod)\n else:\n url = 'http://finance.google.com/finance/historical?cid=%s&startdate=%s&enddate=%s&histperiod=%s&output=csv'%(cid, startdate, enddate, histperiod)\n return 
urllib.urlopen(url).read()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1851,"cells":{"__id__":{"kind":"number","value":15659450808879,"string":"15,659,450,808,879"},"blob_id":{"kind":"string","value":"ae5b30ee182bd1e437dee0f30c013e3286f0f475"},"directory_id":{"kind":"string","value":"3fa4e825b925d3385da09f3012f635fb0a21cccf"},"path":{"kind":"string","value":"/exponential_sol.py"},"content_id":{"kind":"string","value":"c5dbef6f6c1370f580391225fddd61a4164c1e50"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"aizwellenstan/ARC-moving-averages"},"repo_url":{"kind":"string","value":"https://github.com/aizwellenstan/ARC-moving-averages"},"snapshot_id":{"kind":"string","value":"84cd8939ba6c1dfa393b4f74d9ddd31ed43ac13e"},"revision_id":{"kind":"string","value":"33112fcd3ef6a48b7dd8c1f1f231b90a51a0f7c7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-27T23:23:06.171858","string":"2021-05-27T23:23:06.171858"},"revision_date":{"kind":"timestamp","value":"2014-01-20T04:42:11","string":"2014-01-20T04:42:11"},"committer_date":{"kind":"timestamp","value":"2014-01-20T04:42:11","string":"2014-01-20T04:42:11"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"k
ind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from stock import Stock\nfrom Queue import Queue\nfrom arc_utils import print_ma\n\n# we can use the recurrence equation to obtain the EMA\n# X_{0,a} = D_0\n# X_{t,a} = (1-a) * D_t + a * X_{t-1,a}\n\n#M, the number of days we care about\nma_days = 10\n\n# alpha, must satisfy 0 < a <= 1 \nalpha = 0.90\n\n#simulation length in days\nsim_len = 50\n\n# calculate the next moving average, using the current one\n# using our knowledge of the recurrence equation\ndef populate_ma(ma_arr, s):\n new_price = s.next_price()\n new_ma = ((1 - alpha) * new_price) + (alpha * s.get_ma())\n s.update_ma(new_ma)\n ma_arr.append(new_ma)\n\ndef main():\n s = Stock()\n ma_arr = []\n\n #populate the moving avg array with the first one\n ma_arr.append(s.get_ma())\n\n #loop that actually runs the simulation\n for x in range(1,sim_len,1):\n populate_ma(ma_arr, s)\n\n #pretty prints our array of moving avgs\n map(print_ma, ma_arr)\n\nmain()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1852,"cells":{"__id__":{"kind":"number","value":7533372645289,"string":"7,533,372,645,289"},"blob_id":{"kind":"string","value":"d3641acaa31f3f5bdc67953e00a6fe2d1fa66547"},"directory_id":{"kind":"string","value":"affa4d21751585f50ba9ab210cbc1ae30126566a"},"path":{"kind":"string","value":"/test_5.py"},"content_id":{"kind":"string","value":"2b421e0a50bd79bde73f71facbb04a6e3f1af176"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rhintz42/Learning-Python-fix-errors"},"repo_url":{"kind":"string","value":"https://github.com/rhintz42/Learning-Python-fix-errors"},"snapshot_id":{"kind":"string","value":"50e6ae9498b3510788773aefba308214d51b9482"},"revision_id":{"kind":"string","value":"ae4ce9ae2727c038653554ee09026e55249e086c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T12:37:29.561094","string":"2021-01-20T12:37:29.561094"},"revision_date":{"kind":"timestamp","value":"2014-05-24T22:32:21","string":"2014-05-24T22:32:21"},"committer_date":{"kind":"timestamp","value":"2014-05-24T22:32:21","string":"2014-05-24T22:32:21"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def bar(l):\n l.append(\"Hello')\n return l\n\ndef foo():\n return bar([])\n\nprint(\"Should print out a list with 'Hello' in 
it\")\nprint(foo())\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1853,"cells":{"__id__":{"kind":"number","value":6433861021431,"string":"6,433,861,021,431"},"blob_id":{"kind":"string","value":"be504405d7db1315aee5c5bdc74998446a0f9262"},"directory_id":{"kind":"string","value":"5669fbf3015a5c83d268e2dcc85bc8fc74c6ed2d"},"path":{"kind":"string","value":"/resource_wagon/web/urls.py"},"content_id":{"kind":"string","value":"9617021b883ebc01e875cbc9e3f659ed088781a8"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"technomicssolutions/resource_wagon"},"repo_url":{"kind":"string","value":"https://github.com/technomicssolutions/resource_wagon"},"snapshot_id":{"kind":"string","value":"7e32a26d2bd1bab7b1be704f04a20dbd8527fa4b"},"revision_id":{"kind":"string","value":"8a61aff833ea0bc1785a1b15bb051b61f591e443"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-07T17:15:22.752259","string":"2016-08-07T17:15:22.752259"},"revision_date":{"kind":"timestamp","value":"2014-10-27T07:47:45","string":"2014-10-27T07:47:45"},"committer_date":{"kind":"timestamp","value":"2014-10-27T07:47:45","string":"2014-10-27T07:47:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"
kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nfrom django.conf.urls import patterns, url\nfrom django.contrib.auth.decorators import login_required\n\nfrom web.views import (Login, Logout, Home, ResetPassword, RequestView, ReplyEmployer, \\\n\tDeleteRequest, ForgotPassword, Aboutus, Dashboard, Companies, Company , PremiumEmployer, \\\n MissionStatement, ResourcesWagon, WagonDrivers, CandidatePreparation, CompetencyAnalysis, \\\n\tContact, RecruitmentDivisions, DeleteEmployer, DeleteJobseeker, \\\n MissionStatement, ResourcesWagon, WagonDrivers, CandidatePreparation, CompetencyAnalysis, RecruitmentDivisions, RequestCV, \n TermsAndConditions, ViewTestmonials)\n\n\nurlpatterns = patterns('',\n url(r'login/$', Login.as_view(), name=\"login\"),\n url(r'logout/$', Logout.as_view(), name=\"logout\"),\n url(r'^$', Home.as_view(), name=\"home\"),\n url(r'^admin_dashboard/$', login_required(Dashboard.as_view(), login_url=\"/login/\"), name=\"admin_dashboard\"),\n url(r'^forgot_password/$', ForgotPassword.as_view(), name='forgot_password'),\n url(r'^reset_password/(?P\\d+)/$', login_required(ResetPassword.as_view(), login_url=\"/login/\"), name=\"reset_password\"),\n url(r'^request/$', login_required(RequestView.as_view(), login_url=\"/login/\"), name=\"request\"),\n url(r'^reply/(?P\\d+)/$', login_required(ReplyEmployer.as_view(), login_url=\"/login/\"), name=\"reply\"),\n url(r'^delete_request/(?P\\d+)/$', login_required(DeleteRequest.as_view(), login_url=\"/login/\"), name='delete_request'),\n url(r'^aboutus/$', Aboutus.as_view(), name='aboutus'),\n\turl(r'^contact/$', Contact.as_view(), name='contact'),\n url(r'^cv_request/$', RequestCV.as_view(), name='cv_request'),\n url(r'^aboutus/mission_statement/$', MissionStatement.as_view(), name='mission_statement'),\n url(r'^aboutus/resources_wagon/$', ResourcesWagon.as_view(), name='resources_wagon'),\n 
url(r'^aboutus/wagon_drivers/$', WagonDrivers.as_view(), name='wagon_drivers'),\n\turl(r'^employers/recruitment_divisions/$', RecruitmentDivisions.as_view(), name='recruitment_divisions'),\n\turl(r'^employers/competency_analysis/$', CompetencyAnalysis.as_view(), name='competency_analysis'),\n\turl(r'^candidates/candiadte_preparation/$', CandidatePreparation.as_view(), name='candidate_preparation'),\n url(r'^company/(?P\\d+)/$', Company.as_view(), name='company'),\n url(r'^companies/$', Companies.as_view(), name='companies'),\n url(r'^delete_employer/(?P\\d+)/$', DeleteEmployer.as_view(), name='delete_employer'),\n url(r'^delete_jobseeker/(?P\\d+)/$', DeleteJobseeker.as_view(), name='delete_jobseeker'),\n url(r'^save_premium_employer/$', PremiumEmployer.as_view(), name='save_premium_employer'),\n url(r'^terms_conditions/$', TermsAndConditions.as_view(), name='terms_conditions'),\n url(r'^testmonials/$', ViewTestmonials.as_view(), name='testmonials')\n)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1854,"cells":{"__id__":{"kind":"number","value":15857019269271,"string":"15,857,019,269,271"},"blob_id":{"kind":"string","value":"226ddb4aa5d1c8792c8dc96e7c521049f451fa52"},"directory_id":{"kind":"string","value":"28b71261dc65c4d777951e20588c81ac45aef0d5"},"path":{"kind":"string","value":"/inheritence-quiz/main.py"},"content_id":{"kind":"string","value":"b3b2bd23a601eab00cebd3d44019728d408c5ba5"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"DOCarroll/DPW1405"},"repo_url":{"kind":"string","value":"https://github.com/DOCarroll/DPW1405"},"snapshot_id":{"kind":"string","value":"91a562c50094839f9de74638a4e5d8e93f9ff943"},"revision_id":{"kind":"string","value":"bad34aeff2763bbe67d1c21fde29e850c012c6f4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T03:13:53.456017","string":"2021-01-22T03:13:53.456017"},"revision_date":{"kind":"timestamp","value":"2014-05-22T03:30:06","string":"2014-05-22T03:30:06"},"committer_date":{"kind":"timestamp","value":"2014-05-22T03:30:06","string":"2014-05-22T03:30:06"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#Daniel O'Carroll\n#DPW1405\n#May 19th, 2014\nimport webapp2\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n self.response.write('Hello world!')\n\n\nclass Dog(object):\n def __init__(self):\n __dog_sound = 'Bark!'\n favorite_game = 'Fetch'\n\n def getsound(self, __dog_sound):\n return __dog_sound\n\n def getgame(self, favorite_game):\n return favorite_game\n\n\nclass Husky(Dog):\n Dog.__init__(self):\n husky = Dog()\n husky\n\n\n\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler)\n], 
debug=True)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1855,"cells":{"__id__":{"kind":"number","value":1640677546162,"string":"1,640,677,546,162"},"blob_id":{"kind":"string","value":"124279c3b63700a8fbc351915c51efdb070bb8f3"},"directory_id":{"kind":"string","value":"637088bf9f54b75e50d20c801e62b9fb4c237256"},"path":{"kind":"string","value":"/views/register.py"},"content_id":{"kind":"string","value":"03b9c085ff0610a7267b346344cc78ec1773dc7a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ehsansh84/Nafis_Project"},"repo_url":{"kind":"string","value":"https://github.com/ehsansh84/Nafis_Project"},"snapshot_id":{"kind":"string","value":"aa9e8450425843592660840f8271c9b0708b951f"},"revision_id":{"kind":"string","value":"a04a7293076a5a28f5ac8238dd84e03aea8247e6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-08-13T10:15:37.744739","string":"2015-08-13T10:15:37.744739"},"revision_date":{"kind":"timestamp","value":"2014-09-19T18:08:16","string":"2014-09-19T18:08:16"},"committer_date":{"kind":"timestamp","value":"2014-09-19T18:08:16","string":"2014-09-19T18:08:16"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"n
ull"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom views.data import *\nfrom models.user import *\n\nclass Register(tornado.web.RequestHandler):\n\n def get(self, *args, **kwargs):\n self.render('register.html')\n\n\n\n def post(self, *args, **kwargs):\n name=self.get_argument('name')\n family=self.get_argument('family')\n age=self.get_argument('age')\n username=self.get_argument('username')\n password=self.get_argument('password')\n obj = User()\n obj.name = name\n obj.username = username\n obj.family = family\n obj.age = age\n obj.password=password\n obj.save()\n\n\n\n\n\n\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1856,"cells":{"__id__":{"kind":"number","value":9869834871328,"string":"9,869,834,871,328"},"blob_id":{"kind":"string","value":"93ad2be37013f3b427772713b422c7683e336f59"},"directory_id":{"kind":"string","value":"1696542c9d76fbd9c3da46ac3ab895c879e5a142"},"path":{"kind":"string","value":"/scripts/ml_benchmark.py"},"content_id":{"kind":"string","value":"d58ff6618b2dfdd67441f92022a9d0c755be0b19"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ryeakle/DigitClassifier"},"repo_url":{"kind":"string","value":"https://github.com/ryeakle/DigitClassifier"},"snapshot_id":{"kind":"string","value":"a17fffca9158481621cbcd0c803b36ac6165f342"},"revision_id":{"kind":"string","value":"5680d58b91cc206be207f0c645a66995c6e10bd9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-07-02T03:04:59.823295","string":"2020-07-02T03:04:59.823295"},"revision_date":{"kind":"timestamp","value":"2013-08-19T03:15:55","string":"2013-08-19T03:15:55"},"committer_date":{"kind":"timestamp","value":"2013-08-19T03:15:55","string":"2013-08-19T03:15:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport time\nimport scipy\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport sklearn as sk\nimport sklearn.neighbors \nimport sklearn.ensemble\nimport numpy as np\nimport Image\nimport sys\n\nIMG_SIZE = 28*28\nTRAIN_CSV_PATH = \"../data/train.csv\"\n\n# Load images and labels\ntrain_labels = np.loadtxt(open(TRAIN_CSV_PATH, \"rb\"), delimiter=\",\", skiprows=1, \n usecols=[0])\n\nimg_train = np.loadtxt(open(TRAIN_CSV_PATH, \"rb\"), delimiter=\",\", skiprows=1, \n usecols=range(1, IMG_SIZE + 1))\n\nset_size = np.size(train_labels)\nprint \"Total Set Size: \" + 
str(set_size)\n\ntrain_size = (int) (set_size * 0.5)\ntest_size = set_size - train_size\n\ntrain_range = range(0, train_size)\ntest_range = range(train_size, set_size)\n\ndef showTrainImage(test_image):\n img = test_image.reshape(28,28)\n plt.imshow(img, cmap = cm.Greys_r)\n plt.show()\n\n\n#benchmark K Nearest Neighbors\ndef benchmarkKNN(train_imgs, train_labels, test_imgs, test_labels):\n knn = sk.neighbors.KNeighborsClassifier(n_neighbors = 5, weights='uniform', \n algorithm='auto', leaf_size=30, \n warn_on_equidistant=True, p=2)\n\n train_size = len(train_labels)\n test_size = len(test_labels)\n\n train_range = range(0, train_size)\n test_range = range(0, test_size)\n\n start_t = time.time()\n knn.fit(img_train[train_range, :], train_labels[train_range])\n train_t = time.time() - start_t;\n print \"KNN train time: \" + str(train_t)\n print \"KNN training complete, beginning test set:\"\n\n error_count = 0\n start_t = time.time()\n for i in test_range:\n if knn.predict(img_train[i, :]) != train_labels[i]:\n error_count = error_count + 1\n if i % 100 == 0:\n sys.stdout.write(\"\\r\")\n sys.stdout.write(str((100.0*(i)/len(test_labels))) + \n \"% complete (KNN)\")\n\n error_rate = 100.0*error_count/len(test_labels)\n test_t = time.time() - start_t\n print \"Test time = \" + str(test_t)\n return error_rate, train_t, test_t\n\n# Benchmark Random Forest Classifier\ndef benchmarkRF(train_imgs, train_labels, test_imgs, test_labels):\n rf = sk.ensemble.RandomForestClassifier()\n\n start_t = time.time()\n rf.fit(train_imgs, train_labels)\n train_t = time.time() - start_t \n\n error_count = 0\n print \"Test Size = \" + str(len(test_labels))\n \n start_t = time.time()\n for i in range(0, len(test_labels)):\n if rf.predict(test_imgs[i, :]) != test_labels[i]:\n error_count = error_count + 1\n if (i % 100) == 0:\n sys.stdout.write(\"\\r\")\n sys.stdout.write(str((100.0*(i)/len(test_labels))) + \n \"% complete (RF)\")\n \n test_t = time.time() - start_t\n error_rate = 100.0 * 
error_count/(len(test_labels))\n return error_rate, train_t, test_t\n\n\nerr_rf, train_t_rf, test_t_rf = benchmarkRF(img_train[train_range, :], \n train_labels[train_range], \n img_train[test_range, :], \n train_labels[test_range])\n\nerr_knn, train_t_knn, test_t_knn = benchmarkKNN(img_train[train_range, :], \n train_labels[train_range], \n img_train[test_range, :], \n train_labels[test_range])\n\n\nprint \"RF Performance [Error %, train_t, test_t]: \" \nprint err_rf \nprint train_t_rf\nprint test_t_rf\n\nprint \"KNN Performance [Error %, train_t, test_t]: \" \nprint err_knn \nprint train_t_knn\nprint test_t_knn\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1857,"cells":{"__id__":{"kind":"number","value":15917148829991,"string":"15,917,148,829,991"},"blob_id":{"kind":"string","value":"feef6d40f8ceae7981a241f84c6c5bc2157704bd"},"directory_id":{"kind":"string","value":"e5150b8862688eff630a542df68fc4f634c88349"},"path":{"kind":"string","value":"/views.py"},"content_id":{"kind":"string","value":"fa8051a5fa4154cee756e102aee3eb6433694291"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"derekzhang79/outofthedarj.com"},"repo_url":{"kind":"string","value":"https://github.com/derekzhang79/outofthedarj.com"},"snapshot_id":{"kind":"string","value":"265bbd765fdad691de9384d756fd2853da69f00c"},"revision_id":{"kind":"string","value":"2b4463548f7ef8869c4d2a51735336bf6ef07ec5"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T06:59:21.208366","string":"2021-01-18T06:59:21.208366"},"revision_date":{"kind":"timestamp","value":"2010-05-15T13:07:29","string":"2010-05-15T13:07:29"},"committer_date":{"kind":"timestamp","value":"2010-05-15T13:07:29","string":"2010-05-15T13:07:29"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import render_to_response\n\ndef coming_soon(request):\n return render_to_response('coming_soon.html',)\n\ndef base_template(request):\n return 
render_to_response('base_template.html',)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1858,"cells":{"__id__":{"kind":"number","value":9302899183259,"string":"9,302,899,183,259"},"blob_id":{"kind":"string","value":"321a692c776bc9ff7b0d9f112d4a505ff9f45257"},"directory_id":{"kind":"string","value":"6cde2f21ba42762b10bd0092ccff262ea0d62e53"},"path":{"kind":"string","value":"/src/mastersproject/CMPE295B_Submission/sentimentAnalyzer.py"},"content_id":{"kind":"string","value":"7819b279338fde696e73bd78aec3f95c17dd75b5"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"abhimanyusalokhe/DSA"},"repo_url":{"kind":"string","value":"https://github.com/abhimanyusalokhe/DSA"},"snapshot_id":{"kind":"string","value":"ca63d8861bf203ee4554e14525a496f8481a6df3"},"revision_id":{"kind":"string","value":"d0620fffabca5df0fa3c8b5fcb6efe01ee6a87a6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T19:55:19.931272","string":"2016-09-05T19:55:19.931272"},"revision_date":{"kind":"timestamp","value":"2014-02-23T05:13:17","string":"2014-02-23T05:13:17"},"committer_date":{"kind":"timestamp","value":"2014-02-23T05:13:17","string":"2014-02-23T05:13:17"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"
gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import re\nimport csv\nimport pprint\nimport nltk.classify\nimport MySQLdb as mdb\nimport sys\nimport time\nimport pickle\n\n#Load emoticons from file\nfp = open('emoticons.txt', \"r\")\nemoticons = {}\nfor line in fp:\n\trow = line.rstrip().split(',')\n emoticons[str(row[0])] = row[1]\n\nfp.close() \n\n#replace repeated characters\ndef replaceRepeatedLetters(s):\n #look for 2 or more repetitions of character\n pattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL) \n return pattern.sub(r\"\\1\\1\", s)\n#end\n\n#load StopWords from given file\ndef readStopWordList(stopWordFile):\n stopWords = []\n \n fp = open(stopWordFile, 'r')\n line = fp.readline()\n while line:\n word = line.strip()\n stopWords.append(word)\n line = fp.readline()\n\n stopWords.append('URL')\t\n stopWords.append('AT_USER')\t\n fp.close()\n return stopWords\n#end\n\n#start process_tweet\ndef processRawTweet(tweet):\n # process the tweets\n \n #lower case conversion\n tweet = tweet.lower()\n #Replace www.* or https?://* by URL\n tweet = re.sub('((www\\.[\\s]+)|(https?://[^\\s]+))','URL',tweet)\n #Replace @username by AT_USER\n tweet = re.sub('@[^\\s]+','AT_USER',tweet) \n #strip extra white spaces\n tweet = re.sub('[\\s]+', ' ', tweet)\n #Convert #word to word\n tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\n \n tweet = tweet.strip('\\'\"')\n return tweet\n#end \n\n\n\n#start getFeaturesInVector\ndef getFeaturesInVector(tweet, stopWords):\n FeatureVectoryContainer = []\n \n words = tweet.split()\n for w in words:\n #replace two or more with two occurrences \n w = replaceRepeatedLetters(w) \n #strip punctuation\n w = w.strip('\\'\"?,.')\n #check if it consists of only words\n val = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$\", w)\n\t\n\t#replace emoticons\n\tword_to_replace= emoticons.get(w)\n\tif (word_to_replace is not 
None):\n\t\tw = word_to_replace\n\t\n #ignore if it is a stopWord\n if(w in stopWords or val is None):\n continue\n else:\n FeatureVectoryContainer.append(w.lower())\n return FeatureVectoryContainer \n#end\n\n#start getTotalFeatureList\ndef getTotalFeatureList(fileName):\n fp = open(fileName, 'r')\n line = fp.readline()\n featureListContainer = []\n while line:\n line = line.strip()\n featureListContainer.append(line)\n line = fp.readline()\n fp.close()\n return featureListContainer\n#end\n\n#Feature set extraction from tweet\ndef extractFeaturesFromTweet(tweet):\n tweet_words = set(tweet)\n features = {}\n for word in featureListContainer:\n features['contains(%s)' % word] = (word in tweet_words)\n return features\n#end\n\n\n#Read the tweets one by one and process it\ninput_Tweets = csv.reader(open('data/feature_list/full_training_dataset.csv', 'rb'), delimiter=',', quotechar='|')\nst = open('data/feature_list/stopwords.txt', 'r')\nstopWords = readStopWordList('data/feature_list/stopwords.txt')\nfeatureListContainer = getTotalFeatureList('data/feature_list/feature_list.txt')\npp = pprint.PrettyPrinter()\ncount = 0;\ntweets = []\nfor row in input_Tweets:\n sentiment = row[0]\n tweet = row[1]\n processedTweet = processRawTweet(tweet)\n FeatureVectoryContainer = getFeaturesInVector(processedTweet, stopWords)\n tweets.append((FeatureVectoryContainer, sentiment));\n#end loop\n\n#training_set = nltk.classify.util.apply_features(extractFeaturesFromTweet, tweets)\n#pp.pprint(training_set)\n\n# Train and save the Naive Bayes classifier\n#NBClassifier = nltk.NaiveBayesClassifier.train(training_set)\n#fc = open('my_classifier.pickle', 'wb')\n#pickle.dump(NBClassifier, fc)\n#fc.close()\n\n# Load the Naive Bayes classifier\nf= open('my_classifier.pickle')\nclassifier = pickle.load(f)\nf.close()\n\n#Infinite loop to read and process tweets\nvar = 1\nwhile var == 1 :\n\t#Connect to database and fetch tweets:\n\tconn = None\n\n\ttry:\n\t\tconn = 
mdb.connect(host=\"localhost\",\n\t\t\t\t\tuser=\"root\",\n\t\t\t\t\tpasswd=\"nazya\",\n\t\t\t\t\tdb=\"mydb\")\n\n\t\tcursor = conn.cursor()\n\t\tquery = \"Select id, TweetText, Location from tweets where status='u'\"\n\t\tcursor.execute(query)\n\t\trows = cursor.fetchall()\n\n\t\tfor row in rows:\n\t\t\tnewTweet = row[1]\n\t\t\tt_id = row[0]\n\t\t\tprocessedNewTweet = processRawTweet(newTweet)\n\t\t\tsentiment = \"neutral\"\n\t\t\t#Using trained classifier to extract sentiment\n\t\t\tsentiment = classifier.classify(extractFeaturesFromTweet(getFeaturesInVector(processedNewTweet, stopWords)))\n\t\t\tprint \"newTweet = %s, sentiment = %s\\n\" % (newTweet, sentiment)\n\t\t #updateQuery=\"update tweets set status = 'p', sentiment= where TweetText != 'effective but too-tepid biopic';\"\n\t\t\tcursor.execute(\"UPDATE tweets SET Sentiment = %s, status = %s WHERE id = %s\", \n\t\t\t\t( sentiment, 'p', t_id))\n\t\t\tconn.commit() \n\t \n\t\t\tprint \"Number of rows updated: %d\" % cursor.rowcount\n\t\n\t\t\n\texcept mdb.Error, e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\t\tsys.exit(1)\n\t \n\tfinally: \n\t\t\n\t if conn: \n\t\tcursor.close()\n\t\tconn.close()\n\t\tprint \"Sleeping of 100 seconds!!!\"\n\t\ttime.sleep(100)\n\n\t#end db\nelse :\n\tprint \"Good 
Bye!!!\"\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1859,"cells":{"__id__":{"kind":"number","value":17875653896027,"string":"17,875,653,896,027"},"blob_id":{"kind":"string","value":"4ff989abedc4b3a9c111ac78e8f64af83d45c637"},"directory_id":{"kind":"string","value":"80a338adee13ea767f3b85b68d7bf4decdb05755"},"path":{"kind":"string","value":"/tailseq/tailseq/logger.py"},"content_id":{"kind":"string","value":"424385635c029b4b0d1c250e86ffc8250768611e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"hbc/daley_tailseq"},"repo_url":{"kind":"string","value":"https://github.com/hbc/daley_tailseq"},"snapshot_id":{"kind":"string","value":"8f612f0d4dc01f7985b5c7323a5852f230b0f63c"},"revision_id":{"kind":"string","value":"58d04357fe620f04d663ad1283f609ea33bd39e1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-08T01:58:56.108941","string":"2016-09-08T01:58:56.108941"},"revision_date":{"kind":"timestamp","value":"2014-11-16T01:01:11","string":"2014-11-16T01:01:11"},"committer_date":{"kind":"timestamp","value":"2014-11-16T01:01:11","string":"2014-11-16T01:01:11"},"github_id":{"kind":"number","value":19383744,"string":"19,383,744"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"k
ind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import logging\n\nlogging.basicConfig(level=logging.DEBUG)\nmy_logger = logging.getLogger('Tail-Seq')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1860,"cells":{"__id__":{"kind":"number","value":2946347576916,"string":"2,946,347,576,916"},"blob_id":{"kind":"string","value":"ff3485aa0a2788d11638b16680890556c9532cca"},"directory_id":{"kind":"string","value":"7a64e3dd6a74dfa2853e0bc10e5ec934badb36a2"},"path":{"kind":"string","value":"/csv_transformer.py"},"content_id":{"kind":"string","value":"2fec12c00fdb2b21b158a25d2b550b5fc406a555"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"siteshen/taobaospider"},"repo_url":{"kind":"string","value":"https://github.com/siteshen/taobaospider"},"snapshot_id":{"kind":"string","value":"b3d4dd70d1f69201921a42ee858ebc3e6f31327c"},"revision_id":{"kind":"string","value":"82ed6c6eb9b8a93a179b1f8b7be04caafddd8724"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T12:28:06.582169","string":"2021-01-18T12:28:06.582169"},"revision_date":{"kind":"timestamp","value":"2013-09-26T10:24:46","string":"2013-09-26T10:24:46"},"committer_date":{"kind":"timestamp","value":"2013-09-26T10:24:46","string":"2013-09-26T10:24:46"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"
gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import csv\nimport types\nimport sys\n\nclass Table(object):\n def __init__(self, tablename):\n self.tablename = tablename\n self.records = []\n\n def __len__(self):\n return len(self.records)\n\n def getrecords(self, filename, skipheader=True, n=None):\n self.readfile(filename, self.getfields(), skipheader, n)\n print \"Len of records: %d\" % len(self)\n\n def create_and_insert(self, conn, batch=100):\n cnt = 0\n conn.execute(self.table_create_sql())\n conn.commit()\n print \"total: %d\" % len(self)\n for record in self.records:\n print \"cnt: %d\" % cnt\n conn.execute(self.insertrecord_sql(record))\n cnt += 1\n if cnt % batch == 0:\n conn.commit()\n\n conn.commit()\n\n def getfields(self, fields=[]):\n \"\"\"\n The fields is []\n \"\"\"\n newfields = []\n\n for field in fields:\n l = list(field)\n if len(field) == 3: l.append(None)\n newfields.append(l)\n\n return newfields\n\n\n def readfile(self, data_file, fields, skipheader=True, n=None):\n print \"Read file: %s\" % data_file\n fp = open(data_file)\n\n for i, line in enumerate(fp):\n if i == 0 and skipheader == True:\n continue\n if i == n:\n break\n record = self.makerecord(line, fields)\n self.addrecord(record)\n\n fp.close()\n\n def makerecord(self, line, fields):\n obj = {}\n\n rawdata = line.split(',')\n for field, index, cast, typename in fields:\n try:\n v = rawdata[index]\n v = cast(v)\n except Exception as ex:\n print ex, field, index\n v = None\n\n obj[field] = v\n\n return obj\n\n def addrecord(self, record):\n self.records.append(record)\n\n def table_create_sql(self):\n mapper = {\n \"str\": \"text\",\n \"int\": \"int\"\n }\n\n sql_template = \"create table IF NOT 
EXISTS %s ( id int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY, %s ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci\"\n\n values = []\n for field, idx, cast, typename in self.getfields():\n if typename:\n values.append(\"%s %s\" % (field, typename))\n else:\n values.append(\"%s %s\" % (field, mapper[cast.__name__]))\n\n return sql_template % (self.tablename, \", \".join(values))\n\n def insertrecord_sql(self, record):\n \"\"\"\n Generate the sql according to the tablename and fields\n \"\"\"\n kvs = [[field, record[field]] for field, idx, cast, typename in self.getfields()]\n\n def postprocess(v):\n if v == None: return 'NULL'\n else: return \"'%s'\" % str(v)\n\n return \"insert into %s (%s) values (%s)\" % \\\n (self.tablename, ','.join([kv[0] for kv in kvs]), ','.join([postprocess(kv[1]) for kv in kvs]))\n\n\nclass Taobao(Table):\n def __init__(self, tablename):\n super(Taobao, self).__init__(tablename)\n self.tablename = tablename\n\n def getfields(self):\n fields = [\n ('uid', 0, str),\n ('ip', 1, str),\n ('agent', 2, str),\n ('url', 5, str),\n ('site', 6, str),\n ('domain', 7, str),\n ('referurl', 8, str),\n ('date', 11, str, 'datetime'),\n ('staytime', 12, int),\n ('url_kw', 15, str),\n ('refer_kw', 16, str),\n ('raw_id', 18, int),\n ('gener', 21, int),\n ('age', 22, int),\n ('city', 23, int),\n ('income_pre', 24, int),\n ('income_fml', 25, int),\n ('education', 26, int),\n ('job', 27, int),\n ('industry', 28, int),\n ('birth', 29, str)\n ]\n\n return super(Taobao, self).getfields(fields)\n\ndef main(filename):\n taobao = Taobao('taobao')\n print taobao.table_create_sql()\n taobao.getrecords(filename, skipheader=True)\n print len(taobao)\n print taobao.records[0]\n record = taobao.records[0]\n record['birth'] = None\n print taobao.insertrecord_sql(record)\n # import sqlite3\n # conn = sqlite3.connect('test.db')\n # taobao.create_and_insert(conn)\n\nif __name__ == '__main__':\n print sys.argv\n 
main(*sys.argv[1:])\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1861,"cells":{"__id__":{"kind":"number","value":5162550708927,"string":"5,162,550,708,927"},"blob_id":{"kind":"string","value":"60ae3ac4a181c40439a15857e0080fcd9b7ccee8"},"directory_id":{"kind":"string","value":"ecc1e8a020e5328a8fdda17513ad56c11dd8993f"},"path":{"kind":"string","value":"/007.py"},"content_id":{"kind":"string","value":"cc4435db99e19f67fe14485793426e4d06bb00f2"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"masato-mi/NLP_100"},"repo_url":{"kind":"string","value":"https://github.com/masato-mi/NLP_100"},"snapshot_id":{"kind":"string","value":"ffe8338ac11b452d47c5c5f343db9dd047b3f703"},"revision_id":{"kind":"string","value":"2e3b0465f2d25131aab673e0a72e8a44e1e7fb3a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-11T03:01:00.283957","string":"2016-09-11T03:01:00.283957"},"revision_date":{"kind":"timestamp","value":"2014-05-19T11:10:14","string":"2014-05-19T11:10:14"},"committer_date":{"kind":"timestamp","value":"2014-05-19T11:10:14","string":"2014-05-19T11:10:14"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_ar
chived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding : UTF-8\nfile = open(\"address.txt\", \"r\")\nnew_set = set()\nfor line in file.readlines(): \n item = line.split(\" \") #1コラム目と2コラム目を1スペースで分けておく.\n new_set.add(item[0]) #1コラム目を集合(new_set)に加える.これより重複がなくなる.\n \nprint len(new_set),\n \nfile.close()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1862,"cells":{"__id__":{"kind":"number","value":1116691531771,"string":"1,116,691,531,771"},"blob_id":{"kind":"string","value":"ef009f4230fae4701ae02d2cbc4d363def3795e9"},"directory_id":{"kind":"string","value":"982f68f5ad8972523913eb01a322d2ba1cd02e92"},"path":{"kind":"string","value":"/source/Input/KeyboardInputSource.py"},"content_id":{"kind":"string","value":"d07b7740003b79b2711d9ba6aed1ae15cbaa2614"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"blakeohare/pyweek-soulshenanigans"},"repo_url":{"kind":"string","value":"https://github.com/blakeohare/pyweek-soulshenanigans"},"snapshot_id":{"kind":"string","value":"e96d279b9f672631255e0b5aececa94cb8be38ee"},"revision_id":{"kind":"string","value":"c007d14299901ca3cc72ebf1fcde03a075fcc274"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T00:23:26.837501","string":"2016-09-06T00:23:26.837501"},"revision_date":{"kind":"timestamp","value":"2010-04-04T00:05:38","string":"2010-04-04T00:05:38"},"committer_date":{"kind":"timestamp","value":"2010-04-04T00:05:38","string":"2010-04-04T00:05:38"},"github_id":{"kind":"number","value":38605569,"string":"38,605,569"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\r\nclass KeyboardInputSource:\r\n\tdef __init__(self):\r\n\t\tself.keymap = {\r\n\t\t\tK_UP : 'up',\r\n\t\t\tK_DOWN : 'down',\r\n\t\t\tK_LEFT : 'left',\r\n\t\t\tK_RIGHT : 'right',\r\n\t\t\tK_SPACE : 'B',\r\n\t\t\tK_z : 'Y',\r\n\t\t\tK_x : 'A',\r\n\t\t\tK_m : 'X',\r\n\t\t\tK_RETURN : 'start',\r\n\t\t\tK_1 : 'L',\r\n\t\t\tK_2 : 'R'\r\n\t\t\t}\r\n\t\r\n\tdef get_input_type(self):\r\n\t\treturn 'keyboard'\r\n\t\r\n\tdef process_events(self, pygame_events):\r\n\t\tevents = []\r\n\t\tfor event in pygame_events:\r\n\t\t\tif event.type == KEYDOWN or 
event.type == KEYUP:\r\n\t\t\t\tif event.key in self.keymap.keys():\r\n\t\t\t\t\tevents.append(InputEvent(self.keymap[event.key], event.type == KEYDOWN))\r\n\t\treturn events\r\n\t\r\n\tdef get_name(self):\r\n\t\treturn \"Keyboard\"\r\n\t\t\r\n\tdef configure_key(self, key):\r\n\t\tpass\r\n\t\t#TODO: configure keyboard keys\r\n\t\t"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1863,"cells":{"__id__":{"kind":"number","value":1640677556588,"string":"1,640,677,556,588"},"blob_id":{"kind":"string","value":"37ca807a6625d370fbd4f3f36ebc65de94b7af52"},"directory_id":{"kind":"string","value":"1a191e99d978691d184441bac1ad8e69f74c33ad"},"path":{"kind":"string","value":"/src/weapon.py"},"content_id":{"kind":"string","value":"607920ee5728d5293fd1563e54590f6d534377ce"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"GregorStocks/mobster"},"repo_url":{"kind":"string","value":"https://github.com/GregorStocks/mobster"},"snapshot_id":{"kind":"string","value":"503ba41842065d4278b0dcb1e71bf5cb439e799a"},"revision_id":{"kind":"string","value":"bcdb9b2c40f5ae6e1d32028b9b3dbca48ab41c83"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-05T02:12:21.830116","string":"2020-06-05T02:12:21.830116"},"revision_date":{"kind":"timestamp","value":"2011-01-18T22:22:11","string":"2011-01-18T22:22:11"},"committer_date":{"kind":"timestamp","value":"2011-01-18T22:22:11","string":"2011-01-18T22:22:11"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nclass Weapon(object):\n def __init__(self, range):\n self.range = 
range\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1864,"cells":{"__id__":{"kind":"number","value":4741643933105,"string":"4,741,643,933,105"},"blob_id":{"kind":"string","value":"a232121eaa789c6639bb5844ac2bb9cf5ba1af3f"},"directory_id":{"kind":"string","value":"d5a35ee0546b79bb1ca602a84d93567901fc22a6"},"path":{"kind":"string","value":"/blog/context_processors.py"},"content_id":{"kind":"string","value":"904e1a22b3998a6108dacd14fe882d65ef78711d"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"brsyuksel/how-to"},"repo_url":{"kind":"string","value":"https://github.com/brsyuksel/how-to"},"snapshot_id":{"kind":"string","value":"9270591da8fd97d5036f3971963820c2f9dde2dc"},"revision_id":{"kind":"string","value":"0b2a3cd0e5f19b0b3ba76844167641a92f28b835"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-14T06:52:03.703455","string":"2020-04-14T06:52:03.703455"},"revision_date":{"kind":"timestamp","value":"2014-03-24T11:54:12","string":"2014-03-24T11:54:12"},"committer_date":{"kind":"timestamp","value":"2014-03-24T11:54:12","string":"2014-03-24T11:54:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"nu
ll"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from .utils import get_settings\n\ndef howto_settings(request):\n\t_settings = get_settings()\n\treturn {'blog': _settings}"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1865,"cells":{"__id__":{"kind":"number","value":12369505845252,"string":"12,369,505,845,252"},"blob_id":{"kind":"string","value":"691c60ac581479c653144d6e1774c663872195fc"},"directory_id":{"kind":"string","value":"d3136f125eb8b7f57cf73275696588b2f723d409"},"path":{"kind":"string","value":"/geburtstag.py"},"content_id":{"kind":"string","value":"a786ceae40ab2d0c142b5bbdf8329b2b4da7c5a0"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tom111/NullenEinsen"},"repo_url":{"kind":"string","value":"https://github.com/tom111/NullenEinsen"},"snapshot_id":{"kind":"string","value":"0ab1c94a883a7c55cd97e09d4ad412437ad1fe6e"},"revision_id":{"kind":"string","value":"ef3e97143d90e15e56ee9483642307e68289ee78"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-02T04:53:19.552764","string":"2021-01-02T04:53:19.552764"},"revision_date":{"kind":"timestamp","value":"2014-05-16T13:35:15","string":"2014-05-16T13:35:15"},"committer_date":{"kind":"timestamp","value":"2014-05-16T13:35:15","string":"2014-05-16T13:35:15"},"github_id":{"kind":"number","value":19422760,"string":"19,422,760"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"ki
nd":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def geburtstags_wahrscheinlichkeit (n):\n pbar = 1.0;\n for i in range (n):\n pbar = pbar * (1 - float(i)/365)\n return (1 - pbar)\n\nprint [(n,geburtstags_wahrscheinlichkeit(n)) for n in range (50)]\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1866,"cells":{"__id__":{"kind":"number","value":687194788752,"string":"687,194,788,752"},"blob_id":{"kind":"string","value":"76342996a6f3d2be25ee1557015c9e772a55ad85"},"directory_id":{"kind":"string","value":"c65aa011cb879dc77d74ed3b111dc66508c50f15"},"path":{"kind":"string","value":"/server_controller.py"},"content_id":{"kind":"string","value":"831a6e969f253ba0db12bcbc29ce095b5db0eac9"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"nolifelover/Football-Info"},"repo_url":{"kind":"string","value":"https://github.com/nolifelover/Football-Info"},"snapshot_id":{"kind":"string","value":"f288d8fcfe8d9b738f451d3a249888843a47c9e5"},"revision_id":{"kind":"string","value":"258d14f690d96594a9a32321b2f1f485a3f2652a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T07:33:10.557972","string":"2021-01-18T07:33:10.557972"},"revision_date":{"kind":"timestamp","value":"2010-06-10T13:21:02","string":"2010-06-10T13:21:02"},"committer_date":{"kind":"timestamp","value":"2010-06-10T13:21:02","string":"2010-06-10T13:21:02"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#! /usr/bin/env python\n#! 
coding: utf-8\n# pylint: disable-msg=W0311\n\n## @server_controller - May 31, 2010\n# Remote Controller for Football Info 2010\n#\n# - Login via SSH with private_key\n# - Use rsync to move local files to server\n# \nfrom lib import ssh\nfrom sys import argv\n\nprivate_key = \"/home/Workspace/FootballInfo2010/id_rsa\"\nremote = ssh.Connection('203.128.246.60', 'root', private_key)\n\nhelp = \"\"\"Sử dụng:\n python server_controller.py update | rollback\n\"\"\"\n\ndef update():\n command = \"ls -l\"\n remote.execute(command)\n\ndef rollback():\n pass\n\ndef connect():\n pass\n\ndef sync():\n pass\n\nif __name__ == '__main__':\n try:\n command = argv[1]\n except IndexError:\n command = None\n\n if command == \"update\":\n remote.execute(\"update command\")\n\n elif command == \"rollback\":\n remote.execute(\"rollback command\")\n\n else:\n remote.execute(\"ls -l /home\")\n# print help\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":1867,"cells":{"__id__":{"kind":"number","value":5806795800676,"string":"5,806,795,800,676"},"blob_id":{"kind":"string","value":"b64d45496e866970edc88e3e45061f0f99a786bd"},"directory_id":{"kind":"string","value":"1461c2d47cebc3ae4570ef3c97e10762e6e72af7"},"path":{"kind":"string","value":"/dlc-database/gendb.py"},"content_id":{"kind":"string","value":"5680989b2987de90fe0a243a66391917539cbe44"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"rossengeorgiev/sg-enhancement-addon"},"repo_url":{"kind":"string","value":"https://github.com/rossengeorgiev/sg-enhancement-addon"},"snapshot_id":{"kind":"string","value":"5194314fa751cde2e96deafa31b779dad32673a2"},"revision_id":{"kind":"string","value":"6db1836d00336a5207ec67ca59a56275d7ae6665"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-13T01:25:55.184333","string":"2021-01-13T01:25:55.184333"},"revision_date":{"kind":"timestamp","value":"2013-10-26T05:46:41","string":"2013-10-26T05:46:41"},"committer_date":{"kind":"timestamp","value":"2013-10-26T05:46:41","string":"2013-10-26T05:46:41"},"github_id":{"kind":"number","value":6637494,"string":"6,637,494"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2013-10-26T05:46:42","string":"2013-10-26T05:46:42"},"gha_created_at":{"kind":"timestamp","value":"2012-11-11T10:44:20","string":"2012-11-11T10:44:20"},"gha_updated_at":{"kind":"timestamp","value":"2013-10-26T05:46:41","string":"2013-10-26T05:46:41"},"gha_pushed_at":{"kind":"timestamp","value":"2013-10-26T05:46:41","string":"2013-10-26T05:46:41"},"gha_size":{"kind":"number","value":3639,"string":"3,639"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"number","value":4,"string":"4"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"JavaScript"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python -u\n\n\"\"\"\nDLC Database generator - extracts all active DLCs from store.steampowered.com\nCopyright (C) 2012 Rossen Georgiev\n\nThis program 
is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\"\"\"\n\nimport json\nimport urllib2 \nimport re\nimport sys\nimport threading\n\n# global vars\npage = 1\ns_url = \"http://store.steampowered.com/search/?term=%20&category1=21&sort_order=ASC&page=\"\ndlc = {}\noutput_filename = \"sge_dlc_database.json\"\nmax_threads = 5\n\ndef flush_db():\n\t#output to json\n\tprint \"\\nWritting to %s\" % output_filename\n\n\tout = open(output_filename, 'w')\n\tout.write(json.dumps(dlc,indent=4).replace(\" \",'\\t'))\n\n\t# close files\n\tout.close()\n\tlog.close()\n\ndef worker(url,name):\n\tglobal dlc, thread_n, log\n\t\n\ttry:\n\t\tr = bro.open(url).read()\n\t\tgame = re.findall(r\"base game.*?>(.*?)<\\/a> on Steam\", r)[0]\n\n\t\tif not dlc.has_key(game):\n\t\t\tdlc[game] = []\n\t\t\n\t\tdlc[game].append(name)\n\texcept:\n\t\tlog.write(\"Failed to parse: %s\\n\" % url)\t\n\n\tthread_n -= 1 # thread is finished\n\nbro = urllib2.build_opener()\nbro.addheaders.append(('Cookie', 'birthtime=-1735660799')) # to pass steam age check\n\nthread_n = 0\nlog = open('error.log', 'w')\n\ntry:\n\twhile 1:\n\t\t# print progress\n\t\tprint \"\\rParsing page %d... \" % page, \" \"*10,\n\n\t\tr = re.sub(\"[\\n\\r]\", \"\", bro.open(s_url + str(page)).read())\n\t\td = re.findall(r\"href=\\\"(.{,150})\\\" class=\\\"search_result_row.*?

(.*?)<\\/h4>\", r)\n\n\t\t# we've reached the end, stop\n\t\tif len(d) == 0:\n\t\t\tbreak;\n\t\t\tprint \"\\n\",\n\n\t\t# this is 2012, parallel is king\n\t\tx = 0\n\t\tthread_n = 0\n\t\twhile x < len(d):\n\t\t\tif thread_n < max_threads:\n\t\t\t\tdlc_url = d[x][0]\n\t\t\t\tdlc_name = d[x][1]\n\t\t\t\tthreading.Thread(target=worker, args=(dlc_url,dlc_name,)).start()\n\t\t\t\tthread_n += 1\n\n\t\t\t\t# update progress\n\t\t\t\tx += 1\n\t\t\t\tprint \"\\rParsing page %d...\" % page, \"%d/%d\" % (x,len(d)),\n\n\t\t# move to next page\n\t\tpage += 1\n\nexcept KeyboardInterrupt:\n\tprint \"\\nInterrupted, stopping...\\n\",\n\tflush_db()\n\tsys.exit()\n\nflush_db()\n\n\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1868,"cells":{"__id__":{"kind":"number","value":9715216041193,"string":"9,715,216,041,193"},"blob_id":{"kind":"string","value":"80b8e71e03fdc33f73e9ba022a32bdf6601bfb44"},"directory_id":{"kind":"string","value":"5b78dba6e26e513823da9187daf998022aa16de4"},"path":{"kind":"string","value":"/Console.py"},"content_id":{"kind":"string","value":"38037063c6025ae05650f4756c9a46dbea3e649a"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rinrinne/mkaproc"},"repo_url":{"kind":"string","value":"https://github.com/rinrinne/mkaproc"},"snapshot_id":{"kind":"string","value":"3aaab7dd5fb25927d3f8b2aed74577d851f157d7"},"revision_id":{"kind":"string","value":"a083fe420abe64b57a3b4ace7d4599b574f34637"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T19:21:50.795913","string":"2021-01-22T19:21:50.795913"},"revision_date":{"kind":"timestamp","value":"2009-12-29T17:49:43","string":"2009-12-29T17:49:43"},"committer_date":{"kind":"timestamp","value":"2009-12-29T17:49:43","string":"2009-12-29T17:49:43"},"github_id":{"kind":"number","value":139993,"string":"139,993"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport sys, codecs, os, os.path\n\nfrom subprocess import *\n\nclass Console:\n def __init__(self, charset = 'iso8859-1'):\n self.writable = False\n self.executable = True\n self.logging = False\n self.syscharset = charset\n self.paths = []\n self.current = None\n self.log = []\n \n def __repr__(self):\n \"\"\"String representation.\"\"\"\n return '<%r: writable=%r, executable=%r, logging=%r, syscharset=%r, paths=%r, current=%r, log=%r>' % (\n self.__class__.__name__, self.writable, self.executable, self.logging, self.syscharset, self.paths, self.current, 
self.log\n )\n \n def root(self, current):\n if not os.path.isdir(current):\n os.mkdir(current)\n self.current = current\n \n def write(self, line, prefix = u'[WRITE]: ', fd=None):\n if self.writable:\n if fd is not None:\n out = sys.stdout\n else:\n out = fd\n out.write((prefix + line).encode(self.syscharset))\n out.write('\\n')\n \n if self.logging:\n self.log.append(prefix + line)\n self.log.append('\\n')\n \n def writeerr(self, line, prefix = u'[ERROR]: '):\n self.write(line, prefix, sys.stderr)\n \n def appendpath(self, path):\n if isinstance(path, str):\n self.paths.append([path])\n elif isinstance(path, list):\n self.paths.append(path)\n \n def poppath(self, depth=0):\n ret = []\n if(depth==0):\n ret = self.paths\n self.paths = []\n else:\n for i in range(depth):\n ret.append(self.paths.pop())\n return ret\n \n def execute(self, cmd):\n cwd = os.getcwd()\n line = ' '.join(cmd)\n self.writeerr(line, u'[EXEC]: ')\n if self.executable:\n pathlist = []\n for path in self.paths:\n pathlist += path\n \n envmap = os.environ.copy()\n envmap['PATH'] = os.pathsep.join(map(lambda x: x.encode(self.syscharset), pathlist) + [envmap['PATH']])\n \n if self.logging:\n proc = Popen(line.encode(self.syscharset), shell=True, stdout=PIPE, stderr=STDOUT, cwd=self.current, env=envmap)\n \n for line in proc.stdout:\n self.write(line.decode(self.syscharset))\n \n ret = proc.retcode\n else:\n ret = call(line.encode(self.syscharset), shell=True, cwd=self.current, env=envmap)\n\n return ret\n else:\n return 
-1\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":1869,"cells":{"__id__":{"kind":"number","value":15547781656112,"string":"15,547,781,656,112"},"blob_id":{"kind":"string","value":"dc341bd5ec6098c1b33d31b1967e87c1277d6542"},"directory_id":{"kind":"string","value":"d6d9bdf00c73cfc77131a965ecd68f20e9860540"},"path":{"kind":"string","value":"/impact/tests/test_api.py"},"content_id":{"kind":"string","value":"36bc0dbd2279085ad6c9d81698d3aa9dccd044d7"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"AIFDR/riab"},"repo_url":{"kind":"string","value":"https://github.com/AIFDR/riab"},"snapshot_id":{"kind":"string","value":"aab9b433640464af43817e9658b14620e6b516a3"},"revision_id":{"kind":"string","value":"0bc2cbbef82be588e3568bb2ac8739727fb906c1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-10T02:03:20.271377","string":"2016-09-10T02:03:20.271377"},"revision_date":{"kind":"timestamp","value":"2012-03-05T09:16:24","string":"2012-03-05T09:16:24"},"committer_date":{"kind":"timestamp","value":"2012-03-05T09:16:24","string":"2012-03-05T09:16:24"},"github_id":{"kind":"number","value":1462033,"string":"1,462,033"},"star_events_count":{"kind":"number","value":7,"string":"7"},"fork_events_count":{"kind":"number","value":4,"string":"4"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_languag
e":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import unittest\nimport os\nfrom django.test.client import Client\nfrom django.utils import simplejson as json\nfrom django.conf import settings\nfrom impact.storage.io import save_to_geonode\n\nfrom geonode.maps.utils import check_geonode_is_up\nfrom geonode.maps.models import Layer\nfrom geonode.maps.utils import get_valid_user\nfrom impact.storage.io import check_layer\nfrom impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL\n\nfrom impact.tests.plugins import unspecific_building_impact_model\n\n\nclass Test_HTTP(unittest.TestCase):\n \"\"\"Test suite for API\n \"\"\"\n\n def setUp(self):\n \"\"\"Check geonode and create valid superuser\n \"\"\"\n check_geonode_is_up()\n self.user = get_valid_user()\n\n def tearDown(self):\n pass\n\n def test_functions(self):\n \"\"\"Functions can be retrieved from the HTTP Rest API\n \"\"\"\n\n c = Client()\n rv = c.get('/impact/api/functions/')\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n msg = ('The api should return a dictionary with at least one item. 
'\n 'The key of that item should be \"functions\"')\n assert 'functions' in data, msg\n functions = data['functions']\n\n msg = ('No functions were found in the functions list, '\n 'not even the built-in ones')\n assert len(functions) > 0, msg\n\n def test_layers(self):\n \"\"\"Layers can be retrieved from the HTTP Rest API\n \"\"\"\n\n c = Client()\n rv = c.get('/impact/api/layers/')\n self.assertEqual(rv.status_code, 200)\n self.assertEqual(rv['Content-Type'], 'application/json')\n data = json.loads(rv.content)\n\n def test_calculate_fatality(self):\n \"\"\"Earthquake fatalities calculation via the HTTP Rest API is correct\n \"\"\"\n\n # Upload required data first\n for filename in ['Earthquake_Ground_Shaking.asc',\n 'Population_2010_clip.tif']:\n thefile = os.path.join(TESTDATA, filename)\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n # Run calculation through API\n c = Client()\n rv = c.post('/impact/api/calculate/',\n dict(hazard_server=INTERNAL_SERVER_URL,\n hazard='geonode:earthquake_ground_shaking',\n exposure='geonode:population_2010_clip',\n exposure_server=INTERNAL_SERVER_URL,\n bbox='99.36,-2.199,102.237,0.00',\n impact_function='Earthquake Fatality Function',\n keywords='test,earthquake,fatality'))\n\n msg = 'Expected status code 200, got %i' % rv.status_code\n self.assertEqual(rv.status_code, 200), msg\n\n msg = ('Expected Content-Type \"application/json\", '\n 'got %s' % rv['Content-Type'])\n self.assertEqual(rv['Content-Type'], 'application/json'), msg\n\n data = json.loads(rv.content)\n\n if data['stacktrace'] is not None:\n msg = data['stacktrace']\n raise Exception(msg)\n\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n assert 'bbox' in data.keys()\n assert 'impact_function' in data.keys()\n\n layer_uri = data['layer']\n\n #FIXME: This is not a good 
way to access the layer name\n typename = layer_uri.split('/')[4]\n name = typename.split(':')[1]\n\n # Check the autogenerated styles were correctly uploaded\n layer = Layer.objects.get(name=name)\n\n msg = ('A new style should have been created for layer [%s] '\n 'got [%s] style instead.' % (name, layer.default_style.name))\n assert layer.default_style.name == name, msg\n\n def test_calculate_school_damage(self):\n \"\"\"Earthquake school damage calculation works via the HTTP REST API\n \"\"\"\n\n # Upload required data first\n for filename in ['lembang_mmi_hazmap.asc',\n 'lembang_schools.shp']:\n thefile = os.path.join(TESTDATA, filename)\n uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)\n check_layer(uploaded, full=True)\n\n # Run calculation through API\n c = Client()\n rv = c.post('/impact/api/calculate/', data=dict(\n hazard_server=INTERNAL_SERVER_URL,\n hazard='geonode:lembang_mmi_hazmap',\n exposure_server=INTERNAL_SERVER_URL,\n exposure='geonode:lembang_schools',\n bbox='105.592,-7.809,110.159,-5.647',\n impact_function='Earthquake Building Damage Function',\n keywords='test,schools,lembang',\n ))\n\n msg = 'Expected status code 200, got %i' % rv.status_code\n self.assertEqual(rv.status_code, 200), msg\n\n msg = ('Expected Content-Type \"application/json\", '\n 'got %s' % rv['Content-Type'])\n self.assertEqual(rv['Content-Type'], 'application/json'), msg\n\n data = json.loads(rv.content)\n\n if data['stacktrace'] is not None:\n msg = data['stacktrace']\n raise Exception(msg)\n\n assert 'hazard_layer' in data.keys()\n assert 'exposure_layer' in data.keys()\n assert 'run_duration' in data.keys()\n assert 'run_date' in data.keys()\n assert 'layer' in data.keys()\n\n # FIXME (Ole): Download result and check.\n\n\nif __name__ == '__main__':\n suite = unittest.makeSuite(Test_HTTP, 'test')\n runner = unittest.TextTestRunner(verbosity=2)\n 
runner.run(suite)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1870,"cells":{"__id__":{"kind":"number","value":738734417862,"string":"738,734,417,862"},"blob_id":{"kind":"string","value":"53c925fabc9a230fc5f184c67d732cd4fe5d777e"},"directory_id":{"kind":"string","value":"e4777f957d1ca17c7b99667ed66b0923e216754e"},"path":{"kind":"string","value":"/e/e.py"},"content_id":{"kind":"string","value":"84606d9308ca1ad76d8e3d4b278e0682eeed67e5"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"donce/university"},"repo_url":{"kind":"string","value":"https://github.com/donce/university"},"snapshot_id":{"kind":"string","value":"c77071a0133620ec5d0b3fbe8acd6d2289fcee5f"},"revision_id":{"kind":"string","value":"fbdb8a8c616fd1305144e944a9e68dce52678d50"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T20:54:34.666150","string":"2021-01-10T20:54:34.666150"},"revision_date":{"kind":"timestamp","value":"2013-03-16T09:01:46","string":"2013-03-16T09:01:46"},"committer_date":{"kind":"timestamp","value":"2013-03-16T09:01:46","string":"2013-03-16T09:01:46"},"github_id":{"kind":"number","value":6204675,"string":"6,204,675"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_lan
guage":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"def gcd(a, b):\n\twhile b != 0:\n\t\tt = a\n\t\ta = b\n\t\tb = t % b\n\treturn a\n\ndef fix(frac):\n\td = gcd(frac[0], frac[1])\n\treturn frac[0]/d, frac[1]/d\n\ndef join(a, b):\n\treturn (a[0]*b[1] + b[0], a[1] * b[1])\n\ndef calcInterval(start, end):\n\tf = 1\n\tadd = 0\n\tfor i in xrange(start, end):\n\t\tf *= i\n\t\tadd = i*add+1\n\treturn (add, f)\n\nfrom multiprocessing import Process, Value, Queue, Lock\n\nresults = Queue()\n\ncurrentJob = Value('L', 0)\njobLock = Lock()\n\nintervalSize = 10\n\ndef getJob():\n\tjobLock.acquire()\n\tnr = currentJob.value\n\tcurrentJob.value += 1\n\tjobLock.release()\n\tinterval = nr * intervalSize + 1, (nr+1) * intervalSize + 1\n\treturn (nr, interval)\n\ndef calculate():\n\ttry:\n\t\twhile True:\n\t\t\tjob = getJob()\n\t\t\tresult = calcInterval(*job[1])\n\t\t\tresults.put([job[0], result])\n\texcept KeyboardInterrupt:\n\t\tpass\n\ndef bitSize(size):\n\tsize = float(size) / 8\n\tletter = ['', 'K', 'M', 'G']\n\tfor l in letter:\n\t\tif size < 1024:\n\t\t\treturn str(round(size, 2)) + l + 'B'\n\t\tsize /= 1024\n\treturn str(round(1024*size, 2)) + letter[len(letter)-1] + 'B'\n\nfrom sys import stdout\ndef printFrac(frac, stream=stdout, length=8):\n\tprint \"GCD...\"\n\tfrac = fix(frac)\n\tprint \"Printing...\"\n\tstream.write(str(frac[0] / frac[1]) + '.')\n\tbottom = frac[1]\n\ttop = frac[0] % frac[1]\n\tcount = 0\n\tfor i in xrange(length):\n\t\ttop *= 10\n\t\tstream.write(str(top / bottom))\n\t\ttop %= bottom\n\t\tcount += 1\n\t\tif count % 1000 == 0:\n\t\t\tprint count\n\tstream.write('\\n')\n\n\ndef calcE(threadsNumber, _intervalSize):\n\tglobal intervalSize\n\tintervalSize = _intervalSize\n\tthreads = []\n\tfor i in range(threadsNumber):\n\t\tProcess(target=calculate).start()\n\t\n\te = (1, 1)\n\teDict = {}\n\teNow = 0\n\t\n\tfinished = False\n\ttry:\n\t\twhile True:\n\t\t\tresult = 
results.get()\n\t\t\teDict[result[0]] = result[1]\n\t\t\twhile eNow in eDict:\n\t\t\t\te = join(e, eDict[eNow])\n\t\t\t\tdel eDict[eNow]\n\t\t\t\teNow += 1\n\texcept KeyboardInterrupt:\n\t\tprint \"Calculation closing.\"\n\tprint \"Iterations done:\", eNow * intervalSize\n\tprint bitSize(e[0].bit_length() + e[1].bit_length())\n\t\n\tfrom math import log10\n\tdigits = int(log10(e[1]))\n\tprint digits\n\tf = open('output', 'w')\n\tprintFrac(e, f, digits)\n\tf.close()\n\n\treturn e\n\ncalcE(8, 10000)\nprint \"Done!\"\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1871,"cells":{"__id__":{"kind":"number","value":15771119947690,"string":"15,771,119,947,690"},"blob_id":{"kind":"string","value":"a41fe9ac2ff8a971c858974f46c547766ad1f1a9"},"directory_id":{"kind":"string","value":"4707881af349b5d9ae07417f54c1c341170d1de9"},"path":{"kind":"string","value":"/mledu/algorithm/__init__.py"},"content_id":{"kind":"string","value":"f9b15b0a5938f53c5adcdbf75eb794c777adea7a"},"detected_licenses":{"kind":"list like","value":["GPL-2.0-only"],"string":"[\n 
\"GPL-2.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"dreamwalkerrr/mledu"},"repo_url":{"kind":"string","value":"https://github.com/dreamwalkerrr/mledu"},"snapshot_id":{"kind":"string","value":"e83032ae6e66ba69c86c49c55be05c02984f1035"},"revision_id":{"kind":"string","value":"911d16361cd753d920f2ac2ce4d132d85e30fe68"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-01T20:03:01.988928","string":"2016-09-01T20:03:01.988928"},"revision_date":{"kind":"timestamp","value":"2013-12-25T08:54:12","string":"2013-12-25T08:54:12"},"committer_date":{"kind":"timestamp","value":"2013-12-25T08:54:12","string":"2013-12-25T08:54:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from perceptron import Perceptron\nfrom linregression import LinearRegression\nfrom babynn import BabyNN\ndel perceptron, linregression, 
babynn\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1872,"cells":{"__id__":{"kind":"number","value":6365141534739,"string":"6,365,141,534,739"},"blob_id":{"kind":"string","value":"c6035f823ff66418d3ac3c74ef57e97c5ed20177"},"directory_id":{"kind":"string","value":"98c46168e904b48482d7222b8d6b013ed50bfe86"},"path":{"kind":"string","value":"/ssm.py"},"content_id":{"kind":"string","value":"debc125c2d62ac3cc168abaf9d5dd7c441d9aa16"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"bboyjacks/Python-Codes"},"repo_url":{"kind":"string","value":"https://github.com/bboyjacks/Python-Codes"},"snapshot_id":{"kind":"string","value":"4beae70bcca75c2873621df73fccebf54752656b"},"revision_id":{"kind":"string","value":"46ca5f7c2d46e75c1d69cc72fec8f8c6578e89e1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T11:41:47.980675","string":"2016-09-06T11:41:47.980675"},"revision_date":{"kind":"timestamp","value":"2014-06-21T04:37:38","string":"2014-06-21T04:37:38"},"committer_date":{"kind":"timestamp","value":"2014-06-21T04:37:38","string":"2014-06-21T04:37:38"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archive
d":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#-----------STOCHASTIC STATE MACHINE----------------\n#\n#\tThe following library implements discrete distributions,\n#\tstochastic state machines and state estimators.\n#\n#\n#\tScott's Note: This project was not fully completed, as the SSM\n#\tonly works without input, since a fully generalized JDist class\n#\twasn't created. It's only a demonstration, so full testing suites\n#\tand correct modularization have also not been fully implemented\n#\n#---------------------------------------------------\n\nimport sm\nimport random\n\n\n\n#-----Discrete Distribution----------\n#\tThe following class stores discrete distributions\n#\tby using a dictionary which associates events and\n# \ttheir probabilities\n#------------------------------------\nclass DDist():\n\tdef __init__(self, dictionary):\n\t\tself.d = dictionary\n\n\t#returns the probability of an event\n\tdef prob(self, event):\n\t\tif event in self.d:\n\t\t\tp = self.d[event]\n\t\telse:\n\t\t\tp = 0\n\t\treturn p\n\t\n\t#returns a list of all possible events\n\tdef support(self):\n\t\treturn [k for k in self.d.keys() if self.prob(k) > 0]\n\t\t\n\t#draws an element from the distribution\n\tdef draw(self):\n\t\tr = random.random()\n\t\tprobSum = 0.0\n\t\tfor event in self.support():\n\t\t\tprobSum += self.prob(event)\n\t\t\tif r < probSum:\n\t\t\t\treturn event\n\t\t\t\t\n\t\tprint \"Error: draw() failed to find an event.\"\n\t\treturn None\n\t\t\n\tdef output(self):\n\t\tfor k in self.support():\n\t\t\tprint k,\": \",self.d[k],\"\\n\"\n\t\t\ndef testSuite_DDist():\n\tchance = 1.0/6.0\n\tdie = DDist({1:chance,2:chance,3:chance,4:chance,5:chance,6:chance})\n\tweightedDie = DDist({1:chance/2,2:chance/2,3:chance,4:chance,5:chance*2,6:chance*2})\n\tprint die.support()\n\tfor i in range(10):\n\t\tprint die.draw()\n\tfor i in range(10):\n\t\tprint weightedDie.draw()\n\t\t\n#------JOINT DISTRIBUTION--------------\n#\tThis class allows 
you to create a joint distribution\n#\tgiven a distribution and a function for calculating\n#\tthe conditional distribution given that distribution\n#--------------------------------------\nclass JDist(DDist):\n\t\n\t#Takes a distribution of a random variable and the\n\t#function which determines the conditional distribution\n\tdef __init__(self, pA, pBgivenA):\n\t\tself.d = {}\n\t\tpossibleAs = pA.support()\n\t\tfor a in possibleAs:\n\t\t\tconditional = pBgivenA(a)\n\t\t\tfor b in conditional.support():\n\t\t\t\tself.d[(a,b)] = pA.prob(a)*conditional.prob(b)\n\t\t\t\t\n\t#returns the individual distribution of just one of the\n\t#two random variable components\t\t\t\n\tdef marginalizeOut(self, variable):\t\t\t\n\t\tnewD = {}\n\t\tfor event in self.d.keys():\n\t\t\tnewEvent = removeElt(event, variable)\n\t\t\tincrDictEntry(newD, newEvent, self.prob(event))\n \t\treturn(DDist(newD))\n\n\t#returns the distribution of a variable, given the value\n\t#of the other variable\n\tdef conditionOn(self, variable, value):\n\t\tnewD = {}\n\t\ttotalProb = 0.0\n\t\t\n\t\t#first construct an incomplete distribution, with only\n\t\t#the joint probabilities of valued elements\n\t\tfor event in self.d.keys():\n\t\t\tif event[variable] == value:\n\t\t\t\tindivProb = self.d[event]\n\t\t\t\ttotalProb += indivProb\n\t\t\t\tnewEvent = removeElt(event, variable)\n\t\t\t\tnewD[newEvent] = indivProb\n\t\t\n\t\t#divide by the total sub-probability to ensure all\n\t\t#probabilities sum to 1\n\t\tfor subEvent in newD.keys():\n\t\t\tnewD[subEvent] /= totalProb\n\t\t\n\t\treturn(DDist(newD))\n\t\t\t\t\n\tdef output(self):\n\t\tfor event in self.d.keys():\n\t\t\tprint \"Event \",event,\" has a \",self.prob(event),\" probability.\"\n\t\t\t\ndef removeElt(items, i):\n\tresult = items[:i] + items[i+1:]\n\tif len(result)==1:\n\t\treturn result[0]\n\telse:\n\t\treturn result\n\ndef incrDictEntry(d, k, v):\n\tif d.has_key(k):\n\t\td[k] += v\n\telse:\n\t\td[k] = v\t\t\t\n\t\t\t\ndef 
testSuite_JDist():\n\tpIll = DDist({'disease':.01, 'no disease':.99})\n\tdef pSymptomGivenIllness(status):\n\t\tif status == 'disease':\n\t\t\treturn(DDist({'cough':.9, 'none':.1}))\n\t\telif status == 'no disease':\n\t\t\treturn(DDist({'cough':.05, 'none':.95}))\n\t\n\tjIllnessSymptoms = JDist(pIll, pSymptomGivenIllness)\n\t\n\tjIllnessSymptoms.output()\n\t\n\tdSymptoms = jIllnessSymptoms.marginalizeOut(0)\n\tprint \"Symptoms include: \\n\", dSymptoms.d\t\n\t\n\tsymptomsGivenIll = jIllnessSymptoms.conditionOn(0,'no disease')\n\tprint \"Symptoms given no disease: \\n\", symptomsGivenIll.d\n\t\n#===================STOCHASTIC STATE MACHINE===================\n\nclass SSM(sm.SM):\n\tdef __init__(self, prior, transition, observation):\n\t\tself.prior = prior\n\t\tself.transition = transition\n\t\tself.observation = observation\n\t\t\n\tdef startState(self):\n\t\treturn self.prior.draw()\n\t\n\tdef getNextValues(self, state, inp):\n\t\treturn(self.transition(inp)(state).draw(), self.observation(state).draw())\n\t\t\ndef testSuite_SSM():\n\t\n\tprior = DDist({'good':0.9, 'bad':0.1})\n\n\tdef observationModel(state):\n\t\tif state == 'good':\n\t\t\treturn DDist({'perfect':0.8, 'smudged':0.1, 'black':0.1})\n\t\telse:\n\t\t\treturn DDist({'perfect':0.1, 'smudged':0.7, 'black':0.2})\n\t\n\tdef transitionModel(input):\n\t\tdef transitionGivenInput(oldState):\n\t\t\tif oldState == 'good':\n\t\t\t\treturn DDist({'good':0.7, 'bad':0.3})\n\t\t\telse:\n\t\t\t\treturn DDist({'good':0.1, 'bad':0.9})\n\t\treturn transitionGivenInput\n\t\t\t\n\tcopyMachine = SSM(prior,transitionModel,observationModel)\n\t\n\tprint copyMachine.transduce(['copy']*20)\n\t\n#==========STOCHASTIC STATE ESTIMATOR==============\t\n\t\nclass SSE(sm.SM): \t\n\t\n\tdef __init__(self, machine):\n\t\tself.machine = machine\n\t\tself.startState = machine.prior\n\t\tself.transitionModel = machine.transition\n\t\tself.observationModel = machine.observation\n\t\t\n\t#Keep in mind for a Stochastic State Estimator the 
input\n\t#must be the last observed value and the state is the Bayesian\n\t#Machine's degree of belief, expressed as a probability distribution\n\t#over all known internal states belonging to the SSM\t\n\tdef getNextValues(self, state, inp):\n\t\t\n\t\t#First, calculate an updated belief of the last state, given\n\t\t#the known output\n\t\t\n\t\t\n\t\t#Calculates Pr(S|O = obs)\n\t\tbelief = JDist(state, self.observationModel).conditionOn(1, inp)\n\t\t\n\t\t\n\t\t#Second, run the belief state through the transition model\n\t\t#to predict the current state of the machine\n\t\tn=0\n\t\tpartialDist={}\n\t\tfor possibility in belief.d.keys():\t\t\t\t\t\t\t#go through all states\n\t\t\tpartialDist[n] = self.transitionModel(0)(possibility).d\t#figure out what would happen, were you in that state\n\t\t\tfor event in partialDist[n].keys():\n\t\t\t\tpartialDist[n][event] *= belief.prob(possibility)\t#multiply by the chance you actually were in that state\n\t\t\tn+=1\n\t\ttotalDist = partialDist[0]\n\t\tfor event in partialDist[0].keys():\t\t\n\t\t\tfor count in range(1, n):\n\t\t\t\ttotalDist[event] += partialDist[count][event]\t\t\t#sum up the partial probabilities\n\t\t\t\t\n\t\tbeliefPrime = DDist(totalDist)\n\t\t\n\t\t\n\t\treturn (beliefPrime, beliefPrime)\t\t\n\t\t\ndef testSuite_SSE():\n\t\n\tprior = DDist({'good':0.9, 'bad':0.1})\n\n\tdef observationModel(state):\n\t\tif state == 'good':\n\t\t\treturn DDist({'perfect':0.8, 'smudged':0.1, 'black':0.1})\n\t\telse:\n\t\t\treturn DDist({'perfect':0.1, 'smudged':0.7, 'black':0.2})\n\t\n\tdef transitionModel(input):\n\t\tdef transitionGivenInput(oldState):\n\t\t\tif oldState == 'good':\n\t\t\t\treturn DDist({'good':0.7, 'bad':0.3})\n\t\t\telse:\n\t\t\t\treturn DDist({'good':0.1, 'bad':0.9})\n\t\treturn transitionGivenInput\n\t\t\t\n\tcopyMachine = SSM(prior,transitionModel,observationModel)\n\tcopyEstimator = SSE(copyMachine)\n\t\n\tcopyMachine.start()\n\tcopyEstimator.start()\n\t\n\tfor n in 
range(20):\n\t\t\n\t\tobservation = copyMachine.step(\"copy\")\n\t\tprint \"Copy machine => \", observation\n\t\tbelief = copyEstimator.step(observation)\n\t\tprint \"Estimate of copier's status: \\n\", belief.output(),\"\\n\\n\"\n\t\n#============================MAIN==============================\n\ndef main():\n\n\n\ttestSuite_SSE()\t\t\n\tprint \"Program complete.\"\n\t\n\t\n#This will run the testing suite if the program is run directly\t\nif __name__ == '__main__':\n\tmain()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1873,"cells":{"__id__":{"kind":"number","value":10728828347847,"string":"10,728,828,347,847"},"blob_id":{"kind":"string","value":"e8a63bd309e18dba6fc9c3cc8019d6d693b200d1"},"directory_id":{"kind":"string","value":"154108ba1afff2c7d679c3aee47f867f673d03fe"},"path":{"kind":"string","value":"/django_project/changes/views/entry.py"},"content_id":{"kind":"string","value":"26e5d4676b95a03169af013588e95f3a6e3809c4"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dodobas/visual_changelog"},"repo_url":{"kind":"string","value":"https://github.com/dodobas/visual_changelog"},"snapshot_id":{"kind":"string","value":"0101a3fab93233879d01fa32532e7a5be9828939"},"revision_id":{"kind":"string","value":"d4b4590252b98a32e8c7993a6ab8f39f1867823f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T10:48:19.925270","string":"2021-01-17T10:48:19.925270"},"revision_date":{"kind":"timestamp","value":"2013-09-15T15:32:15","string":"2013-09-15T15:32:15"},"committer_date":{"kind":"timestamp","value":"2013-09-15T15:32:15","string":"2013-09-15T15:32:15"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import logging\nlogger = logging.getLogger(__name__)\n\n# noinspection PyUnresolvedReferences\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import (\n ListView,\n CreateView,\n DeleteView,\n DetailView,\n UpdateView,\n RedirectView,\n TemplateView)\n\nfrom django.http import HttpResponseRedirect\nfrom braces.views import LoginRequiredMixin, StaffuserRequiredMixin\nfrom pure_pagination.mixins import PaginationMixin\n\nfrom ..models import Project, Category, Version, Entry\nfrom 
..forms import ProjectForm, CategoryForm, VersionForm, EntryForm\n\n\nclass EntryMixin(object):\n model = Entry # implies -> queryset = Entry.objects.all()\n form_class = EntryForm\n\n\nclass EntryCreateUpdateMixin(EntryMixin, LoginRequiredMixin):\n def get_context_data(self, **kwargs):\n context = super(EntryMixin, self).get_context_data(**kwargs)\n return context\n\n def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form))\n\n\nclass EntryListView(EntryMixin, PaginationMixin, ListView):\n context_object_name = 'entries'\n template_name = 'entry/list.html'\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super(EntryListView, self).get_context_data(**kwargs)\n context['num_entries'] = self.get_queryset().count()\n context['unapproved'] = False\n return context\n\n def get_queryset(self):\n \"\"\"Only approved objects are shown.\"\"\"\n qs = Entry.objects.all()\n return qs\n\n\nclass EntryDetailView(EntryMixin, DetailView):\n context_object_name = 'entry'\n template_name = 'entry/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(EntryDetailView, self).get_context_data(**kwargs)\n return context\n\n def get_queryset(self):\n \"\"\"Anyone can see any entry.\"\"\"\n qs = Entry.all_objects.all()\n return qs\n\n def get_object(self, queryset=None):\n obj = super(EntryDetailView, self).get_object(queryset)\n obj.request_user = self.request.user\n return obj\n\n\nclass EntryDeleteView(EntryMixin, DeleteView, LoginRequiredMixin):\n context_object_name = 'entry'\n template_name = 'entry/delete.html'\n\n def get_success_url(self):\n return reverse('entry-list')\n\n def get_queryset(self):\n qs = Entry.all_objects.all()\n if self.request.user.is_staff:\n return qs\n else:\n qs.filter(creator=self.request.user)\n\n\nclass EntryCreateView(EntryCreateUpdateMixin, CreateView):\n context_object_name = 'entry'\n template_name = 'entry/create.html'\n\n def get_success_url(self):\n return 
reverse('pending-entry-list')\n\n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass EntryUpdateView(EntryCreateUpdateMixin, UpdateView):\n context_object_name = 'entry'\n template_name = 'entry/update.html'\n\n def get_form_kwargs(self):\n kwargs = super(EntryUpdateView, self).get_form_kwargs()\n return kwargs\n\n def get_queryset(self):\n qs = Entry.all_objects.all()\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n def get_success_url(self):\n return reverse('pending-entry-list')\n\n\nclass PendingEntryListView(EntryMixin,\n PaginationMixin,\n ListView,\n StaffuserRequiredMixin):\n \"\"\"List all unapproved entries\"\"\"\n context_object_name = 'entries'\n template_name = 'entry/list.html'\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super(PendingEntryListView, self).get_context_data(**kwargs)\n context['num_entries'] = self.get_queryset().count()\n context['unapproved'] = True\n return context\n\n def get_queryset(self):\n qs = Entry.unapproved_objects.all()\n if self.request.user.is_staff:\n return qs\n else:\n return qs.filter(creator=self.request.user)\n\n\nclass ApproveEntryView(EntryMixin, StaffuserRequiredMixin, RedirectView):\n permanent = False\n query_string = True\n pattern_name = 'pending-entry-list'\n\n def get_redirect_url(self, pk):\n entry_qs = Entry.unapproved_objects.all()\n entry = get_object_or_404(entry_qs, pk=pk)\n entry.approved = True\n entry.save()\n return 
reverse(self.pattern_name)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1874,"cells":{"__id__":{"kind":"number","value":13151189893717,"string":"13,151,189,893,717"},"blob_id":{"kind":"string","value":"cba074364ca99de4801e5bcc1b0fa22e45c40e34"},"directory_id":{"kind":"string","value":"66075efb4a9c0f6e5c7cb1d0024487b5c69f22c4"},"path":{"kind":"string","value":"/Balanced/attack_algorithms.py"},"content_id":{"kind":"string","value":"1226d503a37995f622fc19b018982555fd0be112"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"todayispotato/tron"},"repo_url":{"kind":"string","value":"https://github.com/todayispotato/tron"},"snapshot_id":{"kind":"string","value":"248d7f96156a55c1f7fe5433ceb58b08c0d1313d"},"revision_id":{"kind":"string","value":"eae88e529d402f19c127f2e59dd7018a54311e3e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-11-26T19:37:24.505110","string":"2020-11-26T19:37:24.505110"},"revision_date":{"kind":"timestamp","value":"2013-05-31T17:01:55","string":"2013-05-31T17:01:55"},"committer_date":{"kind":"timestamp","value":"2013-05-31T17:01:55","string":"2013-05-31T17:01:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_l
anguage":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nimport tron, time\n\nfrom heapq import heappush, heappop, heapify\n\nclass AStar:\n\n\tdef execute(self, board):\n\t\tstart = time.clock()\n\t\tgoal = board.them()\n\t\t\n\t\tclosedSet = []\n\t\topenSet = dict()\n\t\tstartPath = AStar.Path(board.me(), [], 0, board.distance(board.me(), goal))\n\t\topenSet[board.me()] = startPath\n\t\tqueue = [startPath]\n\t\ttron.log(\"goal: \" + str(goal))\n\t\tshortestPath = []\n\t\twhile len(queue) > 0 and time.clock() - start < 0.4:\n\t\t\tpath = heappop(queue)\n\t\t\tshortestPath = path.visited\n\t\t\tif path.node == goal:\n\t\t\t\tbreak\n\t\t\tclosedSet.append(path.node)\n\t\t\tdestinations = [dest for dest in board.adjacent(path.node) if board.passable(dest) or dest == goal]\n\t\t\tfor dest in destinations:\n\t\t\t\tif dest in closedSet:\n\t\t\t\t\tcontinue\n\t\t\t\tnewScore = path.score + 1\n\t\t\t\tif dest not in openSet.keys() or openSet[dest] not in queue:\n\t\t\t\t\tnewPath = AStar.Path(dest, list(path.visited), newScore, board.distance(dest, goal))\n\t\t\t\t\topenSet[dest] = newPath\n\t\t\t\t\theappush(queue, newPath)\n\t\t\t\telif newScore < openSet[dest].score and openSet[dest] in queue:\n\t\t\t\t\topenSet[dest].node = dest\n\t\t\t\t\topenSet[dest].score = newScore\n\t\t\t\t\tnewVisited = list(path.visited)\n\t\t\t\t\tnewVisited.append(dest)\n\t\t\t\t\topenSet[dest].visited = newVisited\n\t\t\t\t\topenSet[dest].estimate = board.distance(dest, goal)\n\t\ttron.log(\"Shortest path took: \" + str(float(time.clock() - start)))\n\t\treturn shortestPath[1:]\n\t\t\n\tclass Path:\n\t\t\n\t\tdef __init__(self, node, visited, score, estimate):\n\t\t\tself.node = node\n\t\t\tself.visited = visited\n\t\t\tself.visited.append(node)\n\t\t\tself.score = score\n\t\t\tself.estimate = estimate\n\t\t\t\n\t\tdef __eq__(self, other):\n\t\t\treturn self.visited == other.visited\n\t\t\t\n\t\tdef __cmp__(self, 
other):\n\t\t\treturn cmp(self.score + self.estimate, other.score + other.estimate)\n\t\t\t\nclass Minimax:\n\n\tdef __init__(self):\n\t\tself.cachedLevels = []\n\t\t\n\tdef prepareCache(self, board, spaceCount, enemySpaceCount):\n\t\tif len(self.cachedLevels) <= 3:\n\t\t\troot = Minimax.TreeNode(None, board.me(), board.them(), [board.me(), board.them()], True, 0)\n\t\t\tlevels = [Minimax.Level([root], True)]\n\t\t\tfor dir in board.moves():\n\t\t\t\tmove = board.rel(dir)\n\t\t\t\tvisited = list(root.visited)\n\t\t\t\tvisited.append(move)\n\t\t\t\troot.addChild(root, move, root.them, visited, spaceCount[dir] - enemySpaceCount[dir])\n\t\t\tself.cachedLevels = levels\n\t\t\treturn\n\t\tmyMove = None\n\t\tfor node in self.cachedLevels[1].nodes:\n\t\t\tif node.me == board.me():\n\t\t\t\tmyMove = node\n\t\t\t\tbreak\n\t\ttheirMove = None\n\t\tfor node in myMove.children:\n\t\t\tif node.them == board.them():\n\t\t\t\ttheirMove = node\n\t\t\t\tbreak\n\t\ttheirMove.score = 0\n\t\tself.cachedLevels = self.cachedLevels[2:]\n\t\tself.cachedLevels[0] = Minimax.Level([theirMove], True)\n\n\tdef execute(self, board, spaceCount, enemySpaceCount):\n\t\t#self.prepareCache(board, spaceCount, enemySpaceCount)\n\t\tlevels = self.minimax(board, spaceCount, enemySpaceCount)\n\t\t#self.cachedLevels = levels\n\t\troot = levels[0].nodes[0]\n\t\t\n\t\ttron.log(\"Minimax level: \" + str(len(levels)-1))\n\t\t\n\t\tminimaxSpaceCount = dict()\n\t\tfor node in root.children:\n\t\t\tfor dir in spaceCount.keys():\n\t\t\t\tif board.rel(dir) == node.me:\n\t\t\t\t\tminimaxSpaceCount[dir] = node.score\n\t\treturn minimaxSpaceCount\n\t\t\n\tdef minimax(self, board, spaceCount, enemySpaceCount):\n\t\t#'''\n\t\troot = Minimax.TreeNode(None, board.me(), board.them(), [board.me(), board.them()], True, 0)\n\t\tlevels = [Minimax.Level([root], True)]\n\t\tfor dir in spaceCount.keys():\n\t\t\tmove = board.rel(dir, root.me)\n\t\t\tvisited = 
list(root.visited)\n\t\t\tvisited.append(move)\n\t\t\troot.addChild(root, move, root.them, visited, spaceCount[dir] - enemySpaceCount[dir])\n\t\t#'''\n\t\t#levels = list(self.cachedLevels)\n\t\t#levels.append(Minimax.Level(list(root.children)))\n\t\t\n\t\toutOfTime = False\n\t\twhile time.clock() - board.startTime < 0.85 and len(levels[len(levels)-1].nodes) > 0:\n\t\t\tcurrLevel = levels[len(levels)-1]\n\t\t\t#for node in level.nodes:\n\t\t\t#\tnode.refineScore()\n\t\t\t#tron.log(\"level \" + str(len(levels)))\n\t\t\t#tron.log(\"nodes\" + str(level.nodes))\n\t\t\tfor parent in currLevel.nodes:\n\t\t\t\tif time.clock() - board.startTime > 0.85:\n\t\t\t\t\toutOfTime = True\n\t\t\t\t\tbreak\n\t\t\t\tnodeChildren = [child for child in parent.children if child.me != child.them and len(board.adjacentImpassableOrVisited(child.me, child.visited)) < 4 and len(board.adjacentImpassableOrVisited(child.them, child.visited)) < 4]\n\t\t\t\theapify(nodeChildren)\n\t\t\t\twhile len(nodeChildren) > 0:\n\t\t\t\t\tnode = heappop(nodeChildren)\n\t\t\t\t\n\t\t\t\t\tif node.myMove:\n\t\t\t\t\t\tmovedFrom = node.me\n\t\t\t\t\t\tother = node.them\n\t\t\t\t\telse:\n\t\t\t\t\t\tmovedFrom = node.them\n\t\t\t\t\t\tother = node.me\n\t\t\t\t\tunvisitedMoves = [move for move in board.moveableDestinations(movedFrom) if move not in node.visited or move == other]\n\t\t\t\t\tnewScore = None\n\t\t\t\t\tfor move in unvisitedMoves:\n\t\t\t\t\t\tmoveVisited = list(node.visited)\n\t\t\t\t\t\tmoveVisited.append(move)\n\t\t\t\t\t\tif move == other:\n\t\t\t\t\t\t\tscore = 0\n\t\t\t\t\t\telif len(board.adjacentImpassableOrVisited(move, moveVisited)) == 4:\n\t\t\t\t\t\t\tboardSize = (board.width - 2) * (board.height - 2)\n\t\t\t\t\t\t\tif node.myMove:\n\t\t\t\t\t\t\t\tscore = -boardSize\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tscore = boardSize\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#floodfilled = tron.floodfill.execute(board, move, moveVisited)\n\t\t\t\t\t\t\t#deadCorners = [ffNode for ffNode in floodfilled if 
len(board.adjacentImpassable(ffNode)) == 3]\n\t\t\t\t\t\t\t#moveScore = len(floodfilled) - len(deadCorners) + min(len(deadCorners), 1)\n\t\t\t\t\t\t\t#if other in floodfilled:\n\t\t\t\t\t\t\t#\tscore = 0\n\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\tmoveScore = tron.floodfill.floodfillScore(board, move, moveVisited)\n\t\t\t\t\t\t\totherScore = tron.floodfill.floodfillScore(board, other, moveVisited)\n\t\t\t\t\t\t\tif node.myMove:\n\t\t\t\t\t\t\t\tscore = moveScore - otherScore\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tscore = otherScore - moveScore\n\t\t\t\t\t\n\t\t\t\t\t\tif node.myMove:\n\t\t\t\t\t\t\tchild = node.addChild(node, move, other, moveVisited, score)\n\t\t\t\t\t\t\tif newScore is None or score > newScore:\n\t\t\t\t\t\t\t\tnewScore = score\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tchild = node.addChild(node, other, move, moveVisited, score)\n\t\t\t\t\t\t\tif newScore is None or score < newScore:\n\t\t\t\t\t\t\t\tnewScore = score\n\t\t\t\t\tnode.score = newScore\n\t\t\t\t\t#newLevel.nodes.append(node)\n\t\t\t\t\t#childlevel\n\t\t\t\t#parentlevel\n\t\t\t\tparent.refineScore()\n\t\t\tif outOfTime:\n\t\t\t\tbreak\n\t\t\t#tron.log(levels)\n\t\t\tenemyMoveLevels = [level for level in levels if not level.myMove]\n\t\t\tfor level in enemyMoveLevels:\n\t\t\t\tfor parent in level.nodes:\n\t\t\t\t\tparent.children = [child for child in parent.children if child.score == parent.score]\n\t\t\tnewLevel = Minimax.Level([], not currLevel.myMove)\n\t\t\tfor parent in currLevel.nodes:\n\t\t\t\tnewLevel.nodes.extend(parent.children)\n\t\t\t\t#tron.log(\"parentscore: \" + str(parent.score) + \" \" + str(parent.me) + str(parent.them))\n\t\t\t\t#if parent.myMove:\n\t\t\t\t#\ttron.log(\"mine: \" + str([(node.me, node.score) for node in parent.children]))\n\t\t\t\t#else:\n\t\t\t\t#\ttron.log(\"them: \" + str([(node.them, node.score) for node in parent.children]))\n\t\t\tlevels.append(newLevel)\n\t\tindex = 0\n\t\t'''\n\t\tfor index in range(len(levels)):\n\t\t\tprintLevel = 
levels[index]\n\t\t\ttron.log(\"level \" + str(index) + \", nodes \" + str(len(printLevel.nodes)))\n\t\t\tfor node in printLevel.nodes:\n\t\t\t\ttron.log(str(node.me) + \",\" + str(node.them) + \" \" + str(node.score))\n\t\t\t\t#tron.log(node.visited)\n\t\t\tindex += 1\n\t\t'''\n\t\treturn levels\n\t\n\tclass Level:\n\t\tdef __init__(self, nodes, myMove):\n\t\t\tself.nodes = nodes\n\t\t\tself.myMove = myMove\n\t\n\tclass TreeNode:\n\t\tdef __init__(self, parent, me, them, visited, myMove, score):\n\t\t\tself.parent = parent\n\t\t\tself.me = me\n\t\t\tself.them = them\n\t\t\tself.visited = visited\n\t\t\tself.myMove = myMove\n\t\t\tself.score = score\n\t\t\tself.children = []\n\t\t\t\n\t\tdef addChild(self, node, me, them, moveVisited, score):\n\t\t\tchild = Minimax.TreeNode(self, me, them, moveVisited, not self.myMove, score)\n\t\t\theappush(self.children, child)\n\t\t\treturn child\n\t\t\t\n\t\tdef __cmp__(self, other):\n\t\t\treturn cmp(other.score, self.score)\n\t\t\n\t\tdef refineScore(self):\n\t\t\tif len(self.children) > 0:\n\t\t\t\tif self.myMove:\n\t\t\t\t\tmaxScore = -100\n\t\t\t\t\tfor child in self.children:\n\t\t\t\t\t\tif child.score > maxScore:\n\t\t\t\t\t\t\tmaxScore = child.score\n\t\t\t\t\tself.score = maxScore\n\t\t\t\telse:\n\t\t\t\t\tminScore = 100\n\t\t\t\t\tfor child in self.children:\n\t\t\t\t\t\tif child.score < minScore:\n\t\t\t\t\t\t\tminScore = child.score\n\t\t\t\t\tself.score = minScore\n\t\t\tif self.parent is not None:\n\t\t\t\tself.parent.refineScore()\n\t\t\t\t\n\t\t'''\n\t\tdef addChild(self, board, dest, score=None):\n\t\t\t#tron.log(score)\n\t\t\tnewVisited = list(self.visited)\n\t\t\tif score == None:\n\t\t\t\tif dest == self.them or dest == self.me:\n\t\t\t\t\tscore = 0\n\t\t\t\telse:\n\t\t\t\t\tif self.myMove:\n\t\t\t\t\t\tother = self.them\n\t\t\t\t\t\tnewVisited.append(self.me)\n\t\t\t\t\t\t#child = Minimax.TreeNode(self, node, self.them, newVisited, False, score)\n\t\t\t\t\telse:\n\t\t\t\t\t\tother = 
self.me\n\t\t\t\t\t\tnewVisited.append(self.them)\n\t\t\t\t\t\t#child = Minimax.TreeNode(self, self.me, node, newVisited, True, score)\n\t\t\t\t\tdestScore = tron.floodfill.floodfillScore(board, dest, self.visited)\n\t\t\t\t\totherScore = tron.floodfill.floodfillScore(board, other, newVisited)\n\t\t\t\t\tscore = destScore - otherScore\n\t\t\tif self.myMove:\n\t\t\t\tchild = Minimax.TreeNode(self, dest, self.them, newVisited, not self.myMove, score)\n\t\t\telse:\n\t\t\t\tchild = Minimax.TreeNode(self, self.me, dest, newVisited, not self.myMove, score)\n\t\t\tself.children.append(child)\n\t\t\treturn child\n\t\t'''\n\t\t\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1875,"cells":{"__id__":{"kind":"number","value":17059610128852,"string":"17,059,610,128,852"},"blob_id":{"kind":"string","value":"444c81faa8218ee40cff7d4504ea3ccd436a0086"},"directory_id":{"kind":"string","value":"6de138140a350a3439e4d998d76092fe985ae499"},"path":{"kind":"string","value":"/pydj/maui/map/helpers.py"},"content_id":{"kind":"string","value":"8e153485c9305083776902f2ca55fe90fd533faf"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"zarcoder/maui"},"repo_url":{"kind":"string","value":"https://github.com/zarcoder/maui"},"snapshot_id":{"kind":"string","value":"3e4c8be6fe745cb9edfbcaf7483970116cd60ed7"},"revision_id":{"kind":"string","value":"2c7df426716e374220583f6c2c92a1d5c2115f20"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-21T04:11:25.935298","string":"2021-01-21T04:11:25.935298"},"revision_date":{"kind":"timestamp","value":"2013-01-16T16:27:05","string":"2013-01-16T16:27:05"},"committer_date":{"kind":"timestamp","value":"2013-01-16T16:27:05","string":"2013-01-16T16:27:05"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from settings import MAPHOSTS,TEMPLATE_DIRS\nimport re,logging\nfrom django.template import Context, Template\nimport libssh2\nimport os\nimport scp_upload\nimport ssh_exec\nimport sftp_listdir\n\nlogger=logging.getLogger(__name__)\n\n\n\ndef listjobsSlurm(session):\n\tlistres = parseoutput(session,\"squeue\",2,-1,[\"id\",\"queue\",\"name\" , \"user\", \"state\", \"timeUse\" , \"nodes\" , \"nodeList\"])\n\treturn listres\n\ndef listjobsTorque(session):\n\tlistres = parseoutput(session,\"qstat\",3,0,[\"id\",\"name\", \"user\", \"timeUse\", \"state\", \"queue\"])\n\treturn listres\n\ndef listresSlurm(session):\n\tlistres = 
parseoutput(session, \"sinfo -N -l\", 1, -1, [\"NODELIST\", \"NODES\", \"PARTITION\" , \"STATE\", \"CPUS\" , \"SCT\", \"MEMORY\", \"TMP_DISK\", \"WEIGHT\", \"FEATURES\", \"REASON\"])\n\treturn listres\n\ndef regexpf(key,field):\n\treturn (key , re.compile(\"^\" + field + \"=(\\S+)\") )\n\t\n\ndef listnodesSlurm(session):\n#NodeName=drago0 Arch=x86_64 CoresPerSocket=10 CPUAlloc=0 CPUErr=0 CPUTot=40 Features=(null) Gres=(null) NodeAddr=drago0 NodeHostName=drago0 OS=Linux RealMemory=1 Sockets=4 State=IDLE ThreadsPerCore=1 TmpDisk=0 Weight=1 BootTime=2012-02-09T10:21:15 SlurmdStartTime=2012-02-09T12:02:43 Reason=(null)\n\tlistres = parseoutput(session, \"scontrol show nodes -o\", -1, -1, [ \n\t\tregexpf('id', 'NodeName'), \n\t\tregexpf('Arch', 'Arch'), \n\t\tregexpf('CoresPerSocket', 'CoresPerSocket'), \n\t\tregexpf('CPUAlloc', 'CPUAlloc'), \n\t\tregexpf('CPUErr', 'CPUErr'), \n\t\tregexpf('CPUTot', 'CPUTot'), \n\t\tregexpf('Features', 'Features'), \n\t\tregexpf('Gres', 'Gres'), \n\t\tregexpf('NodeAddr', 'NodeAddr'), \n\t\tregexpf('NodeHostName', 'NodeHostName'), \n\t\tregexpf('OS', 'OS'), \n\t\tregexpf('RealMemory', 'RealMemory'), \n\t\tregexpf('Sockets', 'Sockets'), \n\t\tregexpf('State', 'State'), \n\t\tregexpf('ThreadsPerCore', 'ThreadsPerCore'), \n\t\tregexpf('TmpDisk', 'TmpDisk'), \n\t\tregexpf('Weight', 'Weight'), \n\t\tregexpf('BootTime', 'BootTime'), \n\t\tregexpf('SlurmdStartTime', 'SlurmdStartTime'), \n\t\tregexpf('Reason', 'Reason') \n\t])\n\treturn listres\n\n\ndef listnodes(session):\n\tlistres = sorted(parseoutput(session, \"diagnose -n\", 4, 4,[\"id\",\"state\",\"procs\",\"memory\",\"disk\", \"Swap\" , \"Speed\" , \"Opsys\", \"Arch\", \"Par\" , \"load\", \"Res\" ,\"classes\",\" Network\",\" features\"]), key=lambda node: node['id'] )\n\treturn listres\n\ndef getgangliaurl(sessionhost):\n\tif(MAPHOSTS[sessionhost]['enable_ganglia']):\n\t\tif MAPHOSTS[sessionhost].has_key(\"ganglia_cluster_name\"):\n\t\t\tcn = 
MAPHOSTS[sessionhost][\"ganglia_cluster_name\"]\n\t\telse:\n\t\t\tcn = sessionhost\n\t\treturn MAPHOSTS[sessionhost]['ganglia_url'] + 'graph.php?m=load_one&z=small&c=' + cn + '&x=0&g=load_report&n=0&r=hour&h='\n\treturn None\n\ndef listres(session):\n\treturn parseoutput(session,\"showres -n\",5,1,[\"nodeid\", \"type\" , \"resid\" , \"jobState\", \"task\" , \"start\" , \"duration\" , \"startTimeDayOfWeek\" , \"startTimeMonth\" , \"startTimeDayOfMonth\", \"startTime\" ])\n\n\ndef liststats(session):\n\treturn parseoutput(session,\"showstats -u\",5,2,[ \"user\" ,\"jobs\", \"procs\" , \"procHours\" , \"jobs\" , \"jobsprc\", \"phReq\" , \"phReqPrc\" , \"phDed\" , \"phDedPrc\" , \"fsTgt\" , \"avgXF\" , \"maxXF\" , \"avgQH\" , \"effic\" , \"wCAcc\" ])\n\ndef getqueuesfromcfg(sessionhost):\n\tqueues = MAPHOSTS[sessionhost]['queues']\n\tif isinstance(queues,tuple):\t\n\t\treturn map(lambda x: (x,x), MAPHOSTS[sessionhost]['queues'])\n\telse:\n\t\treturn [(queues, queues)]\n\n\ndef getjoboutSlurm(session,jobid):\n\treturn re.split(\"\\n\",execcommand(session,\"scontrol show job \"+ jobid))\t\n\ndef getjobout(session,jobid):\n\treturn re.split(\"\\n\",execcommand(session,\"checkjob -v \"+ jobid))\n\ndef getjoboutTorque(session,jobid):\n\treturn re.split(\"\\n\",execcommand(session,\"qstat -f \"+ jobid))\n\ndef getnodeoutSlurm(session,nodeid):\n\treturn re.split(\"\\n\",execcommand(session,\"scontrol show node \"+ nodeid))\t\n\ndef getnodeout(session,nodeid):\n\treturn re.split(\"\\n\",execcommand(session,\"checknode -v \"+ nodeid))\n\n\ndef getScriptContent(h, rmtype):\n\tf = open (TEMPLATE_DIRS + \"/script_\" + rmtype +\".sh\", \"r\") \n\tt = Template(f.read())\n\tf.close()\n\tc = Context(h)\n\treturn t.render(c)\n\n\n\ndef submitJob(session, scriptFilename):\n\trmtype = MAPHOSTS[session[\"host\"]]['rmtype']\n\tif rmtype == \"slurm\":\n\t\tsubmitcommand = \"sbatch\"\n\t\tresregexp = re.compile(\"^Submitted batch job (\\S+)$\")\n\telse:\n\t\tsubmitcommand = 
\"qsub\"\n\t\tresregexp = re.compile(\"^\\s*(\\S+).+\")\n\tres = execcommand(session, submitcommand + \" \"+scriptFilename )\n\tm = resregexp.match(res)\n\tif m:\n\t\tjobid = m.group(1)\n\t\treturn jobid\n\treturn None\n\n\n\ndef parseoutput(session,cmd,tailplus,headminus,fields):\n\tcmdstr = cmd\n\tif tailplus!=-1:\n\t\tcmdstr += \" | tail -n +\"+ str(tailplus)\n\tif headminus!=-1:\n\t\tcmdstr += \" | head -n -\" + str(headminus) \n\n\t#logger.debug(\"cmdstr:\" +cmdstr )\n\tout = execcommand(session,cmdstr)\n\tlines = re.split(\"\\n\", out)\n\tres = []\n\tfor line in lines:\n\t\tm = re.split(\"\\s+\",line)\n\t\to = {}\n\t\t#if string starts with \\s+ split will put \"\" in the first elem\n\t\tif m[0]==\"\":\n\t\t\tm.pop(0)\n\t\ti=0\n\t\twhile i=2 and data[1]:\n\t\t\tif data[0]!='.' and data[0]!=\"..\":\tfilenames.append( (data[0], data[1][0] == 4096) )\n\tfilenames = sorted(filenames, key=lambda fn: fn[0])\n\tif dirname!=\".\": filenames.insert(0,(\"..\",True))\n\treturn filenames\n\n\n\n\ndef authenticate(host,username,password):\n\tsrc = ssh_exec.SSHRemoteClient(MAPHOSTS[host]['host'], username, password, MAPHOSTS[host]['port'])\n\tout = src.execute()\n\treturn out\n\ndef execcommand(session,cmd):\n\thost = session['host']\n\tsrc = ssh_exec.SSHRemoteClient(MAPHOSTS[host]['host'], session['username'], session['password'], MAPHOSTS[host]['port'])\n\tres = src.execute(cmd)\n#\tprint \"command:\"\n#\tprint cmd\n#\tprint \"res:\"\n#\tprint res\n#\tprint \"res-end:\"\n\treturn res\n\ndef uploadfile(session,filename,remotefilename):\n\thost = session['host']\n\tmyscp = scp_upload.MySCPClient(hostname=MAPHOSTS[host]['host'], username=session['username'], password=session['password'], port=MAPHOSTS[host]['port'])\n\tmyscp.send(filename,remotefilename)\n\ndef downloadfile(session,remotefilename):\n\thost = session['host']\n\tmyscp = scp_upload.MySCPClient(hostname=MAPHOSTS[host]['host'], username=session['username'], password=session['password'], 
port=MAPHOSTS[host]['port'])\n\treturn myscp.recv(remotefilename)\n\n\n\t\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1876,"cells":{"__id__":{"kind":"number","value":14577119010580,"string":"14,577,119,010,580"},"blob_id":{"kind":"string","value":"967e548d6af3d65b64af9de37c6b534077e8480e"},"directory_id":{"kind":"string","value":"c0f87f93063502169e335037422e24d8dd0b7a20"},"path":{"kind":"string","value":"/kaggle/criteo-display-ad-challenge/sample_generate.py"},"content_id":{"kind":"string","value":"d5c725229e5fd2a3f4b1944e4e35a49c4c26d9dd"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dataninjas/challenges"},"repo_url":{"kind":"string","value":"https://github.com/dataninjas/challenges"},"snapshot_id":{"kind":"string","value":"4c9d3bc086137274962c0b682cfe8fc3a4046897"},"revision_id":{"kind":"string","value":"7f9bb1e289aa7f79e2a7a93b0fe71473692c0025"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T08:49:04.517747","string":"2021-01-22T08:49:04.517747"},"revision_date":{"kind":"timestamp","value":"2014-09-17T09:23:26","string":"2014-09-17T09:23:26"},"committer_date":{"kind":"timestamp","value":"2014-09-17T09:23:26","string":"2014-09-17T09:23:26"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gh
a_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Generate the samples\n# TODO: There should be a single-pass way to generate all samples at once\n\nimport sample_util\n\n#file_size = file_len('train.csv')\n\nfile_size = 45840618\n\nsample_size = 20000\nsample_util.generate_sample('train.csv', \n\t\t\t\tfile_size, \n\t\t\t\tsample_size, \n\t\t\t\t\"train_sample_%s.csv\" % sample_size,\n\t\t\t\tTrue)\n\nsample_size = 100000\nsample_util.generate_sample('train.csv', \n\t\t\t\tfile_size, \n\t\t\t\tsample_size, \n\t\t\t\t\"train_sample_%s.csv\" % sample_size,\n\t\t\t\tTrue)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1877,"cells":{"__id__":{"kind":"number","value":15771119955461,"string":"15,771,119,955,461"},"blob_id":{"kind":"string","value":"21099f7bec84e9827b33d452e3331964a64788fe"},"directory_id":{"kind":"string","value":"8b95a52d00ef8445fd5e1f404e7f35f17008fd1d"},"path":{"kind":"string","value":"/splitwarning.py"},"content_id":{"kind":"string","value":"31e0fdd2d26f23eac7d9293582b9d381ae32af01"},"detected_licenses":{"kind":"list like","value":["MIT","CC-BY-SA-3.0","LicenseRef-scancode-mit-old-style","Python-2.0"],"string":"[\n \"MIT\",\n \"CC-BY-SA-3.0\",\n \"LicenseRef-scancode-mit-old-style\",\n 
\"Python-2.0\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"h4ck3rm1k3/pywikipediabot"},"repo_url":{"kind":"string","value":"https://github.com/h4ck3rm1k3/pywikipediabot"},"snapshot_id":{"kind":"string","value":"77b81e852da96b730a1dbf1d4bf777270164d102"},"revision_id":{"kind":"string","value":"489b55b7a71a5c55d9ddd917a27302fa2b74458c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T22:23:40.846621","string":"2021-01-15T22:23:40.846621"},"revision_date":{"kind":"timestamp","value":"2012-08-01T18:04:12","string":"2012-08-01T18:04:12"},"committer_date":{"kind":"timestamp","value":"2012-08-01T18:04:12","string":"2012-08-01T18:04:12"},"github_id":{"kind":"number","value":4473407,"string":"4,473,407"},"star_events_count":{"kind":"number","value":5,"string":"5"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"Splits a interwiki.log file into chunks of warnings separated by language.\n\nThe following parameter is supported:\n\n-folder: The target folder to save warning files, if given. Otherwise\n use the /logs/ folder.\n\"\"\"\n#\n# (C) Rob W.W. 
Hooft, 2003\n# (C) Pywikipedia bot team, 2004-2012\n#\n# Distributed under the terms of the MIT license.\n#\n__version__ = '$Id: splitwarning.py 10023 2012-03-17 18:03:32Z xqt $'\n#\n\nimport wikipedia as pywikibot\nimport codecs\nimport re\n\ndef splitwarning(folder):\n files={}\n count={}\n fn = pywikibot.config.datafilepath(\"logs\", \"interwiki.log\")\n logFile = codecs.open(fn, 'r', 'utf-8')\n rWarning = re.compile('WARNING: (?P.+?): \\[\\[(?P.+?):.*')\n for line in logFile:\n m = rWarning.match(line)\n if m:\n family = m.group('family')\n code = m.group('code')\n if code in pywikibot.getSite().languages():\n if not code in files:\n files[code] = codecs.open(\n pywikibot.config.datafilepath(\n folder, 'warning-%s-%s.log' % (family, code)),\n 'w', 'utf-8')\n count[code] = 0\n files[code].write(line)\n count[code] += 1\n for code in files.keys():\n print '* %s (%d)' % (code, count[code])\n\ndef main(*args):\n folder = 'logs'\n for arg in pywikibot.handleArgs(*args):\n if arg.startswith(\"-folder\"):\n folder = arg[len('-folder:'):]\n splitwarning(folder)\n\nif __name__ == \"__main__\":\n # No need to have me on the stack - I don't contact the wiki\n pywikibot.stopme()\n main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1878,"cells":{"__id__":{"kind":"number","value":403726935714,"string":"403,726,935,714"},"blob_id":{"kind":"string","value":"f779e143e6f9aff675b790bac4e133bcbd0a1c09"},"directory_id":{"kind":"string","value":"28827f3e60166e9f9b5c179ccea6f4a16a6729e0"},"path":{"kind":"string","value":"/errandboy/fileio.py"},"content_id":{"kind":"string","value":"4be425cbae3672c8643c08e131aff5dae836c15b"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"BobRoss/Lindon-Test-Framework-"},"repo_url":{"kind":"string","value":"https://github.com/BobRoss/Lindon-Test-Framework-"},"snapshot_id":{"kind":"string","value":"89581434a478605a52a7f71de5621a2603cc7eb5"},"revision_id":{"kind":"string","value":"76442ae3917f99c9b01f9f044a1a74a000a89d31"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-07T06:30:27.593027","string":"2016-08-07T06:30:27.593027"},"revision_date":{"kind":"timestamp","value":"2013-11-08T16:20:03","string":"2013-11-08T16:20:03"},"committer_date":{"kind":"timestamp","value":"2013-11-08T16:20:03","string":"2013-11-08T16:20:03"},"github_id":{"kind":"number","value":14237855,"string":"14,237,855"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/bin/python\nfrom os import path\nfrom functools import partial\nfrom sys import stdin, stderr, argv\nfrom flow import firstOrNone, pipeline\nfrom diagnostics import reportyFunc, test\n\ndef attempt(f): # rig:3\n '''Functions decorated as \"attempts\" will consider a return\n value of None to denote failures and will print their own\n doc strings as error messages.'''\n def _do(*things, **etc):\n def measure(outcome):\n if outcome is not None:\n return outcome\n if \"-v\" in argv:\n stderr.write(\"%s(%s):\\n\\t%s\\n\"%(f.func_name,\n \", \".join(map(repr, 
things)),\n f.__doc__))\n return None\n return measure(f(*things, **etc))\n return _do\n\ndef mchain(o, *methods):\n return [getattr(o, m, lambda: None)() for m in methods]\n\n@attempt\ndef fpth(filePath): # rig:3 \n '''File does not exist.'''\n return filePath and path.exists(filePath) and filePath or None\n\n@attempt\ndef loadf(mode, filePath): # rig:3 \n '''Can't load specified file.'''\n return fpth(filePath) and open(filePath, mode) or None\n\n@attempt\ndef goodStream(stream):\n '''Not a valid byte stream.'''\n return getattr(stream, \"readlines\", None) and stream\n\n@attempt\ndef firstGood(*streams):\n '''Did not receive any good byte streams.'''\n return firstOrNone(*filter(goodStream, streams))\n\n@attempt\ndef linesOf(stream): # rig:3 \n '''Could not read lines out of stream.'''\n return firstOrNone(*mchain(stream, \"readlines\", \"close\"))\n\n@attempt\ndef goodLines(linesrc): # rig:3 \n '''Bad line list.'''\n return hasattr(linesrc, \"__iter__\") or None\n\n@attempt\ndef _cleanLines(cleanWith, lines):\n '''Can not clean the lines list with method specified.'''\n if goodLines(lines) and callable(cleanWith):\n return map(cleanWith, lines)\n return None\ntest(_cleanLines, [\"\",\"\"], lambda x: \"\", [\"aoeu\", \"ntehu\"])\ntest(_cleanLines, [\"aoeu\",\"ueoa\"], lambda x: x.strip(), [\" aoeu \", \"\\nueoa\\t\"])\ntest(_cleanLines, [3,4], lambda x: x+2, [1,2])\n\n@attempt\ndef bytesOf(stream):\n '''Could not read data from stream.'''\n return firstOrNone(*mchain(stream, \"read\", \"close\"))\n\n@attempt\ndef readFile(filePath, mode, *etc): # rig:3\n '''Failed to load file'''\n return pipeline(filePath,\n fpth,\n partial(loadf, mode),\n *etc)\n\n@attempt\ndef readFromFile(filePath, alternate, mode, *etc): # rig:3 \n '''Failed to open file.'''\n return pipeline(firstGood(readFile(filePath, mode), alternate), *etc)\n\n@attempt\ndef readLines(filePath, alternate=stdin, cleaner=str.strip): # rig:3 \n '''See above for failure information.'''\n return 
readFromFile(filePath, \n alternate, \n \"rt\", \n linesOf,\n partial(_cleanLines, cleaner))\n\n@attempt\ndef readBytes(filePath, alternate=stdin, mode=\"rt\"): # rig:3 \n '''See above for failure information.'''\n return readFromFile(filePath,\n alternate,\n mode,\n bytesOf)\n\n@attempt\ndef writeTo(lines, stream): # rig:3 \n '''Unable to write to stream.'''\n return goodStream(stream) and firstOrNone(stream.writelines(lines) is None,\n stream.close())\n\n@attempt\ndef writeLines(filePath, *lines): # rig:3 \n return pipeline(filePath,\n fpth,\n partial(loadf, \"wt\"),\n partial(writeTo, lines))\n\n@reportyFunc\ndef test_firstGood():\n class DummyGoodStream:\n def readlines(self, *aoeu):\n return True\n def test_1():\n return firstGood(None, DummyGoodStream(), None).readlines()\n return test_1()\n\n@reportyFunc\ndef test_goodStream():\n class DummyGoodStream:\n def readlines(self, *aoeu):\n return True\n def test_1():\n return goodStream(DummyGoodStream()) is not None\n def test_2():\n return goodStream(None) is None\n return test_1() and test_2()\n\n@reportyFunc\ndef test_mchain():\n class DummyObject:\n def first(self, *received):\n return 1\n def second(self, *received):\n return 2\n def test_1():\n return mchain(DummyObject(), \"first\", \"second\")==[1,2]\n def test_2():\n return mchain(DummyObject(), \"first\", \"aoeu\", \"second\")==[1,None,2]\n return test_1() and test_2()\n\n@reportyFunc\ndef test_bytesOf():\n class ByteSource():\n def close(self, *etc):\n return True\n def read(self, *etc):\n return \"Hooray\"\n def test_1():\n return bytesOf(ByteSource())==\"Hooray\"\n def test_2():\n return bytesOf(None) is None\n return test_1() and test_2()\n\ndef testModule():\n return [test_firstGood, test_mchain, test_goodStream, test_bytesOf]\n\nif __name__==\"__main__\":\n if all(map(apply, testModule())):\n print \"OK\"\n else:\n print 
\"FAILURES!\"\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1879,"cells":{"__id__":{"kind":"number","value":10625749108008,"string":"10,625,749,108,008"},"blob_id":{"kind":"string","value":"77f5b1e4e74788f304c5296a06700453efebe08f"},"directory_id":{"kind":"string","value":"150d783ac685ebf61e4a2d4f9558b90ead7ab629"},"path":{"kind":"string","value":"/bin/oncall.py"},"content_id":{"kind":"string","value":"80a253300e06d3619b4cb3c338ea01f5167db1cf"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"magicaltrevor/Oncall"},"repo_url":{"kind":"string","value":"https://github.com/magicaltrevor/Oncall"},"snapshot_id":{"kind":"string","value":"1391ca7cfebaa2da75a571781e76ae9e2ce09163"},"revision_id":{"kind":"string","value":"e0f45a4d3b6b7b28e9bff961ffcf7bca754a9544"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-25T02:01:40.383350","string":"2020-12-25T02:01:40.383350"},"revision_date":{"kind":"timestamp","value":"2012-03-10T20:48:12","string":"2012-03-10T20:48:12"},"committer_date":{"kind":"timestamp","value":"2012-03-10T20:48:12","string":"2012-03-10T20:48:12"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},
"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\nfrom optparse import OptionParser\n\n# add this file location to sys.path\ncmd_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nif cmd_folder not in sys.path:\n sys.path.insert(-1, cmd_folder)\n sys.path.insert(-1, cmd_folder + \"/classes\")\n\nimport mysql_layer as mysql\nimport twilio_layer as twilio\nimport user_layer as User\nimport alert_layer as Alert\nimport util_layer as Util\n\nconf = Util.load_conf()\n\nUtil.init_logging(\"client\")\n\ndef user():\n\t'''\n\tThis function handles the rest of the command as it pertains to a user(s).\n\t'''\n\t# Parse the command line\n\tparser = OptionParser()\n\tparser.add_option('-i', '--id', dest='id', help='User id', type='int', default=0)\n\tparser.add_option('-n', '--name', dest='name', help='User name', type='string', default='')\n\tparser.add_option('-p', '--phone', dest='phone', help='Phone number', type='string', default='')\n\tparser.add_option('-e', '--email', dest='email', help='Email address', type='string', default='')\n\tparser.add_option('-t', '--team', dest='team', help='Team', type='string', default='')\n\t# default is set to 100 as an easy means of figuring out if an state is inputted by user\n\tparser.add_option('-s', '--state', dest='state', help='State (0 = in rotation, 3 = off rotation, 9 = global entity)', type='int', default=100)\n\tparser.add_option('-d', '--delete', dest='delete', help='Delete result of user list query', action=\"store_true\", default=False)\n\tparser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='')\n\tparser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action=\"store_true\", default=False)\n\t(opts, args) = 
parser.parse_args()\n\t\n\tuser_usage='''\noncall.py user create (options)\noncall.py user list (options)\noncall.py user edit -i (options)\n\t'''\n\n\tif (len(sys.argv) > 2) and sys.argv[2] in ['create', 'list', 'edit']:\n\t\tmode = sys.argv[2]\n\t\tif mode == \"create\": o = user_create(opts)\n\t\tif mode == \"list\": o = user_list(opts)\n\t\tif mode == \"edit\": o = user_edit(opts)\n\t\treturn o\n\telse:\n\t\treturn user_usage\n\ndef user_create(opts):\n\t'''\n\tCreate a new user in the db.\n\t'''\n\ttry:\n\t\tif opts.name == '': return \"User name is not set (-n)\"\n\t\tif opts.email == '': return \"User email is not set (-e)\"\n\t\tif opts.phone == '' and opts.state != 9: return \"User phone is not set (-p)\"\n\t\tif \"@\" not in opts.email or \".\" not in opts.email: return \"Invalid email address, try again\"\n\t\tif (opts.phone.startswith(\"+\") and len(opts.phone) == 12):\n\t\t\tpass\n\t\telse:\n\t\t\tif opts.state != 9: return \"Invalid phone number format. Must be like '+12225558888' (no quotes)\"\n\t\tif opts.team == '':\topts.team = \"default\"\n\t\tif opts.state == 100: opts.state = 0\n\t\tnewuser = User.User()\n\t\tnewuser.name = opts.name\n\t\tnewuser.email = opts.email\n\t\tnewuser.phone = opts.phone\n\t\tif opts.team != '': newuser.team = opts.team\n\t\tif opts.state != 100: newuser.state = opts.state\n\t\tnewuser.save_user()\n\t\t# validate the phone number with twilio\n\t\tif opts.state != 9:\n\t\t\tvalid_code = twilio.validate_phone(newuser)\n\t\t\tif valid_code == False:\n\t\t\t\tlogging.error(\"Unable to get a validation code for new phone number\")\n\t\t\t\treturn newuser.print_user(opts.mobile) + \"\\nUnable to get a validation code. 
Please verify new phone number through Twilio website\"\n\t\t\telif valid_code == True:\n\t\t\t\treturn newuser.print_user(opts.mobile) + \"\\nPhone has already been verified with Twilio\"\n\t\t\telse:\n\t\t\t\treturn newuser.print_user(opts.mobile) + \"\\nValidation Code: %s\" % (valid_code)\n\t\telse:\n\t\t\treturn newuser.print_user(opts.mobile)\n\texcept Exception, e:\n\t\tlogging.error(\"Failed to create new user: %s\" % (e))\n\t\treturn \"Failed to create user: %s\" % (e.__str__())\n\t\t\ndef user_list(opts):\n\t'''\n\tList users. Filter with options.\n\t'''\n\tall_users = User.all_users()\n\tusers = []\n\t# init these variables with value True\n\t(id, name, phone, email, team, state) = [True] * 6\n\t# filter users with options given\n\tfor u in all_users:\n\t\tif opts.id != 0 and u.id != opts.id: id = False\n\t\tif opts.name != '' and u.name != opts.name: name = False\n\t\tif opts.phone != '' and u.phone != opts.phone: phone = False\n\t\tif opts.email != '' and u.email != opts.email: email = False\n\t\tif opts.team != '' and u.team != opts.team: team = False\n\t\tif opts.state != 100 and u.state != opts.state: state = False\n\t\t# see if all values given match attributes for user object\n\t\tif id == True and name == True and phone == True and email == True and team == True and state == True: users.append(u)\n\tif len(users) == 0: return \"No users.\"\n\tif opts.delete == True:\n\t\toutput = \"Deleting users...\\n\"\n\telse:\n\t\toutput = ''\n\tfor u in users:\n\t\toutput=output + \"%s\" % (u.print_user(opts.mobile))\n\t\tif opts.delete == True: u.delete_user()\n\treturn output\n\ndef user_edit(opts):\n\t'''\n\tMaking changes to a user account with options inputted.\n\t'''\n\tif opts.id == '' or opts.id == 0: return \"User id is not set (-i)\"\n\tuser = User.User(opts.id)\n\tif opts.name != '': user.name = opts.name\n\tif opts.phone != '': user.phone = opts.phone\n\tif opts.email != '': user.email = opts.email\n\tif opts.team != '':\tuser.team = 
opts.team\n\tif opts.state != '' and opts.state != 100: user.state = opts.state\n\tuser.save_user()\n\treturn user.print_user(opts.mobile)\n\ndef alert():\n\t'''\n\tThis function handles the rest of the command as it pertains to an alert(s).\n\t'''\n\t# Parse the command line\n\tparser = OptionParser()\n\tparser.add_option('-i', '--id', dest='id', help='Alert id', type='int', default=0)\n\tparser.add_option('-t', '--team', dest='team', help='The team you want to send the message to', type='string', default='default')\n\tparser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='')\n\tparser.add_option('-a', '--ack', dest='ack', help='Ack the results of alert list query', action=\"store_true\", default=False)\n\tparser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action=\"store_true\", default=False)\n\t(opts, args) = parser.parse_args()\n\t\n\tuser_usage='''\noncall.py alert status -t -a\noncall.py alert ack -i -f \n\t'''\n\n\tif (len(sys.argv) > 2) and sys.argv[2] in ['status', 'ack']:\n\t\tmode = sys.argv[2]\n\t\t#if mode == \"create\": o = alert_create(opts)\n\t\tif mode == \"status\": o = alert_status(opts)\n\t\t#if mode == \"acked\": o = alert_acked(opts)\n\t\t#if mode == \"all\": o = alert_all(opts)\n\t\tif mode == \"ack\": o = alert_ack(opts)\n\t\treturn o\n\telse:\n\t\treturn user_usage\n\ndef alert_create(opts):\n\t'''\n\tCreating a new alert. 
Currently not in use.\n\t'''\n\ttry:\n\t\tif opts.subject == '': return \"Subject is not set (-s)\"\n\t\tif opts.message == '': return \"Message is not set (-m)\"\n\t\tif opts.team == '': opts.team = \"default\"\n\t\tnewalert = Alert.Alert()\n\t\tnewalert.subject = opts.subject\n\t\tnewalert.message = opts.message\n\t\tif opts.team != '': newalert.team = opts.team\n\t\tnewalert.save_alert()\n\t\treturn newalert.print_alert(opts.mobile)\n\texcept Exception, e:\n\t\treturn \"Failed to create alert: %s\" % (e.__str__())\n\ndef alert_status(opts):\n\t'''\n\tPrinting out alerts that haven't been acked. If -a is given, will ack them.\n\t'''\n\tuser = None\n\talerts = Alert.status()\n\tif len(alerts) == 0: return \"No active alerts.\"\n\tif opts.ack == True:\n\t\tif opts._from == '':\n\t\t\treturn \"Must use option -f to ack alerts\"\n\t\telse:\n\t\t\tuser = User.get_user_by_phone(opts._from)\n\t\t\toutput = \"Acking alerts as %s...\\n\" % (u.name)\n\telse:\n\t\toutput = ''\n\tfor a in alerts:\n\t\toutput=output + \"%s\" % (a.print_alert(opts.mobile))\n\t\tif user != None: a.ack_alert(user)\n\treturn output\n\ndef alert_acked(opts):\n\t'''\n\tPrinting out alerts acked. Currently not in use.\n\t'''\n\talerts = Alert.acked()\n\tif len(alerts) == 0: return \"No acked alerts.\"\n\toutput = ''\n\tfor a in alerts:\n\t\toutput=output + \"%s\" % (a.print_alert(opts.mobile))\n\treturn output\n\ndef alert_all(opts):\n\t'''\n\tPrinting out all alerts. Currently not in use.\n\t'''\n\talerts = Alert.all_alerts()\n\tif len(alerts) == 0: return \"No alerts.\"\n\toutput = ''\n\tfor a in alerts:\n\t\toutput=output + \"%s\" % (a.print_alert(opts.mobile))\n\treturn output\n\ndef alert_ack(opts):\n\t'''\n\tAcking a specific alert. 
Assumes the last alert to be sent to user if not given.\n\t'''\n\tuser = None\n\tif opts._from == '': return \"Must use option -f to go on/off call\"\n\tuser = User.get_user_by_phone(opts._from)\n\tif user == False: return \"No user ends with that phone number (-f)\"\n\toutput = \"Acking alerts as %s...\\n\" % (user.name)\n\tif opts.id > 0:\n\t\talert = Alert.Alert(opts.id)\n\t\talert.ack_alert(user)\n\t\treturn \"Acknowledged\"\n\tif user.lastAlert > 0:\n\t\talert = Alert.Alert(user.lastAlert)\n\t\talert.ack_alert(user)\n\t\treturn \"Acknowledged\"\n\telse:\n\t\treturn \"No alert associated with your user\"\n\ndef oncall():\n\t# Parse the command line\n\tparser = OptionParser()\n\tparser.add_option('-s', '--state', dest='state', help='On call stage (1 = primary, 2= secondary, etc)', type='int', default=1)\n\tparser.add_option('-t', '--team', dest='team', help='A team name', type='string', default='default')\n\tparser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='')\n\tparser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action=\"store_true\", default=False)\n\t(opts, args) = parser.parse_args()\n\t\n\tuser_usage='''\noncall.py oncall on -s -f \noncall.py oncall off -f \noncall.py oncall status -t \n\t'''\n\n\tif (len(sys.argv) > 2) and sys.argv[2] in ['on', 'off', 'status']:\n\t\tmode = sys.argv[2]\n\t\tif mode == \"off\": opts.state = 0\n\t\tif mode == \"on\" or mode == \"off\": o = oncall_change(opts)\n\t\tif mode == \"status\": o = oncall_status(opts)\n\t\treturn o\n\telse:\n\t\treturn user_usage\n\ndef oncall_change(opts):\n\t'''\n\tChange your own oncall status\n\t'''\n\tuser = None\n\tif opts._from == '': return \"Must use option -f to go on/off call\"\n\tuser = User.get_user_by_phone(opts._from)\n\tif user == False: return \"No user ends with that phone number 
(-f)\"\n\tuser.print_user(opts.mobile)\n\tuser.state = opts.state\n\tuser.save_user()\n\tif user.state > 0:\n\t\treturn \"You, %s, are now on call\" % user.name\n\telse:\n\t\treturn \"You, %s, are now off call\" % user.name\n\ndef oncall_status(opts):\n\t'''\n\tGet a list of people oncall for a specific team\n\t'''\n\tusers = User.on_call(opts.team)\n\toncall_users = []\n\tfor u in users:\n\t\tif u.state > 0 and u.state < 9:\n\t\t\toncall_users.append(u)\n\tif len(oncall_users) == 0: return \"No one is on call on the %s team.\" % (opts.team)\n\toutput = ''\n\tfor user in oncall_users:\n\t\toutput=output + \"%s\" % (user.print_user(opts.mobile))\n\treturn output\n\ndef run(args):\n\t'''\n\tThis gets run from oncall-server to execute the Oncall CLI\n\t'''\n\t# convert argsuments into input params\n\tsys.argv = args.split()\n\t# gotta pad the arguments because usually sys.argv[0] is the python file name\n\tsys.argv.insert(0, 'spacer')\n\treturn main()\n\ndef main():\n\tusage = '''\noncall.py user create (options)\noncall.py user list (options)\noncall.py user edit -i (options)\n\noncall.py alert status -t -a\noncall.py alert ack -i -f \n\noncall.py oncall on -s -f \noncall.py oncall off -f \noncall.py oncall status -t \n'''\n\n\t# converting all parameters to be lowercase to remove any case sensitivity\n\tsys.argv = map(lambda x:x.lower(),sys.argv)\n\n\tif (len(sys.argv) > 1) and sys.argv[1] in ['user', 'users', 'status', 'alert', 'alerts', 'ack', 'rotation', 'oncall']:\n\t\tmode = sys.argv[1]\n\t\tif mode == \"user\" or mode == 'users': o = user()\n\t\tif mode == \"alert\" or mode == 'alerts': o = alert()\n\t\tif mode == \"status\":\n\t\t\tsys.argv.insert(1, \"alert\")\n\t\t\to = alert()\n\t\tif mode == \"ack\": \n\t\t\tsys.argv.insert(1, \"alert\")\n\t\t\to = alert()\n\t\tif mode == \"oncall\": o = oncall()\n\t\t#if mode == \"rotation\": o = rotation()\n\t\tlogging.info(\"Oncall.py output: %s\" % o)\n\t\treturn o\n\telse:\n\t\treturn usage\n\nif __name__ == 
\"__main__\": print main()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1880,"cells":{"__id__":{"kind":"number","value":14955076149244,"string":"14,955,076,149,244"},"blob_id":{"kind":"string","value":"905e6e08c5a85084fc83c01090a3a230529c3cd8"},"directory_id":{"kind":"string","value":"10ba33b74911be372eff006f3f63c2f42b3f97ec"},"path":{"kind":"string","value":"/calc_ambiguity.py"},"content_id":{"kind":"string","value":"4a42f36945f2837f1fe60a12a72618a26bc225ec"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"HenryMarshall/softKey"},"repo_url":{"kind":"string","value":"https://github.com/HenryMarshall/softKey"},"snapshot_id":{"kind":"string","value":"36f5e29e2cf040c8cdb9d680fdca396e32fa0bef"},"revision_id":{"kind":"string","value":"7adae326acdef794ed0f31a7538f437651aa912f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-28T05:17:42.770610","string":"2021-05-28T05:17:42.770610"},"revision_date":{"kind":"timestamp","value":"2014-05-05T10:37:16","string":"2014-05-05T10:37:16"},"committer_date":{"kind":"timestamp","value":"2014-05-05T10:37:16","string":"2014-05-05T10:37:16"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null
"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import argparse\nimport pickle\n\nimport calc_vectors\nimport calc_scores\n\n# establish corpus from cli\nparser = argparse.ArgumentParser(description=\"\"\"Calculate the ambiguity of\n word pairings. Accepts words, and files prefixed with '@' (e.g., \n @corpus.txt) with \\\\n delimited words.\"\"\",\n fromfile_prefix_chars='@')\n\nparser.add_argument('words', metavar='N', type=str, nargs='+',\n help='enter a series of words')\ncorpus = parser.parse_args()\n\n# establish layouts\n# with open('character_positions.pickle', 'rb') as handle:\nwith open('layouts/random_layouts_computer.pickle', 'rb') as handle:\n layouts = pickle.load(handle)\n\n# we will write the results to a csv file\nghetto_csv = open(\"results/layout_results.csv\", \"w\")\n# create label row\nlabel_lst = [\"layout\"]\nfor i in range(101):\n label_lst.append(str(i/100.0))\n label_str = \",\".join(label_lst)\nghetto_csv.write(label_str)\nghetto_csv.write(\"\\n\")\n\n# calculate the vector path for each word on each layout\npaths = {}\nfor layout_name, layout in layouts.iteritems():\n paths[layout_name] = {}\n\n for word in corpus.words:\n word_vector = calc_vectors.calc_word_vector(word, layout)\n paths[layout_name][word] = word_vector\n\n# calculate and save the ambiguity of the corpus on each layout\nfor layout_name, layout_paths in paths.iteritems():\n ambiguity_results = calc_scores.calc_layout_results(layout_paths)\n results_lst = [str(layout_name)]\n for i in range(101):\n ambiguity_count = str(ambiguity_results[i/100.0])\n results_lst.append(ambiguity_count)\n results_str = \",\".join(results_lst)\n ghetto_csv.write(results_str)\n 
ghetto_csv.write(\"\\n\")\n\nghetto_csv.close()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1881,"cells":{"__id__":{"kind":"number","value":13735305422269,"string":"13,735,305,422,269"},"blob_id":{"kind":"string","value":"be4e6f42388a18abbbb0cac02d4bfd697fc3800f"},"directory_id":{"kind":"string","value":"0b650d4db5064e27cad3993b617499c55d0a0b8a"},"path":{"kind":"string","value":"/webpages_fetcher.py"},"content_id":{"kind":"string","value":"6d1bc5d91dcee70088a822564277a0b763752459"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ultimatecoder/webpage_downloader"},"repo_url":{"kind":"string","value":"https://github.com/ultimatecoder/webpage_downloader"},"snapshot_id":{"kind":"string","value":"195b13b4244056ae4df0cd9f8120aeb39647b05c"},"revision_id":{"kind":"string","value":"02ae3d833f6d431a6c92020a73dddf609ee622bf"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-07T10:54:48.285983","string":"2016-09-07T10:54:48.285983"},"revision_date":{"kind":"timestamp","value":"2014-10-16T09:12:36","string":"2014-10-16T09:12:36"},"committer_date":{"kind":"timestamp","value":"2014-10-16T09:12:36","string":"2014-10-16T09:12:36"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open
_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import urllib2\nfrom sys import exit, argv\nfrom os import path\n\nclass Fetcher:\n _urls = []\n _output = ''\n\n def _geturls(self, file):\n return file.read().split()\n\n def __init__(self, ifpath, ofpath):\n file = open(ifpath, 'r')\n self._urls = self._geturls(file)\n self._output = ofpath\n\n def _getwebpage(self, url):\n response = urllib2.urlopen(url)\n print(\"fetching : {}\".format(url))\n return response.read()\n\n def _writewebpage(self, webpage, fname):\n f = open(self._output + '/' + fname, 'w')\n f.write(webpage)\n f.close()\n\n def start(self):\n for i, url in enumerate(self._urls):\n self._writewebpage(self._getwebpage(url), str(i) + '.html')\n\ndef main():\n usage = 'usage: [--urlFile] url.txt webpages-dir-path'\n args = argv[1:]\n if not args:\n print(usage)\n exit(1)\n else:\n if args[0] == '--urlFile':\n try:\n urls = args[1]\n webpages = args[2]\n except IndexError:\n print(\"ERROR: Please provide enough arguments\")\n print(usage)\n exit(1)\n if path.exists(urls) and path.isdir(webpages):\n fetcher = Fetcher(urls, webpages)\n fetcher.start()\n print(\"Fetcher Task completed !\")\n else:\n print(\"ERROR : In url file path or out put webpages dir.\")\n exit(1)\n else:\n print(usage)\n\nif __name__ == '__main__':\n 
main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1882,"cells":{"__id__":{"kind":"number","value":11854109783179,"string":"11,854,109,783,179"},"blob_id":{"kind":"string","value":"00824038ba9b7664e1bd26e4b0d576535e4f4ce0"},"directory_id":{"kind":"string","value":"35277c644d4a2189aff624b7813107a35c234a93"},"path":{"kind":"string","value":"/rethinkORM/rethinkCollection.py"},"content_id":{"kind":"string","value":"a5927b62eda05ab67638ad0030818eda2243a943"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n \"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"JoshAshby/pyRethinkORM"},"repo_url":{"kind":"string","value":"https://github.com/JoshAshby/pyRethinkORM"},"snapshot_id":{"kind":"string","value":"f29393c649b013ed7f2f8af3336d8100d5b36c06"},"revision_id":{"kind":"string","value":"92158d146dea6cfe9022d7de2537403f5f2c1e02"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2022-10-20T18:22:36.339599","string":"2022-10-20T18:22:36.339599"},"revision_date":{"kind":"timestamp","value":"2014-05-06T16:15:43","string":"2014-05-06T16:15:43"},"committer_date":{"kind":"timestamp","value":"2014-05-06T16:15:43","string":"2014-05-06T16:15:43"},"github_id":{"kind":"number","value":11209106,"string":"11,209,106"},"star_events_count":{"kind":"number","value":8,"string":"8"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"string","value":"GPL-3.0"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2023-06-28T12:15:32","string":"2023-06-28T12:15:32"},"gha_created_at":{"kind":"timestamp","value":"2013-07-05T
21:50:23","string":"2013-07-05T21:50:23"},"gha_updated_at":{"kind":"timestamp","value":"2023-05-03T20:37:12","string":"2023-05-03T20:37:12"},"gha_pushed_at":{"kind":"timestamp","value":"2022-09-23T21:08:21","string":"2022-09-23T21:08:21"},"gha_size":{"kind":"number","value":158,"string":"158"},"gha_stargazers_count":{"kind":"number","value":17,"string":"17"},"gha_forks_count":{"kind":"number","value":9,"string":"9"},"gha_open_issues_count":{"kind":"number","value":4,"string":"4"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"bool","value":false,"string":"false"},"gha_disabled":{"kind":"bool","value":false,"string":"false"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nQuick way to get groupings of RethinkModels objects matching the given criteria\n\"\"\"\nimport rethinkdb as r\n\n\nclass RethinkCollection(object):\n \"\"\"\n A way to fetch groupings of documents that meet a criteria and have them\n in an iterable storage object, with each document represented by\n `RethinkModel` objects\n \"\"\"\n documents = []\n table = \"\"\n _model = None\n _query = None\n _filter = {}\n _join = None\n _joinedField = None\n\n def __init__(self, model, filter=None):\n \"\"\"\n Instantiates a new collection, using the given models table, and\n wrapping all documents with the given model.\n\n Filter can be a dictionary or lambda, similar to the filters for the\n RethinkDB drivers filters.\n \"\"\"\n self._model = model\n self._query = r.table(self._model.table)\n\n if filter:\n self._filter = filter\n self._query = self._query.filter(self._filter)\n\n def joinOn(self, model, onIndex):\n \"\"\"\n Performs an eqJoin on with the given model. 
The resulting join will be\n accessible through the models name.\n \"\"\"\n return self._joinOnAsPriv(model, onIndex, model.__name__)\n\n def joinOnAs(self, model, onIndex, whatAs):\n \"\"\"\n Like `joinOn` but allows setting the joined results name to access it\n from.\n\n Performs an eqJoin on with the given model. The resulting join will be\n accessible through the given name.\n \"\"\"\n return self._joinOnAsPriv(model, onIndex, whatAs)\n\n def _joinOnAsPriv(self, model, onIndex, whatAs):\n \"\"\"\n Private method for handling joins.\n \"\"\"\n if self._join:\n raise Exception(\"Already joined with a table!\")\n\n self._join = model\n self._joinedField = whatAs\n table = model.table\n self._query = self._query.eq_join(onIndex, r.table(table))\n return self\n\n def orderBy(self, field, direct=\"desc\"):\n \"\"\"\n Allows for the results to be ordered by a specific field. If given,\n direction can be set with passing an additional argument in the form\n of \"asc\" or \"desc\"\n \"\"\"\n if direct == \"desc\":\n self._query = self._query.order_by(r.desc(field))\n else:\n self._query = self._query.order_by(r.asc(field))\n\n return self\n\n def __iter__(self):\n for doc in self._documents:\n yield doc\n\n def offset(self, value):\n \"\"\"\n Allows for skipping a specified number of results in query. Useful\n for pagination.\n \"\"\"\n\n self._query = self._query.skip(value)\n\n return self\n\n def limit(self, value):\n \"\"\"\n Allows for limiting number of results returned for query. 
Useful\n for pagination.\n \"\"\"\n self._query = self._query.limit(value)\n\n return self\n\n # Pagination helpers...\n # These are questionable, on if I'll put them in or not.\n #def paginate(self, start,finish):\n #pass\n\n #@property\n #def currentPage(self):\n #pass\n\n #@property\n #def perpage(self):\n #pass\n\n #@property\n #def hasnextpage(self):\n #pass\n\n #@property\n #def pages(self):\n #pass\n # Okay, enough pagination\n\n def fetch(self):\n \"\"\"\n Fetches the query and then tries to wrap the data in the model, joining\n as needed, if applicable.\n \"\"\"\n returnResults = []\n\n results = self._query.run()\n for result in results:\n if self._join:\n # Because we can tell the models to ignore certian fields,\n # through the protectedItems blacklist, we can nest models by\n # name and have each one act normal and not accidentally store\n # extra data from other models\n item = self._model.fromRawEntry(**result[\"left\"])\n joined = self._join.fromRawEntry(**result[\"right\"])\n item.protectedItems = self._joinedField\n item[self._joinedField] = joined\n\n else:\n item = self._model.fromRawEntry(**result)\n\n returnResults.append(item)\n\n self._documents = returnResults\n return self._documents\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1883,"cells":{"__id__":{"kind":"number","value":3444563819594,"string":"3,444,563,819,594"},"blob_id":{"kind":"string","value":"626fc0af8b8cc2d192bebf56a4662066cfb4d399"},"directory_id":{"kind":"string","value":"20a872331e80f6ad11752fa2d9d63864c2812b10"},"path":{"kind":"string","value":"/test/base.py"},"content_id":{"kind":"string","value":"c9a0dd429ed3dec77bd950859a24dba02b944903"},"detected_licenses":{"kind":"list 
like","value":["GPL-3.0-only","LicenseRef-scancode-unknown-license-reference","AGPL-3.0-or-later"],"string":"[\n \"GPL-3.0-only\",\n \"LicenseRef-scancode-unknown-license-reference\",\n \"AGPL-3.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"smurfix/pybble"},"repo_url":{"kind":"string","value":"https://github.com/smurfix/pybble"},"snapshot_id":{"kind":"string","value":"1ee535c74ae73605bc725a1ae1a41ef83190d000"},"revision_id":{"kind":"string","value":"305ba81d4600abb4d575b39926abc76992696c17"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T23:53:05.310683","string":"2021-01-22T23:53:05.310683"},"revision_date":{"kind":"timestamp","value":"2014-07-23T16:41:10","string":"2014-07-23T16:41:10"},"committer_date":{"kind":"timestamp","value":"2014-07-23T16:41:10","string":"2014-07-23T16:41:10"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division, unicode_literals\n##\n## This is part of Pybble, a WMS (Whatever Management System) based on\n## Jinja2/Haml, Werkzeug, Flask, and Optimism.\n##\n## Pybble is Copyright © 2009-2014 by Matthias Urlichs ,\n## it is licensed under the GPLv3. 
See the file `README.md` for details,\n## including an optimistic statements by the author.\n##\n## This paragraph is auto-generated and may self-destruct at any time,\n## courtesy of \"make update\". The original is in ‘utils/_boilerplate.py’.\n## Thus, please do not remove the next line, or insert any blank lines.\n##BP\n\nimport sys,os\nsys.path.insert(0,os.pardir)\n\nfrom pybble import ROOT_SITE_NAME\nimport unittest\nimport datetime\nimport flask\nfrom wsgi_intercept import WSGI_HTTPConnection,WSGI_HTTPSConnection\nfrom pybble.manager.main import SubdomainDispatcher\nfrom pybble.core.db import init_db\nfrom pybble.core import config as pybble_config\n\nmain_app = None\n\nclass Fake_HTTPConnection(WSGI_HTTPConnection):\n\tdef get_app(self, host, port):\n\t\treturn main_app,\"\"\nclass Fake_HTTPSConnection(WSGI_HTTPSConnection):\n\tdef get_app(self, host, port):\n\t\treturn main_app,\"\"\n\ntry:\n\tfrom wsgi_intercept import http_client_intercept\nexcept ImportError:\n\tskip_httpclient = True\nelse:\n\tskip_httpclient = False\n\thttp_client_intercept.HTTPInterceptorMixin = Fake_HTTPConnection\n\thttp_client_intercept.HTTPSInterceptorMixin = Fake_HTTPSConnection\n\ntry:\n\tfrom wsgi_intercept import httplib2_intercept\nexcept ImportError:\n\tskip_httplib2 = True\nelse:\n\tskip_httplib2 = False\n\thttplib2_intercept.InterceptorMixin = Fake_HTTPConnection\n\ntry:\n\tfrom wsgi_intercept import requests_intercept\nexcept ImportError:\n\tskip_requests = True\nelse:\n\tskip_requests = False\n\trequests_intercept.InterceptorMixin = Fake_HTTPConnection\n\ntry:\n\tfrom wsgi_intercept import urllib_intercept\nexcept ImportError:\n\tskip_urllib = True\nelse:\n\tskip_urllib = False\n\turllib_intercept.HTTPInterceptorMixin = Fake_HTTPConnection\n\turllib_intercept.HTTPSInterceptorMixin = Fake_HTTPSConnection\n\nfrom pybble.core.db import db\nfrom pybble.core.models.site import Site,Blueprint\nfrom pybble.core.models.config import 
ConfigVar,SiteConfigVar\n\ndid_once=set()\nclass TC(unittest.TestCase):\n\tTESTING = True\n\tapp_class = flask.Flask\n\ttestsite=None\n\n\tdef once(self,proc):\n\t\tif proc in did_once:\n\t\t\treturn\n\t\tdid_once.add(proc)\n\t\treturn proc()\n\n\tdef clear_db(self):\n\t\tpass\n\n\tdef setUp(self):\n\t\tsuper(TC,self).setUp()\n\t\tapp = self.app_class(__name__)\n\t\tapp.config = pybble_config\n\t\tapp.config.from_object(self)\n\t\tapp.config.from_object(\"TEST\")\n\t\tinit_db(app)\n\n\t\tself.app = app\n\t\tself.ctx = app.test_request_context()\n\t\tself.ctx.push()\n\t\tself.cleanData()\n\n\t\tif self.testsite:\n\t\t\ttry:\n\t\t\t\ts = Site.q.get_by(name=self.testsite)\n\t\t\texcept NoData:\n\t\t\t\ts = Site.new(name=self.testsite, domain=self.testsite)\n\t\t\t\tdb.session.flush()\n\t\t\tflask.current_app.site = s\n\t\telse:\n\t\t\tflask.current_app.site = Site.q.get_by(name=ROOT_SITE_NAME)\n\t\tself.setupData()\n\t\tself.setupRest()\n\n\tdef cleanData(self):\n\t\tpass\n\tdef setupData(self):\n\t\tpass\n\tdef setupRest(self):\n\t\tpass\n\t\n\tdef tearDown(self):\n\t\tself.ctx.pop()\n\t\tsuper(TC,self).tearDown()\n\nclass WebTC(TC):\n\tdef setupRest(self):\n\t\tfrom pybble.app import make_cfg_app\n\t\tsuper(WebTC,self).setupRest()\n\t\tglobal main_app\n\t\tapp = make_cfg_app()\n\t\tmain_app = SubdomainDispatcher(app)\n\n\t\tif not skip_httpclient:\n\t\t\thttp_client_intercept.install()\n\t\tif not skip_httplib2:\n\t\t\thttplib2_intercept.install()\n\t\tif not skip_requests:\n\t\t\trequests_intercept.install()\n\t\tif not skip_urllib:\n\t\t\turllib_intercept.install_opener()\n\n\tdef tearDown(self):\n\t\tif not skip_httpclient:\n\t\t\thttp_client_intercept.uninstall()\n\t\tif not skip_httplib2:\n\t\t\thttplib2_intercept.uninstall()\n\t\tif not skip_requests:\n\t\t\trequests_intercept.uninstall()\n\t\tif not 
skip_urllib:\n\t\t\turllib_intercept.uninstall_opener()\n\t\t\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1884,"cells":{"__id__":{"kind":"number","value":13572096660684,"string":"13,572,096,660,684"},"blob_id":{"kind":"string","value":"57e55ff8917ca9c673bfe88ba1234e11e54404b8"},"directory_id":{"kind":"string","value":"05f05d576d5aa29190cf83f0b4b9a1eea2fd555a"},"path":{"kind":"string","value":"/src/pyPA/__init__.py"},"content_id":{"kind":"string","value":"26e38f5fb2fa3e6951d463b1d27a58bcd58635c7"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"barronh/pypa"},"repo_url":{"kind":"string","value":"https://github.com/barronh/pypa"},"snapshot_id":{"kind":"string","value":"476caab2704f545cd578894f3e2f4534952fc2c1"},"revision_id":{"kind":"string","value":"5e10eecd5bfed19f95dd92f1f1667fd1ae45d3eb"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T15:08:30.103421","string":"2016-09-05T15:08:30.103421"},"revision_date":{"kind":"timestamp","value":"2014-02-19T04:07:23","string":"2014-02-19T04:07:23"},"committer_date":{"kind":"timestamp","value":"2014-02-19T04:07:23","string":"2014-02-19T04:07:23"},"github_id":{"kind":"number","value":32350405,"string":"32,350,405"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind"
:"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"__doc__ = r\"\"\"\n.. _pyPA\n:mod:`pyPA` -- Python-based Process Analysis\n============================================\n\n.. module:: pyPA\n :platform: Unix, Windows\n :synopsis: Provides tools for analyzing Air Quality Model Process Analysis \n data\n.. moduleauthor:: Barron Henderson \n\"\"\"\n__all__=['utils','pappt', 'test']\nif __name__ != '__main__':\n import utils\n import pappt\n import cmaq\n \n from test import run as test\nelse:\n from pyPA.main import run\n run()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1885,"cells":{"__id__":{"kind":"number","value":16707422817896,"string":"16,707,422,817,896"},"blob_id":{"kind":"string","value":"9da29893e98c43df5036050f97db105176341aea"},"directory_id":{"kind":"string","value":"ee232ec9f522eb06996ece7417712bfaa8ee2c2a"},"path":{"kind":"string","value":"/setup.py"},"content_id":{"kind":"string","value":"b4edec4c7ee5336e52b634163a5c9ec859a89ce7"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"thet/elevate.cynin.fixes"},"repo_url":{"kind":"string","value":"https://github.com/thet/elevate.cynin.fixes"},"snapshot_id":{"kind":"string","value":"d048097111f8ba25fa7495a6a854231051c67e48"},"revision_id":{"kind":"string","value":"18fedff42227e7f943c666fbba1872a805a48109"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T16:49:40.741030","string":"2020-12-24T16:49:40.741030"},"revision_date":{"kind":"timestamp","value":"2012-07-06T14:36:49","string":"2012-07-06T14:36:49"},"committer_date":{"kind":"timestamp","value":"2012-07-06T14:36:49","string":"2012-07-06T14:36:49"},"github_id":{"kind":"number","value":1587032,"string":"1,587,032"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from setuptools import setup, find_packages\nimport os\n\nversion = '1.0'\n\nsetup(name='elevate.cynin.fixes',\n version=version,\n description=\"Cynin fixes for elevate\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from\n # http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n ],\n keywords='cynin, plone',\n author='Johannes Raggam',\n author_email='raggam-nl@adm.at',\n 
url='https://github.com/thet/elevate.cynin.fixes',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['elevate', 'elevate.cynin'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n # -*- Extra requirements: -*-\n 'collective.monkeypatcher',\n 'collective.autolinks',\n 'Products.CacheSetup',\n 'plone.contentratings',\n ],\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1886,"cells":{"__id__":{"kind":"number","value":18408229841258,"string":"18,408,229,841,258"},"blob_id":{"kind":"string","value":"4e3321d90b6ba7a29b2977d2f847fad8f643e485"},"directory_id":{"kind":"string","value":"479a2fdc36191b6f5514d24b1a19307c7a7157b8"},"path":{"kind":"string","value":"/hitime/md_io.py"},"content_id":{"kind":"string","value":"0e8be5904955af956a9c00423bd8325aa9f93f1d"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"bjpop/HiTIME"},"repo_url":{"kind":"string","value":"https://github.com/bjpop/HiTIME"},"snapshot_id":{"kind":"string","value":"eaf73e81f9432bb746938edb44e4b431cac5edb1"},"revision_id":{"kind":"string","value":"c0671709bde7167305dd7ce2d440c1e370ff54ac"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T13:56:01.072200","string":"2021-01-17T13:56:01.072200"},"revision_date":{"kind":"timestamp","value":"2014-10-06T05:02:42","string":"2014-10-06T05:02:42"},"committer_date":{"kind":"timestamp","value":"2014-10-06T05:02:42","string":"2014-10-06T05:02:42"},"github_id":{"kind":"number","value":24574268,"string":"24,574,268"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2015-02-20T03:39:19","string":"2015-02-20T03:39:19"},"gha_created_at":{"kind":"timestamp","value":"2014-09-29T00:29:12","string":"2014-09-29T00:29:12"},"gha_updated_at":{"kind":"timestamp","value":"2014-09-29T00:53:31","string":"2014-09-29T00:53:31"},"gha_pushed_at":{"kind":"timestamp","value":"2014-10-06T05:02:52","string":"2014-10-06T05:02:52"},"gha_size":{"kind":"number","value":140,"string":"140"},"gha_stargazers_count":{"kind":"number","value":0,"string":"0"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/bin/env python\n\nfrom lxml import etree\nimport sys\nimport resource\nimport base64\nimport struct\nimport numpy as np\nfrom itertools import *\nimport math\nimport csv\nimport 
logging\nimport os\nimport os.path\nimport pymzml\nfrom collections import deque\nimport resource\n\n# add dir of this (and following) file to path\nsys.path.append(os.path.realpath(__file__))\nimport md_filter\n\n\n# helper funtion for memory profiling\ndef memory_usage_resource():\n import resource\n rusage_denom = 1024\n if sys.platform == 'darwin':\n # ... it seems that in OSX the output is different units ...\n rusage_denom = rusage_denom * rusage_denom\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / float(rusage_denom)\n return mem\n\nendianMap = { 'little': '<', 'big': '>' }\nprecisionMap = { '64' : 'd', '32': 'f' }\n\n\n# convert the base64 encoded data into an array of floating point values\n# given the endianness and precision of the raw data\ndef interpretBinary(data, endian, precision):\n precisionChar = precisionMap[precision]\n endianChar = endianMap[endian]\n decoded = base64.b64decode(data)\n count = len(decoded) / struct.calcsize(endianChar + precisionChar)\n return struct.unpack(endianChar + precisionChar * count, decoded[0:len(decoded)])\n\n# find the binary data, endianness and precision of a single spectrum element\ndef getMZDATASpectrum(spectrum, tag):\n for child in spectrum:\n if child.tag == tag:\n for binaryChild in child:\n if binaryChild.tag == 'data':\n endian = binaryChild.get('endian')\n precision = binaryChild.get('precision')\n binary = binaryChild.text\n return interpretBinary(binary, endian, precision)\n\n# find the timestamp of a spectrum, returns None if no timestamp was found.\ndef getSpectrumTime(spectrum):\n desc = spectrum.find('spectrumDesc')\n settings = desc.find('spectrumSettings')\n instrument = settings.find('spectrumInstrument')\n for param in instrument.iter('cvParam'):\n if param.get('name') == 'TimeInMinutes':\n return param.get('value')\n return None\n\n\n# an encapsulation of a single Spectrum element containing its\n# identity, decoded mz data and decoded intensity data\nclass Spectrum(object):\n 
def __init__(self, id, time, mzs, intensities):\n self.time = time\n self.mzs = mzs\n self.intensities = intensities\n self.id = int(id)\n\n\ndef parseMZDATA(options):\n filename = options.inputFile\n result = []\n # parse the XML document\n tree = etree.parse(filename)\n # get the root element\n root = tree.getroot()\n # iterate over the spectrum elements\n for spectrum in root.iter('spectrum'):\n # get the mz data for the spectrum\n mzData = getMZDATASpectrum(spectrum, 'mzArrayBinary')\n # get the intensity data for the spectrum\n intData = getMZDATASpectrum(spectrum, 'intenArrayBinary')\n time = getSpectrumTime(spectrum)\n result.append(Spectrum(spectrum.get('id'), time, mzData, intData))\n return result\n\n\ndef writeResults(stream, spectrum, scores=None):\n if scores is not None:\n rt = spectrum.time\n for mz, amp, val in zip(spectrum.mzs, spectrum.intensities, scores):\n# if val > 0.0:\n# print >> stream, '{}, {}, {}, {}'.format(rt, mz, amp, val)\n if val[0] > 0.0:\n print >> stream, '{}, {}, {}, {}'.format(rt, mz, amp, ', '.join([str(v) for v in val]))\n else:\n rt = spectrum.time\n for mz, amp in zip(spectrum.mzs, spectrum.intensities):\n print >> stream, '{}, {}, {}'.format(rt, mz, amp)\n\n\ndef MZMLtoSpectrum(options):\n filename = options.inputFile\n delta_time = 0\n time_prev = 0\n points = 0\n mean = 0\n time = 0\n msrun = pymzml.run.Reader(filename)\n for n,spectrum in enumerate(msrun):\n mzData = np.array(spectrum.mz, dtype=\"float32\")\n intData = np.array(spectrum.i, dtype=\"uint64\")\n points += len(intData)\n mean += sum(intData)\n try:\n time = spectrum['MS:1000016']\n delta_time += (time - time_prev - delta_time)/(n+1) # incremental update to mean delta_time\n time_prev = time\n except KeyError:\n time_prev = time\n if delta_time > 0:\n time += delta_time\n else:\n time += 1.0\n yield Spectrum(n, time, mzData, intData)\n\n if points > 0:\n mean /= float(points)\n else:\n exit(\"Zero spectra read from mz data file, did you specify the wrong 
input format?\")\n logging.info('mzdata input file parsed, {0} ({1}) spectra (data points) read in'.format(n+1, points))\n logging.info('time delta: %g, mean signal: %g' % (delta_time, mean))\n\n\ndef nextWindow(reader, options, half_window):\n '''\n Use iterators to serve up data when needed\n '''\n pad_front = repeat(Spectrum(0, 0.0, [0.0], [0.0]), half_window + 1) # extra one at start that gets ignored\n pad_back = repeat(Spectrum(0, 0.0, [0.0], [0.0]), half_window)\n items = chain(pad_front, reader(options), pad_back)\n # 1st window\n data = list(islice(items, 0, 2 * half_window + 1 ))\n data_deque = deque(data)\n # rest\n for i, scan in enumerate(items):\n data_deque.popleft()\n data_deque.append(scan)\n yield list(data_deque)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1887,"cells":{"__id__":{"kind":"number","value":13615046367660,"string":"13,615,046,367,660"},"blob_id":{"kind":"string","value":"ce7e5c49d3211e620982b309b3c0c110971ea428"},"directory_id":{"kind":"string","value":"df74cad3e64e7f86cc1b13211f9a46551a2c8599"},"path":{"kind":"string","value":"/object_proxy/_lambda_relations.py"},"content_id":{"kind":"string","value":"9bf02e8eab6aec6b46016296f8ca7b25e9553728"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause","LicenseRef-scancode-unknown-license-reference"],"string":"[\n \"BSD-3-Clause\",\n 
\"LicenseRef-scancode-unknown-license-reference\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"Montegasppa/ObjectProxy"},"repo_url":{"kind":"string","value":"https://github.com/Montegasppa/ObjectProxy"},"snapshot_id":{"kind":"string","value":"2e5c73661ad39d6f1ff30399b7a4d0ff886393f6"},"revision_id":{"kind":"string","value":"e4cd247dc16b3533ea0d77767000d8c35953e9f6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T16:10:06.088428","string":"2016-09-06T16:10:06.088428"},"revision_date":{"kind":"timestamp","value":"2013-12-16T21:49:47","string":"2013-12-16T21:49:47"},"committer_date":{"kind":"timestamp","value":"2013-12-16T21:49:47","string":"2013-12-16T21:49:47"},"github_id":{"kind":"number","value":15196196,"string":"15,196,196"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# coding: UTF-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n# @copyright ©2013, Rodrigo Cacilhας \n\nfrom contextlib import wraps\n\n__all__ = ['method_map']\n\n\n@apply\ndef method_map():\n\n\n def catch(wrapped, default=None):\n @wraps(wrapped)\n def wrapper(*args, **kwargs):\n try:\n return wrapped(*args, **kwargs)\n except:\n return default\n return wrapper\n\n\n return (\n ('__abs__', abs),\n ('__add__', lambda target, o: target + o),\n ('__and__', lambda target, o: target & o),\n ('__bool__', 
catch(bool, False)),\n ('__call__', lambda target, *args, **kwargs: target(*args, **kwargs)),\n ('__cmp__', cmp),\n ('__coerce__', coerce),\n ('__contains__', lambda target, item: item in target),\n ('__delattr__', delattr),\n ('__dir__', catch(dir, [])),\n ('__divmod__', divmod),\n ('__float__', float),\n ('__floordiv__', lambda target, o: target // o),\n ('__eq__', lambda target, o: target == o),\n ('__ge__', lambda target, o: target >= o),\n ('__getattr__', getattr),\n ('__getitem__', lambda target, key: target[key]),\n ('__getslice__', lambda target, i, j: target[i:j]),\n ('__gt__', lambda target, o: target > o),\n ('__hash__', hash),\n ('__hex__', hex),\n ('__instancecheck__', lambda target, instance: isinstance(instance, target)),\n ('__int__', int),\n ('__invert__', lambda target: ~(target)),\n ('__iter__', iter),\n ('__le__', lambda target, o: target <= o),\n ('__len__', len),\n ('__long__', long),\n ('__lshift__', lambda target, o: target << o),\n ('__lt__', lambda target, o: target < o),\n ('__mod__', lambda target, o: target % o),\n ('__mul__', lambda target, o: target * o),\n ('__ne__', lambda target, o: target != o),\n ('__neg__', lambda target: -(target)),\n ('__oct__', oct),\n ('__or__', lambda target, o: target | o),\n ('__pos__', lambda target: +(target)),\n ('__pow__', lambda target, o: target ** o),\n ('__radd__', lambda target, o: o + target),\n ('__rand__', lambda target, o: o & target),\n ('__rcmp__', lambda target, o: cmp(o, target)),\n ('__rdiv__', lambda target, o: o.__div__(target)),\n ('__reversed__', reversed),\n ('__rfloordiv__', lambda target, o: o // target),\n ('__rlshift__', lambda target, o: o << target),\n ('__rmod__', lambda target, o: o % target),\n ('__rmul__', lambda target, o: o * target),\n ('__ror__', lambda target, o: o | target),\n ('__rpow__', lambda target, o: o ** target),\n ('__rrshift__', lambda target, o: o >> target),\n ('__rshift__', lambda target, o: target >> o),\n ('__rsub__', lambda target, o: o - target),\n 
('__rtruediv__', lambda target, o: o / target),\n ('__rxor__', lambda target, o: o ^ target),\n ('__str__', bytes),\n ('__sub__', lambda target, o: target - o),\n ('__truediv__', lambda target, o: target / o),\n ('__unicode__', unicode),\n ('__xor__', lambda target, o: target ^ o),\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1888,"cells":{"__id__":{"kind":"number","value":13735305438882,"string":"13,735,305,438,882"},"blob_id":{"kind":"string","value":"87d654564f346ecf8e02f724db4034b5ef30edd5"},"directory_id":{"kind":"string","value":"3060a1174aeddec4147e22a21f828b65bcb16854"},"path":{"kind":"string","value":"/main.py"},"content_id":{"kind":"string","value":"dc958042538f368fde68907d320d6b0f1493928e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"chinnsane/wedding"},"repo_url":{"kind":"string","value":"https://github.com/chinnsane/wedding"},"snapshot_id":{"kind":"string","value":"818ef4b20d3ab2ff856b3c19548448212d2619fa"},"revision_id":{"kind":"string","value":"03834a5810f9cf247b43fe2b69188cfc7da1e679"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-07T00:41:00.221764","string":"2016-09-07T00:41:00.221764"},"revision_date":{"kind":"timestamp","value":"2014-09-23T04:41:36","string":"2014-09-23T04:41:36"},"committer_date":{"kind":"timestamp","value":"2014-09-23T04:41:36","string":"2014-09-23T04:41:36"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_cre
ated_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import jinja2, os, webapp2, json\nfrom random import shuffle\n\nfrom models.attendee import Attendee\nfrom models.question import Question, QuestionOption\nfrom models.rsvp import RSVP\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'])\n\nROOT_PATH = '/'\nRSVP_RESPONSE_PATH = '/rsv-response'\nDATA_VIWER_PATH = '/data-viewer'\nQUIZ_PATH = '/quiz'\nQUIZ_RESULTS_PATH = '/'.join([QUIZ_PATH, 'results'])\nQUIZ_FORM_PATH = '/'.join([QUIZ_PATH, 'form'])\nQUIZ_SUBMIT_PATH = '/'.join([QUIZ_PATH, 'form', 'submit'])\n\nclass MainPage(webapp2.RequestHandler):\n\n def get(self):\n template_values = {\n 'rsvp_form_action': RSVP_RESPONSE_PATH\n }\n \n util = Util(self.response)\n return util.render_template('index.html', template_values)\n\nclass RsvpResponse(webapp2.RequestHandler):\n def post(self):\n first_name = self.request.get('firstName')\n last_name = self.request.get('lastName')\n email = self.request.get('email')\n reply = self.request.get('reply')\n \n data = {\n 'status': \"error\",\n 'message': \"\"\n }\n util = Util(self.response)\n \n # See if someone's already replied\n if RSVP().get_by_email(email)[0]:\n data['message'] = \"E-mail address already used.\"\n return util.send_json_response(data, 400)\n \n # Check for empty field\n if not first_name or not last_name or not email or not reply:\n data['message'] = \"Missing required fields\"\n return util.send_json_response(data, 400)\n \n rsvp = RSVP(first_name=first_name,\n last_name=last_name,\n email=email)\n \n if int(reply) == 0:\n rsvp.reply = False\n \n else:\n 
num_adults = int(self.request.get('numAdults'))\n num_children = int(self.request.get('numChildren', 0))\n \n # Check if some how they didn't tell me how many people are coming\n if num_adults == 0:\n data['message'] = \"Missing the number of attending guests\"\n return util.send_json_response(data, 400)\n \n total_attendees = num_adults + num_children\n\n attendees = []\n for i in range(1, total_attendees + 1):\n i = str(i)\n \n # Some how you forgot your name, the meal you wanted, or both\n if not self.request.get(\"name_\" + i) or self.request.get(\"meal_\" + i) == \"none\":\n data['message'] = \"Missing a name, type of meal, or both\"\n return util.send_json_response(data, 400)\n \n attendees.append(Attendee(name=self.request.get(\"name_\" + i),\n meal_type=self.request.get(\"meal_\" + i)))\n \n rsvp.reply = True\n rsvp.num_adults = num_adults\n rsvp.num_children = num_children\n rsvp.attendees = attendees\n \n rsvp.put()\n data['status'] = \"success\"\n data['message'] = \"Successfully Saved.\"\n \n # Massage the data\n data['data'] = rsvp.to_dict()\n data['data']['created_at'] = str(data['data']['created_at'])\n\n return util.send_json_response(data)\n\nclass DataViewer(webapp2.RequestHandler):\n def get(self):\n admin_cookie = self.request.cookies.get('lee-chinn-admin', '')\n \n if admin_cookie != \"20140829\":\n self.redirect(ROOT_PATH)\n \n data = RSVP().get_all()\n template_values = {\n 'data': data\n }\n \n util = Util(self.response)\n return util.render_template('data-viewer.html', template_values)\n\nclass Quiz(webapp2.RequestHandler):\n def get(self):\n data = Question().get_all()\n data_as_dicts = []\n shuffle(data)\n truncated_data = data[:5]\n \n # Make the date usable to JSON serializable\n for question in truncated_data:\n q_dict = question.to_dict()\n q_dict['created_at'] = q_dict['created_at'].isoformat()\n q_dict['id'] = question.key.id()\n for option in q_dict['options']:\n option['created_at'] = option['created_at'].isoformat()\n \n 
data_as_dicts.append(q_dict)\n \n template_values = {\n 'data': json.dumps(data_as_dicts)\n }\n util = Util(self.response)\n return util.render_template('quiz/quiz.html', template_values)\n \nclass QuizForm(webapp2.RequestHandler):\n def get(self):\n data = Question().get_all()\n \n util = Util(self.response)\n template_values = {\n 'form_action_url': QUIZ_SUBMIT_PATH,\n 'data': data\n }\n return util.render_template('quiz/form.html', template_values)\n \nclass QuizSubmit(webapp2.RequestHandler):\n def post(self):\n question = self.request.get('question')\n num_opts = int(self.request.get('numOpts'))\n answer = int(self.request.get('answer'))\n \n qo = []\n for i in range(1, num_opts + 1):\n qo.append(QuestionOption(\n text=self.request.get('opt' + str(i)),\n isAnswer=True if answer == i else False)\n )\n\n q = Question(\n prompt=question,\n options=qo)\n q.put()\n \n self.redirect(QUIZ_FORM_PATH)\n \nclass QuizResults(webapp2.RequestHandler):\n def get(self):\n util = Util(self.response)\n return util.render_template('quiz/results.html')\n\nclass Util(object):\n def __init__(self, response):\n self.response = response\n \n def send_json_response(self, data, code=200):\n self.response.headers['Content-Type'] = \"application/json\"\n self.response.set_status(code)\n return self.response.out.write(json.dumps(data))\n \n def render_template(self, template_name, template_values={}):\n # Side effect of JINJA - doesn't play well with windows, expects *nix path systems\n # and therefore looks for a forward slash. 
Bad JINJA.\n template = JINJA_ENVIRONMENT.get_template(ROOT_PATH.join(['templates', template_name]))\n return self.response.write(template.render(template_values))\n\napplication = webapp2.WSGIApplication([\n (ROOT_PATH, MainPage),\n (RSVP_RESPONSE_PATH, RsvpResponse),\n (DATA_VIWER_PATH, DataViewer),\n (QUIZ_PATH, Quiz),\n (QUIZ_FORM_PATH, QuizForm),\n (QUIZ_SUBMIT_PATH, QuizSubmit),\n (QUIZ_RESULTS_PATH, QuizResults)\n], debug=True)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1889,"cells":{"__id__":{"kind":"number","value":3839700781085,"string":"3,839,700,781,085"},"blob_id":{"kind":"string","value":"ee17c0d600272496b9570eafce5a53d74fcb060e"},"directory_id":{"kind":"string","value":"618b3aadec53d2d19370df57a87dcd904f9ceeb6"},"path":{"kind":"string","value":"/02_add_fields_split_for_xy.py"},"content_id":{"kind":"string","value":"ceaf9251bcfe3a1b283e12a838461826246f5e5c"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"nygeog/pnetapi"},"repo_url":{"kind":"string","value":"https://github.com/nygeog/pnetapi"},"snapshot_id":{"kind":"string","value":"8eb34e6501543d54339865a270c6a5196984cda4"},"revision_id":{"kind":"string","value":"728fe8b5f54c3d642ed554d9639f5437bd76fb62"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T15:05:44.151339","string":"2020-12-24T15:05:44.151339"},"revision_date":{"kind":"timestamp","value":"2014-07-30T12:21:45","string":"2014-07-30T12:21:45"},"committer_date":{"kind":"timestamp","value":"2014-07-30T12:21:45","string":"2014-07-30T12:21:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\n#add 2 new fields (lat, lng) (similar to loop above and then use split (( for x and then , for y\n\nimport urllib2\nimport json\nimport csv\nimport sys, re, time\nimport pandas as pd\nimport glob\nimport random\n\nwith open('all_shows_working.csv', 'r') as csvinput:\n\twith open('all_shows_working_lat.csv', 'w') as csvoutput:\n\t\twriter = csv.writer(csvoutput, lineterminator='\\n')\n\t\treader = csv.reader(csvinput)\n\n\t\tall = []\n\t\trow = next(reader)\n\t\trow.append('lat')\n\t\tall.append(row)\n\n\t\tcnt_cols = len(row)\n\t\tz = cnt_cols\n\n\t\tfor row in reader:\n\n\t\t\ttry:\n\t\t\t\tlat = row[z-2].split('(', 
2)[2].split(',',1)[0]\n\t\t\t\tlat = float(lat) + 0.0001*random.randint(1, 50)\n\t\t\t\trow.append(lat)\n\t\t\t\tall.append(row)\n\n\t\t\texcept:\n\t\t\t\tlat = 0 + 0.0001*random.randint(1, 50)\n\t\t\t\trow.append(lat)\n\t\t\t\tall.append(row)\n\n\n\t\twriter.writerows(all)\n\n\nwith open('all_shows_working_lat.csv', 'r') as csvinput:\n\twith open('all_shows_working_lat_lng.csv', 'w') as csvoutput:\n\t\twriter = csv.writer(csvoutput, lineterminator='\\n')\n\t\treader = csv.reader(csvinput)\n\n\t\tall = []\n\t\trow = next(reader)\n\t\trow.append('lng')\n\t\tall.append(row)\n\n\t\tcnt_cols = len(row)\n\t\tz = cnt_cols\n\n\t\tfor row in reader:\n\n\t\t\ttry:\n\t\t\t\tlng = row[z-3].split('(', 2)[2].split(',',1)[1].strip(')').strip(' ')\n\t\t\t\tlng = float(lng) + 0.0001*random.randint(1, 50)\n\t\t\t\trow.append(lng)\n\t\t\t\tall.append(row)\n\n\t\t\texcept:\n\t\t\t\tlng = 0 + 0.0001*random.randint(1, 50)\n\t\t\t\trow.append(lng)\n\t\t\t\tall.append(row)\n\n\n\t\twriter.writerows(all)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1890,"cells":{"__id__":{"kind":"number","value":1400159351909,"string":"1,400,159,351,909"},"blob_id":{"kind":"string","value":"feb379c8ab2669ca109ce0cac226342bcd8b2e42"},"directory_id":{"kind":"string","value":"fcf5f9fa8bbac29562f28b7160902cea873789bb"},"path":{"kind":"string","value":"/pybargain_demo_client/services/nego_db_service.py"},"content_id":{"kind":"string","value":"8f2904d9ba2c251d8adb4f5b096474ee302da9c3"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"LaurentMT/pybargain_demo_client"},"repo_url":{"kind":"string","value":"https://github.com/LaurentMT/pybargain_demo_client"},"snapshot_id":{"kind":"string","value":"6c8191a6dd3f0f17d0d34af9d1a7ef0a35402aae"},"revision_id":{"kind":"string","value":"3bf1eac02be0fcedb3a9cca3fb0220ba88a40cf4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-06T07:03:24.920441","string":"2020-04-06T07:03:24.920441"},"revision_date":{"kind":"timestamp","value":"2014-08-23T19:02:31","string":"2014-08-23T19:02:31"},"committer_date":{"kind":"timestamp","value":"2014-08-23T19:02:31","string":"2014-08-23T19:02:31"},"github_id":{"kind":"number","value":23263043,"string":"23,263,043"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n'''\nA class simulating a wrapper to access a database storing Negotiations.\nFor this toy project, we store Negotiations in memory.\n'''\n\nclass NegoDbService(object):\n \n def __init__(self):\n # Initializes some dictionaries to store negotiations\n self._negos_by_id = dict()\n \n def create_nego(self, nid, nego):\n '''\n Create a nego entry in db\n Parameters:\n nid = id of the negotiation\n nego = nego object to store in db\n '''\n # Checks parameter\n if not self._check_nego(nego):\n return False\n # Checks that a nego with same id has not 
already been stored in db\n if self.get_nego_by_id(nid) is None:\n # Creates the user in db\n self._negos_by_id[nid] = nego\n return True\n else:\n return False \n \n def update_nego(self, nid, nego):\n '''\n Update a nego entry in db\n Parameters:\n nid = id of the negotiation\n nego = nego object to update in db\n '''\n # Checks parameter\n if not self._check_nego(nego):\n return False\n # Checks that a nego with same id exists in db\n if not (self.get_nego_by_id(nid) is None):\n # Updates the nego in db\n self._negos_by_id[nid] = nego\n return True\n else:\n return False \n \n def delete_nego(self, nid):\n '''\n Delete a nego entry from db\n Parameters:\n nid = id of the negotiation\n '''\n # Checks parameter\n if not nid: return False\n # Checks that a nego with same id exists in db\n if not (self.get_nego_by_id(nid) is None):\n del self._negos_by_id[nid]\n return True\n else:\n return False\n \n def get_nego_by_id(self, nid):\n '''\n Gets a nego associated to a given id\n Parameters:\n nid = id of the negotiation\n '''\n return self._negos_by_id.get(nid, None) if nid else None \n \n def get_all_negos(self):\n '''\n Gets a list of all negotiations\n '''\n return self._negos_by_id.values()\n \n def _check_nego(self, nego):\n if nego is None: return False\n else: return True \n \n \n 
\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1891,"cells":{"__id__":{"kind":"number","value":15590731290087,"string":"15,590,731,290,087"},"blob_id":{"kind":"string","value":"031a1ee00b6fcb142823ab97b05cd300e35ec806"},"directory_id":{"kind":"string","value":"f389d6857c2fcf7b9132cb64602e2dc571c1f7ef"},"path":{"kind":"string","value":"/longest_consec_seq.py"},"content_id":{"kind":"string","value":"1fa2d9e055fd5e98fcb3f58282c3c2b3789820d3"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"xblh2018/LeetcodePython"},"repo_url":{"kind":"string","value":"https://github.com/xblh2018/LeetcodePython"},"snapshot_id":{"kind":"string","value":"a6c005b6d07b0d73fc8a82ca0dc04a5d06daf3d0"},"revision_id":{"kind":"string","value":"7c3b65f82fab3405fa8ba097c3c659edcc63a330"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-09T16:51:04.660252","string":"2020-05-09T16:51:04.660252"},"revision_date":{"kind":"timestamp","value":"2014-10-14T18:25:57","string":"2014-10-14T18:25:57"},"committer_date":{"kind":"timestamp","value":"2014-10-14T18:25:57","string":"2014-10-14T18:25:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"}
,"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n'''\nLeetcode: Longest Consecutive Sequence\nGiven an unsorted array of integers, find the length of the longest consecutive elements sequence.\n\nFor example, Given [100, 4, 200, 1, 3, 2], The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.\n\nYour algorithm should run in O(n) complexity.\n'''\nfrom __future__ import division\nimport random\n\n# Using bitmap\ndef consecutive_seq(L):\n bitmap = 0\n for x in L: bitmap |= 1 << x\n max_len = cur_len = 0\n print bitmap, bin(bitmap)\n while bitmap > 0:\n bitmap, r = divmod(bitmap, 2)\n if r == 1:\n cur_len += 1\n else:\n max_len = max(max_len, cur_len)\n cur_len = 0\n return max_len\n\n\n# Using extra space to merge seq\n# Think as cluster merge, a single number is a length=1 cluster.\n# Map lowest and highest to length. To merge two neighbor clusters, only need to update it's new lowest and highest, with new length.\n# For every a[i], checking its neighbor a[i]-1 and a[i]+1 is enough.\ndef merge(seq, x, y):\n a, b = min(seq[x][0], seq[y][0]), max(seq[x][1], seq[y][1])\n seq[x] = [a,b]; seq[y] = [a,b]\n seq[a] = [a,b]; seq[b] = [a,b]\n return seq\n\ndef consecutive_seq2(L):\n seq = {} # mapping: x -> sequence [a,b] that contains x\n for x in L:\n if x in seq: continue\n seq[x] = [x,x]\n if x-1 in seq: seq = merge(seq, x, x-1)\n if x+1 in seq: seq = merge(seq, x, x+1)\n print seq\n return max([b-a+1 for a,b in seq.values()])\n\n\nif __name__ == '__main__':\n print 
consecutive_seq2([4,10,8,200,1,3,30,5,12,3,1,2,2,7,70,6,9,9,11,18,16,19])\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1892,"cells":{"__id__":{"kind":"number","value":953482742307,"string":"953,482,742,307"},"blob_id":{"kind":"string","value":"baa4b7ef9b91fa3663e3469155e508704ea5847c"},"directory_id":{"kind":"string","value":"2826031fd655335cf56dd305af324f7e39b44c8c"},"path":{"kind":"string","value":"/scorekeeper/migrations/0003_auto_20141202_2009.py"},"content_id":{"kind":"string","value":"e5cfe61645c809e9de93213c4b0733d4974cd5c1"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"Treggats/fun-and-games"},"repo_url":{"kind":"string","value":"https://github.com/Treggats/fun-and-games"},"snapshot_id":{"kind":"string","value":"98eeb181644a0932695dac783660378541de705d"},"revision_id":{"kind":"string","value":"246a55b302c840a82840721b9a3bb0b606d0088b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T15:05:26.668624","string":"2016-09-05T15:05:26.668624"},"revision_date":{"kind":"timestamp","value":"2014-12-09T13:50:04","string":"2014-12-09T13:50:04"},"committer_date":{"kind":"timestamp","value":"2014-12-09T13:50:04","string":"2014-12-09T13:50:04"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kin
d":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scorekeeper', '0002_auto_20141201_1349'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='player',\n old_name='initial_points',\n new_name='points',\n ),\n migrations.RemoveField(\n model_name='score',\n name='player',\n ),\n migrations.RemoveField(\n model_name='score',\n name='score',\n ),\n migrations.AddField(\n model_name='player',\n name='score',\n field=models.ForeignKey(default=0, to='scorekeeper.Score'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='score',\n name='points',\n field=models.IntegerField(default=0),\n preserve_default=True,\n ),\n ]\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1893,"cells":{"__id__":{"kind":"number","value":13984413529163,"string":"13,984,413,529,163"},"blob_id":{"kind":"string","value":"eee6e4f7a0a578e914eed520e944fde84a1f3929"},"directory_id":{"kind":"string","value":"b833b76c218505e8d4d3850ad31b948bdce31a7f"},"path":{"kind":"string","value":"/coltrane/views.py"},"content_id":{"kind":"string","value":"e6ba42ee0488535c0500a5b0b1c05d55685a753c"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"yunan/django"},"repo_url":{"kind":"string","value":"https://github.com/yunan/django"},"snapshot_id":{"kind":"string","value":"0d917bde826e2398fe6e57a0ec343238087eac88"},"revision_id":{"kind":"string","value":"d8163e5bfd49e54a04118bf5ed674688dcdead60"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T15:49:10.874561","string":"2020-12-24T15:49:10.874561"},"revision_date":{"kind":"timestamp","value":"2014-03-16T08:52:45","string":"2014-03-16T08:52:45"},"committer_date":{"kind":"timestamp","value":"2014-03-16T08:52:45","string":"2014-03-16T08:52:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.sortcuts import render_to_response, get_object_or_404\nfrom coltrane.models import Entry\n\ndef entries_index(request):\n return render_to_response('coltrane/entries_index.html',\n {'entry_list': Entry.objects.all()})\n\ndef entry_detail(request, year, month, day, slug):\n\timport datetime, time\n\tdate_stamp = time.strptime(year+month+day, \"%Y%b%d\")\n\tpub_date = datetime.date(*date_stamp[:3])\n\tentry = get_object_or_404(Entry, pub_date__year=pub_date.year,\n\t\t\t\t\t\t\t\t\t pub_date__month=pub_date.month,\n\t\t\t\t\t\t\t\t\t pub_date__day=pub_date.day,\n\t\t\t\t\t\t\t\t\t slug=slug)\n\treturn 
render_to_response('coltrane/entry_detail.html',\n\t\t\t\t\t\t\t\t{ 'entry': entry })\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1894,"cells":{"__id__":{"kind":"number","value":8392366123752,"string":"8,392,366,123,752"},"blob_id":{"kind":"string","value":"dc65e90dbf502a22ae411bd02ae86c88717a083b"},"directory_id":{"kind":"string","value":"4b0e168a339b56d6f101f285419b4b5878d9ae28"},"path":{"kind":"string","value":"/new_django_project/main/models.py"},"content_id":{"kind":"string","value":"a90b148e61fe2730085f81c7f19e99dc61bf4887"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"torchingloom/new_django_project"},"repo_url":{"kind":"string","value":"https://github.com/torchingloom/new_django_project"},"snapshot_id":{"kind":"string","value":"8f372c5e1d5180b7af0013d313d11882f6cc672b"},"revision_id":{"kind":"string","value":"60094f56d00f6e8f417d447f6683f1108155d46c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T18:34:17.711703","string":"2021-01-01T18:34:17.711703"},"revision_date":{"kind":"timestamp","value":"2014-06-18T12:20:24","string":"2014-06-18T12:20:24"},"committer_date":{"kind":"timestamp","value":"2014-06-18T12:20:24","string":"2014-06-18T12:20:24"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kin
d":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# coding: utf-8\nfrom django.conf import settings\nfrom django.db import models\nfrom mptt.fields import TreeForeignKey\nfrom mptt.managers import TreeManager\nfrom mptt.models import MPTTModel\n\nfrom new_django_project._lib.models import SoftDeletionModel\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1895,"cells":{"__id__":{"kind":"number","value":17025250401765,"string":"17,025,250,401,765"},"blob_id":{"kind":"string","value":"45e1844a69b84c5be7cca405a32a4d25133c3642"},"directory_id":{"kind":"string","value":"6c335e403bad1ac6baf12e623687d1bbdc70af3a"},"path":{"kind":"string","value":"/sources/canolibs/lib/canolibs/unittest/cfile-Myunittest.py"},"content_id":{"kind":"string","value":"f38fe2d1d1b9f32af98ef35a8c8cd2fc79cba7b6"},"detected_licenses":{"kind":"list like","value":["AGPL-3.0-or-later"],"string":"[\n 
\"AGPL-3.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"tito/canopsis"},"repo_url":{"kind":"string","value":"https://github.com/tito/canopsis"},"snapshot_id":{"kind":"string","value":"98140a2210e48f4b9f7534fb57dfedfb34efb163"},"revision_id":{"kind":"string","value":"ef14bae140cae5226b3c062f82572e6907cde1a4"},"branch_name":{"kind":"string","value":"refs/heads/develop"},"visit_date":{"kind":"timestamp","value":"2020-12-25T06:32:40.492546","string":"2020-12-25T06:32:40.492546"},"revision_date":{"kind":"timestamp","value":"2014-10-10T12:43:11","string":"2014-10-10T12:43:11"},"committer_date":{"kind":"timestamp","value":"2014-10-10T12:43:11","string":"2014-10-10T12:43:11"},"github_id":{"kind":"number","value":32029140,"string":"32,029,140"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":true,"string":"true"},"gha_event_created_at":{"kind":"timestamp","value":"2015-03-11T16:42:49","string":"2015-03-11T16:42:49"},"gha_created_at":{"kind":"timestamp","value":"2015-03-11T16:42:48","string":"2015-03-11T16:42:48"},"gha_updated_at":{"kind":"timestamp","value":"2015-03-03T17:09:17","string":"2015-03-03T17:09:17"},"gha_pushed_at":{"kind":"timestamp","value":"2015-03-11T15:49:00","string":"2015-03-11T15:49:00"},"gha_size":{"kind":"number","value":47386,"string":"47,386"},"gha_stargazers_count":{"kind":"number","value":0,"string":"0"},"gha_forks_count":{"kind":"number","value":0,"string":"0"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#--------------------------------\n# Copyright (c) 2011 \"Capensis\" [http://www.capensis.com]\n#\n# This file is part of Canopsis.\n#\n# Canopsis is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Canopsis is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with Canopsis. If not, see .\n# ---------------------------------\n\nimport unittest\n\nimport logging\nlogging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s')\n\nfrom cfile import cfile\nfrom cfile import get_cfile\n\nfrom caccount import caccount\nfrom cstorage import get_storage\n\nfrom gridfs.errors import NoFile\n\nanonymous_account = caccount()\nroot_account = caccount(user=\"root\", group=\"root\")\n\nstorage = get_storage(account=root_account , namespace='unittest', logging_level=logging.DEBUG)\n\nsample_file_path = '/opt/canopsis/var/www/canopsis/themes/canopsis/resources/images/logo_small.png'\nsample_binary = open(sample_file_path, 'rb').read()\n\nsample_binary2 = bin(1234567890123456789)\n\nmyfile = None\n\nclass KnownValues(unittest.TestCase):\n\n\tdef test_01_Init(self):\n\t\tglobal myfile\n\t\tmyfile = cfile(storage=storage)\n\t\t\n\t\tif myfile.data != {}:\n\t\t\traise Exception('Data corruption ...')\n\t\t\t\n\tdef test_02_put_data(self):\n\t\tmyfile.put_data(sample_binary2)\n\n\t\tif myfile.binary != sample_binary2:\n\t\t\traise Exception('Data corruption ...')\n\n\tdef test_03_save_data(self):\n\t\tglobal meta_id, bin_id\n\t\tmeta_id = myfile.save()\n\t\tbin_id = myfile.get_binary_id()\n\t\t\n\t\tprint \"Meta Id: %s, Binary Id: %s\" % (meta_id, bin_id)\n\t\t\n\t\tif not bin_id or not meta_id:\n\t\t\traise Exception('Impossible to save cfile')\n\t\t\t\n\tdef 
test_04_put_file(self):\n\t\tmyfile.put_file(sample_file_path)\n\n\t\tif myfile.binary != sample_binary:\n\t\t\traise Exception('Data corruption ...')\n\n\tdef test_05_save_file(self):\n\t\tglobal meta_id, bin_id\n\t\tmeta_id = myfile.save()\n\t\t\n\t\tbin_id = myfile.get_binary_id()\n\t\tif not bin_id or not meta_id:\n\t\t\traise Exception('Impossible to save cfile')\n\t\t\n\tdef test_06_Rights(self):\n\t\n\t\twith self.assertRaises(ValueError):\n\t\t\tstorage.put(myfile, account=anonymous_account)\n\n\t\twith self.assertRaises(ValueError):\n\t\t\tstorage.remove(myfile, account=anonymous_account)\t\t\n\n\tdef test_07_GetMeta(self):\n\t\tmeta = storage.get(meta_id)\n\t\tif not meta:\n\t\t\traise Exception('Impossible to get meta data')\n\t\t\t\n\t\tprint \"Meta: %s\" % meta\n\t\t\t\n\tdef test_08_GetBinary(self):\n\t\tbinary = storage.get_binary(bin_id)\n\t\tif not binary:\n\t\t\traise Exception('Impossible to get binary data')\n\t\t\t\n\t\tif binary != sample_binary:\n\t\t\traise Exception('Data corruption ...')\n\n\tdef test_09_RemoveFile(self):\n\t\tmyfile.remove()\n\n\tdef test_10_CheckFileRemove(self):\n\t\twith self.assertRaises(NoFile):\n\t\t\tbinary = storage.get_binary(bin_id)\n\t\t\t\n\t\twith self.assertRaises(KeyError):\n\t\t\tget_cfile(meta_id, storage)\n\t\t\n\t\tif myfile.check():\n\t\t\traise Exception('cfile is not deleted ...')\n\nif __name__ == 
\"__main__\":\n\tunittest.main(verbosity=2)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1896,"cells":{"__id__":{"kind":"number","value":5669356874273,"string":"5,669,356,874,273"},"blob_id":{"kind":"string","value":"83905f44d4730f3628f621099bbe592bd2d3348e"},"directory_id":{"kind":"string","value":"eea3fa4e235b9b76bf73aa08370d50714d61604b"},"path":{"kind":"string","value":"/scripts/dump-sizes.py"},"content_id":{"kind":"string","value":"c3907ab3a6cb71ea4fd5c1f7a9c2038e4d416a8e"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"conceptslearningmachine-FEIN-85-1759293/jydoop"},"repo_url":{"kind":"string","value":"https://github.com/conceptslearningmachine-FEIN-85-1759293/jydoop"},"snapshot_id":{"kind":"string","value":"d2ea1272f6f83e9c38c48b6317e3aad60eea88f3"},"revision_id":{"kind":"string","value":"a1ce82f3c6f3d335ba2b0cbc310dac52624a6e0b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-27T11:40:50.313084","string":"2021-05-27T11:40:50.313084"},"revision_date":{"kind":"timestamp","value":"2014-09-02T16:11:06","string":"2014-09-02T16:11:06"},"committer_date":{"kind":"timestamp","value":"2014-09-02T16:11:06","string":"2014-09-02T16:11:06"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"
kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import crashstatsutils\nimport jydoop\nimport json\nfrom org.python.core.util import StringUtil\n\nsetupjob = crashstatsutils.dosetupjob([])\ndef map(k, context):\n result = context.cx.getCurrentValue()\n meta_data = StringUtil.fromBytes(result.getValue(\"meta_data\", \"json\"))\n meta = json.loads(meta_data)\n product = meta['ProductName']\n version = meta['Version']\n ispluginhang = meta.get('PluginHang', None) == \"1\"\n err = 0\n\n kv = result.getColumnLatest(\"raw_data\", \"dump\")\n if kv is None:\n err += 1\n dumplen = 0\n else:\n dumplen = kv.getValueLength()\n\n if \"additional_minidumps\" in meta:\n extradumps = meta[\"additional_minidumps\"].split(\",\")\n for extradump in extradumps:\n extrakv = result.getColumnLatest(\"raw_data\", \"upload_file_minidump_\" + extradump)\n if extrakv is None:\n err += 1\n else:\n extralen = extrakv.getValueLength()\n dumplen += extralen\n\n context.write(k, (product, version, ispluginhang, dumplen, err))\n\noutput = jydoop.outputWithKey\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1897,"cells":{"__id__":{"kind":"number","value":17626545809632,"string":"17,626,545,809,632"},"blob_id":{"kind":"string","value":"571da77464f101469faae81c565ad6a77c9ba0d3"},"directory_id":{"kind":"string","value":"12b5f5ca59e6258698b68c4c5874163cb7e6f55e"},"path":{"kind":"string","value":"/hyperspyui/signalwrapper.py"},"content_id":{"kind":"string","value":"17faed2f06f6ed5263a059bddf7718118f78b4c6"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pburdet/hyperspyUI"},"repo_url":{"kind":"string","value":"https://github.com/pburdet/hyperspyUI"},"snapshot_id":{"kind":"string","value":"03ff40aae21ead8d71bf8f7fa6f0bb3925c9543b"},"revision_id":{"kind":"string","value":"220458832abcf2e079ef3358f43885a1735565e6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-21T16:54:12.334674","string":"2021-01-21T16:54:12.334674"},"revision_date":{"kind":"timestamp","value":"2014-12-30T20:52:55","string":"2014-12-30T20:52:55"},"committer_date":{"kind":"timestamp","value":"2014-12-30T20:52:55","string":"2014-12-30T20:52:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 24 18:27:15 2014\n\n@author: Vidar Tonaas Fauske\n\"\"\"\n\nfrom util import fig2win\nfrom python_qt_binding import QtCore, QtGui\n\nfrom modelwrapper import ModelWrapper\nfrom actionable import Actionable\nimport hyperspy.hspy\n\nclass SignalWrapper(Actionable):\n closing = QtCore.Signal()\n model_added = QtCore.Signal(object)\n model_removed = QtCore.Signal(object)\n \n def __init__(self, signal, mainwindow, name):\n super(SignalWrapper, self).__init__()\n self.signal = signal\n if name is None:\n name = signal.metadata.General.title\n self.name = name\n self.figures = 
[]\n self.mainwindow = mainwindow\n self.models = []\n \n self._keep_on_close = 0\n \n self.navigator_plot = None\n self.signal_plot = None\n \n self._nav_geom = None\n self._sig_geom = None\n self._replotargs = ((), {})\n \n self._model_id = 1\n \n self.add_action('plot', \"&Plot\", self.replot)\n self.add_action('add_model', \"Add &model\", self.make_model)\n self.add_separator()\n self.add_action('close', \"&Close\", self.close)\n\n @property\n def keep_on_close(self):\n return self._keep_on_close > 0\n \n @keep_on_close.setter\n def keep_on_close(self, value):\n if value:\n self._keep_on_close += 1\n else:\n if self._keep_on_close > 0:\n self._keep_on_close -= 1\n\n def plot(self, *args, **kwargs):\n self.keep_on_close = True\n self.signal.plot(*args, **kwargs)\n self.keep_on_close = False\n self.update_figures()\n self._replotargs = (args, kwargs)\n self.mainwindow.main_frame.subWindowActivated.emit(\n self.mainwindow.main_frame.activeSubWindow())\n \n def replot(self):\n self.plot(*self._replotargs[0], **self._replotargs[1])\n \n def update(self):\n if self.navigator_plot is not None:\n self.navigator_plot.update()\n if self.signal_plot is not None:\n self.signal_plot.update()\n \n def update_figures(self): \n old_nav = self.navigator_plot\n old_sig = self.signal_plot\n self.remove_figure(old_nav)\n self.remove_figure(old_sig)\n self.navigator_plot = None\n self.signal_plot = None\n \n atleast_one_changed = False\n \n # If we have a navigator plot\n if self.signal._plot and self.signal._plot.navigator_plot:\n # Set internal `navigator_plot` to window containing it\n navi = self.signal._plot.navigator_plot.figure\n self.navigator_plot = fig2win(navi, self.mainwindow.figures)\n # Did the window change?\n if old_nav is not self.navigator_plot:\n # Process the plot\n navi.axes[0].set_title(\"\") # remove title\n # Wire closing event\n self.navigator_plot.closing.connect(self.nav_closing)\n # Set a reference on window to self\n 
self.navigator_plot.setProperty('hyperspyUI.SignalWrapper',\n self)\n # Add to figures list\n self.add_figure(self.navigator_plot)\n \n # Did we have a previous window?\n if old_nav is not None:\n # Save geometry of old, and make sure it is closed\n self._nav_geom = old_nav.saveGeometry()\n old_nav.closing.disconnect(self.nav_closing)\n old_nav.close()\n atleast_one_changed = True\n # If we have stored geometry, and a valid plot, restore\n if self._nav_geom is not None and self.navigator_plot is not None:\n self.navigator_plot.restoreGeometry(self._nav_geom)\n self._nav_geom = None\n \n if self.signal._plot and self.signal._plot.signal_plot is not None:\n sigp = self.signal._plot.signal_plot.figure\n self.signal_plot = fig2win(sigp, self.mainwindow.figures)\n if old_sig is not self.signal_plot:\n sigp.axes[0].set_title(\"\")\n self.signal_plot.closing.connect(self.sig_closing)\n self.signal_plot.setProperty('hyperspyUI.SignalWrapper', self)\n self.add_figure(self.signal_plot)\n if old_sig is not None:\n self._sig_geom = old_sig.saveGeometry()\n old_sig.closing.disconnect(self.sig_closing)\n old_sig.close()\n atleast_one_changed = True\n if self._sig_geom is not None and self.signal_plot is not None:\n self.signal_plot.restoreGeometry(self._sig_geom)\n self._sig_geom = None\n \n if atleast_one_changed:\n self.mainwindow.check_action_selections()\n \n def add_figure(self, fig):\n self.figures.append(fig)\n \n def remove_figure(self, fig):\n if fig in self.figures:\n self.figures.remove(fig)\n \n def as_image(self, axis=(0,1)):\n self.close() # Store geomtery and close\n tmp = self._sig_geom\n self._sig_geom = self._nav_geom\n self._nav_geom = tmp\n self.signal = self.signal.as_image(axis)\n \n def as_spectrum(self, axis=0):\n self.close() # Store geomtery and close\n tmp = self._sig_geom\n self._sig_geom = self._nav_geom\n self._nav_geom = tmp\n self.signal = self.signal.as_spectrum(axis)\n \n def run_nonblock(self, function, windowtitle):\n self.keep_on_close = 
True\n\n def on_close():\n self.keep_on_close = False\n self.update_figures()\n \n def on_capture(dialog):\n dialog.destroyed.connect(on_close)\n dialog.setParent(self.mainwindow, QtCore.Qt.Tool)\n dialog.show()\n dialog.activateWindow()\n \n # Setup capture\n self.mainwindow.capture_traits_dialog(on_capture)\n \n # Call actual function that triggers dialog\n function()\n \n def make_model(self, *args, **kwargs): \n m = hyperspy.hspy.create_model(self.signal, *args, **kwargs)\n# modelname = self.signal.metadata.General.title\n modelname = \"Model %d\" % self._model_id\n self._model_id += 1\n mw = ModelWrapper(m, self, modelname)\n self.add_model(mw)\n mw.plot()\n return mw\n \n def add_model(self, model):\n self.models.append(model)\n self.model_added.emit(model)\n \n def remove_model(self, model):\n self.models.remove(model)\n self.model_removed.emit(model)\n self.plot()\n \n def nav_closing(self):\n if self.navigator_plot:\n self._nav_geom = self.navigator_plot.saveGeometry()\n self.navigator_plot = None\n if self.signal_plot is None:\n self._closed()\n \n def sig_closing(self):\n if self.signal_plot:\n p = self.signal_plot.pos()\n # For some reason the position changes -8,-30 on closing, at least\n # it does on windows 7, Qt4.\n self.signal_plot.move(p.x()+8, p.y()+30)\n self._sig_geom = self.signal_plot.saveGeometry()\n if self.navigator_plot is not None:\n self.navigator_plot.close()\n self.navigator_plot = None\n self.signal_plot = None\n self._closed()\n \n def close(self):\n if self.signal_plot is not None:\n self.signal_plot.close()\n self.signal_plot = None\n \n if self.navigator_plot is not None:\n self.navigator_plot.close()\n self.navigator_plot = None\n self._closed()\n \n def _closed(self):\n if not self.keep_on_close:\n self.closing.emit()\n # TODO: Should probably be with by events for concistency\n if self in self.mainwindow.signals and not self.keep_on_close:\n 
self.mainwindow.signals.remove(self)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1898,"cells":{"__id__":{"kind":"number","value":996432459599,"string":"996,432,459,599"},"blob_id":{"kind":"string","value":"0dda2feadf37c4eeeccf3fa1eda339961f3fbe92"},"directory_id":{"kind":"string","value":"f7330e26658ece327d2c44b8791d1104008fc271"},"path":{"kind":"string","value":"/puntersparadise/app.py"},"content_id":{"kind":"string","value":"9fb9a86ff0edb58f9653b3d364edc03d96e11a36"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"diaakasem/scrap"},"repo_url":{"kind":"string","value":"https://github.com/diaakasem/scrap"},"snapshot_id":{"kind":"string","value":"7e3acd1970a9335b7a39e24668f3c75a3a9c0c88"},"revision_id":{"kind":"string","value":"4fb84eb2cea4da9e053a327a13d2182acdebae2a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T13:29:39.195885","string":"2021-01-23T13:29:39.195885"},"revision_date":{"kind":"timestamp","value":"2013-10-08T18:30:02","string":"2013-10-08T18:30:02"},"committer_date":{"kind":"timestamp","value":"2013-10-08T18:30:02","string":"2013-10-08T18:30:02"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{
"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"###############################################################################\n# Description: Collects data from punter paradise.com.au\n# Author : Diaa Kasem\n# Email : me@diaa.me\n# oDesk : https://www.odesk.com/users/~019c6c183545f26f3f\n# Usage : Alter the config vlues below\n# Save The script with a \n# execute 'python ' Where name.py is the script name\n###############################################################################\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport csv\nimport time, datetime\nimport re\nimport json\nimport math\nfrom dateutil.parser import *\n\n\nconfig = {\n \"weight-kg\" : 0,\n \"horse-age\" : 0,\n \"career-win-rate\" : 0,\n \"career-place-rate\" : 100,\n \"career-prize-money\" : 0,\n \"average-prize-money\" : 0,\n \"jockey-wins\" : 0,\n \"track-wins\" : 0,\n \"good-tracks\" : 0,\n \"heavy-tracks\" : 0,\n \"synthetic-tracks\" : 0,\n \"jumps-tracks\" : 0,\n \"barrier\" : 0,\n \"distance-wins\" : 0,\n \"track-distance-wins\" : 0,\n \"fast-tracks\" : 0,\n \"dead-tracks\" : 0,\n \"slow-tracks\" : 0\n }\n\n\nBASE = \"http://www.puntersparadise.com.au\"\nmeetings_url = \"%s/form-guid/\" % BASE\nmeetings_url = 'http://www.puntersparadise.com.au/form-guide'\n\nurl = BASE + \"/%s\"\ndata = []\noutputFile = 'csvoutput.csv'\nfields = ['Rcdate', 'Track', 'Rcno', 'Rctime', 'Tab', 'Horse', 'Rat']\npagesCount = 998\n\ndef ordinal(n):\n if 10 <= n % 100 < 20:\n return str(n) + 'th'\n else:\n return str(n) + {1 : 'st', 2 : 'nd', 3 : 'rd'}.get(n % 10, \"th\")\n\ndef isToday(ts):\n \"\"\"\n Check if the Date of race equals today\n \"\"\"\n n = datetime.datetime.now()\n d = datetime.datetime.fromtimestamp(ts)\n # print \"Race Date : %s \" % d\n # print \"Your current time: %s \" % n\n return d.day == n.day\n\ndef getMeetings():\n \"\"\"\n Collect all race meetings going to happen\n \"\"\"\n page = 
urllib2.urlopen(meetings_url).read()\n soup = BeautifulSoup(page)\n scripts = soup.findAll('script')\n script = [ script for script in scripts if 'meeting_events' in str(script) ]\n jsCode = re.sub('<[^<]+?>', '', str(script[0]))\n array = re.findall('meeting_events\\s*=\\s*(.*)\\s*;', jsCode)\n return json.loads(array[0])\n\ndef extractPage(url):\n \"\"\"\n Extract Meetings data from page\n \"\"\"\n page = urllib2.urlopen(url).read()\n soup = BeautifulSoup(page)\n fh = soup.find('div' , {'class': 'formHeader'})\n h2 = fh.find('h2').text\n result = []\n racedata = h2.split('-')\n racedate = racedata[0].strip()\n title = re.findall('(.+)\\s*Race.*(\\d+)', racedata[1])[0]\n track = title[0]\n track = track.strip()\n racenumber = title[1]\n racenumber = re.findall('\\d+', racedata[1])[0]\n ts = float(soup.find('abbr', {'class': 'time12'})['data-utime'])\n otime = datetime.datetime.fromtimestamp(ts)\n racetime = otime.strftime('%I:%M%p')\n\n if not isToday(ts):\n print \"Skipping - Not today : %s \" % racedate\n return\n\n horses = []\n runners = soup.findAll('div', {'class': 'csRunner'})\n def num(string):\n s = str(float(string))\n if len(s) > 4:\n return float(s[0:4])\n return float(s)\n\n # A dictionary that when script executes, will hold max value\n # Important for calculations - DO NOT ALTER\n maxResults = {\n \"weight-kg\" : 0,\n \"horse-age\" : 0,\n \"career-win-rate\" : 0,\n \"career-place-rate\" : 0,\n \"career-prize-money\" : 0,\n \"average-prize-money\" : 0,\n \"jockey-wins\" : 0,\n \"track-wins\" : 0,\n \"good-tracks\" : 0,\n \"heavy-tracks\" : 0,\n \"synthetic-tracks\" : 0,\n \"jumps-tracks\" : 0,\n \"barrier\" : 0,\n \"distance-wins\" : 0,\n \"track-distance-wins\" : 0,\n \"fast-tracks\" : 0,\n \"dead-tracks\" : 0,\n \"slow-tracks\" : 0\n }\n\n # Calculating horse rate\n for runner in runners:\n obj = {\n \"runner-name\" : runner.get('data-runner-name', ''),\n \"runner-title\" : runner.get('data-runner-title', ''),\n\n \"weight-kg\" : 
num(runner.get('data-weight-kg', '0')),\n \"horse-age\" : num(runner.get('data-horse-age', '0')),\n \"career-win-rate\" : num(runner.get('data-career-win-rate', '0')),\n \"career-place-rate\" : num(runner.get('data-career-place-rate', '0')),\n \"career-prize-money\" : num(runner.get('data-career-prize-money', '0')),\n \"average-prize-money\" : num(runner.get('data-average-prize-money', '0')),\n \"jockey-wins\" : num(runner.get('data-jockey-wins', '0')),\n \"track-wins\" : num(runner.get('data-track-wins', '0')),\n \"good-tracks\" : num(runner.get('data-good-tracks', '0')),\n \"heavy-tracks\" : num(runner.get('data-heavy-tracks', '0')),\n \"synthetic-tracks\" : num(runner.get('data-synthetic-tracks', '0')),\n \"jumps-tracks\" : num(runner.get('data-jumps-tracks', '0')),\n \"barrier\" : num(runner.get('data-barrier', '0')),\n \"distance-wins\" : num(runner.get('data-distance-wins', '0')),\n \"track-distance-wins\" : num(runner.get('data-track-distance-wins', '0')),\n \"fast-tracks\" : num(runner.get('data-fast-tracks', '0')),\n \"dead-tracks\" : num(runner.get('data-dead-tracks', '0')),\n \"slow-tracks\" : num(runner.get('data-slow-tracks', '0'))\n }\n maxResults = {\n \"weight-kg\" : max(obj[\"weight-kg\"], maxResults[\"weight-kg\"]),\n \"horse-age\" : max(obj[\"horse-age\"], maxResults[\"horse-age\"]),\n \"career-win-rate\" : max(obj[\"career-win-rate\"], maxResults[\"career-win-rate\"]),\n \"career-place-rate\" : max(obj[\"career-place-rate\"], maxResults[\"career-place-rate\"]),\n \"career-prize-money\" : max(obj[\"career-prize-money\"], maxResults[\"career-prize-money\"]),\n \"average-prize-money\" : max(obj[\"average-prize-money\"], maxResults[\"average-prize-money\"]),\n \"jockey-wins\" : max(obj[\"jockey-wins\"], maxResults[\"jockey-wins\"]),\n \"track-wins\" : max(obj[\"track-wins\"], maxResults[\"track-wins\"]),\n \"good-tracks\" : max(obj[\"good-tracks\"], maxResults[\"good-tracks\"]),\n \"heavy-tracks\" : max(obj[\"heavy-tracks\"], 
maxResults[\"heavy-tracks\"]),\n \"synthetic-tracks\" : max(obj[\"synthetic-tracks\"], maxResults[\"synthetic-tracks\"]),\n \"jumps-tracks\" : max(obj[\"jumps-tracks\"], maxResults[\"jumps-tracks\"]),\n \"barrier\" : max(obj[\"barrier\"], maxResults[\"barrier\"]),\n \"distance-wins\" : max(obj[\"distance-wins\"], maxResults[\"distance-wins\"]),\n \"track-distance-wins\" : max(obj[\"track-distance-wins\"], maxResults[\"track-distance-wins\"]),\n \"fast-tracks\" : max(obj[\"fast-tracks\"], maxResults[\"fast-tracks\"]),\n \"dead-tracks\" : max(obj[\"dead-tracks\"], maxResults[\"dead-tracks\"]),\n \"slow-tracks\" : max(obj[\"slow-tracks\"], maxResults[\"slow-tracks\"])\n }\n horses.append(obj)\n\n horsesRates = {}\n for horse in horses:\n rate = 0\n for k in config.iterkeys():\n rate += ( horse.get(k, 0) / (maxResults.get(k, 1) or 1) ) * config.get(k, 0) / 100\n\n # SAVING HORSE RATES\n horsesRates[horse.get('runner-name', '').strip()] = int(round(rate * 100))\n\n data = soup.find('table', {'class': 'formRaceCard'})\n data = data.findAll('tr')[1:]\n\n for tr in data:\n number = tr.find('td', {'class': 'horseNumber'})\n number = number.find('a').text\n\n name = tr.find('td', {'class': 'horseDetails'})\n name = name.find('a', {'class': 'hoverTrigger'}).text\n name = name.strip()\n\n # rate = tr.find('td', {'class': 'winPercent'}).text\n # rate = re.findall('\\d+', rate)[0]\n rate = horsesRates.get(name, 0)\n\n obj = {\n 'Rcdate': racedate,\n 'Track': track,\n 'Rctime': racetime,\n 'Rcno': racenumber,\n 'Tab': number,\n 'Horse': name,\n 'Rat': rate\n }\n\n # Saving Extracted Data\n result.append(obj)\n\n # Sort Records in race by horse Rate\n result.sort(key=lambda row: row.get('Rat', 0), reverse=True)\n # print json.dumps(result, sort_keys = False, indent = 4)\n return result\n\ndef writeData(outputFile, data):\n \"\"\"\n Save Data to Desk\n @param outputFile The path of the file to write to\n @param data The dictionary of value to save\n \"\"\"\n with open(outputFile, 
'wb') as csvfile:\n output = csv.DictWriter(csvfile, delimiter=',', fieldnames=fields)\n output.writeheader()\n output.writerows(data)\n\ndef main():\n \"\"\"\n Program Starting Point\n \"\"\"\n result = []\n meetings = getMeetings()\n for meeting in meetings:\n u = url % meeting['href']\n print \"Working on : %s\" % meeting['href']\n data = extractPage(u)\n if data:\n result = result + data\n\n print \"Writing to disk.\"\n writeData(outputFile, result)\n time.sleep(1)\n print \"Done.\"\n\nif __name__ == '__main__':\n main()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1899,"cells":{"__id__":{"kind":"number","value":807453869173,"string":"807,453,869,173"},"blob_id":{"kind":"string","value":"4c3ddab6e9fb4cc822b1770c0964709f95a06464"},"directory_id":{"kind":"string","value":"efcfdf661a12da240780a2e505394bdf17af0b9c"},"path":{"kind":"string","value":"/core/geometry.py"},"content_id":{"kind":"string","value":"e3af36465cdcba0b3ea5ed323e4e9fe715a1e00e"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"zarch/openriver"},"repo_url":{"kind":"string","value":"https://github.com/zarch/openriver"},"snapshot_id":{"kind":"string","value":"730a23c67cb4d1ab55f18c743d3e4e89437ddff4"},"revision_id":{"kind":"string","value":"9754f74552dbb9978a209420016196f160aad32c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T23:45:30.924509","string":"2021-01-20T23:45:30.924509"},"revision_date":{"kind":"timestamp","value":"2010-05-17T08:08:57","string":"2010-05-17T08:08:57"},"committer_date":{"kind":"timestamp","value":"2010-05-17T08:08:57","string":"2010-05-17T08:08:57"},"github_id":{"kind":"number","value":563674,"string":"563,674"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport re\nfrom os.path import join as joinpath\n\n# section example\nsec_rectangular=[(0, 0), (0, -2), (10, -2), (10, 0)]\nsec_rectangular2=sec_rectangular[1:]\nsec_rectangular2=sec_rectangular.insert(-1, sec_rectangular[-1])\nprint sec_rectangular\nprint sec_rectangular2\nrect=np.array(sec_rectangular)\nrect2=np.array(sec_rectangular2)\nrect_y=np.array([i[0] for i in sec_rectangular])\nrect_z=np.array([i[1] for i in sec_rectangular])\nprint rect, rect2\n\nimport csv\n\nclass Section:\n \"\"\" print m.group('sez_name')\n It defines attributes and 
methods for a river cross-section.\n It's possible to define sub-segments of the section,\n each one with a different roughness.\n Example of usage:\n\n coord = [[0,10],[0,0],[10,0],[20,0],[20,10]]\n sect = Section(0, coord)\n sect.addSegment(sect.yzcoord[0:2], 35)\n sect.addSegment(sect.yzcoord[2:], 40)\n \"\"\"\n def __init__(self, name=None, data=None,\n first=0, last=-1, erodible=True,\n roughness=None, discontinuity=False,\n subsection=False, watersurface=None,\n variotype = None, d90=None, variolenght=None, varioexcav=None):\n self.name = name\n self.data = np.array(data)\n self.xcoord = self.data.T[0]\n self.yzcoord = self.data.T[1:3]\n self.first = first\n self.last = last\n minimum = self.yzcoord[1].argmin()\n self.min = self.yzcoord[minimum][1]\n\n self.erodible = erodible\n self.roughness = roughness\n self.discontinuity = discontinuity\n self.subsection = subsection\n self.segment = []\n\n # Add this attributs to support vario format\n self.d90 =d90\n self.variotype = variotype\n self.varioLenght = variolenght\n self.varioexcav = varioexcav\n\n self.watersurf = watersurface\n\n\n def __str__(self):\n return str(self.name)\n\n def addSegment(self, yzcoordSegm=None,\n roughness=None):\n self.segment.append(Section(yzcoord=yzcoordSegm, roughness=roughness, subsection=True))\n\n def firstPointAfter_h(self, points, h):\n \"\"\"Return index of the first\n\n >>> points=np.array([ 742.73, 742.75, 742.77, 742.79, 747.27])\n\n >>> section.firstPointAfter_h(points, 745)\n 4\n\n >>> section.firstPointAfter_h(points, 742)\n Traceback (innermost last):\n ...\n ValueError: h outside section\n h < min\n\n >>> section.firstPointAfter_h(points, 748)\n Traceback (innermost last):\n ...\n ValueError: h outside section\n h > max\n \"\"\"\n if h > points.max():\n raise ValueError(\"h outside section\\n h > max\")\n elif h < points.min():\n raise ValueError(\"h outside section\\n h < min\")\n else:\n #print 'points:', points, h\n for i, p in enumerate(points):\n #print i, p\n 
if p > h:\n return i\n\n def intersection(self, pn1, pn2, h):\n \"\"\"Returnurn intersection between 2 points and height\n\n >>> section.intersection((0,5),(0,0),3)\n (0, 3)\n\n (h-y0)/(y1-y0) = (x-x0)/(x1-x0)\n x=(x1-x0)/(y1-y0)*(h-y0)+x0\n return x,h\"\"\"\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h\n\n def getSect(self, h):\n \"\"\"Return section only from left intersection to right intersection\n\n >>> section.getSect(745)\n array([[ 4.71227679e-01, 7.45000000e+02],\n [ 9.30000000e-01, 7.42790000e+02],\n [ 7.19000000e+00, 7.42770000e+02],\n [ 1.25900000e+01, 7.42750000e+02],\n [ 1.80800000e+01, 7.42730000e+02],\n [ 1.89100000e+01, 7.42730000e+02],\n [ 1.94887253e+01, 7.45000000e+02]])\n\n \"\"\"\n lefttomin=self.yzcoord[1][:self.min+1]\n # left point\n lpnt = self.firstPointAfter_h(lefttomin[::-1], h)\n # find index of left point\n l_pnt = self.min - lpnt\n # find left intersection\n l_intersect = self.intersection(self.yzcoord.T[l_pnt], self.yzcoord.T[l_pnt+1], h)\n # right point\n rpnt = self.firstPointAfter_h(self.yzcoord[1][self.min:], h)\n # find index of right point\n r_pnt = self.min + rpnt\n # find right intersection\n r_intersect = self.intersection(self.yzcoord.T[r_pnt], self.yzcoord.T[r_pnt-1], h)\n # make new section geometries\n sez = self.yzcoord.T[l_pnt+1:r_pnt]\n # Add left intersection on the top\n sez=np.insert(sez, [0,], l_intersect,axis=0)\n # Add rightht intersection on the bottom\n sez=np.append(sez,[r_intersect],axis=0)\n return sez\n\n def area(self, sez):\n \"\"\"Return area given a section take from getSect\n\n >>> section.area(yzcoordT)\n 41.448496630204318\n \"\"\"\n # find area below water line\n area_h2o = (sez[-1][0]-sez[0][0])*sez[0][1]\n # find area bellow section\n area_sez = np.trapz(sez.T[1],x=sez.T[0])\n return area_h2o - area_sez\n\n def wetBorder(self, sez):\n \"\"\"Calculate web border from a 
given section\n\n >>> section.wetBorder(yzcoordT)\n 23.557497620999964\n\n \"\"\"\n # calculate with pitagora: sqrt(dx²+dy²)\n sez1=np.delete(sez, 0, axis=0)\n sez2=np.delete(sez, -1, axis=0)\n delta=sez1-sez2\n return np.sum(np.sqrt(delta * delta))\n\n def rh(self, h):\n \"\"\"Return thee idraulic radius given height\n\n >>> section.rh(745)\n 1.7594609288533762\n \"\"\"\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder\n\nclass Reach:\n \"\"\"\n It defines the geometric properties of a river reach.\n It is composed by sections and sections can be subdivided in\n segments.\n \"\"\"\n def __init__(self, sections = []):\n self.sections = sections\n self.workingpath = None\n\n def __str__(self):\n slist = []\n for s in self.sections:\n separetor = '='*50\n sectname = s.name + ': ' + str(len(s.yzcoord))\n data = str(s.data)\n slist.append(\"\\n\".join([separetor, sectname, data]))\n return \"\\n\".join(slist)\n\n def recursiveReadVario(self, datalist):\n \"\"\"This function append section to reach.sections\"\"\"\n index = 0\n # read first line section\n xcoord,e,d = datalist[index]\n xcoord = float(xcoord)\n erodible = True if e == 't' else False\n discontinuity= False if d == 'f' else True\n # go to the second line\n index+=1\n # read and trasform str value in integer\n npoints, nsegments = map(int, datalist[index])\n\n #initialize locals variables\n yzcoord = []\n segmens = []\n endpoints=index+npoints+1\n endsegments = endpoints+nsegments\n index+=1\n\n # start a cicle between points\n for e in datalist[index:endpoints]:\n # trasform string in float\n yz=map(float, e[:2])\n #print e[:2]\n # add new coordinates to yzcoordinates list\n yzcoord.append(yz)\n\n # add x column to the data array\n data = np.ones(shape=(len(yzcoord),1))\n data = data * xcoord\n\n # transform list in a numpy array because in this way is easier\n # to assign value for ks\n yzcoord = np.array(yzcoord)\n #print yzcoord\n # add yzcoord to the 
data array\n data = np.append(data,yzcoord,axis=1)\n # add roughnes column default it is 0\n kscolumn = np.zeros(shape=(len(yzcoord),1))\n data = np.append(data,kscolumn,axis=1)\n\n # assign KS = 3 to have more readable source\n KS = 3\n for e in datalist[endpoints:endsegments]:\n # trasform string in integer and assign start end and ks\n start, end, ks = map(int, e)\n start -= 1\n data.T[KS][start:end] = ks\n\n index = endsegments\n\n # check if discontinuity == True\n if discontinuity:\n [[type], [d90], [l], [excavation]] = datalist[index:index+4]\n index = index + 4\n type, d90, l, excavation= int(type), int(d90), float(l), float(excavation)\n #print \"type: %d, d90: %d, l: %f, excav: %f\" % (type, d90, l, excavation)\n\n # make new section and append to the reach list\n self.sections.append(Section(data = data,\n erodible = erodible,\n discontinuity = discontinuity,\n variotype = None if discontinuity == False else type,\n d90 = None if discontinuity == False else d90,\n variolenght = None if discontinuity == False else l,\n varioexcav = None if discontinuity == False else excavation,))\n\n newline = datalist[index]\n # check if new line is the end of file.\n if newline == ['-100', '-100', '-100']:\n print \"Finish to import.\"\n else:\n self.recursiveReadVario(datalist[index:])\n\n def importFileVario(self, filename):\n \"\"\"\n >>> river = Reach()\n >>> river.importFileVario('../test/importexport/variosection.geo')\n Finish to import.\n \"\"\"\n datalist = []\n geometryFile = open(filename, \"r\")\n # make a list of list from the file.\n for row in geometryFile:\n datalist.append(row.split())\n self.recursiveReadVario(datalist)\n\n\n def exportFileVario(self, filename):\n \"\"\"\n Return a vario file of sections\n >>> river = Reach()\n >>> river.importFileVario('../test/importexport/variosection.geo')\n Finish to import.\n >>> river.exportFileVario('../test/importexport/variosectionTEST.geo')\n Finish to export.\n \"\"\"\n sectionVarioFile = open(filename, 
\"w\")\n for s in self.sections:\n # Vario take just one x coordinates so we take the first one\n x = float(s.xcoord[0])\n erod = 't' if s.erodible else 'f'\n disc = 't' if s.discontinuity else 'f'\n npoints = int(len(s.data))\n kslist = s.data.T[3][:-1]\n segmentslist = []\n index = 0\n # initialize segment start and end\n s_start=0\n s_end =1\n while s_end != len(kslist):\n ks = kslist[s_start]\n ksnext = kslist[s_end]\n #print ks, ksnext, s_start, s_end\n if ks == ksnext:\n s_end += 1\n else:\n segmentslist.append('%d %d %d' % (s_start+1, s_end+1, ks))\n s_start = s_end\n s_end += 1\n segmentslist.append('%d %d %d' % (s_start+1, s_end+1, kslist[s_start]))\n\n nsegments = int(len(segmentslist))\n #print s.yzcoord.T\n yzcoordstr = \"\\n\".join([\"%f %f\" % tuple(c) for c in s.yzcoord.T])\n segmentstr = \"\\n\".join(segmentslist)\n\n # Define the string that will be write in the file for each section\n variosection = \"\"\"%f %s %s\n%d %d\n%s\n%s\n\"\"\" % (x, erod, disc,\n npoints, nsegments,\n yzcoordstr,\n segmentstr, )\n # check if there are discontinuity\n if s.discontinuity:\n dis_str = \"%d\\n%d\\n%f\\n%f\\n\" % (s.variotype, s.d90, s.varioLenght, s.varioexcav)\n variosection +=dis_str\n\n # then write section string to the output file\n sectionVarioFile.write(variosection)\n sectionVarioFile.close()\n print \"Finish to export.\"\n\n\n def importFileOri(self, sectionfilename, pointsfilename):\n \"\"\"section.ori\n -------------------------\n 301\n 4 sez0001\n 1 100.00000 4 100.00000\n 4 sez0002\n 1 100.00000 4 100.00000\n 4 sez0003\n 1 100.00000 4 100.00000\n\n points.ori\n -------------------------\n 0.00000 10.00000 100.00000 100.00000\n 0.00000 10.00000 0.00000 100.00000\n 0.00000 50.00000 0.00000 100.00000\n 0.00000 50.00000 100.00000 100.00000\n 5.00000 10.00000 100.00000 100.00000\n 5.00000 10.00000 0.00000 100.00000\n 5.00000 50.00000 0.00000 100.00000\n 5.00000 50.00000 100.00000 100.00000\n\n >>> river = Reach()\n >>> 
river.importFileOri('../test/importexport/sections.ori', '../test/importexport/points.ori')\n >>> len(river.sections)\n 301\n\n \"\"\"\n sectionFile = open(sectionfilename, \"r\")\n pointsFile = open(pointsfilename, \"r\")\n\n # define regexp\n restr = r\"\"\"^\\s*(?P\\d+)\\s+(?P[sez]+\\d+)\\s*\\n^\\s*(?P\\d+)\\s+(?P[0-9.]+)\\s+(?P\\d+)\\s+(?P[0-9.]+)\\s*\\n\"\"\"\n regexp = re.compile(restr, re.MULTILINE)\n\n # find all section informations\n matches = [m.groupdict() for m in regexp.finditer(sectionFile.read())]\n\n # take all data from points.ori\n allcoord = []\n for row in pointsFile:\n allcoord.append([float(x) for x in row.split()])\n\n # make the list of sections\n sectionlist = []\n first = 0\n last = 0\n for m in matches:\n # make a Section obj\n #print 'Numero punti sezione: %s\\nSezione: %s\\nPrimoPunto: %s\\nPrimoPuntoH: %s\\nUltimoPunto: %s\\nUltimoPuntoH: %s\\n' % (m['points_num'], m['sez_name'], m['first_point'],m['first_point_h'], m['last_point'],m['last_point_h'])\n first += int(m['first_point']) - 1\n last += int(m['last_point'])\n sectionlist.append(Section(name=m['sez_name'], data=allcoord[first:last], first=int(m['first_point'])-1, last=int(m['last_point'])))\n first = last\n # asign sections attribute\n self.sections = sectionlist\n return\n\n\n\n def exportFileOri(self, sectionfilename, pointsfilename):\n \"\"\"\n >>> river = Reach()\n >>> river.importFileOri('../test/importexport/sections.ori', '../test/importexport/points.ori')\n >>> river.exportFileOri('../test/importexport/sectionsTEST.ori', '../test/importexport/pointsTEST.ori')\n Start writing: ../test/importexport/sectionsTEST.ori\n Start writing: ../test/importexport/pointsTEST.ori\n Finish\n \"\"\"\n\n sectionFile = open(sectionfilename, \"w\")\n print \"Start writing: %s\" % sectionfilename\n sectionFile.write('%s\\n' % len(self.sections))\n for sect in self.sections:\n #301\n #4 sez0001\n #1 100.00000 4 100.00000\n #print sect.data\n rows = '%s %s\\n%s %s %s %s\\n' % 
(len(sect.data),\n sect.name,\n sect.first +1,\n sect.data[sect.first][2],\n sect.last,\n sect.data[sect.last-1][2])\n #print rows\n sectionFile.write(rows)\n sectionFile.close()\n print \"Start writing: %s\" % pointsfilename\n pointsFile = open(pointsfilename, \"w\")\n for section in self.sections:\n rowlist = []\n for row in section.data:\n rowlist.append(\" \".join(['%9.5f' % x for x in row]))\n pointsFile.write('%s\\n' % \"\\n\".join([' %s' % r for r in rowlist]))\n pointsFile.close()\n print \"Finish\"\n\n\n def addSection(self, section=None):\n self.sections.append(section)\n\n def length(self, sectlist = None, dim = 3):\n \"\"\"\n >>> river = Reach()\n >>> river.importFileOri('../test/test1/sections.ori', '../test/test1/points.ori')\n\n to calculate length just only 1D long x\n >>> river.length(dim = 1)\n 1500.0\n\n to calculate length just only 2D long x and y\n >>> river.length(dim = 2)\n 1585.0\n\n to calculate length just only 3D long x, y and z\n >>> river.length(dim = 3)\n 1607.1999999999994\n\n \"\"\"\n # check input\n if not sectlist:\n sectlist = self.sections\n\n if dim <= 3:\n dim = int(dim)\n else:\n raise ValueError(\"dim must be <= 3\")\n\n l = []\n for sez in sectlist:\n #print 'sez.first:', sez.first\n #print 'sez.last:', sez.last\n #print '-', sez.data[sez.first:sez.last]\n data = sez.data[sez.first:sez.last]\n x = dim -4\n l.append(data[0][0:x])\n array = np.array(l)\n #print array\n a1 = np.delete(array, 0, axis=0)\n a2 = np.delete(array, -1, axis=0)\n #print a1, a2\n delta = a2 - a1\n #print delta\n return np.sum(np.sqrt(delta * delta))\n\n def readSimulation(self):\n pass\n\n\nif __name__ == \"__main__\":\n import doctest\n yzcoordT=np.array([[ 4.71227679e-01, 7.45000000e+02],\\\n [ 9.30000000e-01, 7.42790000e+02],\\\n [ 7.19000000e+00, 7.42770000e+02],\\\n [ 1.25900000e+01, 7.42750000e+02],\\\n [ 1.80800000e+01, 7.42730000e+02],\\\n [ 1.89100000e+01, 7.42730000e+02],\\\n [ 1.94887253e+01, 7.45000000e+02]])\n sezdata=np.array([[ 0. , 0. 
, 747.27, 50. ],\n [ 0. , 0.93, 742.79, 50. ],\n [ 0. , 7.19, 742.77, 50. ],\n [ 0. , 12.59, 742.75, 50. ],\n [ 0. , 18.08, 742.73, 50. ],\n [ 0. , 18.91, 742.73, 50. ],\n [ 0. , 20.07, 747.28, 50. ]])\n section=Section(data=sezdata)\n doctest.testmod()\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":18,"numItemsPerPage":100,"numTotalItems":42509,"offset":1800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjgwMzg0OCwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU2ODA3NDQ4LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.yGg_HNRDpvZM9wzE3Iv12Lv3o2GNwg4oqKxpodq3t2zAQ8yaObJeoZaPW_NQeyDFwwTqSQGICJ8JVft7I2kICQ","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
__id__
int64
3.09k
19,722B
blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
256
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
3 values
repo_name
stringlengths
5
109
repo_url
stringlengths
24
128
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
42
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
6.65k
581M
star_events_count
int64
0
1.17k
fork_events_count
int64
0
154
gha_license_id
stringclasses
16 values
gha_fork
bool
2 classes
gha_event_created_at
timestamp[ns]
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_size
int64
0
5.76M
gha_stargazers_count
int32
0
407
gha_forks_count
int32
0
119
gha_open_issues_count
int32
0
640
gha_language
stringlengths
1
16
gha_archived
bool
2 classes
gha_disabled
bool
1 class
content
stringlengths
9
4.53M
src_encoding
stringclasses
18 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
year
int64
1.97k
2.01k
16,269,336,119,456
52a92c451054927aa85e5838347cae5ad5114e79
f05098367c470f1483386beb937606cd0996f115
/scheduler/datasingletons.py
be9b4b497519664913ee89f3cad5a98ffdb8dbbb
[ "MIT" ]
permissive
Gr1N/scheduler
https://github.com/Gr1N/scheduler
1c76b813f382a13691b071b215a8049408fc6abe
2ba782f25bb78ccfed78ba34ea34c5723d320099
refs/heads/master
2016-09-06T02:52:48.609223
2012-03-02T17:54:45
2012-03-02T17:54:45
3,478,111
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # # Copyright (c) 2012 Grishko Nikita <grin.minsk at gmail dot com> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ datasingletons.py - Params and SQL singletons for scheduler """ import os.path import shelve import pygtk pygtk.require('2.0') import gtk from datetime import date def _singleton(cls): """ Singleton instance. """ instances = {} def getinstance(): if cls not in instances: instances[cls] = cls() return instances[cls] return getinstance def _check_app_dir(): """ Chech directory at ~/.config/scheduler and if directory not exists, create it. """ if not os.path.exists(os.path.expanduser('~/.config/scheduler')): os.mkdir(os.path.expanduser('~/.config/scheduler')) @_singleton class Params: """ Params singleton include all variables and operations who works with scheduler settings. """ def __init__(self): """ Class initializer. 
""" _check_app_dir() if os.path.exists(os.path.expanduser('~/.config/scheduler/params')): self._use_existing_params() else: self._use_default_params() def _use_default_params(self): """ Initialize default params and save it. """ self.params = { # Desktop window params 'pos': (100, 100), 'lock_pos': False, # Font params 'default_font': 'Sans 9', # Lessons colors 'lecture_color': '#009566660000', 'laboratory_color': '#987600000000', 'practice_color': '#188820eda89b', 'non_color': '#0000849acdf4', 'day_color': '#000000000000', # Window style 'full_transparent': True, 'window_color': '#5ad65ad65ad6', 'transparent_percent': 50.0, # View schedule settings 'view_sch': [True, True, True, True, True] } self.save_params() def _use_existing_params(self): """ Initialize params from file. """ sh = shelve.open(os.path.expanduser('~/.config/scheduler/params')) self.params = sh['params'] sh.close() def save_params(self): """ Save params dict to file. """ sh = shelve.open(os.path.expanduser('~/.config/scheduler/params')) sh['params'] = self.params sh.close() def get_pos(self): """ Get window position. """ return self.params['pos'] def set_pos(self, pos): """ Set wondow position. """ self.params['pos'] = pos self.save_params() def get_lock_pos(self): """ Get lock_pos flag. """ return self.params['lock_pos'] def set_lock_pos(self, lock_pos): """ Set lock_pos flag. """ self.params['lock_pos'] = lock_pos self.save_params() def get_default_font(self): """ Get default font. """ return self.params['default_font'] def set_default_font(self, font): """ Set default font. """ self.params['default_font'] = font def get_lecture_color(self): """ Get lecture color. """ return gtk.gdk.Color(self.params['lecture_color']) def set_lecture_color(self, color): """ Set lecture color. """ self.params['lecture_color'] = str(color) def get_laboratory_color(self): """ Get laboratory color. 
""" return gtk.gdk.Color(self.params['laboratory_color']) def set_labaratory_color(self, color): """ Set laboratory color. """ self.params['laboratory_color'] = str(color) def get_practice_color(self): """ Get practice color. """ return gtk.gdk.Color(self.params['practice_color']) def set_practice_color(self, color): """ Set practice color. """ self.params['practice_color'] = str(color) def get_non_color(self): """ Get non color. """ return gtk.gdk.Color(self.params['non_color']) def set_non_color(self, color): """ Set non color. """ self.params['non_color'] = str(color) def get_day_color(self): """ Get day color. """ return gtk.gdk.Color(self.params['day_color']) def set_day_color(self, color): """ Set day color. """ self.params['day_color'] = str(color) def get_view_sch(self): """ Get scheduler view settings. """ return self.params['view_sch'] def set_view_sch(self, view_sch): """ Set scheduler view settings. """ self.params['view_sch'] = view_sch def get_is_window_transparent(self): """ Get flag for window transparent. """ return self.params['full_transparent'] def set_is_window_transparent(self, flag): """ Set flag for window transparent. """ self.params['full_transparent'] = flag def get_window_color(self): """ Get window color. """ return gtk.gdk.Color(self.params['window_color']) def set_window_color(self, color): """ Set window color. """ self.params['window_color'] = str(color) def get_transparent_percent(self): """ Get window transparent percent. """ return self.params['transparent_percent'] def set_transparent_percent(self, percent): """ Set window transparent percent. """ self.params['transparent_percent'] = percent @_singleton class Schedule: """ DataBase singleton inlude all variables and operations who works with schedule in database. """ def __init__(self): """ Class initializer. 
""" _check_app_dir() if os.path.exists(os.path.expanduser('~/.config/scheduler/schedule')): self._use_existing_schedule() self.update_current_week() else: self._use_default_schedule() self.save_schedule() def _use_default_schedule(self): """ Initialize default schedule and save it. """ def gen_day(): dl = [] ll = [-1, '', -1, '', ''] for i in range(8): dl.append(ll[:]) rl = [] for i in range(4): rl.append(dl[:]) return rl self.schedule = { 'current_week': [1, date.today().isocalendar()[1]], 'lessons_time': [ ['8:00', '9:35'], ['9:45', '11:20'], ['11:40', '13:15'], ['13:25', '15:00'], ['15:20', '16:55'], ['17:05', '18:40'], ['18:45', '20:20'], ['20:25', '22:00'] ], 'schedule': { 'Monday': gen_day(), 'Tuesday': gen_day(), 'Wednesday': gen_day(), 'Thursday': gen_day(), 'Friday': gen_day(), 'Saturday': gen_day() }, 'subgroup': 0 } def _use_existing_schedule(self): """ Load existing schedule from file. """ sh = shelve.open(os.path.expanduser('~/.config/scheduler/schedule')) self.schedule = sh['schedule'] sh.close() def save_schedule(self): """ Save params dict to file. """ sh = shelve.open(os.path.expanduser('~/.config/scheduler/schedule')) sh['schedule'] = self.schedule sh.close() def update_current_week(self): """ Update current week. Week range: 1-4. """ if self.schedule['current_week'][1] != date.today().isocalendar()[1]: cw = self.schedule['current_week'][0] for i in range(date.today().isocalendar()[1] - self.schedule['current_week'][1]): if cw != 4: cw += 1 else: cw = 1 self.schedule['current_week'] = [cw, date.today().isocalendar()[1]] return True else: return False def get_current_week(self): """ Get current week. """ return self.schedule['current_week'][0] def set_current_week(self, week): """ Set current week. """ self.schedule['current_week'] = [week, date.today().isocalendar()[1]] def get_lessons_time(self): """ Get lessons time. """ return self.schedule['lessons_time'] def set_lessons_time(self, lessons_time): """ Set lessons time. 
""" self.schedule['lessons_time'] = lessons_time def get_schedule(self, day, week): """ Get schedule by day and week. """ return self.schedule['schedule'][day][week] def set_schedule(self, day, week, schedule): """ Set schedule by day and week. """ self.schedule['schedule'][day][week] = schedule def get_all_schedule(self): """ Get all schedule. """ return self.schedule['schedule'] def set_all_schedule(self, schedule): """ Set all schedule. """ self.schedule['schedule'] = schedule def get_subgroup(self): """ Get num of subgroup. """ return self.schedule['subgroup'] def set_subgroup(self, subgroup): """ Set num of subgroup. """ self.schedule['subgroup'] = subgroup if __name__ == '__main__': print __doc__.strip()
UTF-8
Python
false
false
2,012
8,899,172,258,399
992104972d652079f0ed801399388c1af81d8316
47243c719bc929eef1475f0f70752667b9455675
/repoze.pkgindex/trunk/repoze/pkgindex/views.py
d6a66e4b1bb083d886ce6d90d2f9ad25dea6dac0
[]
no_license
malangalanga/bungeni-portal
https://github.com/malangalanga/bungeni-portal
bbf72ce6d69415b11287a8796b81d4eb6520f03a
5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d
refs/heads/master
2021-01-19T15:31:42.943315
2014-11-18T09:03:00
2014-11-18T09:03:00
32,453,405
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from zope.component import getUtility from repoze.bfg.wsgi import wsgiapp from repoze.bfg.chameleon_zpt import render_template_to_response from repoze.bfg.view import static from repoze.bfg.traversal import model_path from repoze.bfg.interfaces import ISettings from paste.urlparser import StaticURLParser def static_view(context, request): settings = getUtility(ISettings) path = settings.path request.path_info = context.path[len(path):] static = StaticURLParser(path, cache_max_age=3600) return request.get_response(static) def directory_view(context, request): """directory view Show a list of packages or files in a directory. """ return render_template_to_response( 'templates/page.pt', project='pkgindex', items=[(name, model_path(item)) for (name, item) in context.items()])
UTF-8
Python
false
false
2,014
7,181,185,346,771
b7d1b80f1e3206b74a7d8d8effc9e8fb305912f4
c94bc43814c6ca963559fb2f6a52ec4ef332dac1
/cronsub.py
5d794b3efaa27af478bf1192ed8dd46fb9344fb8
[ "GPL-1.0-or-later", "GPL-2.0-or-later", "LGPL-2.0-or-later" ]
non_permissive
huahbo/dns
https://github.com/huahbo/dns
0bfe2c256b78d2897711bea850014ca676296148
fd2c2c7ef3f4f37fd8abf47148ae24be5002ef97
refs/heads/master
2021-01-15T16:37:36.044938
2014-08-22T19:57:06
2014-08-22T19:57:06
23,448,790
1
3
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python from string import * import os, commands, getopt, sys, exceptions ############################################################################## # # ./cronsub.py [submit] # # automatically resubmit LSF jobs in ~/cronlsf/*.job # # running without any options will print all messages, but not actually # submit any jobs. Good for testing. # # To use: # # add script to ~/cronlsf # make sure script has a unique job name, with a line like: #BSUB -J iso12 # create a file iso12.job.resub with a single line (ASCII) containing # the number of runs to submit. # # The .resub file number will be decreased each time a job is submitted # until it gets to zero. # # For all jobs not submitted, the reason will be printed to stdout # For jobs submitted, the command will be echoed to stderr # So running every 5min under cron, use: # # ./cronsub.py submit >> /dev/null # # And it will only generate mail every time a job is submitted to LSF # # ############################################################################# class ContinueEx(exceptions.Exception): def __init__(self,args=None): self.args=args #testing mode, unless arg1= "submit" submit=0 if (len(sys.argv)== 2): if (sys.argv[1]=="submit"): submit=1 #user="taylorm" user="mataylo" #set below for value of path #path="cronlsf/" #requires that we run from home directory # which is what cron does. otherwise hard code # a full path # are we on IRIX or OSF1? 
cmdstat = "bjobs -u all" jobopt = " < " (status,out) = commands.getstatusoutput("/bin/hostname") if (status==0): if (out=="qbfe1"): bsub="bsub" path="cronqb/" elif (out=="qfe1"): bsub="bsub" path="cronqa/" elif (out=="qfe2"): bsub="bsub" path="cronqb/" elif (out=="qscfe1"): bsub="bsub" path="cronqsc/" elif (out=="blogin2.sandia.gov"): bsub="/apps/torque/bin/qsub" jobopt = " " cmdstat = "/apps/torque/bin/qstat" path="crontbird/" else: print 'Error getting hostname' sys.exit(1) else: print 'Error getting OS type' sys.exit(1) # get a list of all jobnames queued in LSF (status,out) = commands.getstatusoutput(cmdstat) if (status!=0) & (status!=255): print 'Error getting list of queued jobs' sys.exit(1) # sometimes there are NO jobs in the system: if (status==255) & (find(out,"No unfinished job found")>=0): # in this case, there were no jobs in the system, # so dont parse to find our running jobs print "jobs: que is empty?" print "jobs: ",out else: #parse out to get jobname_running jobname_running=[] jobc=-1; vout=split(out,"\n") if (len(vout)<2): print 'Error parsing jobs output: need at least 2 lines' sys.exit(1) for line in vout: sline=split(line) if (jobc==-1) & (len(sline)>=7): if (sline[6]=="JOB_NAME"): # LSF output #find column where job starts jobc=find(line,"JOB_NAME") if (sline[2]=="Name"): # PBS output #find column where job starts jobc=find(line,"Name") if (jobc<0): print 'Error parsing bjobs output for job name' sys.exit(1) if (len(sline)>=3) & (jobc>=0): if (sline[1]==user): # LSF get everything in JOB_NAME column and beyond: out=line[jobc:-1] out=split(out," ") if (len(out)==0): print 'Error parsing jobs output for jobname' sys.exit(1) jobname_running.append(out[0]); if (sline[2]==user): # PBS get everything in JOB_NAME column and beyond: out=line[jobc:-1] out=split(out," ") if (len(out)==0): print 'Error parsing jobs output for jobname' sys.exit(1) jobname_running.append(out[0]); print "current queued jobs for user ",user if (len(jobname_running)==0): 
print "<none>" for out in jobname_running: print out print ' ' # get a list of all the .job files in ~/cronlsf: cmd = "ls "+path+"*.job" (status,out) = commands.getstatusoutput(cmd) if (status!=0): print 'Error: didn''t find any que scripts using: ',cmd sys.exit(1) vjobscript=split(out,"\n") for jobscript in vjobscript: print ' ' print 'script: ',jobscript try: # parse file for the job name #print "LSF script: ",jobscript jobname=""; jobcpus=""; fid=open(jobscript) line=fid.readline() while line: out=split(rstrip(line)," ") if (len(out)>=3) & (out[0]=="#BSUB"): if (out[1]=="-J"): jobname=out[2] if (len(out)>=3) & (out[0]=="#PBS"): if (out[1]=="-N"): jobname=out[2] line=fid.readline() if len(jobname)==0: raise ContinueEx,"no BSUB -J option: "+jobname if len(jobname)>10: raise ContinueEx,"BSUB -J <jobname>: jobname too long! "+jobname # for idbsub, #BSUB -n 64 line is ignored, we have to add this # to the idbsub line #if len(jobcpus)>0: # jobcpus=" -n "+jobcpus+" " print 'que name: ',jobname # check if it is in que i=jobname in jobname_running if i: raise ContinueEx,"job already running." # check resub count in file jobname.resub jobresub=path+jobname+".resub" fid=open(jobresub,'r') line=fid.readline() fid.close() fvalue=atoi(line) if (fvalue<=0): raise ContinueEx,"job counter reached 0" fvalue=fvalue-1 fid=open(jobresub,'w') fid.write(str(fvalue)+"\n") fid.close() except IOError,e: print 'FILE_ACCESS_ERROR: (resub file?)' except ValueError,e: print 'VALUE_ERROR: ' except ContinueEx,e: print 'skipping script: ',e else: # submit job jobcommand = bsub + jobcpus + jobopt + jobscript print "resub=" + str(fvalue)+" que job: "+jobcommand if (submit): os.system(jobcommand) else: print "(testing mode - no jobs submitted)"
UTF-8
Python
false
false
2,014
15,479,062,140,718
088da7b86b277083538800e83ff134cd84162282
6b3fa2742a08500824ad014afb514c57b78f72d2
/lastline.py
ab2f3033a09389c3f9658a04e06e83dcd4dd0db6
[]
no_license
hnyholm/ciphers
https://github.com/hnyholm/ciphers
214f2126a26455e740300b13e67b2cb5d4e8581e
4932842af76366d0dfdbab9d7625823fa1562f6f
refs/heads/master
2016-09-05T13:11:34.072183
2014-01-23T19:20:13
2014-01-23T19:20:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from helper_dora import * # only analyze the last line of the cipher - it only contains 12 different characters and can hopefully be brute-forced limit = 4 cipher = [ 12,27, 37,13,36,22, 26,23,27,16, 36,13,31,35, 13,36,23,31, 12,18,31 ] #cipher = [ 22,36,23,27, 36,22,12,27, 37,13,36,22, 26,23,27,16, 36,13,31,35, 13,36,23,31, 12,18,31 ] alphabet = "qzbcfgklmprvywxinotdeha" # not 's' #----------- def xcombinations(items, n): if n==0: yield [] else: for i in xrange(len(items)): for cc in xcombinations(items[:i]+items[i+1:],n-1): yield [items[i]]+cc #----------- uncommons = {} ufile = open("uncommon_grams_3.txt") for line in ufile: uncommons[line.strip()] = 0 ufile.close() wordlist = {} wfile = open("words_sorted.txt") for line in wfile: word = line.strip() key = word[:2] if key in wordlist: wordlist[key].append(word[2:]) else: wordlist[key] = [word[2:]] wfile.close() logfile = open("result_lastline.txt","w") key = {} for i in cipher: key[i] = '_' #key[36] = 'n' #key[13] = 'o' #key[31] = 't' #key[35] = 'd' #key[23] = 'e' #key[12] = 'h' #key[18] = 'a' counter = 0 # make up every possible key for c in xcombinations(alphabet,5): counter += 1 if counter%1000000 == False: print counter cind = 0 #for x in [ 22,27,16,26,37 ]: for x in [ 12,16,26,27,35,36,37,31,13,18,23 ]: # 22 missing (s) key[x] = c[cind] cind += 1 text = "" for cip in dora: if cip in key: text += key[cip] else: text += "_" #first check: it is not gibberish broken = False for i in range(len(text)-2): if text[i:i+3] in uncommons: broken = True break if not broken: text2 = text[:] text = text[len(text)-len(cipher):] # second check: it contains words letlist = [0 for x in range(len(text))] for v in range(len(text)-2): bigram = text[v] + text[v+1] if bigram in wordlist: possib_w = wordlist[bigram] therest = text[v+2:] for bw in possib_w: if therest.startswith(bw): for rp in range(len(bw)+2): letlist[v+rp] = 1 break uglies = letlist.count(0) if uglies < limit: print text2, uglies logfile.write(text2 + " " + 
str(uglies) + "\n") logfile.flush() logfile.close()
UTF-8
Python
false
false
2,014
4,415,226,383,984
3009d8b75e5e6c9a7c71606beeba3c47f9983a73
5d142801de475f41d4512879de84a2472639d0d7
/runs/lookuptable/reflectance/800/test/test.py
cc3e69fc197f939b65849b224c70a086f24ed253
[]
no_license
samyvilar/greenbandsubgroups
https://github.com/samyvilar/greenbandsubgroups
e817a59e4cd3f5224608f5e9da1f3e59fa0460ab
0eba2d1c52093723d24ab4e322327c84caf79b2f
refs/heads/master
2021-01-01T05:30:37.586998
2012-08-23T19:23:17
2012-08-23T19:23:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
__author__ = 'Samy Vilar' import sys sys.path.extend('../../../../../..') from lookuptable.lookuptable import lookuptable from HDFFile import read_file from Utils import save_images, get_root_mean_square, get_sum_of_errors_squared, get_granule_path if __name__ == '__main__': lut = lookuptable() lut.load_table('../800_lookuptable.numpy') granule_path = '/DATA_11/TERRA_1KM/temp/MOD021KM.A2002179.1640.005.2010085164818.hdf' original, valid_range, original_shape = read_file(file = granule_path, bands = [1,2,3,4], param = 'reflectance', winsize = 75, maxinvalid = .35, clean = True) predicted = lut.predict(original) error = get_root_mean_square(original = original[:, 3], predicted = predicted[:, 3]) print "RMSE: %f%%" % error print "Sum of Squared Errors: %f" % get_sum_of_errors_squared(original = original[:, 3], predicted = predicted[:, 3]) save_images(original = original, predicted = predicted, granule_path = granule_path, original_shape = original_shape ) # red is 1, green = 4, blue = 3, NIR = 2 # 1, 4, 3 # 0, 3, 2
UTF-8
Python
false
false
2,012
3,040,836,862,696
ad9ca40af29edd2c834af99560a5491bf74f4497
cfd547b2cf7812d2534a1992e633fcf4a54d5fa6
/TriblerCode/Tribler/Web2/photo/photo.py
ab9cb6355083702d4b574dd798052a72939ad9cd
[ "LicenseRef-scancode-unknown-license-reference", "OpenSSL", "LGPL-2.1-only", "LGPL-2.0-or-later", "Python-2.0", "MIT", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-other-copyleft", "WxWindows-exception-3.1", "LGPL-2.1-or-later", "LicenseRef-scancode-openssl", "LicenseRef-scancode-warranty-disclaimer", "GPL-1.0-or-later", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-ssleay-windows", "LicenseRef-scancode-mit-old-style", "BitTorrent-1.1", "GPL-2.0-only" ]
non_permissive
thejosh223/cs198mojo
https://github.com/thejosh223/cs198mojo
14f359a8d55a24904aed7381a485f79774bb32dc
4d8d698f28e265ac91c0b1467ef3766cb33a854a
refs/heads/master
2021-01-22T05:28:11.558733
2014-04-02T11:44:00
2014-04-02T11:44:00
10,900,361
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from bsddb import dbshelve import os from util import observer from util import download from util import db import base64 import re import settings import urllib import wx import flickr import zooomr class PhotoDB(db.DB): def __init__(self, dir, name): db.DB.__init__(self, dir, name) def onlineSearch(self, query): searches = [] searches.append(flickr.FlickrSearch(query, self)) searches.append(zooomr.ZooomrSearch(query, self)) return db.CompoundDBSearch(searches) def newItem(self, id, *args, **kws): return db.DB.newItem(self, PhotoItem, id, *args, **kws) class PhotoItem(db.Item): def __init__(self, id, dbname, path, name, dl, tags=[], by=""): db.Item.__init__(self, id, dbname, path, name, tags, dl) self.by = by self.content = os.path.join(self.path, "photo.jpg") self.preview = os.path.join(self.path, "preview.jpg") def setPreview(self, url): try: urllib.urlretrieve(url, self.preview) except: pass def getPreview(self): # Be sure to call this from the gui thread! # otherwise this may crash Xlib with # Xlib: unexpected async reply if not os.path.exists(self.preview): return None img = wx.Bitmap(self.preview, wx.BITMAP_TYPE_JPEG).ConvertToImage() return img def hasPreview(self): if not os.path.exists(self.preview): return False else: return True def getPreviewPath(self): return self.preview def getBy(self): return self.by def isStored(self): return os.path.exists(self.content) def getType(self): return "image" def getPath(self): return self.content
UTF-8
Python
false
false
2,014
19,258,633,388,135
be867262436a15b9377156bae226b36abe50f0d5
c955988ef1f47d9143b38f266673bb357b13d04f
/IFCPythonSDK/srlparser-before.py
15ff69a9f9a8e1133239c6265205845939b73daa
[]
no_license
quntian/BimCenter
https://github.com/quntian/BimCenter
1c3d75afe0ed79320ed503d07089560ede6f313b
5d67a975321d0b7c352d5e9aec2f6d440c205fa0
refs/heads/master
2020-02-20T09:16:13.958734
2014-05-22T11:20:57
2014-05-22T11:20:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python #coding=utf-8 #Filename:srlparser.py import re from utils import render from expressionparser import ExpressionParser from tomysql import getMysqlConn,getMongoConn md5='' subids=set() pattern=re.compile(r'#(\d+)') conn,cur=None,None monConn,db=None,None class node(object): """""" def __init__(self,name): super(node,self).__init__() self.name=name self.attr=[] self.where=[] def add(self,attr): """""" self.attr.append(attr) def dump(self,level=0): """""" print ' '*level, print self.name, print self.where if self.where else '' for i in self.attr: i.dump(level+1) def __repr__(self): return self.name+' '+str(self.where) def __str__(self): return self.name+' '+str(self.where) def parseSRL(filename): """""" nodes=[] blockCommentPattern=re.compile(r'/\*[\s\S]*\*/') lineCommentPattern=re.compile(r'//.*\n') with open(filename,'r') as fp: fileContent=fp.read() fileContent=blockCommentPattern.sub('',fileContent) lines=lineCommentPattern.sub('',fileContent).split('\n') for line in lines: if not line: continue line=line.strip('\r\n').replace(' ','\t').strip(' ') if not line: continue idx=0 while line[idx]=='\t': idx+=1 attrname=line[idx:] pos=attrname.find('where') where=[] if pos>=0: whereClause=attrname[pos+5:].strip(' ') exp=ExpressionParser(whereClause) where=exp.parse() attrname=attrname[:pos].strip(' ') #print idx, attrname n=node(attrname) if where: n.where=where #parse if idx==0:#top entities nodes.append(n) level=idx #last level lastnode=n stack=[] elif idx==level:#new brother stack[-1].add(n) elif idx>level:#new attr stack.append(lastnode) stack[-1].add(n) else: #pop stack while level>idx: stack.pop() level-=1 stack[-1].add(n) level=idx lastnode=n return nodes def visitEntities(entities): """""" for entity in entities: #visit current entity ids=nameToIds(entity.name,entity.where) #print nametoids(ifcdoor->(#234,#342)) if entity.attr:#has attr for attr in entity.attr: extractAttr(attr,ids) subids.update(ids) else: subids.update(extractSubItems(ids)) 
def extractAttr(attr,ids): """ extract this attr on all ids attrs: attr desc of this kind ids: ids of same kind """ refs=set() for id in ids: #get id->entity entity= getEntity(id) #vlidate expression condition #id or id list ref=entity[attr.name] if isinstance(ref,list): if attr.where: for i in ref: if validateExp(getEntity(i),attr.where): refs.add(i) else: refs.update(refs) else: if attr.where: if validateExp(getEntity(ref),attr.where): refs.add(ref) else: refs.add(ref) print id,attr.name, ref if attr.attr: #has sub attr for attr in attr.attr: #get every entity's attr ref id ->4,5,6 extractAttr(attr,refs) subids.update(refs) else:#append subids.update(extractSubItems(refs)) def extractSubItems(ids): """""" currentIds=list(ids) idx=0 while idx<len(currentIds): curId=currentIds[idx] params=getParam(curId) refs=pattern.findall(params) for ref in refs: if not int(ref) in currentIds: currentIds.append(int(ref)) idx+=1 print idx return sorted(currentIds) def generateModelFile(ids,to=None): """""" code='' lines=getLines(ids) for line in lines: code+='#%d=%s(%s);\n'%(line[0],line[1],line[2]) if cur.execute('select description , implementationLevel , name , author, organization , preprocessorVersion, originatingSystem,authorization, schemaIdentifiers from sdk_indexes where hash=%s',md5): #print cur.fetchone() idx=cur.fetchone() return render('temp.ifc', { 'description':idx[0], 'implementationLevel':idx[1], 'name' :idx[2], 'author':idx[3], 'organization':idx[4], 'preprocessorVersion':idx[5], 'originatingSystem':idx[6], 'authorization':idx[7], 'schemaIdentifiers':idx[8], 'data':code }, to) def validateExp(entity,where): """""" for exp in where: key=exp[0] value=exp[2] op=exp[1] en=entity[key] if op=='>' and not en>value: return False elif op=='<' and not en<value: return False elif op=='=' and not en==value: return False elif op=='<=' and not en<=value: return False elif op=='>=' and not en>=value: return False elif op=='!=' and not en!=value: return False return True def 
nameToIds(name,where): """""" ids=[] name=name.upper() if cur.execute('select id from '+md5+' where `name`=%s',(name)): allIds={i[0] for i in cur.fetchall()} else: allIds={} if where: for i in allIds: entity=getEntity(i) if validateExp(entity,where): ids.append(i) else: ids+=allIds return ids def getParam(id): """""" if cur.execute('select param from '+md5+' where `id`=%s',str(id)): param=cur.fetchone()[0] return param def getLines(ids): """""" if not ids: return [] ids=[str(id) for id in ids] if cur.execute('select id,name,param from '+md5+' where `id` in (%s)'%(','.join(ids))): lines=cur.fetchall() return lines def getEntity(id): """""" return db[md5].find_one({'id':id}) def getSubModel(srl,hash): """""" global md5,conn,cur,monConn,db conn,cur=getMysqlConn() monConn,db=getMongoConn() md5=hash visitEntities(parseSRL(srl)) fileContent= generateModelFile(subids) cur.close() conn.close() monConn.close() return fileContent if __name__ == '__main__': import time #parseSRL('rules/geo2.srl') #getSubModel('rules/geo2.srl','335af8fe4295cad94d270f29aff664d0') conn,cur=getMysqlConn() monConn,db=getMongoConn() #md5='335af8fe4295cad94d270f29aff664d0' md5='5de172dbe0db7fa9f590ab51fdcd28ad' #md5='193ab02b353e9d3a59d719c87ffbdc52' beg=time.time() visitEntities(parseSRL('rules/geo2.srl')) end=time.time() print end-beg #fileContent= generateModelFile(subids) cur.close() conn.close() monConn.close() #with open("output.ifc",'w') as fp: #fp.write(fileContent)
UTF-8
Python
false
false
2,014
5,403,068,882,512
42dba9c17f65fa2a07d91c65e09df5fec8d89140
9bb9569f2089914083edc2f8558e5ed2ecd6b188
/crxhost/models.py
38494f05964d29d3a046f448eea66b62f51180fb
[]
no_license
ledzep2/django-crxhost
https://github.com/ledzep2/django-crxhost
8de107faa09a5e65c09ee66c00e6afea02ee8dad
157babf86a38845723be756b7fbcad9e264ca702
refs/heads/master
2021-01-01T18:23:28.055617
2013-08-20T15:17:57
2013-08-20T15:17:57
12,210,516
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models from django.conf import settings import datetime, re, zipfile import crxhost.utils as utils class CRXId(models.Model): name = models.CharField(max_length = 255, blank = False, default = '', help_text = "CRX package name without version number") cid = models.CharField(max_length = 255, blank = True, help_text = "extension or app id") active = models.BooleanField(default = True, blank = True) def __unicode__(self): return self.name class CRXPackage(models.Model): def crx_upload_path_gen(instance, filename): ret = utils.break_filename(filename) now = datetime.datetime.now().strftime("%Y%m%d%H%M%S") filename = "%s-%s.crx" % (filename, now) return "%s/%s" % (ret['crxname'], filename) crx = models.ForeignKey(CRXId, blank = True, null = True) version = models.CharField(max_length = 32, blank = True, default = '', help_text = "The version number parsed from the filename or manifest of the uploaded package") package = models.FileField(upload_to = getattr(settings, "CRX_UPLOAD_PATH", crx_upload_path_gen), blank = False) timestamp = models.DateTimeField(auto_now_add = True) downloaded = models.PositiveIntegerField(default = 0, blank = True) active = models.BooleanField(default = True) def __unicode__(self): return u"%s-%s" % (unicode(self.crx), self.version) def package_name(self): return "%s-%s.crx" % (self.crx.name, self.version) def save(self, *args, **kwargs): attrs = utils.break_filename(self.package.name) with zipfile.ZipFile(self.package, 'r') as f: manifest = f.read('manifest.json') ret = re.findall(r'"version"\:\s*"(.+)"', manifest) if ret and not ret[0].startswith('__'): attrs['version'] = ret[0] ret = re.findall(r'"name"\:\s*"(.+)"', manifest) if ret and not ret[0].startswith('__'): attrs['crxname'] = ret[0] if not self.crx: try: crx = CRXId.objects.get(name = attrs['crxname']) except CRXId.DoesNotExist: crx = CRXId(name = attrs['crxname']) crx.save() self.crx = crx self.version = self.version or attrs['version'] super(CRXPackage, 
self).save(*args, **kwargs) __all__ = ("CRXId", "CRXPackage")
UTF-8
Python
false
false
2,013
326,417,553,148
b6709d5e09d2647593e2cb756dd9a8def51388ab
9e4437419124f7d28e1b3fc14c774a1b71f24e11
/hw1/HW1_StudentSorting.py
afca74094fd23824e25e1a076e532575f9cd0eb0
[]
no_license
asimsaleem/APT_Fall2014
https://github.com/asimsaleem/APT_Fall2014
1f4bfe212c8df38f46686405a43374adda466683
fad6805c8698e75ae587e697a086d6444f0d1d6a
refs/heads/master
2020-04-18T07:12:35.081243
2014-11-22T20:58:43
2014-11-22T20:58:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#Create an Array of Five Students #Name, Age, GPA - Unique GPAs ''' testArray = [["Adam", 25, 3.0], ["Carl", 24, 4.0], ["Bart", 23, 3.5], ["Deng", 22, 2.5], ["Eden", 21, 2.0]] ''' #Name, Age, GPA - GPA gets repeated. Sorting on Name ''' testArray = [["Adam", 25, 3.0], ["Bart", 24, 4.0], ["Carl", 23, 4.0], ["Deng", 22, 2.0], ["Eden", 21, 2.0]] ''' #Name, Age, GPA - GPA gets repeated. Sorting on Name and Age testArray = [["Adam", 25, 3.0], ["Bart", 24, 4.0], ["Carl", 23, 4.0], ["Deng", 22, 2.0], ["Deng", 21, 2.0]] #Sort by GPA in Increasing Order. Implies that the Lowest GPA should come first sorted_list = sorted(testArray, key=lambda x:(x[2], x[0], x[1])) print "Test Array is: ", testArray print "Sorted List is: ", sorted_list
UTF-8
Python
false
false
2,014
16,217,796,513,603
45353f1b9060825abb25543c562df17031655102
8e4a0879e51e19d348f36eadd926866187fef38f
/testing/util/allnaive.py
9f3d227d47b2004536be100116e1ded9164ca05d
[ "MIT" ]
permissive
cha63506/mediawiki-svn
https://github.com/cha63506/mediawiki-svn
0a41c0027f179c8ad955456b8a3f4030d591f726
2dac24cf651515dd319205777451df0ad39851b2
HEAD
2017-10-07T11:11:21.097849
2011-07-01T10:03:14
2011-07-01T10:03:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # (C) 2009 Kim Bruning. Distributed under the MIT license (see LICENSE file) import sys import os, os.path sys.path.append("..") import wiki_works from installer.naive_installer import Naive_Installer # settings target_wiki='test' resultfile=file("naive_results","w") def test_extension(extension_name): result=None try: installer=Naive_Installer() installer.set_instance(target_wiki) installer.install(extension_name) result=wiki_works.wiki_works(target_wiki) print "result=",result installer.uninstall(extension_name) except Exception,e: print e return result if __name__=="__main__": installer=Naive_Installer() installer.set_instance(target_wiki) naive_extensions=installer.get_installers() for extension_name in naive_extensions: print extension_name, result=test_extension(extension_name) print result r="Unknown" if result: r="NAIVE_INSTALL_SEEMS_OK" else: r="NAIVE_INSTALL_BREAKS" resultfile.write("* "+extension_name+" "+r+"\n") resultfile.flush() resultfile.close()
UTF-8
Python
false
false
2,011
7,387,343,763,650
4549cb3fa4377a3399a2f87200040b4d0e26f63a
0aad810cdc2dda776be4693fa16e21c9e8db5c06
/test.py
752df6534dab7dd4feab37f29bddfb8c923e4a8f
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
mingderwang/105-Robot
https://github.com/mingderwang/105-Robot
ef383ef4bf77664398806d3e2b0e335025c3d431
e6d7038d89b51a40583279d139fdb9543ad4e380
refs/heads/master
2021-01-23T22:38:25.343948
2009-05-21T05:17:16
2009-05-21T05:17:16
206,531
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
//<![CDATA[ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <title>Robot 105 Thinking...</title> <link type="text/css" rel="stylesheet" href="/static/styles.css"> <script type="text/javascript" src="/static/script.js"></script> <script src="http://maps.google.com/maps?file=api&amp;v=2&amp;key=ABQIAAAAU0xcljgmc3MI0v0I1a300RQZd9WN54AAYgK46OiTAO7YET-bpBRg4UYrjrkKnHGbjChUwlvs77uq3A" type="text/javascript"></script> <script type="text/javascript"> //<![CDATA[ var map; var userAdded = 1; var layers = { "blackbirds": {"url": "http://bbs.keyhole.com/ubb/download.php?Number=50664", "name": "Blackbirds"}, "tourdefrance": {"url": "http://bbs.keyhole.com/ubb/download.php?Number=43757", "name": "Tour de France"}, "japanpics": {"url" : "http://www.chemlink.com.tw/about/ChemlinkNanKangOffice.kmz", "name": "Nankang Office"} }; function load() { if (GBrowserIsCompatible()) { var map = new GMap2(document.getElementById("map")); map.setCenter(new GLatLng(41.83210555555556, 27.03565), 5); } } function onLoad() { map = new GMap2(document.getElementById("map")); map.setCenter(new GLatLng(25.03565, 121.3619), 5); map.addControl(new GSmallMapControl()); document.getElementById("url").value = "http://"; for(var layer in layers) { addTR(layer, layers[layer].name); } document.getElementById(layer).checked = true; toggleGeoXML(layer, true); } function addGeoXML() { var theUrl = document.getElementById("url").value; theUrl = theUrl.replace(/^\s+/, ""); theUrl = theUrl.replace(/\s+$/, ""); if (theUrl.indexOf(' ') != -1) { alert('Error - that address has a space in it'); } else { var id = "userAdded" + userAdded; layers[id] = {}; layers[id].url = theUrl; layers[id].name = "User Layer " + userAdded; addTR(id); document.getElementById(id).checked = true; toggleGeoXML(id, true); userAdded++; } } function addTR(id) { var layerTR = document.createElement("tr"); var inputTD = document.createElement("td"); var input = 
document.createElement("input"); input.type = "checkbox"; input.id = id; input.onclick = function () { toggleGeoXML(this.id, this.checked) }; inputTD.appendChild(input); var nameTD = document.createElement("td"); var nameA = document.createElement("a"); nameA.href = layers[id].url; var name = document.createTextNode(layers[id].name); nameA.appendChild(name); nameTD.appendChild(nameA); layerTR.appendChild(inputTD); layerTR.appendChild(nameTD); document.getElementById("sidebarTBODY").appendChild(layerTR); } function zoomToGeoXML(geoXml) { var center = geoXml.getDefaultCenter(); var span = geoXml.getDefaultSpan(); var sw = new GLatLng(center.lat() - span.lat() / 2, center.lng() - span.lng() / 2); var ne = new GLatLng(center.lat() + span.lat() / 2, center.lng() + span.lng() / 2); var bounds = new GLatLngBounds(sw, ne); map.setCenter(center); map.setZoom(map.getBoundsZoomLevel(bounds)); } function toggleGeoXML(id, checked) { if (checked) { var geoXml = new GGeoXml(layers[id].url); GEvent.addListener(geoXml, 'load', function() { if (geoXml.loadedCorrectly()) { geoXml.gotoDefaultViewport(map); layers[id].geoxml = geoXml; document.getElementById("status").innerHTML = ""; } }); layers[id].geoXml = geoXml; map.addOverlay(layers[id].geoXml); document.getElementById("status").innerHTML = "Loading..."; } else if (layers[id].geoXml) { map.removeOverlay(layers[id].geoXml); } } //]]> </script> </head> <body onload="onLoad()"> <div> <b>mingderwang &lt;[email protected]&gt;</b> | <a href="http://105.appspot.com/_ah/logout?continue=https://www.google.com/accounts/Logout%3Fcontinue%3Dhttp://105.appspot.com/%26service%3Dah">Sign Out</a> <h2>Free GPS Uploading Places</h2> List of Places: <ul> <li> <a href="/edit/142">Sparkles2</a> - created 2008/06/23 08:02:31 by mingderwang <li> <a href="/edit/141">location</a> - created 2008/06/23 06:23:08 by mingderwang <li> <a 
href="/edit/122">&lt;&gt;&lt;&gt;&lt;&lt;&lt;&lt;&lt;&gt;&gt;&gt;&gt;&gt;&lt;&gt;&lt;&gt;&lt;&gt;&lt;&lt;&lt;&lt;&gt;&lt;&lt;&gt;&lt;&gt;&lt;&gt;&lt;&gt;&lt;&gt;&lt;&lt;.&lt;&gt;&lt;&gt;&gt;&lt;&lt;&gt;&gt;&gt;,.&gt;&gt;&gt;&lt;&lt;&gt;&lt;&lt;</a> - created 2008/06/21 15:23:06 by mingderwang <li> <a href="/edit/121">¤¤¤å</a> - created 2008/06/21 14:48:19 by mingderwang <li> <a href="/edit/101">stars</a> - created 2008/06/19 15:20:34 by ym1220 <li> <a href="/edit/2">PicLens</a> - created 2008/06/16 09:28:58 by mingderwang <li> <a href="/edit/81">aaaaaaaaaaaaaaaaaaaaaaaaaaaaa lllllllllllllllllllllong list</a> - created 2008/06/15 14:25:33 by ym1220 <li> <a href="/edit/61">Birthdaygift</a> - created 2008/06/15 14:24:24 by ym1220 <li> <a href="/edit/41">Protopage</a> - created 2008/06/15 14:05:55 by mingderwang <li> <a href="/edit/21">free tibet</a> - created 2008/06/15 13:54:48 by mingderwang <li> <a href="/edit/1">Sparkle</a> - created 2008/06/15 13:53:15 by mingderwang </ul> <a href="/new">Create new place.</a> </div> <br/> <input id="url" value="" size="60"/> <input type="button" value="Add" onClick="addGeoXML();"/> <br/> <br/> <div id="map" style="width: 400px; height: 400px; float:left; border: 1px solid black;"></div> <div id="sidebar" style="float:left; overflow-vertical:scroll; height: 400px; width:150px; border:1px solid black"> <table id="sidebarTABLE"> <tbody id="sidebarTBODY"> </tbody> </table> <div id="status" style="text-align:center; color: #ff0000"></div> </div> </body> </html> //]]>
WINDOWS-1252
Python
false
false
2,009
9,234,179,725,005
c473413ee148802bb77c6e4d321de7bd87fcfbaa
5e27c7f5426c169fd348b26e94b65c35f9cdc459
/dragonfly/convert/trigger_true.py
08df27650994e17a7b1fb2fa5e53bc28fdd48e2d
[ "BSD-2-Clause" ]
permissive
agoose77/hivesystem
https://github.com/agoose77/hivesystem
e2c9c27408233b5794151ca74f541d2e6063d58a
e1f55c5ea530a989477edb896dcd89f3926a31b8
refs/heads/master
2020-07-21T23:07:37.178856
2014-08-23T02:13:19
2014-08-23T02:13:19
20,776,359
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# untested import bee from bee.segments import * class trigger_true(bee.worker): inp = antenna("push", "trigger") outp = output("push", "bool") b_outp = buffer("push", "bool") startvalue(b_outp, True) connect(b_outp, outp) trigger(inp, b_outp)
UTF-8
Python
false
false
2,014
15,178,414,462,911
d66da57458abcd43ce888bee54ecc8104da6607a
2dc81ba0488a5f9ecf01bd83e5b18bd781f617ad
/registration/model/basis.py
014ac18d17bcc7037eff1769f19c0028b334ef92
[ "BSD-3-Clause" ]
permissive
demianw/pyMedImReg-public
https://github.com/demianw/pyMedImReg-public
857f9c2cbc00a875b4d1cf8118159f828e3a855f
f6532f68d3a00fc982fc79053079f0a71405a5e4
refs/heads/master
2020-11-26T16:30:56.264948
2013-07-02T21:45:07
2013-07-02T21:45:07
10,549,991
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
r""" Module with the basic classes defining transformation models """ import numpy from ..util import vectorized_dot_product __all__ = ['Model'] class Model(object): r"""Base Class for Transformations A transformation is defined as a map: .. math:: \phi: \Omega \mapsto \Omega where :math:`\Omega \subseteq \Re^N` and the transform has a parameter vector :math:`\theta \in \Re^M` with :math:`M` the number of parameters Notes ---------- We define :math:`\phi(x; \theta) = (\phi_1(x;\theta),\ldots, \phi_N(x;\theta))`, then the jacobian of the transform with respect to the parameter :math:`\theta` as .. math:: [D_\theta\phi(x; \theta)]_{ij} = \frac{\partial \phi_i(x; \theta)}{\partial \theta_j}, i=1\ldots N, j=1\ldots M and the jacobian of the transform with respect to the location :math:`x` as .. math:: [D_x\phi(x; \theta)]_{ij} = \frac{\partial \phi_i(x; \theta)}{\partial x_j}, i, j =1\ldots N attributes ---------- `parameter` : array-like, shape (n_parameters) Stores the parameter vector :math:`\theta` of the transform. `identity` : array-like, shape (n_parameters) Stores the parameter value :math:`\theta_0` such that :math:`\phi(x; \theta_0) = x`. `bounds` : array-like, shape (n_parameters, 2) Stores the upper and lower bounds for each component of the parameter vectors :math:`\theta` such that :math:`\text{bounds}_{i0} \leq \theta_i \leq \text{bounds}_{i1}` References ---------- """ def __init__(self): self.parameter = self.identity @property def identity(self): r""" Stores the parameter value :math:`\theta_0` such that :math:`\phi(x; \theta_0) = x`. """ return None def transform_points(self, points): r"""Transform a set of points. Parameters ---------- x : array-like, shape (n_points, n_dimensions) Points to be transformed Returns ------- y : array-like, shape (n_points, n_dimensions) :math:`y = \phi(x)` """ raise NotImplementedError() def transform_vectors(self, points, vectors): r"""Transform a set of vectors located in space. 
Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the vectors to be transformed v : array-like, shape (n_points, n_dimensions) Vectors to be transformed Returns ------- w : array-like, shape (n_points, n_dimensions) :math:`w = D_x^T\phi(x) \cdot w` where :math:`D_x\phi(x)` is the Jacobian of :math:`\phi(x)` with respect to the spatial position :math:`x` """ jacobians = self.jacobian_position(points) res = vectorized_dot_product(jacobians, vectors[..., None])[..., 0] return numpy.atleast_2d(res) def transform_tensors(self, points, tensors): r"""Transform a set of tensors located in space. Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the vectors to be transformed T : array-like, shape (n_points, n_dimensions, n_dimensions) Tensors to be transformed Returns ------- S : array-like, shape (n_points, n_dimensions) :math:`S = D^T_x\phi(x) \cdot T \cdot D_x\phi(x)` where :math:`D_x\phi(x)` is the Jacobian of :math:`\phi(x)` with respect to the spatial position :math:`x` """ jacobians = self.jacobian_position(points) return vectorized_dot_product( vectorized_dot_product(jacobians.swapaxes(-1, -2), tensors), jacobians ) def jacobian(self, points): r"""Transposed Jacobian of the transform with respect to its parameters Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the Jacobian to be calculated Returns ------- J : array-like, shape (n_points, n_parameters, n_dimensions) :math:`J = D^T_\theta\phi(x)` """ raise NotImplementedError() def jacobian_position(self, points): r"""Transposed Jacobian of the transform with respect to its location Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the Jacobian to be calculated Returns ------- J : array-like, shape (n_points, n_dimensions, n_dimensions) :math:`J = D^T_x\phi(x)` """ raise NotImplementedError() def jacobian_parameter_jacobian_position(self, points): r"""Iterated Transposed Jacobian of the 
transform with respect to its parameter and Location Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the Jacobian to be calculated Returns ------- J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions) :math:`J_{ijk} = \frac{\partial \phi_k(x)}{\partial \theta_i \partial x_j}` """ raise NotImplementedError() def jacobian_vector_matrices(self, points, vectors): r"""Transposed Jacobian with respect to the transform parameter of the expression :math:`D^T_x \phi(x) \cdot v` Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the Jacobian to be calculated v : array-like, shape (n_points, n_dimensions) Vectors at each point of x Returns ------- J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions) :math:`J = D^T_\theta[D^T_x\phi(x) \cdot v]` """ jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points) DjacT_vector = vectorized_dot_product( jacobian_parameter_jacobian_position, # .swapaxes(-1, -2), vectors[:, None, :, None] )[:, :, :, 0] return DjacT_vector def jacobian_tensor_matrices(self, points, tensors): r"""Transposed Jacobian with respect to the transform parameter of the expression :math:`D_x^T \phi(x) \cdot T \cdot D_x\phi(x)` Parameters ---------- x : array-like, shape (n_points, n_dimensions) Location of the Jacobian to be calculated T : array-like, shape (n_points, n_dimensions, n_dimensions) Tensors at each point of x Returns ------- J : array-like, shape (n_points, n_parameters, n_dimensions, n_dimensions) :math:`J = D^T_\theta[D^T_x\phi(x) \cdot T\cdot D_x\phi(x)]` """ jacobians = self.jacobian_position(points) jacobian_parameter_jacobian_position = self.jacobian_parameter_jacobian_position(points) tensor_jac = vectorized_dot_product(tensors, jacobians) DjacT_tensor_jac = vectorized_dot_product( jacobian_parameter_jacobian_position.swapaxes(-1, -2), tensor_jac[:, None, :, :] ) return DjacT_tensor_jac + 
DjacT_tensor_jac.swapaxes(-1, -2) def norm(self, points): raise NotImplementedError() @property def bounds(self): r""" Stores the upper and lower bounds for each component of the parameter vectors :math:`\theta` such that :math:`\text{bounds}_{i0} \leq \theta_i \leq \text{bounds}_{i1}` """ return None
UTF-8
Python
false
false
2,013
7,885,559,996,688
7922c1c0439c9c6a264718ec7bac604ca754dc37
d08e8a6f0254a1c632ad3fd783cb9547198841c2
/ahr2127_Project_3_Python/gomoku.py
18a5e5bfa6659e1536bdd5f0b3e8472ba81d3b90
[]
no_license
adamreis/AI_projects
https://github.com/adamreis/AI_projects
52ebd725d3f1974840b58fa7f2e92ca38f00203e
abe2a4ab2199fbd838ae0dd888e654d245bf00f9
refs/heads/master
2020-05-19T13:39:31.562845
2013-11-22T16:48:45
2013-11-22T16:48:45
14,159,265
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
__date__ ="Nov 6, 2013" import sys import time from gomoku_state import GomokuState from gomoku_game import GomokuGame from gomoku_player import HumanPlayer, RandomPlayer, SmartPlayer def usage(): print """ usage: python gomoku.py [mode] [board dimension] [winning chain length] [time limit] ex: python gomoku.py 1 10 5 60 """ if __name__ == '__main__': if len(sys.argv) != 5: usage() sys.exit(2) mode, board_dimension, winning_length, time_limit = \ [int(i) for index, i in enumerate(sys.argv) if index] if mode==1: gomo = GomokuGame(board_dimension, winning_length, time_limit, HumanPlayer, SmartPlayer) elif mode==2: gomo = GomokuGame(board_dimension, winning_length, time_limit, HumanPlayer, RandomPlayer) elif mode==3: gomo = GomokuGame(board_dimension, winning_length, time_limit, SmartPlayer, SmartPlayer)
UTF-8
Python
false
false
2,013
3,358,664,451,358
364782d188f975a20c41f5064a180b616f5b9716
541e1080ef18536b4ebb1442391010eb861f51f7
/Angle.py
d771575b960b37bd20d772e0e20fde486e63864e
[]
no_license
pd0wm/EPO4
https://github.com/pd0wm/EPO4
e898218b0452981cd2386249e6348288e61474ba
df24076e05f92a6a8ecf07c701687a2306371437
refs/heads/master
2016-08-08T06:02:14.255238
2014-06-11T09:16:36
2014-06-11T09:16:36
20,395,412
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import cmath

# Sample coordinates (module-level test data kept from the original).
auto_old = (1.0, 1.0)
auto_new = (0.0, 2.0)
coord = (0.0, 1.0)


def angle_relative(angle_car, angle_dest):
    """Return the signed angular difference ``angle_car - angle_dest`` in degrees.

    A single full turn is wrapped back so that, for inputs within one
    revolution of each other, the result lies in [-180, 180]. Note that
    only one wrap is corrected; differences beyond +/-540 are not fully
    normalized (matches the original behavior).
    """
    delta = angle_car - angle_dest
    # Fold one excess revolution back into the principal range.
    if delta > 180:
        delta -= 360
    elif delta < -180:
        delta += 360
    return delta
UTF-8
Python
false
false
2,014
8,349,416,428,102
d92fa5865cfd11b8f42d5fdc1d8ea4421b9fbb9e
564d1352ec876f09d3b048e30353d5c48e3e3479
/app/__init__.py
9d9d96bc8fe8bbe143c7534f15eee3f86ab93930
[]
no_license
configuresystems/jane
https://github.com/configuresystems/jane
3a23ad2b9a61036be479064dd9a0c1e6cc301aef
d69b5a4c454ae90cb75858f62f789762b9292f68
refs/heads/master
2020-04-15T23:39:47.875348
2014-08-17T19:59:43
2014-08-17T19:59:43
21,185,752
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Flask application bootstrap for the "jane" project: create the app,
# load configuration, and bind the SQLAlchemy database handle.
from flask import Flask
# FIX: the "flask.ext.*" import shim was deprecated in Flask 0.11 and
# removed in Flask 1.0; import the extension package directly. The
# direct form works on all Flask versions the extension supports.
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
# NOTE(review): debug mode is hard-coded on — disable before deploying
# to production (it exposes the interactive debugger).
app.debug = True
app.config.from_object('config')
db = SQLAlchemy(app)

#if not app.debug:
#    import logging
#    from logging.handlers import RotatingFileHandler
#    error = RotatingFileHandler('tmp/error.log', 'a', 1 * 1024 * 1024, 10)
#    error.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
#    app.logger.setLevel(logging.INFO)
#    error.setLevel(logging.INFO)
#    app.logger.addHandler(error)
#    app.logger.info('Jane - syncing with SkyNet')

# Imported at the bottom deliberately: these modules import `app`/`db`
# from this package, so a top-of-file import would be circular.
from app.core import api_views, web_views, models
UTF-8
Python
false
false
2,014
4,561,255,313,930
c161ca351cec8946b9061494c6f570aef1fa661c
7e37f6e4fe873496fd769e47ac926bd8863e2524
/python/maya/site-packages/amTools/rigging/shoulderSetup.py
7dd4dbab9672529834a07c5be74d806d07fb2cad
[ "BSD-3-Clause" ]
permissive
0xb1dd1e/PipelineConstructionSet
https://github.com/0xb1dd1e/PipelineConstructionSet
4b585881abfbc6c9209334282af8745bbfeb937b
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
refs/heads/master
2021-01-18T00:53:32.987717
2014-03-11T22:44:39
2014-03-11T22:44:39
17,955,574
2
1
null
null
null
null
null
null
null
null
null
null
null
null
null
""" A GUI to automatically create the twist structure for an upper arm. \b Requirements: AM_ShoulderConstraint.py To use this tool, select one or more elbow joints and enter the desired data into the option fields before pressing either the Create or Apply button. To skin the model, the arm mesh should be skinned in segments for each twist joint, plus one additional for the shoulder joint, where the segment at the base of the shoulder is skinned to the first twist joint and the final segment is skinned to the shoulder joint. \par Setup Shoulder Options: - \b Suffix \b of \b New \b Twist \b Joints: Specifies the base naming suffix to apply to the newly created joints. Their prefix will match the shoulder on which they are twisting and they will also be numbered from 1 to n. - \b Number \b of \b Twist \b Joints: Specifies the number of twist joints to create for each shoulder. You must create at least one and the first will always have the shoulder constraint applied to it. \par Shoulder Constraint Options: - \b Spine \b Object: Specifies the name of the object to use for computing the shoulder's elevation angle. The shoulder constraint is designed with the expectation that this is the terminal spine node that is the most direct parent of the shoulder joints (i.e. the ribcage). Though this will produce perfectly valid values if any intermediate joints exist (collar bone, scapula), such an intermediate joint could be used instead, provided that the axes given for the spine node (below) are transformed into the intermediate joint\'s local space.' - \b Raised \b Angle \b Offset: Specifies the amount that the first twist joint's up-vector constraint rotates back when the shoulder is raised. A value between 0 and 90 is ideal and should eliminate flipping in a normal human range of motion. The default value of 45 is recommended in most cases.' - \b Shoulder \b Aim \b Axis: Corresponds to the axis in the upper arm's local space that aims toward the elbow joint. 
- \b Shoulder \b Front \b Axis: Corresponds to the axis in the upper arm's local space that points toward the character's front. - \b Spine \b Aim \b Axis: Corresponds to the axis in the specified spine joint's local space that aims toward the next vertebra (up). - \b Spine \b Front \b Axis: Corresponds to the axis in the specified spine joint's local space that aims toward the character's front. \b Creation \b Info: \b Donations: http://adammechtley.com/donations/ \b License: The MIT License Copyright (c) 2011 Adam Mechtley (http://adammechtley.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\namespace amTools.rigging.shoulderSetup
"""

import sys
import maya.cmds as cmds
import amTools.utilities as utils
import amTools.utilities.ui as amui

# verify requirements: abort early if the AM_ShoulderConstraint plug-in
# is unavailable, since doSetup() calls cmds.am_shoulderConstraint.
utils.plugins.verifyPlugin('AM_ShoulderConstraint', __file__)

## options window name
kSetupOptionsWindow = 'am_setupShoulderOptionsWindow'
## name of the tool
kToolName = 'Setup Shoulder'
## current version of the tool
kVersionNumber = '1.05'
## date of current version
kVersionDate = '2011.03.27'


def menuItem(*args):
    """This function calls optionsWindow() from a menu item"""
    optionsWindow()


def optionsWindow():
    """This function creates an options window for creating the shoulder twist
    structure. When executing it, select the elbows in the arms you are setting
    up, then press Create or Apply."""
    # create the main interface (delete any stale copy of the window first)
    if cmds.window(kSetupOptionsWindow, q=True, ex=True):
        cmds.deleteUI(kSetupOptionsWindow)
    mainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))

    # build the menu bar
    cmds.menu(label='Help')
    amui.helpMenuItem(kToolName, __file__)
    amui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)

    mainForm = cmds.formLayout(nd=100)

    # build the section to get information about the new twist joints
    if_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')
    if_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)

    # position the input fields for the twist joints
    cmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])
    cmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])

    # build the section to get information for the shoulder constraint
    # NOTE(review): eval() is used here to splice a version-dependent
    # frameLayout flag string (amui.__frameAlignCenter__) into the call;
    # the input is module-internal, not user data.
    constraintFrame = eval('cmds.frameLayout(collapsable=True, label="Shoulder Constraint Options:" %s)'%(amui.__frameAlignCenter__))
    constraintForm = cmds.formLayout(nd=100)

    # attempt to guess what the spine is if there is a selection when the GUI is created
    spineText = 'CenterSpine'
    sel = cmds.ls(sl=True, l=True, type='transform')
    if sel and len(sel) > 0: # BUG: in Maya 8.5, a selection of length 0 returns None rather than an empty list
        try:
            # walk up elbow -> shoulder -> collar -> spine
            shoulder = cmds.listRelatives(sel[0], p=True, f=True) # just use the first elbow in the selection
            collar = cmds.listRelatives(shoulder[0], p=True, f=True)
            spine = cmds.listRelatives(collar[0], p=True, f=True)
            spineText = spine[0]
        except:
            # best-effort guess only; fall back to the default name
            pass

    if_spine = cmds.textFieldGrp(label='Spine Object:', tx=spineText)
    if_raisedAngleOffset = cmds.floatSliderGrp(v=45, min=0, max=90, fmn=-180, fmx=180, label='Raised Angle Offset:', field=True)
    if_shoulderAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Shoulder Aim Axis:')
    if_shoulderFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Shoulder Front Axis:')
    if_spineAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Spine Aim Axis:')
    if_spineFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Spine Front Axis:')

    # position the input fields for the shoulder constraint
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_spine, 'left', 30), (if_spine, 'top', 5)], attachNone=[(if_spine, 'right'), (if_spine, 'bottom')])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_raisedAngleOffset, 'left', 30)], attachNone=[(if_raisedAngleOffset, 'right'), (if_raisedAngleOffset, 'bottom')], attachControl=[(if_raisedAngleOffset, 'top', 5, if_spine)])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_shoulderAimAxis, 'left', 30)], attachNone=[(if_shoulderAimAxis, 'right'), (if_shoulderAimAxis, 'bottom')], attachControl=[(if_shoulderAimAxis, 'top', 5, if_raisedAngleOffset)])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_shoulderFrontAxis, 'left', 30)], attachNone=[(if_shoulderFrontAxis, 'right'), (if_shoulderFrontAxis, 'bottom')], attachControl=[(if_shoulderFrontAxis, 'top', 5, if_shoulderAimAxis)])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_spineAimAxis, 'left', 30)], attachNone=[(if_spineAimAxis, 'right'), (if_spineAimAxis, 'bottom')], attachControl=[(if_spineAimAxis, 'top', 5, if_shoulderFrontAxis)])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_spineFrontAxis, 'left', 30)], attachNone=[(if_spineFrontAxis, 'right'), (if_spineFrontAxis, 'bottom')], attachControl=[(if_spineFrontAxis, 'top', 5, if_spineAimAxis)])

    cmds.setParent('..') # go up to constraintForm
    cmds.setParent('..') # go up to mainForm

    # position the frame for the shoulder constraint
    cmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])

    # create the buttons to execute the script; the command string embeds the
    # control names so doOptions() can query their current values on click
    cmd_create='amTools.rigging.shoulderSetup.doOptions ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s")'%(if_suffixName, if_numberTwistJoints, if_spine, if_raisedAngleOffset, if_shoulderAimAxis, if_shoulderFrontAxis, if_spineAimAxis, if_spineFrontAxis)
    utils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)

    cmds.showWindow(mainWindow)


def doOptions(input_suffix, input_numberTwistJoints, input_spine, input_raisedAngleOffset, input_shoulderAimAxis, input_shoulderFrontAxis, input_spineAimAxis, input_spineFrontAxis):
    """This is the function called when the apply or create button is clicked.

    Each input_* argument is the name of a UI control created by
    optionsWindow(); the current values are queried here and forwarded to
    doSetup() once per selected elbow joint."""
    try:
        # validate selection
        selection = utils.dg.validateSelection(type='transform', name='elbow joint objects', min=1)
        # validate suffix
        suffix = cmds.textFieldGrp(input_suffix, q=True, tx=True)
        utils.dg.validateAffix(suffix)
        # validate spine
        spine = cmds.textFieldGrp(input_spine, q=True, tx=True)
        utils.dg.verifyNode(spine)
        # set up the shoulder
        numberTwistJoints = cmds.intSliderGrp(input_numberTwistJoints, q=True, v=True)
        newSelection = []
        # perform setup for each elbow in the selection
        for elbow in selection:
            shoulder = cmds.listRelatives(elbow, p=True, f=True)
            shoulderShort = cmds.listRelatives(elbow, p=True)
            newJoints = doSetup(shoulderShort[0] + suffix, numberTwistJoints, elbow, shoulder[0], spine, cmds.floatSliderGrp(input_raisedAngleOffset, q=True, v=True), cmds.floatFieldGrp(input_shoulderAimAxis, q=True, v=True), cmds.floatFieldGrp(input_shoulderFrontAxis, q=True, v=True), cmds.floatFieldGrp(input_spineAimAxis, q=True, v=True), cmds.floatFieldGrp(input_spineFrontAxis, q=True, v=True))
            newSelection += newJoints
        # select the newly created joints for easy editing
        cmds.select(newSelection)
    except:
        raise


def doSetup(baseName, numberTwistJoints, elbow, shoulder, spine, raisedAngleOffset, shoulderAimAxis, shoulderFrontAxis, spineAimAxis, spineFrontAxis):
    """This function creates the new twist joints and returns a list of their
    names. The first twist joint receives the am_shoulderConstraint; the
    remaining joints are distributed along the shoulder->elbow translation and
    orient-constrained between the first twist joint and the shoulder."""
    try:
        # validate baseName
        utils.dg.validateNodeName(baseName)
        # validate incoming object names
        utils.dg.verifyNode(elbow)
        utils.dg.verifyNode(shoulder)
        utils.dg.verifyNode(spine)
        # get the translation value for the elbow
        elbowTranslate = cmds.getAttr('%s.translate'%elbow)[0]
        # see if there is a side label
        bodySide = cmds.getAttr('%s.side'%shoulder)
        # find out what rotate order the shoulder is using
        rotateOrder = cmds.getAttr('%s.rotateOrder'%shoulder)
        # create the twist joints
        twistJoints = []
        for i in range(numberTwistJoints):
            cmds.select(cl=True)
            newJoint = cmds.joint(name='%s%s'%(baseName, i + 1))
            # set up the first joint
            if i == 0:
                newJoint = cmds.parent(newJoint, shoulder)[0]
                jointRadius = 1.0
                # NOTE(review): jointOrient is assigned but never used here.
                jointOrient = []
                if cmds.objectType(shoulder, isType='joint'):
                    jointRadius = cmds.getAttr('%s.radius'%shoulder) * 0.5
                cmds.setAttr('%s.radius'%newJoint, jointRadius)
                cmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)
                cmds.setAttr('%s.translate'%newJoint, 0,0,0)
                # create the shoulder constraint
                cmds.am_shoulderConstraint(newJoint, spineObject=spine, shoulderObject=shoulder, rao=raisedAngleOffset, sha=shoulderAimAxis, shf=shoulderFrontAxis, spa=spineAimAxis, spf=spineFrontAxis)
            # set up the rest of the joints
            else:
                newJoint = cmds.parent(newJoint, shoulder)[0]
                # jointRadius carries over from the i == 0 iteration
                cmds.setAttr('%s.radius'%newJoint, jointRadius)
                cmds.setAttr('%s.jointOrient'%newJoint, 0,0,0)
                pct = float(i)/float(numberTwistJoints)
                cmds.setAttr('%s.translate'%newJoint, elbowTranslate[0]*pct, elbowTranslate[1]*pct, elbowTranslate[2]*pct)
                # create the orient constraint, weighted between the first
                # twist joint and the shoulder according to position
                orientConstraint = cmds.orientConstraint([twistJoints[0], shoulder, newJoint])
                # NOTE(review): this query does not name the constraint node —
                # it presumably operates on the just-created constraint via
                # selection; confirm against the cmds.orientConstraint docs.
                targetWeights = cmds.orientConstraint(q=True, weightAliasList=True)
                cmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[0]), numberTwistJoints - i)
                cmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[1]), i)
                cmds.setAttr('%s.interpType'%orientConstraint[0], 1)
            # set label and rotate order
            cmds.setAttr('%s.side'%newJoint, bodySide)
            cmds.setAttr('%s.type'%newJoint, 18)
            cmds.setAttr('%s.otherType'%newJoint, 'Shoulder Twist %s'%(i + 1), type='string')
            cmds.setAttr('%s.rotateOrder'%newJoint, rotateOrder)
            # add the new joint to the list to return
            twistJoints.append(newJoint)
        return twistJoints;
    except:
        raise
UTF-8
Python
false
false
2,014
10,402,410,804,797
2b5f72920a2a6ae0476c14d28647f9353725a0bc
f31285f1adf3a0c83120c2a8f91ac5ef6f5f0b8b
/extract_alignments_psl.py
f2198dab87e2a2460e88631e98cfd68bc44789ee
[ "BSD-2-Clause" ]
permissive
RobinQi/BioUtils
https://github.com/RobinQi/BioUtils
1596cc1cec382567459d13d9e57a97099dbce76d
72693760620b8afb1797fd9f23e1540a194ef929
refs/heads/master
2021-01-13T06:12:55.111042
2014-07-21T18:02:45
2014-07-21T18:02:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''Extract only alignments of sequences in a list file.

Usage: extract_alignments_psl.py <psl file> <list file> [query|target]

Echoes to stdout every PSL alignment row whose query name (field 9 of
the whitespace-split row, the default) or target name (field 13)
appears in the list file (one name per line).
'''
import sys

pslfile = sys.argv[1]
listfile = sys.argv[2]

# Optional third argument selects which PSL field to match on.
try:
    seqtype = sys.argv[3]
except IndexError:
    seqtype = 9  # default: match on the query name field
else:
    if seqtype == 'query':
        seqtype = 9
    elif seqtype == 'target':
        seqtype = 13
    else:
        # FIX: corrected the misspelled message ('Unregconized') and
        # replaced the Py2-only "print >>" with an equivalent write.
        sys.stderr.write('Unrecognized sequence type.\n')
        raise SystemExit

# Load the wanted sequence names; 'with' closes the file promptly
# (the original leaked both file handles).
with open(listfile) as names:
    sequences = set(line.strip() for line in names)

# Stream the PSL file and emit matching rows unchanged.
with open(pslfile) as alignments:
    for align in alignments:
        if align.split()[seqtype] in sequences:
            # 'align' still ends with its newline, so write it verbatim
            # (equivalent to the original "print align," in Python 2).
            sys.stdout.write(align)
UTF-8
Python
false
false
2,014
10,960,756,579,767
e7dc2ee01c3fb99b4754a679dccf7a5d956c2e1e
de0d7e99d970d75b5356744bacaf7f44fcc731a8
/bin/legacy/tremor_interface.py
e5419863191e4d24c0c7b4df957fcb7f04c2f774
[]
no_license
Valvador/NEEShubloader
https://github.com/Valvador/NEEShubloader
a3a9e6f072def12423d4516e560974a7f4a744da
a1007cfe37b895d38191d9380ba42b01d45430b3
refs/heads/master
2020-05-20T04:24:30.381117
2013-03-20T21:58:03
2013-03-20T21:58:03
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#THIS WILL BE USED TO INTERFACE DIRECTLY WITH THE TREMOR FILE SERVER
import os
import datetime
import caching
import hub_interface as bhi
import ucsbsql_interface as bui
import neesftp_interface as bni
import report
import utils
import nees_logging
import time
# config supplies the experiment_path/experiment_id/cfg_* names used below
from config import *

#-------------------------------------------------------------------------------------------------------------------------------------------
# I. HIGH LEVEL FUNCTIONS
#-------------------------------------------------------------------------------------------------------------------------------------------
#
# I. A. Trial Placement Procedures
#

def check_cache_place_trials(expnum, start_time, end_time):
    # NOTE(review): the original docstring ends mid-sentence.
    '''Executes default trial structuring while at the same time creating a new cache file to make sure that no'''
    nees_logging.log_current_time(neeshub_log_filename)
    caching.create_hub_cache(expnum)
    place_trials_default(expnum, start_time, end_time)

def place_trials_default(expnum, start_time, end_time, verbose=False):
    '''This is going to be the primary way of moving processed data from it's proper location
    to the PEN tool's subfolder. As long as the data is organized with our standard format
    where the metadata is located on the mysql database, this will handle all the uploading.

    WARNING: Currently this will not realize if you've pointed it to a folder that it
    already uploaded.'''
    destination = experiment_path[expnum]
    current_trial = utils.find_last_trial(expnum) + 1
    neeshub = bhi.conn
    mysqldb = bui.conn
    existing_evid_dict = caching.load_evid_dictionary(expnum)
    event_data_dicts = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites = mySQL_sitedef[expnum])
    default_folder = mysqldb.retrieve_data_folder()
    # Look at every event in the database between time constraints.
    for event in event_data_dicts:
        site_evt_number = event[cfg_evt_siteEvt]
        site_evt_time = event[cfg_evt_time]
        site_event_id = event[cfg_evt_evid]
        site_event_dist = event[cfg_evt_dist]
        site_event_ml = event[cfg_evt_ml]
        file_data_dicts = mysqldb.retrieve_file_location(site_evt_number,mySQL_stadef[expnum])
        # If this event has already been uploaded, report it and skip this event.
        if site_event_id in existing_evid_dict.values():
            nees_logging.log_existing_evid(site_event_id)
            continue
        # Don't do anything if there's no data
        if file_data_dicts == []:
            continue
        # Generate file structure on NEEShub and local system.
        description = utils.generate_description(event)
        trialtitle = datetime.datetime.utcfromtimestamp(site_evt_time).strftime(default_time_format)
        trial_doc_folder = "%sTrial-%s/Documentation/" % (destination, current_trial)
        report_name = 'report.csv'
        caching.update_all_cache_dictionaries(expnum, current_trial, site_event_id, site_event_ml, site_event_dist)
        utils.generate_trial_structure(destination, current_trial)
        report.create_report(trial_doc_folder, event)
        neeshub.post_full_trial(experiment_id[expnum], trialtitle, description, current_trial)
        # Find and move every file within an event to the created file structure.
        move_datafiles(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder, expnum)
        upload_and_post_report(expnum, current_trial, trial_doc_folder, report_name)
        # Move on to next trial for further processing.
        nees_logging.log_goto_nextline(neeshub_log_filename)
        current_trial += 1

def move_datafiles(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder, expnum):
    '''Moves datafile from mysql-descripted location to file structure.
    file_            = mySQL-created dictionary that hold data location info.
    event            = Dictionary containing event information for the event that "file_" belongs to.
    destination      = Location of Experiment file structure as defined by configuration
    current_trial    = Actually is the trial number that is being worked on.
    trial_doc_folder = Location of Documentation Files for the event.
    '''
    mysqldb = bui.conn
    julian_folder = datetime.datetime.utcfromtimestamp(event['time']).strftime(data_retrieval_time_format)
    # Upload every file associated with event.
    for file_ in file_data_dicts:
        filename = file_[cfg_fl_dfile]
        oscommand_source = "%s%s%s" % (default_folder, julian_folder, filename)
        oscommand_destination = "%sTrial-%s/Rep-1/" % (destination, current_trial)
        pubChan = "%s_%s_%s" % (file_[cfg_fl_net], file_[cfg_fl_sta], file_[cfg_fl_chan])
        channel_data_dict = mysqldb.retrieve_channel_position(pubChan, event[cfg_evt_time])
        file_extensions = utils.find_extensions(oscommand_source)
        report.append_report_if_valid(trial_doc_folder, file_, channel_data_dict, event[cfg_evt_evid])
        utils.copy_file_exts(oscommand_source, oscommand_destination, file_extensions)
        upload_and_post(expnum, current_trial, oscommand_destination, filename, file_extensions)

#
#TODO: THE BELOW ARE TOO MUCH DUPLICATE CODE, I need a generic UPLOAD and MULTI-PART POST that has FILE subroutines.
#

def upload_and_post(expnum, trial_number, source_folder, filename, extensions, selector = http_file_path):
    '''This function is designed to be used in the upload process within the move_datafiles
    function. The filename has to be specified without extension, and the extension has to
    be specified separately.'''
    for extension in extensions:
        full_source_folder = source_folder + cfg_hub_ext_fold[extension] + '/'
        bhi.ftpconn.upload_file(full_source_folder, filename, extension)
        bhi.conn.multipart_post(filename, expnum, trial_number, extension, selector)

def upload_and_post_report(expnum, trial_number, source_folder, filename, selector = http_file_path):
    '''DUPLICATE CODE, SHOULD RESOLVE GENERIC MULTIPART GENERATOR REGARDLESS OF FILETYPE'''
    source_path = source_folder + filename
    bhi.ftpconn.upload_to_project(filename, source_path)
    bhi.conn.multipart_post_generic(filename, expnum, trial_number, selector)

#
# I. B. Update Report Only
#

def place_reports_only(expnum, start_time, end_time):
    '''Used in the case that the log gives warning that individual channel information
    was missing. This allows the used to re-create the report.csv files without having
    to completely re-do the upload process.'''
    destination = experiment_path[expnum]
    mysqldb = bui.conn
    event_data_dicts = mysqldb.retrieve_event_description(start_time, end_time, list_of_sites = mySQL_sitedef[expnum])
    default_folder = mysqldb.retrieve_data_folder()
    for event in event_data_dicts:
        site_evt_number = event[cfg_evt_siteEvt]
        site_event_id = event[cfg_evt_evid]
        file_data_dicts = mysqldb.retrieve_file_location(site_evt_number,mySQL_stadef[expnum])
        # Look up which trial this event was originally uploaded as.
        current_trial = caching.trial_num_from_evid(expnum, site_event_id)
        trial_doc_folder = "%sTrial-%s/Documentation/" % (destination, current_trial)
        report.create_report(trial_doc_folder, event)
        create_filereports(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder)

def create_filereports(file_data_dicts, event, destination, current_trial, trial_doc_folder, default_folder):
    # Append one report row per data file of this event (no upload).
    mysqldb = bui.conn
    for file_ in file_data_dicts:
        pubChan = "%s_%s_%s" % (file_[cfg_fl_net], file_[cfg_fl_sta], file_[cfg_fl_chan])
        channel_data_dict = mysqldb.retrieve_channel_position(pubChan, event[cfg_evt_time])
        report.append_report_if_valid(trial_doc_folder, file_, channel_data_dict, event[cfg_evt_evid])

#
# TODO: THIS IS A ONE OFF PROCESSING SYSTEM
#       THIS WORKS ON EMILY'S AND TIM'S CODE. WE NEED TO FIND A WAY TO INTEGRATE IT
#       INTO A STANDARD SYSTEM. "lengthofstuff=10" IS A BIG NO-NO. THIS IS WAY TO TITLE
#       DEPENDENT.
#

def place_trials(filepath, expnum, lengthofstuff=10):
    '''This uses the "utils.find_last_trial" function to analyze the the destination
    folder for it's Trial content. Based on that information, it will take the files
    from the given "filepath" and place them in proper Trial locations into the
    destination. The third variable, 'lengthofstuff' is defaulted at 8, and is used
    to compare whether the events happened on the same day, allowing this function
    to differential between different Trials.

    WARNING: Currently this will not realize if you've pointed it to a folder that
    it already uploaded.'''
    # NOTE(review): the docstring says lengthofstuff defaults to 8, but the
    # signature defaults to 10 — confirm which is intended.
    destination = experiment_path[expnum]
    previous = ''
    current_trial = utils.find_last_trial(expnum)
    pathlist = sorted(os.listdir(filepath))
    neeshub = bhi.conn
    for f in pathlist:
        # A new filename prefix means a new event, hence a new trial.
        if previous != f[0:lengthofstuff]:
            current_trial += 1
            precommand = "mkdir -p %sTrial-%s/Rep-1/Derived_Data" % (destination,current_trial)
            os.system(precommand)
            trialtitle = utils.get_trial_title(f, expnum) #Gives Julian Date: Year-Day
            description = experiment_description[expnum]
            neeshub.post_full_trial(experiment_id[expnum], trialtitle, description)
        command = "cp %s/%s %sTrial-%s/Rep-1/Derived_Data" % (filepath, f, destination, current_trial)
        os.system(command) #Places next Trial folder.
        previous = f[0:lengthofstuff]
    return current_trial

#
# DEBUGGING, REMOVE WHEN FINISHED
#
#The following is kept for troubleshooting purposes. This was before using %s formatting in my strings. If those methos fail use the ones below.

def place_trials_no_hub(filepath, expnum, lengthofstuff=8):
    '''This uses the "utils.find_last_trial" function to analyze the the destination
    folder for it's Trial content. Based on that information, it will take the files
    from the given "filepath" and place them in proper Trial locations into the
    destination.

    WARNING: Currently this will not realize if you've pointed it to a folder that
    it already uploaded.'''
    destination = experiment_path[expnum]
    previous = ''
    trialscreated = 0
    current_trial = utils.find_last_trial(destination)
    pathlist = sorted(os.listdir(filepath))
    for f in pathlist:
        if previous != f[0:lengthofstuff]:
            precommand = "mkdir -p " +destination+"Trial-"+str(current_trial)+"/Rep-1/Derived_Data"
            os.system(precommand)
        command = "cp "+filepath+"/"+f+" "+destination+"Trial-"+str(current_trial)+"/Rep-1/Derived_Data"
        os.system(command)
        # NOTE(review): unlike place_trials(), the trial counter advances when
        # the prefix REPEATS — looks inverted relative to place_trials; confirm.
        if previous == f[0:lengthofstuff]:
            current_trial += 1
            trialscreated += 1
        previous = f[0:lengthofstuff]
    return trialscreated
UTF-8
Python
false
false
2,013
14,010,183,327,518
bfbb7d365c37fc4f1e397ada0836a81f14649cb4
46a68635e05e3784069e333ef1b1727a7d68f616
/Lab Sheet 3/T5.py
81373ef5aa78317dab1f00dd984a6a685c35135b
[]
no_license
theref/Computing-For-Mathematics
https://github.com/theref/Computing-For-Mathematics
4190d178550f6d302eaae928d089a39e0d6ceecb
6bb7d8ec985375f04e6352701b4f8828d7199c6f
refs/heads/master
2016-09-02T02:18:16.245986
2013-12-12T18:40:38
2013-12-12T18:40:38
13,867,063
0
1
null
false
2023-08-22T10:07:29
2013-10-25T17:17:43
2013-12-12T18:39:03
2023-08-22T10:07:29
1,260
1
1
1
TeX
false
false
# Report the (zero-based) position of the value 4558 in a CRLF-delimited
# list of integers stored in W04_D01.txt.
# FIX: use a context manager so the file handle is closed (the original
# leaked it), and skip empty fields so a trailing newline cannot crash
# int('') — behavior is otherwise unchanged for well-formed input.
with open('W04_D01.txt', 'r') as f:
    raw = f.read()
numbers = [int(x) for x in raw.split('\r\n') if x]
print(numbers.index(4558))
UTF-8
Python
false
false
2,013
18,966,575,582,615
2fc90870a0c2d80f89c89bf31d906a62096ca561
e8583f018de761273f64d5d4e202fdfb0d30d4fd
/projectmanager/urls.py
2e45608399b28653ca1bfcf75982ec19d47842b9
[]
no_license
QA-long/django-projectmanager
https://github.com/QA-long/django-projectmanager
2d74c4f78fc8337f6f33c303232dd65f84c63144
90f890cc51c4f850528b1e2a1600d49da4ec716f
refs/heads/master
2021-01-17T23:25:40.793582
2013-05-09T23:57:54
2013-05-09T23:57:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls.defaults import *

# URL routes for the projectmanager app. Old-style (pre-Django-1.8) urlconf:
# views are given as strings resolved against the 'projectmanager.views'
# prefix. Pattern order matters — do not reorder.
urlpatterns = patterns('projectmanager.views',
    # Calendar UI for project time entries.
    (r'^calendar/$', 'project_time_calendar'),
    # AJAX endpoints backing the calendar.
    (r'^api/time/list/', 'api_project_time_list'),
    (r'^api/time/add/', 'api_project_time_add'),
    (r'^api/time/edit/', 'api_project_time_edit'),
    (r'^api/time/move/', 'api_project_time_move'),
    # Task lists: default view, by numeric id, or the literal 'all'.
    (r'^tasks/$', 'tasks'),
    (r'^tasks/(\d+)/$', 'tasks'),
    (r'^tasks/(all)/$', 'tasks'),
    # Invoices and quotes by id; the .pdf variants capture 'pdf' as a
    # second group, presumably selecting PDF rendering — confirm in views.
    (r'^invoice/(\d+)/$', 'invoice'),
    (r'^invoice/(\d+)/.+\.(pdf)$', 'invoice'),
    (r'^quote/(\d+)/$', 'quote'),
    (r'^quote/(\d+)/.+\.(pdf)$', 'quote'),
    # Itemised project-time summary for one project.
    (r'^itemise/(\d+)/$', 'projecttime_summary'),
    (r'^create_invoice_for_project/(\d+)/$', 'create_invoice_for_project'),
)
UTF-8
Python
false
false
2,013
19,069,654,803,701
0e4cd6fdc18af9d5e4039f681bdf9ad1a7c6c9b9
35a14aea825e40b6284388827407e17b9e4fd688
/src/Utils.py
7a6cf15e5590cce19062fd223dc1eed12d23cf40
[]
no_license
spatzle/Practice-Battleship
https://github.com/spatzle/Practice-Battleship
77411a1ea950982924cfdce193ed07300ae8dd0f
9b062a12b3b1e958154c6089ab847d0632d6d971
refs/heads/master
2021-01-13T02:03:22.612825
2011-03-23T14:35:32
2011-03-23T14:35:32
1,516,553
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Created on 2011-03-23

@author: joyce
'''
import re


def matched(pattern, string, repl='_._'):
    """Return True if the regex `pattern` matches anywhere in `string`.

    Parameters
    ----------
    pattern : str
        Regular expression to search for.
    string : str
        Text to search.
    repl : str
        Unused; retained for backward compatibility with callers that
        passed a replacement string to the original debugging version.

    Returns
    -------
    bool
    """
    # The original compiled the pattern, branched on the match object, and
    # carried a large commented-out match-scanning loop; a single search
    # with an explicit None check is equivalent and idiomatic.
    return re.search(pattern, string) is not None
UTF-8
Python
false
false
2,011
1,795,296,330,168
bdae7ae6dd6edbc0ff76db670409acf6f98255f8
c1992428b36cd8c5ccd072749e910856f9503fd8
/test_magic_square.py
e949c563c9777f5cf4a3379dece332a1c6f994c2
[]
no_license
croach/magic_squares
https://github.com/croach/magic_squares
f7f102974e44f328ff8c06e7405842bb5157f273
759e8e1b7b18912c6958fcd7964614ecc0a47885
refs/heads/master
2015-07-23T18:22:26
2013-11-11T09:30:45
2013-11-11T09:30:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import random
import unittest

from magic_square import generate, distance, cost, neighbors, solve
from utils import flatten, key


class MagicSquareTestCase(unittest.TestCase):
    """Unit tests for the magic_square module.

    NOTE(review): written for Python 2 -- uses xrange, list-returning range,
    and the deprecated assertEquals alias; a Python 3 port would need
    list(range(...)) and assertEqual.
    """

    def test_generate(self):
        """generate(n) returns an n x n square of unique cells with one empty slot."""
        # Test the lower bounds of the size of a magic square. Squares below the
        # minimum (2x2) should raise a value error, all others should return a
        # square (i.e., a list of lists).
        self.assertEquals(list, type(generate(2)))   # the smallest square allowed
        self.assertRaises(ValueError, generate, 1)   # 1 less than the smallest
        self.assertRaises(ValueError, generate, 0)   # 0 size square
        self.assertRaises(ValueError, generate, -10) # negative size square

        # The default should be a 3x3 square
        DEFAULT_SQUARE_DIMENSIONS = 3
        default_square = generate()
        self.assertEquals(DEFAULT_SQUARE_DIMENSIONS, len(default_square))
        for row in default_square:
            self.assertEquals(DEFAULT_SQUARE_DIMENSIONS, len(row))

        # Create a pool of randomly sized squares to test
        squares = [generate(random.randint(2, 100)) for _ in xrange(10)]
        for square in squares:
            # Make sure all rows in a square are the same length
            self.assertTrue(all(len(row) == len(square[0]) for row in square))

            # Squares should be square, i.e., width == height
            width = len(square)
            height = len(square[0])
            self.assertEquals(width, height)

            # All cells in each square should be unique, consecutive (when sorted),
            # and have only one empty cell (i.e., None)
            self.assertEquals([None] + range(1, width**2), sorted(flatten(square)))

    def test_distance(self):
        """distance(x, y, square) is the Manhattan distance of a tile from its goal."""
        square = [
            [8, 5, 6],
            [2, 1, 4],
            [3, None, 7]
        ]
        self.assertEquals(2, distance(1, 1, square))  # 1 square (up 1, left 1)
        self.assertEquals(1, distance(2, 1, square))  # empty square (right 1)
        self.assertEquals(4, distance(2, 0, square))  # 3 square (up 2, right 2)

    def test_cost(self):
        """cost(square) is 0 for a solved square and grows with disorder."""
        # Perfect square
        perfect_square = [
            [1, 2, 3 ],
            [4, 5, 6 ],
            [7, 8, None]
        ]
        self.assertEquals(0, cost(perfect_square))

        # Pretty good
        good_square = [
            [8, 2, 1 ],
            [4, 6, 5 ],
            [3, 7, None]
        ]
        self.assertEquals(12, cost(good_square))

        # Pretty bad
        bad_square = [
            [None, 8, 7],
            [6, 2, 4],
            [3, 5, 1]
        ]
        self.assertEquals(24, cost(bad_square))

        self.assertTrue(cost(perfect_square) < cost(good_square) < cost(bad_square))

    def test_neighbors(self):
        """neighbors(square) yields every square one empty-tile move away."""
        # Helper: compare neighbor sets via the canonical key() of each square.
        def test_neighbors(square, expected_squares):
            result = set(key(n) for n in neighbors(square))
            expected = set(key(n) for n in expected_squares)
            self.assertEquals(result, expected)

        # Square with the empty tile in the center (4 neighbors)
        center_square = [
            [2, 8, 7],
            [6, None, 4],
            [3, 5, 1]
        ]
        neighboring_squares = [
            [ [2, None, 7], [6, 8, 4], [3, 5, 1] ],
            [ [2, 8, 7], [6, 5 , 4], [3, None, 1] ],
            [ [2, 8, 7], [None, 6, 4], [3, 5, 1] ],
            [ [2, 8, 7], [6, 4, None], [3, 5, 1] ]
        ]
        test_neighbors(center_square, neighboring_squares)

        # Square with the empty tile in a corner (2 neighbors)
        corner_square = [
            [None, 8, 7],
            [6, 2, 4],
            [3, 5, 1]
        ]
        neighboring_squares = [
            [ [8, None, 7], [6, 2, 4], [3, 5, 1] ],
            [ [6, 8, 7], [None, 2, 4], [3, 5, 1] ]
        ]
        test_neighbors(corner_square, neighboring_squares)

        # Square with the empty tile on a side (3 neighbors)
        side_square = [
            [4, 8, 7 ],
            [6, 2, None],
            [3, 5, 1 ]
        ]
        neighboring_squares = [
            [ [4, 8, None], [6, 2, 7 ], [3, 5, 1 ] ],
            [ [4, 8, 7 ], [6, 2, 1 ], [3, 5, None] ],
            [ [4, 8, 7], [6, None, 2], [3, 5, 1] ]
        ]
        test_neighbors(side_square, neighboring_squares)

    def test_solve(self):
        """solve() returns a solution for solvable squares and None otherwise."""
        solvable_square = [
            [None, 1],
            [3, 2]
        ]
        self.assertIsNotNone(solve(solvable_square, False))

        unsolvable_square = [
            [None, 1],
            [2, 3]
        ]
        self.assertIsNone(solve(unsolvable_square, False))
UTF-8
Python
false
false
2,013
506,806,180,402
60a4993e77fad1abe7d5ed1648ebe079ada5d28c
e443a0b5b31f454c7f7f5f96f1bd26d8fa144e26
/World_crisis_data_base/phase_I/WCDB.py
7311c869955073d16376ab74642bcb1eed32f844
[]
no_license
XiaoqinLI/Element-software-engineering-and-design-of-database
https://github.com/XiaoqinLI/Element-software-engineering-and-design-of-database
0aeb6aa63fa68b31d7f8fec0b85faac5bef1db1b
c9b3d1ca28cebfba02efcdb98a9282e462fd1dfb
refs/heads/master
2021-01-19T05:22:14.536760
2014-04-22T14:44:16
2014-04-22T14:44:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# ---------------------------------------------------------------------------
# projects/WCDB(phase1)/WCDB.py
# Author: Xiaoqin LI
# Description:
#   Create an import/export facility from the XML into Element Tree and back.
#   The input is guaranteed to have validated XML.
#   The import facility must import from a file.
#   The export facility must export to a file.
#   Import/export the XML on only the ten crises, ten organizations,
#   and ten people of the group.
# Date: 03/09/2014
# ----------------------------------------------------------------------------

# -------
# imports
# -------

import sys
import xml.etree.ElementTree as ET

# Synthetic root element wrapped around the input so that a file containing
# several top-level elements still parses as one well-formed XML document.
_ROOT_OPEN = "<WorldCrises>"
_ROOT_CLOSE = "</WorldCrises>"

# ----------
# wcdb_read
# ----------

def wcdb_read (r) :
    """Read XML fragments from the file-like reader `r`.

    Wraps the whole input in a synthetic <WorldCrises> root so multiple
    top-level elements parse as a single tree.

    :param r: readable text file-like object containing validated XML
    :return: the root xml.etree.ElementTree.Element (tag "WorldCrises")
    """
    # r.read() already returns a str; the original "".join(r.read()) wrapper
    # was a character-by-character no-op and has been dropped.
    imported_str_data = _ROOT_OPEN + r.read() + _ROOT_CLOSE
    assert(type(imported_str_data) is str)
    data_tree = ET.fromstring(imported_str_data)
    assert(type(data_tree) is ET.Element)
    return data_tree

# ----------
# wcdb_write
# ----------

def wcdb_write (w, data_tree):
    """Serialize `data_tree` and write it to the writer `w`.

    Strips the synthetic <WorldCrises> wrapper added by wcdb_read so the
    output round-trips to the original input text.

    :param w: writable text file-like object
    :param data_tree: ElementTree Element rooted at the synthetic wrapper
    """
    data_exported_string = ET.tostring(data_tree, encoding = "unicode", method = "xml")
    # Slice off the wrapper tags by their actual lengths (the original used
    # the magic numbers [13:-14], which silently depended on the tag names).
    data_exported_string = data_exported_string[len(_ROOT_OPEN):-len(_ROOT_CLOSE)]
    assert(type(data_exported_string) is str)
    w.write(data_exported_string)

# ----------
# wcdb_solve
# ----------

def wcdb_solve (stdin, stdout) :
    """Round-trip: read XML from `stdin` into a tree, then write it to `stdout`.

    :param stdin: a reader
    :param stdout: a writer
    """
    imported_tree = wcdb_read (stdin)
    wcdb_write (stdout, imported_tree)
UTF-8
Python
false
false
2,014
7,043,746,375,605
4cbd3c90444fffc0391ec96d100ac0b3300771e9
739bb203a7f958a44c0f099bc829149d73d44f77
/tests/settings_tests.py
d0d5502d6fb66300ace739093b3ef6778d008ffe
[ "ISC" ]
permissive
trehn/django-installer
https://github.com/trehn/django-installer
fbc5fdb16227ad3f7f1722a232f08f0dc90ec105
672c988ae33b311125e78202d2d0b3e298281d2c
refs/heads/master
2020-06-07T09:13:18.155326
2014-09-14T07:52:11
2014-11-14T13:48:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Tests for django_installer.settings: each test writes an INI config file,
# points DJANGO_INSTALLER_SETTINGS at it, and re-imports the settings module
# into a fresh namespace to inspect the globals it produces.
try:
    from configparser import SafeConfigParser
except ImportError:
    # Python 2 fallback (module was named ConfigParser)
    from ConfigParser import SafeConfigParser
from os import environ
from tempfile import NamedTemporaryFile
from unittest import TestCase

# Evicts any cached django_installer.settings module, then star-imports it so
# the freshly-computed settings land in the exec() target namespace.
IMPORT_MAGIC = """
import sys
try:
    del sys.modules["django_installer.settings"]
except KeyError:
    pass
from django_installer.settings import *
"""

# Names that the IMPORT_MAGIC snippet itself always leaves in the namespace.
IMPORTED_GLOBALS = ['__builtins__', 'sys']

class BaseURLTest(TestCase):
    def test_base_url(self):
        """A [baseurl] section should yield only ALLOWED_HOSTS."""
        tmpfile = NamedTemporaryFile()
        config = SafeConfigParser()
        config.add_section("baseurl")
        config.set("baseurl", "url", "https://www.example.com/foo")
        config.write(tmpfile)
        tmpfile.flush()
        environ["DJANGO_INSTALLER_SETTINGS"] = tmpfile.name
        env = {}
        exec(IMPORT_MAGIC, env)
        # NOTE(review): comparing dict.keys() to a list only works on
        # Python 2 (Python 3 returns a view) -- confirm the targeted runtime.
        self.assertEqual(env.keys(), IMPORTED_GLOBALS + ['ALLOWED_HOSTS'])
        self.assertEqual(env['ALLOWED_HOSTS'], ("www.example.com",))

class DatabaseTest(TestCase):
    def test_database(self):
        """A [database] section should yield a complete DATABASES['default'] dict."""
        tmpfile = NamedTemporaryFile()
        config = SafeConfigParser()
        config.add_section("database")
        config.set("database", "engine", "django.db.backends.mysql")
        config.set("database", "host", "db.example.com")
        config.set("database", "name", "example")
        config.set("database", "password", "secret")
        config.set("database", "port", "3306")
        config.set("database", "user", "jdoe")
        config.write(tmpfile)
        tmpfile.flush()
        environ["DJANGO_INSTALLER_SETTINGS"] = tmpfile.name
        env = {}
        exec(IMPORT_MAGIC, env)
        # NOTE(review): same Python 2-only keys() comparison as above.
        self.assertEqual(env.keys(), IMPORTED_GLOBALS + ['DATABASES'])
        self.assertEqual(env['DATABASES'], {
            'default': {
                'ENGINE': 'django.db.backends.mysql',
                'NAME': 'example',
                'HOST': 'db.example.com',
                'USER': 'jdoe',
                'PASSWORD': 'secret',
                'PORT': '3306',
            }
        })
UTF-8
Python
false
false
2,014
16,200,616,672,758
2417d454f03b44ceb8c310b71967a737b0969db4
fb9116de4f8536fda86397379c2ed64086e3681c
/hmm.py
30448a5537fb94966cfc70fa8c6d4b4102b3ad22
[ "GPL-2.0-only" ]
non_permissive
pbouda/gesturefollower
https://github.com/pbouda/gesturefollower
dfbbc2031b1e4bc234b7da61f469f9db144ffcfe
a43e2b0e8cb983871b5e9a3e3c927b7a4fb82336
refs/heads/master
2021-01-16T17:49:07.494015
2014-09-19T12:43:43
2014-09-19T12:43:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import math
import scipy.stats as st
import numpy as np
import matplotlib.pyplot as plt

# Fixed left-to-right transition probabilities: stay, advance 1, advance 2.
A1 = 1./3.
A2 = 1./3.
A3 = 1./3.
# Standard deviation shared by every state's Gaussian observation model
# (stored per-state in self.mu despite the name suggesting a mean).
MU = .2

class HMM:
    # This class converted with modifications from http://kastnerkyle.github.io/blog/2014/05/22/single-speaker-speech-recognition/
    # Implementation of: http://articles.ircam.fr/textes/Bevilacqua09b/index.pdf
    def __init__(self, n_states, reference):
        """Build a left-to-right HMM with one state per reference sample.

        :param n_states: number of hidden states
        :param reference: reference signal, one row per state (indexed [s][0])
        """
        self.n_states = n_states
        self.reference = reference
        #self.random_state = np.random.RandomState(0)

        # Initial state
        # left-to-right HMM, we start with state 1
        self.prior = np.zeros(self.n_states)
        self.prior[0] = 1.

        # Banded transition matrix: each state may stay or skip ahead 1-2 states.
        self.A = np.zeros((self.n_states, self.n_states))
        #self.A = self._stochasticize(self.random_state.rand(self.n_states, self.n_states))
        for i in range(self.n_states):
            self.A[i, i] = A1
            if (i+1) < self.A.shape[1]:
                self.A[i, i+1] = A2
            if (i+2) < self.A.shape[1]:
                self.A[i, i+2] = A3
        # Final state is absorbing.
        self.A[-1, -1] = 1.

        self.mu = np.array([MU]*len(self.reference))

    def _forward(self, B):
        """Normalized forward pass over observation likelihoods B.

        :param B: per-state observation likelihoods, shape (n_states, T)
        :return: (likelihood score, alpha matrix of shape B.shape)

        NOTE(review): despite the variable name, this accumulates the *sum* of
        the per-step normalizers, not their logs -- the np.log line is
        deliberately commented out below. Confirm this scoring is intended.
        """
        log_likelihood = 0.
        T = B.shape[1]
        alpha = np.zeros(B.shape)
        #T = B.shape[1]
        #T = B.shape[0]
        #print(B)
        #alpha = np.zeros((self.n_states, self.n_states, self.reference.shape[1]))
        #for t in range(self.n_states):
        for t in range(T):
            if t == 0:
                #print(B[:, t].shape)
                #print(self.prior.ravel().shape)
                #alpha[t] = (B.transpose(1,0) * self.prior).transpose(1,0)
                alpha[:, t] = B[:, t] * self.prior.ravel()
            else:
                #alpha[t] = B * np.dot(self.A.T, alpha[t-1])
                alpha[:, t] = B[:, t] * np.dot(self.A.T, alpha[:, t - 1])
            # Normalize each column so alpha stays a distribution over states.
            alpha_sum = np.sum(alpha[:, t])
            alpha[:, t] /= alpha_sum
            #log_likelihood = log_likelihood + np.log(alpha_sum)
            log_likelihood = log_likelihood + alpha_sum
        #print(B[:, 3])
        return log_likelihood, alpha

    def _state_likelihood(self, obs):
        """Gaussian density of each observation under each state's model.

        :param obs: observations; made at least 2-D, indexed obs[o][0]
        :return: B of shape (n_states, n_observations)
        """
        obs = np.atleast_2d(obs)
        B = np.zeros((self.n_states, obs.shape[0]))
        for s in range(self.n_states):
            #B[s, :] = st.multivariate_normal.pdf(obs.T, mean=self.mu)
            b = np.zeros(obs.shape[0])
            for o in range(obs.shape[0]):
                b[o] = 0.
                # 1-D Gaussian pdf centered on the reference sample for state s.
                b[o] = (1./(self.mu[s]*math.sqrt(2*math.pi))) * \
                    math.exp( -( (obs[o][0]-self.reference[s][0])**2 / (2*(self.mu[s]**2)) ) )
            #B[s, :] = self._normalize(b)
            B[s, :] = b

            #Needs scipy 0.14
            #B[s, :] = st.multivariate_normal.pdf(obs.T, mean=self.mu[:, s].T, cov=self.covs[:, :, s].T)

            #This function can (and will!) return values >> 1
            #See the discussion here for the equivalent matlab function
            #https://groups.google.com/forum/#!topic/comp.soft-sys.matlab/YksWK0T74Ak
            #Key line: "Probabilities have to be less than 1,
            #Densities can be anything, even infinite (at individual points)."
            #This is evaluating the density at individual points...
        return B

    def _normalize(self, x):
        # Scale x to sum to 1; the (x == 0) term nudges exact zeros to avoid 0/0.
        return (x + (x == 0)) / np.sum(x)

    def _stochasticize(self, x):
        # Row-normalize x. NOTE(review): np.sum(..., axis=1) lacks keepdims, so
        # broadcasting divides column-wise, not row-wise -- verify if ever re-enabled
        # (currently only referenced from commented-out code).
        return (x + (x == 0)) / np.sum(x, axis=1)

if __name__ == "__main__":
    # Reference: two sine bumps separated by silence.
    reference_signal = np.concatenate((
        np.zeros(50),
        np.sin(np.linspace(-np.pi, np.pi, 40)),
        np.zeros(50),
        np.sin(np.linspace(-np.pi, np.pi, 40)),
        np.zeros(50)))

    # Test signal: offset noise followed by a noisy, scaled copy of the reference.
    noise = np.random.normal(0,.1,230)
    offset = .2
    test_signal = np.concatenate((
        np.random.normal(0,.1,70) + offset,
        noise + reference_signal + reference_signal + offset))
    #test_signal = np.concatenate((
    #    np.zeros((50,)),
    #    reference_signal))

    # test signal 2 is just noise
    test_signal2 = np.random.normal(0,1,230)

    # plt.plot(reference_signal)
    # plt.plot(test_signal)
    # plt.plot(test_signal2)
    # plt.show()

    # Column-vector views expected by HMM (indexed [i][0]).
    r = np.reshape(reference_signal, (-1, 1))
    t = np.reshape(test_signal[:150], (-1, 1))
    t2 = np.reshape(test_signal2, (-1, 1))

    # Build HMM based on reference data
    h = HMM(len(r), r)
    B = h._state_likelihood(t)
    B2 = h._state_likelihood(t2)
    B3 = h._state_likelihood(r)

    lik, alpha = h._forward(B)
    # NOTE(review): this loop reuses (shadows) the observation array `t` as a
    # state index; alpha rows are states, so this prints argmax over time per state.
    for t in range(alpha.shape[0]):
        print(np.argmax(alpha[t, :]))
    print("Likelihood for test data: {}".format(lik))
    print("Likelihood for noise data: {}".format(h._forward(B2)[0]))
    print("Likelihood for reference data: {}".format(h._forward(B3)[0]))
UTF-8
Python
false
false
2,014
15,736,760,213,749
e0cc7dfa6cf2e4ae121e84ac080fe796ad64a61d
02822dd303104bb36b220a0d8d21275263057812
/cmd/memtest-cmd.py
0e3cf0c839d6a86402685a8589a0e5c6acf5b1b9
[ "LGPL-2.0-or-later", "BSD-3-Clause", "LGPL-2.0-only", "Python-2.0" ]
non_permissive
zoranzaric/bup
https://github.com/zoranzaric/bup
21bcd21bebecdeca644bafc248577f05a6634966
53ffc4d336b06b3cecac0d817d192d22cb75a1bd
refs/heads/master
2020-12-24T23:29:49.384092
2013-07-31T16:34:29
2013-07-31T16:34:30
826,859
12
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# bup memtest: measure memory usage of pack index lookups by reporting RSS,
# page faults and CPU time after each cycle of object-existence checks.
# NOTE(review): Python 2 source ("except E, e", print statements, xrange).
import sys, re, struct, time, resource
from bup import git, bloom, midx, options, _helpers
from bup.helpers import *

handle_ctrl_c()


_linux_warned = 0
def linux_memstat():
    """Parse /proc/self/status into a {field: value-string} dict.

    Returns {} (and warns once) where the file is unavailable.
    """
    global _linux_warned
    #fields = ['VmSize', 'VmRSS', 'VmData', 'VmStk', 'ms']
    d = {}
    try:
        f = open('/proc/self/status')
    except IOError, e:
        if not _linux_warned:
            log('Warning: %s\n' % e)
            _linux_warned = 1
        return {}
    for line in f:
        # Note that on Solaris, this file exists but is binary. If that
        # happens, this split() might not return two elements. We don't
        # really need to care about the binary format since this output
        # isn't used for much and report() can deal with missing entries.
        t = re.split(r':\s*', line.strip(), 1)
        if len(t) == 2:
            k,v = t
            d[k] = v
    return d


last = last_u = last_s = start = 0
def report(count):
    """Print one stats row; count < 0 prints the header and resets the clock.

    Deltas (user/sys/ms) are measured since the previous report() call.
    """
    global last, last_u, last_s, start
    headers = ['RSS', 'MajFlt', 'user', 'sys', 'ms']
    ru = resource.getrusage(resource.RUSAGE_SELF)
    now = time.time()
    rss = int(ru.ru_maxrss/1024)
    if not rss:
        # ru_maxrss is 0 on some platforms; fall back to /proc.
        rss = linux_memstat().get('VmRSS', '??')
    fields = [rss,
              ru.ru_majflt,
              int((ru.ru_utime - last_u) * 1000),
              int((ru.ru_stime - last_s) * 1000),
              int((now - last) * 1000)]
    fmt = '%9s ' + ('%10s ' * len(fields))
    if count >= 0:
        print fmt % tuple([count] + fields)
    else:
        start = now
        print fmt % tuple([''] + headers)
    sys.stdout.flush()

    # don't include time to run report() in usage counts
    ru = resource.getrusage(resource.RUSAGE_SELF)
    last_u = ru.ru_utime
    last_s = ru.ru_stime
    last = time.time()


optspec = """
bup memtest [-n elements] [-c cycles]
--
n,number=  number of objects per cycle [10000]
c,cycles=  number of cycles to run [100]
ignore-midx  ignore .midx files, use only .idx files
existing  test with existing objects instead of fake ones
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])

if extra:
    o.fatal('no arguments expected')

git.ignore_midx = opt.ignore_midx

git.check_repo_or_die()
m = git.PackIdxList(git.repo('objects/pack'))

# Header row, then a baseline row after one warm-up random_sha() call.
report(-1)
_helpers.random_sha()
report(0)

if opt.existing:
    # Endlessly cycle through the objects already present in the pack index.
    def foreverit(mi):
        while 1:
            for e in mi:
                yield e
    objit = iter(foreverit(m))

for c in xrange(opt.cycles):
    for n in xrange(opt.number):
        if opt.existing:
            bin = objit.next()
            assert(m.exists(bin))
        else:
            bin = _helpers.random_sha()

            # technically, a randomly generated object id might exist.
            # but the likelihood of that is the likelihood of finding
            # a collision in sha-1 by accident, which is so unlikely that
            # we don't care.
            assert(not m.exists(bin))
    report((c+1)*opt.number)

# Per-index search statistics accumulated by the bup modules during the run.
if bloom._total_searches:
    print ('bloom: %d objects searched in %d steps: avg %.3f steps/object'
           % (bloom._total_searches, bloom._total_steps,
              bloom._total_steps*1.0/bloom._total_searches))
if midx._total_searches:
    print ('midx: %d objects searched in %d steps: avg %.3f steps/object'
           % (midx._total_searches, midx._total_steps,
              midx._total_steps*1.0/midx._total_searches))
if git._total_searches:
    print ('idx: %d objects searched in %d steps: avg %.3f steps/object'
           % (git._total_searches, git._total_steps,
              git._total_steps*1.0/git._total_searches))

print 'Total time: %.3fs' % (time.time() - start)
UTF-8
Python
false
false
2,013
824,633,732,765
209aafd1625122c381d277ab4c2204c5bceca463
ce8f9cb01cc533fbba6055c6c6750b320bc1d43e
/poembot/util/PoemImporter.py
ff5076e632f40f0f6b67a5b407c6083946e35b33
[]
no_license
seanxiaoxiao/poembot
https://github.com/seanxiaoxiao/poembot
ecec1a3e11aab12e4aa53650484b596a1386b554
61466698720ff44dc1e15437dea18fa77f5bd50d
refs/heads/master
2021-01-20T00:57:04.776307
2014-06-07T17:32:24
2014-06-07T17:32:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
# Importer for classical-poem data: parses poem/template/character text files
# and loads them into MongoDB collections.
# NOTE(review): Python 2 source (unicode literals, str.decode); pymongo's
# Connection class was removed in pymongo 3 -- confirm target versions.

# NOTE(review): "FIEL" is a typo for "FILE", but the name is module-level and
# possibly referenced by callers, so it is left unchanged here.
CI_RESOURCE_FIEL = "../../resource/ci-set-first"
CI_TEMPLATE_RESOURCE_FILE = "../../resource/ci-template"

from pymongo import Connection


def read_poems(path):
    """Parse a poem file into a list of dicts.

    Records are blank-line separated; the first line of each record is
    "template-author[-title[-summary]]", remaining lines are the poem body.
    """
    with open(path, "r") as poem_file:
        results = []
        contents = poem_file.readlines()
        poem = {}
        for line in contents:
            line = line.strip()
            if len(line) == 0:
                # Blank line ends a record; keep it only if it looks complete.
                if poem.get("template") and poem.get("author"):
                    results.append(poem)
                poem = {}
            elif not poem.get("template"):
                # First line of a record: dash-separated header fields.
                components = line.split('-')
                poem["template"] = components[0]
                poem["author"] = components[1]
                if len(components) >= 3:
                    poem["title"] = components[2]
                if len(components) >= 4:
                    poem["summary"] = components[3]
                poem["contents"] = []
            else:
                poem["contents"].append(line)
        return results


def read_template(path):
    """Parse a template file: blank-line separated records of title + lines."""
    with open(path, "r") as template_file:
        results = []
        contents = template_file.readlines()
        template = {}
        for line in contents:
            line = line.strip()
            if len(line) == 0:
                if template.get("title"):
                    results.append(template)
                template = {}
            elif not template.get("title"):
                template["title"] = line
                template["contents"] = []
            else:
                template["contents"].append(line)
        return results


def read_character(path):
    """Parse a tab-separated character/pronunciation file.

    Each line is "char<TAB>pronunciations..."; pronunciations are
    comma-separated pinyin strings, normalized via _format_pronunciation.
    """
    _vowel_map = vowel_map()
    _vowel_list = vowel_list()
    _vowel_tune_number = vowel_tune_number()
    with open(path, "r") as character_file:
        results = []
        contents = character_file.readlines()
        character = {}
        for line in contents:
            line = line.strip()
            components = line.split("\t")
            character["char"] = components[0]
            character["pronunciation"] = []
            for i in range(1, len(components)):
                pronunciations = components[i].split(',')
                for j in range(0, len(pronunciations)):
                    pronunciation = pronunciations[j]
                    character["pronunciation"].append(_format_pronunciation(pronunciation, _vowel_map, _vowel_list, _vowel_tune_number))
            results.append(character)
            character = {}
        return results


def _format_pronunciation(pronunciation, _vowel_map, _vowel_list, _vowel_tune_number):
    """Replace accented pinyin vowels with plain letters and append the tone digit.

    e.g. "hǎo" -> "hao3". Tone 0 is used when no accented vowel is present.
    """
    formatted = ""
    tune = 0
    for c in pronunciation.decode('utf8'):
        if c in _vowel_list:
            formatted += _vowel_map[c]
            tune = _vowel_tune_number[c]
        else:
            formatted += c
    formatted += str(tune)
    return formatted


def vowel_map():
    """Accented pinyin vowel -> base letter.

    NOTE(review): first-tone u-umlaut (ǖ) is absent from all three vowel
    tables -- possibly an oversight; confirm against the source data.
    """
    result = {}
    result[u'a'] = 'a'
    result[u'ā'] = 'a'
    result[u'á'] = 'a'
    result[u'ǎ'] = 'a'
    result[u'à'] = 'a'
    result[u'e'] = 'e'
    result[u'ē'] = 'e'
    result[u'é'] = 'e'
    result[u'ě'] = 'e'
    result[u'è'] = 'e'
    result[u'i'] = 'i'
    result[u'ī'] = 'i'
    result[u'í'] = 'i'
    result[u'ǐ'] = 'i'
    result[u'ì'] = 'i'
    result[u'o'] = 'o'
    result[u'ō'] = 'o'
    result[u'ó'] = 'o'
    result[u'ǒ'] = 'o'
    result[u'ò'] = 'o'
    result[u'u'] = 'u'
    result[u'ū'] = 'u'
    result[u'ú'] = 'u'
    result[u'ǔ'] = 'u'
    result[u'ù'] = 'u'
    result[u'v'] = 'v'
    result[u'ǘ'] = 'v'
    result[u'ǚ'] = 'v'
    result[u'ǜ'] = 'v'
    return result


def vowel_list():
    """All accented vowels recognized by _format_pronunciation."""
    return [u'ā', u'á', u'ǎ', u'à',
            u'ē', u'é', u'ě', u'è',
            u'ī', u'í', u'ǐ', u'ì',
            u'ō', u'ó', u'ǒ', u'ò',
            u'ū', u'ú', u'ǔ', u'ù',
            u'ǘ', u'ǚ', u'ǜ']


def vowel_tune_number():
    """Accented vowel -> tone number (1-4)."""
    result = {}
    result[u'ā'] = 1
    result[u'á'] = 2
    result[u'ǎ'] = 3
    result[u'à'] = 4
    result[u'ē'] = 1
    result[u'é'] = 2
    result[u'ě'] = 3
    result[u'è'] = 4
    result[u'ī'] = 1
    result[u'í'] = 2
    result[u'ǐ'] = 3
    result[u'ì'] = 4
    result[u'ō'] = 1
    result[u'ó'] = 2
    result[u'ǒ'] = 3
    result[u'ò'] = 4
    result[u'ū'] = 1
    result[u'ú'] = 2
    result[u'ǔ'] = 3
    result[u'ù'] = 4
    result[u'ǘ'] = 2
    result[u'ǚ'] = 3
    result[u'ǜ'] = 4
    return result


# --- MongoDB load/unload helpers: one insert/drop pair per collection. ---

def import_poems(poems):
    db = Connection().poembot
    poems_collection = db.poems
    poems_collection.insert(poems)


def import_templates(templates):
    db = Connection().poembot
    template_collection = db.templates
    template_collection.insert(templates)


def import_characters(characters):
    db = Connection().poembot
    character_collection = db.characters
    character_collection.insert(characters)


def remove_poems():
    db = Connection().poembot
    db.drop_collection("poems")


def remove_templates():
    db = Connection().poembot
    db.drop_collection("templates")


def remove_characters():
    db = Connection().poembot
    db.drop_collection("characters")


def import_tokens(tokens):
    db = Connection().poembot
    token_collection = db.tokens
    token_collection.insert(tokens)


def remove_tokens():
    db = Connection().poembot
    db.drop_collection("tokens")


def import_authors(authors):
    db = Connection().poembot
    authors_collection = db.authors
    authors_collection.insert(authors)


def remove_authors():
    db = Connection().poembot
    db.drop_collection("authors")
UTF-8
Python
false
false
2,014
7,662,221,687,682
a4f2c27250b02005b06c303ce5ed6cc14de4f17d
24137e4b1c04c43b0ce36f2dfaf067f5127a2f41
/fizzbuzz2.py
c396bfe781ce8647c288bfee9b06f7d62a58fa9a
[]
no_license
christopher-s-b/sandbox
https://github.com/christopher-s-b/sandbox
ed68756e2d29393dcd22169b7e908ffc98f77651
c8e6b7ba0f6bfd110e99193cf66623dde50f2ea4
refs/heads/master
2021-01-17T18:46:06.684300
2014-11-25T22:56:57
2014-11-25T22:56:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def mul_of(a,b):
    """Return True when `a` is an exact multiple of `b`."""
    return 0 == a % b

# Divisor -> word emitted when the divisor evenly divides n.
targets = {3:"fizz", 5:"buzz"}

def fizzbuzz(n):
    """Return the concatenated target words for n, or str(n) if none divide it.

    Words are joined in the dictionary's key order, so 15 -> "fizzbuzz".
    """
    # A list comprehension instead of filter(): on Python 3, filter() returns
    # a lazy iterator, so the original len(matches) raised TypeError. This
    # form behaves identically on Python 2 and fixes Python 3.
    matches = [i for i in targets.keys() if mul_of(n, i)]
    return str(n) if len(matches) == 0 else "".join(targets[i] for i in matches)

# Parenthesized print works as a statement on Python 2 and a call on Python 3
# (the original bare `print ", ".join(...)` was a syntax error on Python 3).
print(", ".join(map(fizzbuzz, range(20))))
UTF-8
Python
false
false
2,014
18,588,618,474,054
346d570bc495b19d067ea0586986a5527f8213ce
e9a3c11ccf90339184edd17562eaf6d99c063cae
/libraries/cakemail/CakeClient.py
de37a6ebd24f3ab67cbc1f80cfcd47d0908877e9
[]
no_license
silent1mezzo/HackMTL
https://github.com/silent1mezzo/HackMTL
ef29ece97334ff85bc586349ab3ebf7cbd486634
d945354e96bca728014308bbd1db86f9dfc98937
refs/heads/master
2020-05-22T14:10:45.478560
2010-11-27T21:20:25
2010-11-27T21:20:25
1,112,867
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Thin wrappers around the CakeMail "ClassClient" SOAP/XML API: each function
# forwards its parameters to CakeFunctions.Proccess (sic -- external helper
# name, spelling kept) and parses the XML response.
import CakeGlobals
import CakeFunctions

# API class name sent with every request.
CLASS_NAME = "ClassClient"

# Activates a client
def Activate(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "Activate", parameters, locale)
    return CakeFunctions.ParseXML(methodNode)

# Adds credits to a client (response body is ignored; returns True on no error)
def AddCredits(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    CakeFunctions.Proccess(CLASS_NAME, "AddCredits", parameters, locale)
    return True

# Creates a new client
def Create(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "Create", parameters, locale)
    return CakeFunctions.ParseXML(methodNode)

# Gets the credit balance
def GetCreditBalance(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "GetCreditBalance", parameters, locale)
    return CakeFunctions.ParseXML(methodNode)

# Gets the credit transactions
def GetCreditTransactions(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "GetCreditTransactions", parameters, locale)
    return CakeFunctions.ParseXML(methodNode)

# Retrieves the informations about the client
def GetInfo(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "GetInfo", parameters, locale)
    return CakeFunctions.ParseXML(methodNode)

# Gets the list with a specified status; the repeated "client" elements are
# re-keyed to a plural "clients" entry in the result dict.
def GetList(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "GetList", parameters, locale)
    res = CakeFunctions.ParseXML(methodNode, ["client"])
    CakeFunctions.ChangeKey(res, "client", "clients")
    return res

# Gets the timezones (re-keyed "timezone" -> "timezones", as in GetList)
def GetTimezones(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "GetTimezones", parameters, locale)
    res = CakeFunctions.ParseXML(methodNode, ["timezone"])
    CakeFunctions.ChangeKey(res, "timezone", "timezones")
    return res

# Adds or removes credits to a client for the balance to be 0 at the end of the month
def ResetCredits(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    CakeFunctions.Proccess(CLASS_NAME, "ResetCredits", parameters, locale)
    return True

# Sets the parameters for a user
def SetInfo(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    CakeFunctions.Proccess(CLASS_NAME, "SetInfo", parameters, locale)
    return True

# Searches for clients based on a query string
def Search(parameters, locale = CakeGlobals.DEFAULT_LOCALE):
    methodNode = CakeFunctions.Proccess(CLASS_NAME, "Search", parameters, locale)
    res = CakeFunctions.ParseXML(methodNode, ["client"])
    CakeFunctions.ChangeKey(res, "client", "clients")
    return res
UTF-8
Python
false
false
2,010
11,905,649,346,418
1441317d07ed1890e330bd605343306a7de076f1
a4b76507c86458c250450e640f93fb76d5cccc9d
/src/models/schema.py
cedfdfbea65fc815c603e509a0e0540df8e5b3fb
[]
no_license
NickolausDS/Uniquity
https://github.com/NickolausDS/Uniquity
9bfb6b8761c4c6e1833a7a6166be435eb2c67846
b8d3f31df5db0a628da99652e7a42452995c0c74
refs/heads/master
2021-01-23T18:49:11.633354
2014-06-06T22:49:07
2014-06-06T22:49:07
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" This file shouldn't be used as of 0.4.2. It's still used by some objects. When they're refactored, this will go away. """ #Register the types here TYPES = ["FILE", "SCANPARENT"] FILE = ( ("filename", str), ("shortname", str), ("basename", str), ("rootParent", str), ("size", int), ("niceSize", str), ("niceSizeAndDesc", str), ("weakHash", str), ("weakHashFunction", str), ("strongHash", str), ("strongHashFunction", str), ) SCANPARENT = ( ("filename", str), )
UTF-8
Python
false
false
2,014
16,449,724,745,596
8ddcddbfc47e5584416401c415f995a7d1a25e2a
a1a2af1fb3800f698a8b3c431f30d7a147f98791
/gmlStore/models.py
c3782b0a466da798ef515efc35c2f8ae66632953
[]
no_license
ashirley/mapbin
https://github.com/ashirley/mapbin
a2a045a8f0f8856dbeca80a8d7285a896b6e0612
7d7bfc2aa9fb967017e8126350e5f926ad3ed658
refs/heads/master
2021-01-19T04:51:50.712833
2010-05-06T07:48:56
2010-05-06T07:48:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models # Create your models here. class Annotation(models.Model): gml = models.XMLField() creation_date = models.DateTimeField('date created')
UTF-8
Python
false
false
2,010
5,901,285,095,027
e638675c88871cf5ac8e810e1bb23966126d9a5a
b475797332e9bf23bf90a433efb101b9af3101ed
/partial/templating.py
0cdd0b59219f5b02b5a192677d94cb26c4a484aa
[ "MIT" ]
permissive
RentennaDev/partial
https://github.com/RentennaDev/partial
2960ef1df958909f11dd76822260854ebda8ee44
f55a7919da4bf0a595854f173eb2380c0138946c
refs/heads/master
2018-03-26T06:28:57.679617
2013-11-21T00:48:58
2013-11-21T00:48:58
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Jinja2 templating integration for the `partial` framework: templates are
# looked up through the bundle-component scanner rather than the filesystem,
# and a handful of helper globals are installed on the environment.
from jinja2 import Environment, BaseLoader, TemplateNotFound

from partial.bundleComponent import BundleComponent
from partial.scanner import ClassNotFound

class TemplateBundleComponent(BundleComponent):
    # Registers the 'template' bundle-component type with the scanner.
    type = 'template'

def renderTemplate(template, context=None):
    """Render the named template with `context` (defaults to empty dict)."""
    if context is None:
        context = {}
    # `env` is defined later in this module; it exists by the time callers run.
    template = env.get_template(template)
    return template.render(**context)

class _PartialLoader(BaseLoader):
    """Jinja2 loader that resolves template names via the partial scanner."""
    def get_source(self, environment, template):
        # Local import avoids a circular import at module load time.
        from partial import scanner
        try:
            component = scanner.getBundleComponent('template', template)
            # (source, filename, uptodate): never reload -- uptodate is always True.
            return (component, None, lambda: True)
        except ClassNotFound:
            raise TemplateNotFound(template)

env = Environment(
    loader=_PartialLoader(),
)

from partial import routing
env.globals['url'] = routing.generate

def _partial(partialName, **kwargs):
    """Template global: render a named partial with keyword context."""
    from partial import render
    return render.partial(partialName, kwargs)
env.globals['partial'] = _partial

def _plural(number, singular, plural=None):
    """Template global: "3 items"-style count + pluralized noun."""
    return "%s %s" % (number, _pluralName(number, singular, plural))
env.globals['plural'] = _plural

def _pluralName(number, singular, plural=None):
    """Return the singular for 1, else `plural` or naive singular+'s'."""
    if number == 1:
        return singular
    elif plural is None:
        return "%ss" % singular
    else:
        return plural
env.globals['pluralName'] = _pluralName

def _nullable(val):
    """Finalize hook: render None as "" and decode byte strings.

    NOTE(review): `unicode` exists only on Python 2 -- this module is not
    Python 3 compatible as written; confirm the targeted runtime.
    """
    if val is None:
        return ""
    elif isinstance(val, str):
        return unicode(val, encoding='utf-8', errors='replace')
    else:
        return val
env.finalize = _nullable
UTF-8
Python
false
false
2,013
12,945,031,465,532
c31dcb4379b1911310710c28fc208747c3ebd80b
278f1f4727cafc582841bd60ed32ddb611acdf27
/src/gui/gui_control.py
e79e8d7e09093e704eaff05250a8774be93b1f37
[]
no_license
Ormazz/ESIR3-SR-katch
https://github.com/Ormazz/ESIR3-SR-katch
46d8b4106b6e1cb289f7bf8beb41ed2696d6b641
bf98e8af71524df05c3098144be03c3194d12efe
refs/heads/master
2016-09-15T21:51:28.207234
2014-01-19T13:18:20
2014-01-19T13:18:20
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from abc import ABCMeta, abstractmethod

from control import katch

class GuiControl(metaclass=ABCMeta):
    """Abstract base class for GUI controllers.

    Subclasses share a single Katch controller instance through the
    `_katch` class attribute (created once at import time).
    """
    _katch = katch.Katch()
UTF-8
Python
false
false
2,014
8,881,992,385,437
1d0ca17365d8d8127d056a917c053c15065128ae
b122d95ac1f059567ea4753fbd0ad4601caf3752
/processing/commandline.py
7450fdbc72e81701dbd24bdc61bbe34ef71afb68
[]
no_license
JNazare/IntroduceMeTo
https://github.com/JNazare/IntroduceMeTo
442ab6f084688286ccda0a419cd082a6029907db
3edd082ed5266eefbafe01a7856aa996e6992166
refs/heads/master
2020-05-30T18:50:04.003557
2013-05-06T00:55:17
2013-05-06T00:55:17
9,875,695
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Interactive REPL for the IntroduceMeTo matcher: prompts (with ANSI green),
# queries the ranker, and prints the ranked names.
# NOTE(review): Python 2 source (raw_input, print statements).
import query

while True:
    # \033[92m ... \033[0m wraps the prompt in green terminal color codes.
    var = raw_input("\033[92mDear Introducemeto, introduce me to... \033[0m")
    try:
        # ranked_query returns rows whose first element is the person's name.
        print ">", [x[0] for x in query.ranked_query(str("@IntroduceMeTo " + var))]
        print ""
    except:
        # NOTE(review): bare except treats *any* failure (including typos in
        # query internals) as "no results" -- deliberate best-effort UX, but
        # consider narrowing to the expected exception type.
        print "> NO HUMANS FOUND"
        print ""
UTF-8
Python
false
false
2,013
15,882,789,092,271
559f10eda7c7ae1c2b3888e9f8a5550b89fd5295
1f4204f903657884d9cccfd44b19ecb531b59ded
/setup.py
fd49ab17e6e9821e129493aeb8450171d8b9167b
[]
no_license
fmcc/StylometricAnalyser
https://github.com/fmcc/StylometricAnalyser
795a8e4abe264ee18ab3bcb34bd128bcd06ac5ca
e86305a63c95d8b533cab4a3be0010c2fee0ff14
refs/heads/master
2021-01-23T08:38:44.961082
2013-08-31T20:23:36
2013-08-31T20:23:36
11,097,508
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# One-shot database bootstrap: creates the schema and seeds the singleton
# rows (vector space, global n-gram counts, version marker) unless the
# database file already exists.
from database import *
from database.models import *
from database.utilities import get_or_create
from settings import DB_PATH, NGRAM_LENGTHS, RESTRICT_VECTOR_SPACE
from collections import Counter
import os

if os.path.exists(DB_PATH):
    # Never re-initialize an existing database.
    print('Database already exists')
else:
    Base.metadata.create_all(engine)
    session = Session()
    # Seed the singleton bookkeeping rows with empty initial values.
    get_or_create(session, VectorSpace, space=set())
    get_or_create(session, GlobalNgrams, counts=Counter())
    get_or_create(session, GlobalVersion)
    session.commit()
    # Echo the active configuration for confirmation.
    print(str(NGRAM_LENGTHS['MIN']) + ' - ' + str(NGRAM_LENGTHS['MAX']) + ' - ' + str(RESTRICT_VECTOR_SPACE))
UTF-8
Python
false
false
2,013
2,327,872,292,453
b485db0c38ac0f2fcd8c28154b55e0ad7b055539
481f3aa9767dfaeb99315ce3da45040439d2ca7a
/peer.py
ac7f1c3e5dc236944342de912be005ea454fc735
[]
no_license
jwilner/torrentPy
https://github.com/jwilner/torrentPy
eda61e366624be49e2e9698159dec57f6ab8f78d
a9838312456c681978299b09d636b803692ba3ff
refs/heads/master
2016-09-06T01:07:41.270779
2013-12-17T00:25:38
2013-12-17T00:25:38
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import messages import config import torrent_exceptions import logging import events from time import time from collections import deque from functools import partial from utils import four_bytes_to_int, StreamReader logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) class Peer(torrent_exceptions.ExceptionManager, messages.MessageManager, events.EventManager, object): '''Class representing peer for specific torrent download and providing interface with specific TCP socket''' def __init__(self, socket): logger.info('Instantiating peer %s', str(socket.getpeername())) self.socket = socket self.active = True self.peer_id = None self.address = socket.getpeername() self.ip, self.port = self.address self.outbox = deque() self.sent_folder, self.archive = [], [] self.handshake = {'sent': False, 'received': False} self.handle_event(events.PeerRegistration(peer=self, read=self.handle_incoming, write=self.handle_outgoing, error=self.handle_exception)) self.last_heard_from = time() self.last_spoke_to = 0 self._read_buffer = '' self._pending_send = deque() self.outstanding_requests = set() self.am_choking, self.am_interested = True, False self.choking_me, self.interested_me = True, False # Am I anal? Maybe. 
attr_setter = partial(self.__setattr__) choking_me_setter = partial(attr_setter, 'choking_me') am_choking_setter = partial(attr_setter, 'am_choking') interested_me_setter = partial(attr_setter, 'interested_me') am_interested_setter = partial(attr_setter, 'am_interested') # Don't need to define a handler for KeepAlive, because undefined # messages fail silently but still update 'last_heard_from' self._message_handlers = { messages.INCOMING: { messages.Handshake: self._process_handshake, messages.Choke: lambda _: choking_me_setter(True), messages.Unchoke: lambda _: choking_me_setter(False), messages.Interested: lambda _: interested_me_setter(True), messages.NotInterested: lambda _: interested_me_setter(False), messages.Have: self._process_have, messages.Bitfield: self._process_bitfield, messages.Request: self._process_request, messages.Piece: lambda m: self.outstanding_requests.discard((m.index, m.begin)), messages.Cancel: self._process_cancel }, messages.OUTGOING: { messages.Handshake: self._record_handshake, messages.Request: lambda m: self.outstanding_requests.add(m.get_triple()[:2]), messages.Cancel: lambda m: self.outstanding_requests.discard(m .get_triple()[:2]), messages.Choke: lambda _: am_choking_setter(True), messages.Unchoke: lambda _: am_choking_setter(False), messages.Interested: lambda _: am_interested_setter(True), messages.NotInterested: lambda _: am_interested_setter(False) } } # exception handling self._exception_handlers = {} def __str__(self): return '<Peer at {0}:{1}>'.format(self.ip, self.port) def fileno(self): return self._socket.fileno() def handle_incoming(self): self.last_heard_from = time() for msg in self._read_from_socket(): try: self.handle_message(msg) except torrent_exceptions.FatallyFlawedIncomingMessage as e: self.handle_exception(e) def handle_outgoing(self): sent_msgs = self._send_via_socket() if sent_msgs: self.last_spoke_to = time() for msg in sent_msgs: try: self.handle_message(msg) except 
torrent_exceptions.FatallyFlawedOutgoingMessage as e: self.handle_exception(e) if not self._pending_send: self.handle_event(events.PeerDoneSending(peer=self)) def enqueue_message(self, msg): # if outbox is currently empty, then we'll want to tell the client notify = not self._pending_send self._pending_send.append([msg, len(msg)]) if notify: # tell client self.handle_event(events.PeerReadyToSend(peer=self)) def drop(self): '''Procedure to disconnect socket''' self.active = False self.socket.close() def _read_from_socket(self): new_string = self.socket.recv(config.DEFAULT_READ_AMOUNT) stream = StreamReader(self._read_buffer + new_string) try: while True: yield self._parse_string_to_message(stream) except torrent_exceptions.LeftoverException as e: self._read_buffer = e.leftover def _send_via_socket(self): '''Attempts to send message via socket. Returns a list of msgs sent -- potentially empty if sent was incomplete''' strung = ''.join(str(msg)[-length:] for msg, length in self._pending_send) amt_sent = self._socket.send(strung) sent_msgs = [] while amt_sent: # loop over lengths of pending msgs, updating their remaining # amount or appending them to the response list if they've been # completely sent if self._pending_send[0][1] > amt_sent: self._pending_send[0][1] -= amt_sent amt_sent = 0 else: amt_sent -= length # appends actual msg to self sent_msgs.append(self._pending_send.leftpop()[0]) return sent_msgs def _parse_string_to_message(self, stream): parts = [] try: if not self.handshake['received']: # must be handshake try: parts.append(ord(stream.read(1))) pstrlen = parts[0] # protocol string, reserved, info hash, peer_id for l in (pstrlen, 8, 20, 20): parts.append(stream.read(l)) info_hash, peer_id = parts[3], parts[4] return messages.Handshake(peer_id, info_hash, reserved=parts[2], pstr=parts[1], msg_event=messages.INCOMING) except torrent_exceptions.RanDryException as e: leftover = ''.join(parts)+e.unused raise torrent_exceptions.LeftoverException(value=leftover) 
# normal message try: parts.append(stream.read(4)) bytes_length_prefix = parts[0] length = four_bytes_to_int(bytes_length_prefix) if length == 0: return messages.KeepAlive() parts.append(stream.read(length)) msg_body = parts[1] msg_id = ord(msg_body[0]) return messages.lookup[msg_id](msg_body[1:], from_string=True, msg_event=messages.INCOMING) except torrent_exceptions.RanDryException as e: leftover = ''.join(parts)+e.unused raise torrent_exceptions.LeftoverException(value=leftover) except torrent_exceptions.MessageParsingError as e: self.handle_exception(e) def _record_handshake(self, msg): '''Fires as callback when handshake is sent. This is a method because assignment can't happen in lambdas...''' self.handshake['sent'] = True def _process_handshake(self, msg): self.handshake['received'] = True self.peer_id = msg.peer_id if msg.pstr != config.PROTOCOL: # will be caught by strategy raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self, msg=msg) if not self.handshake['sent']: # this is an unknown peer # will resolve to client, where it'll be handled self.handle_event(events.UnknownPeerHandshake(msg=msg, peer=self)) def _process_have(self, msg): self.has[msg.piece_index] = 1 def _process_bitfield(self, msg): quotient, remainder = divmod(self.torrent.num_pieces, 8) # this appropriately rounds up the required length of the bitfield req_len = (quotient+1)*8 if remainder != 0 else quotient*8 if len(msg.bitfield) != req_len: # gets caught by strategy raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self, msg=msg) for i, p in enumerate(msg.bitfield): try: self.has[i] = p except IndexError: break def _process_request(self, msg): if self.am_choking: # peer is being obnoxious -- do something about it? 
pass if msg.length > config.MAX_REQUESTED_PIECE_LENGTH: raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self, msg=msg) self.wants.add((msg.index, msg.begin, msg.length)) def _process_cancel(self, msg): if msg.length > config.MAX_REQUESTED_PIECE_LENGTH: raise torrent_exceptions.FatallyFlawedIncomingMessage(peer=self, msg=msg) self.wants.discard((msg.index, msg.begin, msg.length)) def _process_piece(self, msg): # do something in this context? self.outstanding_requests.discard((msg.index, msg.begin))
UTF-8
Python
false
false
2,013
10,737,418,264,677
8071f52385d92ef99483ccbef1d619f19971ee35
41f5fb2b76efe6f7a10c96ff197b0785e247ca12
/gather/gathered/rand_test.py
6ad2b910e2535ac920da7050833603cbec016892
[ "BSD-2-Clause", "BSD-Advertising-Acknowledgement" ]
non_permissive
jtwhite79/my_python_junk
https://github.com/jtwhite79/my_python_junk
2f33d102e0e2875cf617b11dc31127678e9e9756
2ee0044f9b455d40e3b1967081aa7ac2dbfa64c9
refs/heads/master
2021-01-23T05:45:21.432421
2014-07-01T17:30:40
2014-07-01T17:30:40
4,587,435
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np rand = np.random.randn(1000000) np.savetxt('rand.dat',rand)
UTF-8
Python
false
false
2,014
773,094,132,951
a7b3296cea6db6903fd6117a8d75b5ee00e039b5
c9575089d08f9c1f701c7fdb5305b38142131975
/test/git/test_diff.py
9b7e9c73ff3d0f88978636ecd14438b5c590ea68
[ "BSD-3-Clause" ]
permissive
directeur/git-python
https://github.com/directeur/git-python
19e7de6863cd841ddd40335a67d3ae87ac868820
b00f3689aa19938c10576580fbfc9243d9f3866c
refs/heads/master
2016-08-04T11:44:22.840081
2008-09-16T06:08:47
2008-09-17T06:08:09
84,097
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# test_diff.py # Copyright (C) 2008 Michael Trier ([email protected]) and contributors # # This module is part of GitPython and is released under # the BSD License: http://www.opensource.org/licenses/bsd-license.php from test.testlib import * from git import * class TestDiff(object): def setup(self): self.repo = Repo(GIT_REPO) def test_list_from_string_new_mode(self): output = fixture('diff_new_mode') diffs = Diff.list_from_string(self.repo, output) assert_equal(1, len(diffs)) assert_equal(10, len(diffs[0].diff.splitlines()))
UTF-8
Python
false
false
2,008
13,477,607,385,152
38e09e752d1699a42c468f835dc12c0ce717a2be
20ddd4e890bd69e7b4403c684dfd7d1d54f94803
/hw1/analyze.py
ff0fc08bccc9f147ddabf516581be1324df0127c
[]
no_license
jasenmh/CS240A-Winter14
https://github.com/jasenmh/CS240A-Winter14
50292cf901d408f9384dee0c6b1eca9975f33627
5650a8b9e5c5008b0b8e921751f64da0d879e0bc
refs/heads/master
2016-08-06T07:15:37.247654
2014-02-16T21:27:28
2014-02-16T21:27:28
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python import commands import re MAXTIME = 600.0 ITERATIONS = 4 DEBUG = False f = open("hw1data.csv", "w") for algo in range(1, 8): if DEBUG: print('Algorithm ' + str(algo) +':') matdim = 2 multtime = 0 tottime = 0 while multtime < MAXTIME: itercount = 0 while itercount < ITERATIONS: if DEBUG: print('\t' + str(matdim) + 'x' + str(matdim) + 'matrices, run ' + str(itercount + 1)) cmd = "./matrix_multiply -n" + str(matdim) + " -a" + str(algo) o = commands.getoutput(cmd) multtime = float(re.search('Time \= (.*) sec, .*', o).group(1)) tottime = tottime + multtime if DEBUG: print('\t\trun time ' + str(multtime)) itercount = itercount + 1 f.write(str(tottime/float(ITERATIONS)) + ", ") matdim = matdim << 1 f.write('\n') f.flush() if DEBUG: print('\nAnalysis complete.') f.close()
UTF-8
Python
false
false
2,014
13,477,607,377,447
925588902072d5f2b8aedd9da0719c40986147aa
5a4d4d84097dc34bdb7a9a7bf4a07f0510694067
/check.py
aaf06d6c3602001811aee4e74e10d101bba3ec9c
[ "BSD-3-Clause" ]
permissive
baharev/apidocfilter
https://github.com/baharev/apidocfilter
274e10a85713260ce4fb88c469260e4f71fd7ad8
9387e8d3d04ecb35d6287b5f7d761a15b94a975a
refs/heads/master
2020-08-10T19:50:12.837033
2014-09-28T17:17:45
2014-09-28T17:17:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from importlib import import_module from inspect import getdoc def attribs(name): mod = import_module(name) print name print 'Has __all__?', hasattr(mod, '__all__') print 'Has __doc__?', hasattr(mod, '__doc__') print 'doc: ', getdoc(mod) if __name__=='__main__': attribs('cairo') attribs('zope') attribs('A.B.C') import hacked class Object(object): pass opt = Object() opt.ignore_errors = False a, d = hacked.get_all_attr_has_docstr('/home/ali/ws-pydev/apidocfilter/A/B', '/home/ali/ws-pydev/apidocfilter/A/B/C', opt) print(a) print(d)
UTF-8
Python
false
false
2,014
4,346,506,933,482
abcf43eedf7809c5dda937841797eaf34253fc96
7fb1fee4bda177bf81a094bcd02d5c7092630630
/midtermproject/hitch/forms.py
4ae3a5e421a6a60fccc62a8e624372566d520f94
[]
no_license
wdesalu/Hitch
https://github.com/wdesalu/Hitch
3d7c81996418927017fe7615898d149d86a2b303
1ff38b831dff028f694b8dff4cfe41ba5f61cd99
refs/heads/master
2020-06-01T13:44:53.345532
2013-12-11T01:20:37
2013-12-11T01:20:37
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django import forms from django.contrib.auth.models import User from models import * # ---------- Driver Registration ---------------- class RegistrationForm(forms.Form): username = forms.CharField(max_length=200, widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Name','required':'on','autofocus':'on'})) emailGiven = forms.EmailField(max_length=200, widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Email','required':'on','autofocus':'on'})) clientNumber = forms.IntegerField( widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Client Capacity','required':'on','autofocus':'on'})) ppm = forms.IntegerField( widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Price Per Mile','required':'on','autofocus':'on'})) bagCapacity = forms.IntegerField( widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Bag Capacity','required':'on','autofocus':'on'})) departureGiven = forms.DateTimeField( widget=forms.DateTimeInput(attrs={'class':'form-control','placeholder':'Departure Date','required':'on','autofocus':'on'})) destinationGiven = forms.CharField(max_length=200, widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Destination','required':'on','autofocus':'on'})) def clean(self): cleaned_data = super(RegistrationForm, self).clean() username = cleaned_data['username'] email = cleaned_data['emailGiven'] clientNumber = cleaned_data['clientNumber'] ppm = cleaned_data['ppm'] bagCapacity = cleaned_data['bagCapacity'] departureGiven = cleaned_data['departureGiven'] destinationGiven = cleaned_data['destinationGiven'] if not (username and email and clientNumber and ppm and bagCapacity and departureGiven and destinationGiven): raise forms.ValidationError("Not Enough Information") return cleaned_data
UTF-8
Python
false
false
2,013
5,454,608,466,489
c3ea05c259f64080d2efac0c0de5bf698c0439a8
fcf819a9dcd1bedef3c70a38e0541bd18c1d49ee
/Zhidao_grabber.py
5b1b5fc41eb4728c5703bd539ae7580f4e175a48
[]
no_license
wblyy/Zhidao_grabber
https://github.com/wblyy/Zhidao_grabber
821d5800ebd35b25b0f24d3344ae85fa27984dfc
295fc8e32ff404bcd72d0423225c2c89a2e69f0b
refs/heads/master
2020-05-18T17:03:11.855981
2014-11-27T03:59:34
2014-11-27T03:59:34
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# encoding: UTF-8 import ConfigParser import re import urllib2 import urllib import time from mydbV2 import MydbV2 from random import choice import random import requests from IPdb import IPdb import sys import logging import time logging.basicConfig(filename='zhidao_buzhidao.log',level=logging.DEBUG) reload(sys) sys.setdefaultencoding('utf-8') dbV2 = MydbV2() myIPdb=IPdb() proxy_dict=['http://113.11.198.163:2223/', #r'http://113.11.198.164:2223/', #r'http://113.11.198.165:2223/', #r'http://113.11.198.166:2223/', 'http://113.11.198.167:2223/', 'http://113.11.198.168:2223/', 'http://113.11.198.169:2223/', ] for url_index in xrange(85787890,0,-1): try: is_answerable=0 is_used=0 content_data='' title_data='' style_data='' page_url='http://zhidao.baidu.com/question/'+str(url_index) related_IP=random.choice(proxy_dict) req=requests.get(page_url,proxies={"http": related_IP}) req.encoding='gbk' msg=req.text #msg.encoding ='utf-8' title=re.findall('<title>(.*?)</title>'.decode('utf-8').encode('utf-8'), msg, re.DOTALL) content=re.findall('accuse="qContent">(.*?)</pre>'.decode('utf-8').encode('utf-8'), msg, re.DOTALL)#accuse="qContent"> used=re.findall('<span class="answer-title h2 grid">(.*?)</span>'.decode('utf-8').encode('utf-8'), msg, re.DOTALL) answerable=re.findall('id="answer-bar">(.*?)<i class="i-arrow-down">'.decode('utf-8').encode('utf-8'), msg, re.DOTALL) style=re.findall('<a class="f-aid" alog-alias="qb-class-info" href="(.*?)</a>'.decode('utf-8').encode('utf-8'), msg, re.DOTALL) #<a class="f-aid" alog-alias="qb-class-info" href=" #</a> #id="answer-bar"> #<i class="i-arrow-down"> print 'title:',title[0] title_data=title[0] if content: print 'content:',content[0] content_data=content[0] if used: print 'used:',used[0] is_used=1 if answerable: print 'answerable',answerable[0] is_answerable=1 if style: print style[0] style_data=style[0] qid=url_index if '百度知道 - 信息提示' not in title_data: dbV2.insert_data(qid, title_data, content_data, style_data, 
is_used,is_answerable,related_IP) except Exception, e: systime=time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time())) logging.debug(e) #print 'title:',title[0],'content:',content[0],'used:',used[0] #time.sleep(2) #<span class="answer-title h2 grid">
UTF-8
Python
false
false
2,014
17,420,387,382,900
38e1fd6156f243f9a88aaab2acf7cc41d83d2bff
518651a389f0ee2ed341ca2162c1bcbf480ea3d8
/RNASeq/src/run_pipeline.py
63d6752be4c3637c47073f228bc702ac63fdc1d7
[ "CC-BY-NC-3.0", "LicenseRef-scancode-proprietary-license" ]
non_permissive
singhalg/gsinghal_python_src
https://github.com/singhalg/gsinghal_python_src
ee0eb19ca842f7864285ff67bf335126e24ccae0
88c20f5dfbbb63d0c464c706cc9196edb9b8dfd2
refs/heads/master
2020-05-18T00:02:57.830324
2013-05-08T18:22:22
2013-05-08T18:22:43
7,980,456
6
4
null
null
null
null
null
null
null
null
null
null
null
null
null
import subprocess import sys bowtie_dir = '/net/artemis/mnt/work2/seqApps/bowtie-0.12.7/' shortfuse_dir = '/net/artemis/mnt/work1/projects/gsinghalWork/GSseqApps/ShortFuse/' offset = '33' EM_bin = shortfuse_dir + 'EmFusion' bowtie_bin = bowtie_dir + 'bowtie' bowtie_build_bin = bowtie_dir + 'bowtie-build' exon_structure_file = shortfuse_dir + 'ref/refseq_exon_structure.txt' exon_seq_file = shortfuse_dir + 'ref/exon_seqs_refseq.fa' initial_bowtie_flags = '-l 35 -e 150 -n 2 -a -m 150 -p 10 ' discord_bowtie_flags = '-l 22 -e 350 -n 3 -y -a -m 5000 -p 10 ' pe_bowtie_flags = '-a -p 10 -X 2000 -m 1500 --chunkmbs 1024 ' chrom_regions = shortfuse_dir + 'ref/chrom_regions.txt' transcripts_plus_genome = shortfuse_dir + 'ref/transcripts_plus_genome' reference_root = shortfuse_dir + 'ref/refseq_transcripts' reference_fasta = shortfuse_dir + 'ref/RefSeqTranscripts_50up_polyA.fasta' def main(fastq1, fastq2): p = subprocess.Popen(' '.join([bowtie_bin, initial_bowtie_flags, '--max 1.repeat', reference_root, fastq1, fastq1 + '.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join([bowtie_bin, initial_bowtie_flags, '--max 2.repeat', reference_root, fastq2, fastq2 + '.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'bowtie', fastq1 + '.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'bowtie', fastq2 + '.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'fasta', '1.repeat']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'fasta', '2.repeat']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'fastq', fastq1]), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'fastq', fastq2]), shell = True) p.wait() p = subprocess.Popen(' '.join([EM_bin, 'sift', fastq1 + 
'.bowtie', fastq2 + '.bowtie', fastq1, fastq2, offset, '1.repeat', '2.repeat']), shell = True) p.wait() p = subprocess.Popen(' '.join(['sort -k 2,2 -k 3,3', fastq1 + '.discord', '>', fastq1 + '.discord.sorted']), shell = True) p.wait() p = subprocess.Popen(' '.join([bowtie_bin, discord_bowtie_flags, '--max', fastq1 + '.discord.fastq.bust', transcripts_plus_genome, fastq1 + '.discord.fastq', 'concordant.bt']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'fa2ridlist.py', fastq1 + '.discord.fastq.bust', '>', 'concordant.reads']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'parseBowtie.py', 'concordant.bt', chrom_regions, '100', '>>', 'concordant.reads']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'ExtendReference.py', exon_structure_file, fastq1 + '.discord.sorted', 'concordant.reads']), shell = True) p.wait() p = subprocess.Popen(' '.join(['sort -k 2 -u', fastq1 + '.discord.sorted.exons', '>', fastq1 + '.discord.sorted.exons.uniq']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'MakeFusionTranscripts.py', exon_seq_file, fastq1 + '.discord.sorted.exons.uniq']), shell = True) p.wait() p = subprocess.Popen(' '.join(['cat', fastq1 + '.discord.sorted.exons.uniq.seqs', reference_fasta, '>', 'augmented_ref.fa']), shell = True) p.wait() p = subprocess.Popen(' '.join(['rm', fastq1 + '.bowtie', fastq2 + '.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join([bowtie_build_bin, '-o 0', 'augmented_ref.fa', 'augmented_ref']), shell = True) p.wait() p = subprocess.Popen(' '.join([bowtie_bin, pe_bowtie_flags, 'augmented_ref', '-1', fastq1, '-2', fastq2, fastq1 + '.pe.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'PythonSort.py', 'paired_bowtie', fastq1 + '.pe.bowtie']), shell = True) p.wait() p = subprocess.Popen(' '.join(['sort -n -k 1,1', fastq1 + '.mapdist', '>', fastq1 + 
'.mapdist.sorted']), shell = True) p.wait() p = subprocess.Popen(' '.join(['python', shortfuse_dir + 'getdistprob.py', fastq1 + '.mapdist.sorted', 'dist.prob']), shell = True) p.wait() p = subprocess.Popen(' '.join([EM_bin, 'EM', fastq1 + '.pe.bowtie', 'dist.prob', 'augmented_ref.fa', offset]), shell = True) p.wait() if __name__ == '__main__': if len(sys.argv) != 3: print "Usage: python run_pipeline.py <fastq1> <fastq2>" sys.exit() main(*sys.argv[1:])
UTF-8
Python
false
false
2,013
13,245,679,158,628
9052165998312c8f6b8864a8062220fdd0c24e5c
f3dc5339549fff5588d69da0cc81f98aa54f01e0
/tests/hello_test.py
b01613c821654024eeaea82948b7dc0e526b4fc1
[]
no_license
dkoepke/hedgie
https://github.com/dkoepke/hedgie
7d819496e8aecec100f26c3a7a5f94517d9c6918
104f140868d3c10efae6026ee7b2577afe5879a0
refs/heads/master
2016-08-06T15:12:23.579061
2014-11-18T01:10:54
2014-11-18T01:10:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import hedgie def hello_fallback(command): return 'fallback' @hedgie.command(fallback=hello_fallback) def hello(name, should_fail): """Say hello.""" assert should_fail is False return 'Hello, {0}'.format(name) def test_hello_should_pass_naive_introspection(): assert hello.__module__ == hello_fallback.__module__ assert hello.__name__ == 'hello' assert hello.__doc__ == 'Say hello.' def test_hello_should_be_a_hedgie_command(): assert isinstance(hello, hedgie.Command) def test_hello_should_return_greeting_if_not_should_fail_when_called(): result = hello('world', False) assert result == 'Hello, world' def test_hello_should_return_fallback_if_should_fail_when_called(): result = hello('world', True) assert result == 'fallback'
UTF-8
Python
false
false
2,014
1,941,325,242,706
8625f37e6c64abf035420a37b3baa0158a1279c2
f8c396afc8b51002868a838b0bcddd7b3bdf97a2
/volumina/widgets/multiStepProgressDialog.py
4907fafc470775f86c3af3a35c44b886055dabda
[ "MIT" ]
permissive
lfiaschi/volumina
https://github.com/lfiaschi/volumina
51ca1e4c55b4f032f0765b4d359f1eb66a6b8454
f05c10a791929d8e52fbdc291ceae0884b061bcc
refs/heads/master
2021-01-23T20:56:25.120419
2013-06-30T15:29:54
2013-06-30T15:29:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os, time from PyQt4 import uic from PyQt4.QtGui import QDialog, QDialogButtonBox class MultiStepProgressDialog(QDialog): def __init__(self, parent=None): QDialog.__init__(self, parent) self._initUic() self._numberOfSteps = 1 self._currentStep = 0 self._steps = [] self._update() def setNumberOfSteps(self, n): assert n >= 1 self._numberOfSteps = n self._currentStep = 0 self._update() self.time1 = time.time() self.times = [] def setSteps(self, steps): self._steps = steps self.setNumberOfSteps(len(self._steps)) def finishStep(self): self._currentStep = self._currentStep + 1 self._update() if self._currentStep == self._numberOfSteps: self.buttonBox.button(QDialogButtonBox.Ok).setText("Finished!") self.buttonBox.button(QDialogButtonBox.Cancel).hide() self.currentStepProgress.setValue(100) def _update(self): self.currentStepProgress.setValue(0) self.overallProgress.setMinimum(0) self.overallProgress.setMaximum(self._numberOfSteps) self.overallProgress.setFormat("step %d of %d" % (self._currentStep, self._numberOfSteps)) self.overallProgress.setValue(self._currentStep) def setStepProgress(self, x): oldx = self.currentStepProgress.value() self.time2 = time.time() self.currentStepProgress.setValue(x) if x - oldx > 0: timeLeft = (100 - x) * (self.time2 - self.time1) / (x - oldx) self._updateCurrentStepLabel( timeLeft) self.time1 = self.time2 def _updateCurrentStepLabel(self, singlet): self.times.append(singlet) t = sum(self.times) / len(self.times) if len(self.times) > 5: self.times.pop(0) if t < 120: self.currentStepLabel.setText("ETA: %.02f sec" % (t)) else: self.currentStepLabel.setText("ETA: %.02f min" % (t / 60)) def _initUic(self): p = os.path.split(__file__)[0]+'/' if p == "/": p = "."+p uic.loadUi(p+"ui/multiStepProgressDialog.ui", self) if __name__ == "__main__": from PyQt4.QtGui import QApplication import vigra, numpy app = QApplication(list()) d = MultiStepProgressDialog() d.setNumberOfSteps(5) d.show() app.exec_()
UTF-8
Python
false
false
2,013
12,463,995,124,637
19ff4e17522ae0ce2b735b63b29c8ca144e28a06
15839868b3a41927b4eda5d0310416cb6f30eb94
/gui/ConfigBox.py
406e6ec525502f105e22ebfc45e678c87829059b
[ "GPL-3.0-only" ]
non_permissive
bergetkorn82/lrg
https://github.com/bergetkorn82/lrg
46e79bf023dbc3b96f5b6ac3f3f465111d436a03
5a33d017f66104cfe9bb6c0dbd7436426bf7adba
refs/heads/master
2018-01-17T01:36:14.059721
2008-10-26T18:37:11
2008-10-26T18:37:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import wx from ConfigUtils import * ID_TMPDIR_BUT = 81001 ID_INDIR_BUT = 81002 ID_OK_BUT = 82001 ID_CLOSE_BUT = 82002 PADDING = 10 class ConfigBox(wx.Frame): def __init__(self, parent, id, title): wx.Frame.__init__(self, parent, id, title, style = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER | wx.MINIMIZE_BOX | wx.RESIZE_BOX | wx.MAXIMIZE_BOX)) self.incomingDirDialog = None self.tmpDirDialog = None self.splitter = wx.SplitterWindow(self, wx.ID_ANY) self.splitter.SetBorderSize(0) self.noteBookPanel = wx.Panel(self.splitter, wx.ID_ANY) self.noteBook = wx.Notebook(self.noteBookPanel) self.mainSizer = wx.BoxSizer(wx.VERTICAL) self.generalSettingsPanel = wx.Panel(self.noteBook, wx.ID_ANY) self.generalSettingsSizer = wx.BoxSizer(wx.VERTICAL) self.rapidAccountBox = wx.StaticBox(self.generalSettingsPanel, label='Account Settings') self.rapidAccountBoxSizer = wx.StaticBoxSizer(self.rapidAccountBox, wx.VERTICAL) self.usernameLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Rapidshare username:') self.username = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY) self.username.SetValue(Config.settings.rapidshareUsername) self.passwordLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Rapidshare password:') self.password = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY) self.password.SetValue(Config.settings.rapidsharePassword) self.rapidAccountBoxSizer.Add(self.usernameLbl, 0, wx.ALL, PADDING) self.rapidAccountBoxSizer.Add(self.username, 0, wx.EXPAND) self.rapidAccountBoxSizer.Add(self.passwordLbl, 0, wx.ALL, PADDING) self.rapidAccountBoxSizer.Add(self.password, 0, wx.EXPAND) self.generalSettingsSizer.Add(self.rapidAccountBoxSizer, 0, wx.EXPAND) self.directoryBox = wx.StaticBox(self.generalSettingsPanel, label='Directory settings') self.directoryBoxSizer = wx.StaticBoxSizer(self.directoryBox, wx.VERTICAL) self.incomingDirLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Incoming directory:') self.incomingDir = wx.TextCtrl(self.generalSettingsPanel, 
wx.ID_ANY) self.incomingDir.SetValue(Config.settings.downloadDir) self.incomingBut = wx.Button(self.generalSettingsPanel, ID_INDIR_BUT, 'Browse') wx.EVT_BUTTON(self, ID_INDIR_BUT, self.onSelectIncomingDir) self.incomingDirSizer = wx.BoxSizer(wx.HORIZONTAL) self.incomingDirSizer.Add(self.incomingDir, 3, wx.EXPAND, PADDING) self.incomingDirSizer.Add(self.incomingBut, 0, wx.EXPAND, PADDING) self.tmpDirLbl = wx.StaticText(self.generalSettingsPanel, wx.ID_ANY, 'Temporary directory:') self.tmpDir = wx.TextCtrl(self.generalSettingsPanel, wx.ID_ANY) self.tmpDir.SetValue(Config.settings.tmpDir) self.tmpBut = wx.Button(self.generalSettingsPanel, ID_TMPDIR_BUT, 'Browse') wx.EVT_BUTTON(self, ID_TMPDIR_BUT, self.onSelectTmpDir) self.tmpDirSizer = wx.BoxSizer(wx.HORIZONTAL) self.tmpDirSizer.Add(self.tmpDir, 3, wx.EXPAND, PADDING) self.tmpDirSizer.Add(self.tmpBut, 0, wx.EXPAND, PADDING) self.directoryBoxSizer.Add(self.incomingDirLbl, 0, wx.ALL, PADDING) self.directoryBoxSizer.Add(self.incomingDirSizer, 0, wx.EXPAND) self.directoryBoxSizer.Add(self.tmpDirLbl, 0, wx.ALL, PADDING) self.directoryBoxSizer.Add(self.tmpDirSizer, 0, wx.EXPAND) self.generalSettingsSizer.Add(self.directoryBoxSizer, 0, wx.EXPAND) self.generalSettingsPanel.SetSizerAndFit(self.generalSettingsSizer) self.generalSettingsPanel.SetAutoLayout(True) self.generalSettingsPanel.Layout() self.networkSettingsPanel = wx.Panel(self.noteBook, wx.ID_ANY) self.networkSettingsSizer = wx.BoxSizer(wx.VERTICAL) self.networkSettingsBox = wx.StaticBox(self.networkSettingsPanel, label='General Settings') self.networkSettingsBoxSizer = wx.StaticBoxSizer(self.networkSettingsBox, wx.VERTICAL) self.numberOfConnSizer = wx.BoxSizer(wx.HORIZONTAL) self.numberOfConnLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Max. 
Simultaneous Files:') self.numberOfConn = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.numberOfConn.SetValue(str(Config.settings.maxConcurrentDownload)) self.numberOfConnSizer.Add(self.numberOfConnLbl, 0, wx.ALL, PADDING) self.numberOfConnSizer.Add(self.numberOfConn, 0, wx.ALL) self.numberOfConnPerFileSizer = wx.BoxSizer(wx.HORIZONTAL) self.numberOfConnPerFileLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Connections per File:') self.numberOfConnPerFile = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.numberOfConnPerFile.SetValue(str(Config.settings.maxConnectionPerFile)) self.numberOfConnPerFileSizer.Add(self.numberOfConnPerFileLbl, 0, wx.ALL, PADDING) self.numberOfConnPerFileSizer.Add(self.numberOfConnPerFile, 0, wx.ALL) self.maxRetrySizer = wx.BoxSizer(wx.HORIZONTAL) self.maxRetryLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Max. Retries:') self.maxRetry = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.maxRetry.SetValue(str(Config.settings.maxRetry)) self.maxRetrySizer.Add(self.maxRetryLbl, 0, wx.ALL, PADDING) self.maxRetrySizer.Add(self.maxRetry, 0, wx.ALL) self.networkSettingsBoxSizer.Add(self.numberOfConnSizer, 0, wx.EXPAND) self.networkSettingsBoxSizer.Add(self.numberOfConnPerFileSizer, 0, wx.EXPAND) self.networkSettingsBoxSizer.Add(self.maxRetrySizer, 0, wx.EXPAND) self.networkSettingsSizer.Add(self.networkSettingsBoxSizer, 0, wx.EXPAND, PADDING) self.proxyBox = wx.StaticBox(self.networkSettingsPanel, label='Proxy settings') self.proxyBoxSizer = wx.StaticBoxSizer(self.proxyBox, wx.VERTICAL) self.useProxy = wx.CheckBox(self.networkSettingsPanel, -1, 'Use proxy') self.useProxy.SetValue(Config.settings.useProxy) self.proxyTypeSizer = wx.BoxSizer(wx.VERTICAL) self.proxyTypeLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Type:') self.proxyTypeList = wx.ComboBox(self.networkSettingsPanel, wx.ID_ANY, value = proxyTypeList[0], choices = proxyTypeList, style = wx.CB_READONLY) 
self.proxyTypeList.SetStringSelection(proxyTypeValueList[Config.settings.proxyType]) self.proxyTypeSizer.Add(self.proxyTypeLbl, 0, wx.EXPAND) self.proxyTypeSizer.Add(self.proxyTypeList, 0, wx.EXPAND) self.proxyAddrSizer = wx.BoxSizer(wx.VERTICAL) self.proxyAddrLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Address:') self.proxyAddr = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.proxyAddr.SetValue(str(Config.settings.proxyAddr)) self.proxyAddrSizer.Add(self.proxyAddrLbl, 0, wx.EXPAND) self.proxyAddrSizer.Add(self.proxyAddr, 0, wx.EXPAND) self.proxyPortSizer = wx.BoxSizer(wx.VERTICAL) self.proxyPortLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Port:') self.proxyPort = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.proxyPort.SetValue(str(Config.settings.proxyPort)) self.proxyPortSizer.Add(self.proxyPortLbl, 0, wx.EXPAND) self.proxyPortSizer.Add(self.proxyPort, 0, wx.EXPAND) self.proxySettingsSizer = wx.BoxSizer(wx.HORIZONTAL) self.proxySettingsSizer.Add(self.proxyTypeSizer, 2, wx.EXPAND, PADDING) #self.proxySettingsSizer.Add(self.proxyAddrSizer, 2, wx.EXPAND, PADDING) self.proxySettingsSizer.Add(self.proxyPortSizer, 1, wx.EXPAND, PADDING) self.proxyBoxSizer.Add(self.useProxy, 0, wx.EXPAND) self.proxyBoxSizer.Add(self.proxyAddrSizer, 0, wx.EXPAND) self.proxyBoxSizer.Add(self.proxySettingsSizer, 0, wx.EXPAND) self.proxyUserPassSizer = wx.BoxSizer(wx.HORIZONTAL) self.proxyUserSizer = wx.BoxSizer(wx.VERTICAL) self.proxyUserNameLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Username:') self.proxyUsername = wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.proxyUsername.SetValue(str(Config.settings.proxyUsername)) self.proxyUserSizer.Add(self.proxyUserNameLbl, 0, wx.EXPAND) self.proxyUserSizer.Add(self.proxyUsername, 0, wx.EXPAND) self.proxyPasswordSizer = wx.BoxSizer(wx.VERTICAL) self.proxyPasswordLbl = wx.StaticText(self.networkSettingsPanel, wx.ID_ANY, 'Password:') self.proxyPassword = 
wx.TextCtrl(self.networkSettingsPanel, wx.ID_ANY) self.proxyPassword.SetValue(str(Config.settings.proxyPassword)) self.proxyPasswordSizer.Add(self.proxyPasswordLbl, 0, wx.EXPAND) self.proxyPasswordSizer.Add(self.proxyPassword, 0, wx.EXPAND) self.proxyUserPassSizer.Add(self.proxyUserSizer, 1, wx.EXPAND) self.proxyUserPassSizer.Add(self.proxyPasswordSizer, 1, wx.EXPAND) self.proxyBoxSizer.Add(self.proxyUserPassSizer, 0, wx.EXPAND) self.networkSettingsSizer.Add(self.proxyBoxSizer, 0, wx.EXPAND) self.networkSettingsPanel.SetSizerAndFit(self.networkSettingsSizer) self.networkSettingsPanel.SetAutoLayout(True) self.networkSettingsPanel.Layout() self.noteBook.AddPage(self.generalSettingsPanel, 'General Settings') self.noteBook.AddPage(self.networkSettingsPanel, 'Network Settings') self.buttonsPanel = wx.Panel(self.splitter, wx.ID_ANY) self.buttonsPanelSizer = wx.BoxSizer(wx.VERTICAL) self.okBut = wx.Button(self.buttonsPanel, ID_OK_BUT, 'Save') wx.EVT_BUTTON(self, ID_OK_BUT, self.OnClickSave) self.closeBut = wx.Button(self.buttonsPanel, ID_CLOSE_BUT, 'Close') wx.EVT_BUTTON(self, ID_CLOSE_BUT, self.OnClickClose) self.buttonsSizer = wx.BoxSizer(wx.HORIZONTAL) self.buttonsSizer.Add(self.okBut, 0, wx.CENTER) self.buttonsSizer.Add(self.closeBut, 0, wx.CENTER) self.buttonsPanelSizer.Add(self.buttonsSizer, 0, wx.ALIGN_CENTER | wx.ALIGN_BOTTOM) self.buttonsPanel.SetSizerAndFit(self.buttonsPanelSizer) self.splitter.SplitHorizontally(self.noteBookPanel, self.buttonsPanel) self.splitterSizer = wx.BoxSizer(wx.VERTICAL) self.splitterSizer.Add(self.splitter, 1, wx.EXPAND) self.SetSizer(self.splitterSizer) self.mainSizer.Add(self.noteBook, 0, wx.EXPAND) self.noteBookPanel.SetSizerAndFit(self.mainSizer) #self.noteBookPanel.SetAutoLayout(True) #self.noteBookPanel.Layout() self.Center(wx.BOTH) self.Fit() self.Show(True) def onSelectIncomingDir(self, event): if (self.incomingDirDialog): self.incomingDirDialog.show() else: self.incomingDirDialog = wx.DirDialog(self, 'Please select a directory 
for your incoming files', Config.settings.downloadDir) if self.incomingDirDialog.ShowModal() == wx.ID_OK: self.incomingDirName = self.incomingDirDialog.GetPath() if (Config.checkExistence(self.incomingDirName, TYPE_DIR) != EXIST_W): self.incomingDirMessageDialog = wx.MessageDialog(self, 'You dont have permission to write to this directory', 'Error', style = wx.OK) self.incomingDirMessageDialog.ShowModal() else: Config.settings.downloadDir = self.incomingDirName self.incomingDir.SetValue(Config.settings.downloadDir) self.incomingDirDialog.Destroy() def onSelectTmpDir(self, event): if (self.tmpDirDialog): self.tmpDirDialog.show() else: self.tmpDirDialog = wx.DirDialog(self, 'Please select a directory for temporary files', Config.settings.tmpDir) if self.tmpDirDialog.ShowModal() == wx.ID_OK: self.tmpDirName = self.tmpDirDialog.GetPath() if (Config.checkExistence(self.tmpDirName, TYPE_DIR) != EXIST_W): self.tmpDirMessageDialog = wx.MessageDialog(self, 'You dont have permission to write to this directory', 'Error', style = wx.OK) self.tmpDirMessageDialog.ShowModal() else: Config.settings.tmpDir = self.tmpDirName self.tmpDir.SetValue(Config.settings.tmpDir) self.tmpDirDialog.Destroy() def OnClickSave(self, event): Config.settings.rapidshareUsername = self.username.GetValue() Config.settings.rapidsharePassword = self.password.GetValue() Config.settings.downloadDir = self.incomingDir.GetValue() Config.settings.tmpDir = self.tmpDir.GetValue() Config.settings.maxConnectionPerFile = int(self.numberOfConnPerFile.GetValue()) Config.settings.maxConcurrentDownload = int(self.numberOfConn.GetValue()) Config.settings.maxRetry = int(self.maxRetry.GetValue()) Config.settings.useProxy = self.useProxy.GetValue() Config.settings.proxyType = proxyTypeCurlList[self.proxyTypeList.GetValue()] Config.settings.proxyAddr = str(self.proxyAddr.GetValue()) Config.settings.proxyPort = int(self.proxyPort.GetValue()) Config.settings.proxyUsername = str(self.proxyUsername.GetValue()) 
Config.settings.proxyPassword = str(self.proxyPassword.GetValue()) Config.save() self.Destroy() def OnClickClose(self, event): self.Destroy()
UTF-8
Python
false
false
2,008
7,301,444,437,588
3e947547625a5dc163c06440333458c536dcd90b
abb9be2a1f6ecad7bb00fec5735002f34b44858e
/application/decorator/auth_decorator.py
a1b181a6386f487b3b4268b3c52079cd45bbc4da
[ "MIT" ]
permissive
Danielhhs/Victory
https://github.com/Danielhhs/Victory
9b50c21c07145172654204881941beef91d7a184
132f186f18ea6a709afb003e38ed1d6de3c0579f
refs/heads/master
2021-01-17T14:20:53.181664
2014-06-11T13:40:03
2014-06-11T13:40:03
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# flask from flask import g, abort # application from application.models.datastore.user_model import * def authorization(level): """ Authorization decorator. :param level: UserLevel """ def decorator(f): def wraps(*args, **kwargs): # root if level == UserLevel.root: if g.user is None or g.user.level != UserLevel.root: return abort(403) # normal elif level == UserLevel.normal: if g.user is None: return abort(403) return f(*args, **kwargs) return wraps return decorator
UTF-8
Python
false
false
2,014
13,769,665,155,917
f28ff8079be09ed42f018c1169f9c3765d3b7e16
14bf4023ccc3dd95e23c71935fc8e17deecd0175
/baby/urls.py
57da525230bdf0bdcedd391879b703b2036bd9e3
[]
no_license
rsadwick/alice_sadwick_website
https://github.com/rsadwick/alice_sadwick_website
64ff0d0fe8b6401cdab88ffdbcdf3bb62a08d22f
bb04db2edc8b7526f9f4c04895374257b6a46517
refs/heads/master
2021-01-10T21:26:46.347038
2013-05-26T18:57:27
2013-05-26T18:57:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls import patterns, url from django.views.decorators.cache import cache_page from views import get_article, index urlpatterns = patterns('baby.views', url(r'^$', cache_page(60 * 5)(index)), url(r'^services/rsvp/$', 'rsvp'), url(r'^(?P<slug>[-\w]+)/$', cache_page(60 * 30)(get_article)), url(r'^services/instagram/$', 'get_instagram'), )
UTF-8
Python
false
false
2,013
19,387,482,399,627
a41e563e7e6e3535c049cc74239a7c255c5fd130
d297aab40d81b1724736e4674a104fa85410c6cf
/source/python/3-photos/16-get-emotions.py
d42bc95c1b861c83583ed5b0730a0e03900e82b1
[]
no_license
kthcorp/openapi.pudding.to_samples
https://github.com/kthcorp/openapi.pudding.to_samples
c0bb70176f1c19b2b4ef596eb2588f7567292ae3
0d7212eaf7659ae572f48af1dde7ea9bdb664f64
refs/heads/master
2021-01-10T22:07:46.960634
2012-08-08T07:25:52
2012-08-08T07:25:52
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf8 -*- import simplejson, urllib import urllib2 """ 16 get emotion infomation format : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN sample : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN """ ACCESS_KEY = "96474e57-cb16-11e1-91b7-12313f062e84" API_BASE = "http://openapi.pudding.to/api/v1/emotions/" def get_emotions(lang_id, **args): """ Get emotions """ args.update({ 'access_key': ACCESS_KEY }) url = API_BASE + lang_id + "?" + urllib.urlencode(args) if('format' in args and args['format'] == 'xml'): result = urllib2.urlopen(url).read() else: result = simplejson.load(urllib.urlopen(url)) return result if __name__ == "__main__" : langid = "en" # ko, en, ja json = get_emotions(langid) print json xml = get_emotions(langid, format='xml') print xml
UTF-8
Python
false
false
2,012
6,081,673,735,108
0213cfd1d56fadc9493035ab44f7bbd52e5caeab
e7e453268dc74c74a54c85d35a1f9b254298b9a2
/sage/finance/stock.py
bbc5f56065ccabf0fbfb7a74f00f8ab7186374ea
[]
no_license
pombredanne/sage-1
https://github.com/pombredanne/sage-1
4128172b20099dfcdaa9792a61945e97537501bd
4262d856b92f9e1772d71f993baa6aecbbd87a87
refs/heads/master
2018-03-04T20:07:27.273680
2013-03-22T02:31:54
2013-03-22T02:31:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Stock Market Price Series AUTHORS: - William Stein, 2008 - Brett Nakayama, 2008 - Chris Swierczewski, 2008 TESTS:: sage: ohlc = sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092) sage: loads(dumps(ohlc)) == ohlc True """ import urllib from sage.structure.all import Sequence from datetime import date class OHLC: def __init__(self, timestamp, open, high, low, close, volume): """ Open, high, low, and close information for a stock. Also stores a timestamp for that data along with the volume. INPUT: - ``timestamp`` -- string - ``open``, ``high``, ``low``, ``close`` -- float - ``volume`` -- int EXAMPLES:: sage: sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092) 18-Aug-04 100.01 104.06 95.96 100.34 22353092 """ self.timestamp = timestamp self.open=float(open); self.high=float(high); self.low=float(low); self.close=float(close) self.volume=int(volume) def __repr__(self): """ Return string representation of stock OHLC data. EXAMPLES:: sage: sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092).__repr__() ' 18-Aug-04 100.01 104.06 95.96 100.34 22353092' """ return '%10s %4.2f %4.2f %4.2f %4.2f %10d'%(self.timestamp, self.open, self.high, self.low, self.close, self.volume) def __cmp__(self, other): """ Compare ``self`` and ``other``. EXAMPLES:: sage: ohlc = sage.finance.stock.OHLC('18-Aug-04', 100.01, 104.06, 95.96, 100.34, 22353092) sage: ohlc2 = sage.finance.stock.OHLC('18-Aug-04', 101.01, 104.06, 95.96, 100.34, 22353092) sage: cmp(ohlc, ohlc2) -1 """ if not isinstance(other, OHLC): return cmp(type(self), type(other)) return cmp((self.timestamp, self.open, self.high, self.low, self.close, self.volume), (other.timestamp, other.open, other.high, other.low, other.close, other.volume)) class Stock: """ Class for retrieval of stock market information. """ def __init__(self, symbol, cid=''): """ Create a ``Stock`` object. 
Optional initialization by ``cid``: an identifier for each equity used by Google Finance. INPUT: - ``symbol`` -- string, a ticker symbol (with or without market). Format: ``"MARKET:SYMBOL"`` or ``"SYMBOL"``. If you don't supply the market, it is assumed to be NYSE or NASDAQ. e.g. "goog" or "OTC:NTDOY" - ``cid`` -- Integer, a Google contract ID (optional). .. NOTE:: Currently, the symbol and cid do not have to match. When using ``google()``, the cid will take precedence. EXAMPLES:: sage: S = finance.Stock('ibm') sage: S # random; optional -- internet IBM (127.48) """ self.symbol = symbol.upper() self.cid = cid def __repr__(self): """ Return string representation of this stock. EXAMPLES:: sage: finance.Stock('ibm').__repr__() # random; optional -- internet 'IBM (127.47)' """ return "%s (%s)"%(self.symbol, self.market_value()) def market_value(self): """ Return the current market value of this stock. OUTPUT: A Python float. EXAMPLES:: sage: finance.Stock('goog').market_value() # random; optional - internet 575.83000000000004 """ return float(self.yahoo()['price']) def yahoo(self): """ Get Yahoo current price data for this stock. OUTPUT: A dictionary. 
EXAMPLES:: sage: finance.Stock('GOOG').yahoo() # random; optional -- internet {'stock_exchange': '"NasdaqNM"', 'market_cap': '181.1B', '200day_moving_avg': '564.569', '52_week_high': '747.24', 'price_earnings_growth_ratio': '1.04', 'price_sales_ratio': '10.16', 'price': '576.48', 'earnings_per_share': '14.463', '50day_moving_avg': '549.293', 'avg_daily_volume': '6292480', 'volume': '1613507', '52_week_low': '412.11', 'short_ratio': '1.00', 'price_earnings_ratio': '40.50', 'dividend_yield': 'N/A', 'dividend_per_share': '0.00', 'price_book_ratio': '7.55', 'ebitda': '6.513B', 'change': '-9.32', 'book_value': '77.576'} """ url = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (self.symbol, 'l1c1va2xj1b4j4dyekjm3m4rr5p5p6s7') values = urllib.urlopen(url).read().strip().strip('"').split(',') data = {} data['price'] = values[0] data['change'] = values[1] data['volume'] = values[2] data['avg_daily_volume'] = values[3] data['stock_exchange'] = values[4] data['market_cap'] = values[5] data['book_value'] = values[6] data['ebitda'] = values[7] data['dividend_per_share'] = values[8] data['dividend_yield'] = values[9] data['earnings_per_share'] = values[10] data['52_week_high'] = values[11] data['52_week_low'] = values[12] data['50day_moving_avg'] = values[13] data['200day_moving_avg'] = values[14] data['price_earnings_ratio'] = values[15] data['price_earnings_growth_ratio'] = values[16] data['price_sales_ratio'] = values[17] data['price_book_ratio'] = values[18] data['short_ratio'] = values[19] return data def google(self,startdate='Jan+1,+1900',enddate=date.today().strftime("%b+%d,+%Y"), histperiod='daily'): """ Return an immutable sequence of historical price data for this stock, obtained from Google. OHLC data is stored internally as well. By default, returns the past year's daily OHLC data. Dates ``startdate`` and ``enddate`` should be formatted ``'Mon+d,+yyyy'``, where ``'Mon'`` is a three character abbreviation of the month's name. .. 
NOTE:: Google Finance returns the past year's financial data by default when ``startdate`` is set too low from the equity's date of going public. By default, this function only looks at the NASDAQ and NYSE markets. However, if you specified the market during initialization of the stock (i.e. ``finance.Stock("OTC:NTDOY")``), ``Stock.google()`` will give correct results. INPUT: - ``startdate`` -- string, (default: ``'Jan+1,+1900'``) - ``enddate`` -- string, (default: current date) - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``) OUTPUT: A sequence. EXAMPLES: We get the first five days of VMware's stock history:: sage: finance.Stock('vmw').google()[:5] # optional -- internet [ 28-Nov-07 80.57 88.49 80.57 87.69 7496000, 29-Nov-07 90.91 93.20 89.50 90.85 5497600, 30-Nov-07 95.39 95.60 89.85 91.37 4750200, 3-Dec-07 89.87 96.00 88.70 94.97 4401100, 4-Dec-07 92.26 97.10 92.05 95.08 2896600 ] sage: finance.Stock('F').google('Jan+3,+1978', 'Jul+7,+2008')[:5] # optional -- internet [ 3-Jan-78 0.00 1.93 1.89 1.89 1618200, 4-Jan-78 0.00 1.89 1.87 1.88 2482700, 5-Jan-78 0.00 1.89 1.84 1.84 2994900, 6-Jan-78 0.00 1.84 1.82 1.83 3042500, 9-Jan-78 0.00 1.81 1.79 1.81 3916400 ] Note that when ``startdate`` is too far prior to a stock's actual start date, Google Finance defaults to a year's worth of stock history leading up to the specified end date. 
For example, Apple's (AAPL) stock history only dates back to September 7, 1984:: sage: finance.Stock('AAPL').google('Sep+1,+1900', 'Jan+1,+2000')[0:5] # optional -- internet [ 4-Jan-99 0.00 10.56 10.00 10.31 34031600, 5-Jan-99 0.00 10.98 10.38 10.83 50360400, 6-Jan-99 0.00 11.03 10.25 10.44 48160800, 7-Jan-99 0.00 11.27 10.53 11.25 51036400, 8-Jan-99 0.00 11.72 11.00 11.25 24240000 ] Here is an example where we create and get the history of a stock that is not in NASDAQ or NYSE:: sage: finance.Stock("OTC:NTDOY").google(startdate="Jan+1,+2007", enddate="Jan+1,+2008")[:5] # optional -- internet [ 3-Jan-07 32.44 32.75 32.30 32.44 156283, 4-Jan-07 31.70 32.40 31.20 31.70 222643, 5-Jan-07 30.15 30.50 30.15 30.15 65670, 8-Jan-07 30.10 30.50 30.00 30.10 130765, 9-Jan-07 29.90 30.05 29.60 29.90 103338 ] Here, we create a stock by cid, and get historical data. Note that when using historical, if a cid is specified, it will take precedence over the stock's symbol. So, if the symbol and cid do not match, the history based on the contract id will be returned. :: sage: sage.finance.stock.Stock("AAPL", 22144).google(startdate='Jan+1,+1990')[:5] #optional -- internet [ 2-Jan-90 0.00 9.38 8.75 9.31 6542800, 3-Jan-90 0.00 9.50 9.38 9.38 7428400, 4-Jan-90 0.00 9.69 9.31 9.41 7911200, 5-Jan-90 0.00 9.56 9.25 9.44 4404000, 8-Jan-90 0.00 9.50 9.25 9.50 3627600 ] """ cid = self.cid symbol = self.symbol if self.cid=='': if ':' in symbol: R = self._get_data('', startdate, enddate, histperiod) else: R = self._get_data('NASDAQ:', startdate, enddate, histperiod) if "Bad Request" in R: R = self._get_data("NYSE:", startdate, enddate, histperiod) else: R = self._get_data('', startdate, enddate, histperiod) if "Bad Request" in R: raise RuntimeError self.__historical = [] self.__historical = self._load_from_csv(R) return self.__historical def open(self, *args, **kwds): r""" Return a time series containing historical opening prices for this stock. 
If no arguments are given, will return last acquired historical data. Otherwise, data will be gotten from Google Finance. INPUT: - ``startdate`` -- string, (default: ``'Jan+1,+1900'``) - ``enddate`` -- string, (default: current date) - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``) OUTPUT: A time series -- close price data. EXAMPLES: You can directly obtain Open data as so:: sage: finance.Stock('vmw').open(startdate='Jan+1,+2008', enddate='Feb+1,+2008') # optional -- internet [83.0500, 85.4900, 84.9000, 82.0000, 81.2500 ... 82.0000, 58.2700, 54.4900, 55.6000, 56.9800] Or, you can initialize stock data first and then extract the Open data:: sage: c = finance.Stock('vmw') sage: c.google(startdate='Feb+1,+2008', enddate='Mar+1,+2008')[:5] # optional -- internet [ 31-Jan-08 55.60 57.35 55.52 56.67 2591100, 1-Feb-08 56.98 58.14 55.06 57.85 2473000, 4-Feb-08 58.00 60.47 56.91 58.05 1816500, 5-Feb-08 57.60 59.30 57.17 59.30 1709000, 6-Feb-08 60.32 62.00 59.50 61.52 2191100 ] sage: c.open() # optional -- internet [55.6000, 56.9800, 58.0000, 57.6000, 60.3200 ... 56.5500, 59.3000, 60.0000, 59.7900, 59.2600] Otherwise, ``self.google()`` will be called with the default arguments returning a year's worth of data:: sage: finance.Stock('vmw').open() # random; optional -- internet [52.1100, 60.9900, 59.0000, 56.0500, 57.2500 ... 83.0500, 85.4900, 84.9000, 82.0000, 81.2500] """ from time_series import TimeSeries if len(args) != 0: return TimeSeries([x.open for x in self.google(*args, **kwds)]) try: return TimeSeries([x.open for x in self.__historical]) except AttributeError: pass return TimeSeries([x.open for x in self.google(*args, **kwds)]) def close(self, *args, **kwds): r""" Return the time series of all historical closing prices for this stock. If no arguments are given, will return last acquired historical data. Otherwise, data will be gotten from Google Finance. 
INPUT: - ``startdate`` -- string, (default: ``'Jan+1,+1900'``) - ``enddate`` -- string, (default: current date) - ``histperiod`` -- string, (``'daily'`` or ``'weekly'``) OUTPUT: A time series -- close price data. EXAMPLES: You can directly obtain close data as so:: sage: finance.Stock('vmw').close(startdate='Jan+1,+2008', enddate='Feb+1,+2008') # optional -- internet [84.9900, 84.6000, 83.9500, 80.4900, 72.9900 ... 83.0000, 54.8700, 56.4200, 56.6700, 57.8500] Or, you can initialize stock data first and then extract the Close data:: sage: c = finance.Stock('vmw') sage: c.google(startdate='Feb+1,+2008', enddate='Mar+1,+2008')[:5] # optional -- internet [ 31-Jan-08 55.60 57.35 55.52 56.67 2591100, 1-Feb-08 56.98 58.14 55.06 57.85 2473000, 4-Feb-08 58.00 60.47 56.91 58.05 1816500, 5-Feb-08 57.60 59.30 57.17 59.30 1709000, 6-Feb-08 60.32 62.00 59.50 61.52 2191100 ] sage: c.close() # optional -- internet [56.6700, 57.8500, 58.0500, 59.3000, 61.5200 ... 58.2900, 60.1800, 59.8600, 59.9500, 58.6700] Otherwise, ``self.google()`` will be called with the default arguments returning a year's worth of data:: sage: finance.Stock('vmw').close() # random; optional -- internet [57.7100, 56.9900, 55.5500, 57.3300, 65.9900 ... 84.9900, 84.6000, 83.9500, 80.4900, 72.9900] """ from time_series import TimeSeries if len(args) != 0: return TimeSeries([x.close for x in self.google(*args, **kwds)]) try: return TimeSeries([x.close for x in self.__historical]) except AttributeError: pass return TimeSeries([x.close for x in self.google(*args, **kwds)]) def load_from_file(self, file): r""" Load historical data from a local csv formatted data file. Note that no symbol data is included in Google Finance's csv data. The csv file must be formatted in the following way, just as on Google Finance:: Timestamp,Open,High,Low,Close,Volume INPUT: - ``file`` -- local file with Google Finance formatted OHLC data. OUTPUT: A sequence -- OHLC data. 
EXAMPLES: Suppose you have a file in your home directory containing Apple stock OHLC data, such as that from Google Finance, called ``AAPL-minutely.csv``. One can load this information into a Stock object like so. Note that the path must be explicit:: sage: filename = tmp_filename(ext='.csv') sage: open(filename,'w').write("Date,Open,High,Low,Close,Volume\n1212405780,187.80,187.80,187.80,187.80,100\n1212407640,187.75,188.00,187.75,188.00,2000\n1212407700,188.00,188.00,188.00,188.00,1000\n1212408000,188.00,188.11,188.00,188.00,2877\n1212408060,188.00,188.00,188.00,188.00,687") sage: finance.Stock('aapl').load_from_file(filename)[:5] [ 1212408060 188.00 188.00 188.00 188.00 687, 1212408000 188.00 188.11 188.00 188.00 2877, 1212407700 188.00 188.00 188.00 188.00 1000, 1212407640 187.75 188.00 187.75 188.00 2000, 1212405780 187.80 187.80 187.80 187.80 100 ] Note that since the source file doesn't contain information on which equity the information comes from, the symbol designated at initialization of Stock need not match the source of the data. For example, we can initialize a Stock object with the symbol ``'goog'``, but load data from ``'aapl'`` stock prices:: sage: finance.Stock('goog').load_from_file(filename)[:5] [ 1212408060 188.00 188.00 188.00 188.00 687, 1212408000 188.00 188.11 188.00 188.00 2877, 1212407700 188.00 188.00 188.00 188.00 1000, 1212407640 187.75 188.00 187.75 188.00 2000, 1212405780 187.80 187.80 187.80 187.80 100 ] This tests a file that doesn't exist:: sage: finance.Stock("AAPL").load_from_file("I am not a file") Traceback (most recent call last): ... 
IOError: [Errno 2] No such file or directory: 'I am not a file' """ file_obj = open(file, 'r') R = file_obj.read(); self.__historical = self._load_from_csv(R) file_obj.close() return self.__historical def _load_from_csv(self, R): r""" EXAMPLES: This indirectly tests ``_load_from_csv()``:: sage: filename = tmp_filename(ext='.csv') sage: open(filename,'w').write("Date,Open,High,Low,Close,Volume\n1212405780,187.80,187.80,187.80,187.80,100\n1212407640,187.75,188.00,187.75,188.00,2000\n1212407700,188.00,188.00,188.00,188.00,1000\n1212408000,188.00,188.11,188.00,188.00,2877\n1212408060,188.00,188.00,188.00,188.00,687") sage: finance.Stock('aapl').load_from_file(filename) [ 1212408060 188.00 188.00 188.00 188.00 687, 1212408000 188.00 188.11 188.00 188.00 2877, 1212407700 188.00 188.00 188.00 188.00 1000, 1212407640 187.75 188.00 187.75 188.00 2000, 1212405780 187.80 187.80 187.80 187.80 100 ] """ R = R.splitlines() headings = R[0].split(',') hist_data = [] for x in reversed(R[1:]): try: timestamp, opn, high, low, close, volume = x.split(',') ohlc = OHLC(timestamp, opn,high,low,close,volume) hist_data.append(ohlc) except ValueError: pass hist_data = Sequence(hist_data,cr=True,universe=lambda x:x, immutable=True) return hist_data def _get_data(self, exchange='', startdate='Jan+1,+1900', enddate=date.today().strftime("%b+%d,+%Y"), histperiod='daily'): """ This function is used internally. 
EXAMPLES: This indirectly tests the use of ``_get_data()``:: sage: finance.Stock('aapl').google(startdate='Jan+1,+1990')[:2] # optional -- internet [ 2-Jan-90 0.00 9.38 8.75 9.31 6542800, 3-Jan-90 0.00 9.50 9.38 9.38 7428400 ] """ symbol = self.symbol cid = self.cid if cid == '': url = 'http://finance.google.com/finance/historical?q=%s%s&startdate=%s&enddate=%s&histperiod=%s&output=csv'%(exchange, symbol.upper(), startdate, enddate, histperiod) else: url = 'http://finance.google.com/finance/historical?cid=%s&startdate=%s&enddate=%s&histperiod=%s&output=csv'%(cid, startdate, enddate, histperiod) return urllib.urlopen(url).read()
UTF-8
Python
false
false
2,013
15,659,450,808,879
ae5b30ee182bd1e437dee0f30c013e3286f0f475
3fa4e825b925d3385da09f3012f635fb0a21cccf
/exponential_sol.py
c5dbef6f6c1370f580391225fddd61a4164c1e50
[]
no_license
aizwellenstan/ARC-moving-averages
https://github.com/aizwellenstan/ARC-moving-averages
84cd8939ba6c1dfa393b4f74d9ddd31ed43ac13e
33112fcd3ef6a48b7dd8c1f1f231b90a51a0f7c7
refs/heads/master
2021-05-27T23:23:06.171858
2014-01-20T04:42:11
2014-01-20T04:42:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from stock import Stock
from Queue import Queue
from arc_utils import print_ma

# Exponential moving average (EMA) via the recurrence:
#   X_{0,a} = D_0
#   X_{t,a} = (1 - a) * D_t + a * X_{t-1,a}

# M, the number of days we care about
ma_days = 10

# alpha, the smoothing factor; must satisfy 0 < a <= 1
alpha = 0.90

# simulation length in days
sim_len = 50


def populate_ma(ma_arr, s):
    """Advance the stock one day and append the next EMA to ma_arr.

    Applies one step of the recurrence above using the stock's current
    moving average, then stores the new value back on the stock.
    """
    price = s.next_price()
    ema = ((1 - alpha) * price) + (alpha * s.get_ma())
    s.update_ma(ema)
    ma_arr.append(ema)


def main():
    stock = Stock()
    # Seed the series with the stock's initial moving average.
    ma_arr = [stock.get_ma()]
    # Run the remaining sim_len - 1 steps of the simulation.
    for _ in range(1, sim_len, 1):
        populate_ma(ma_arr, stock)
    # Pretty-print every moving average (Python 2 map is eager, so this
    # executes print_ma for each element).
    map(print_ma, ma_arr)


main()
UTF-8
Python
false
false
2,014
7,533,372,645,289
d3641acaa31f3f5bdc67953e00a6fe2d1fa66547
affa4d21751585f50ba9ab210cbc1ae30126566a
/test_5.py
2b421e0a50bd79bde73f71facbb04a6e3f1af176
[]
no_license
rhintz42/Learning-Python-fix-errors
https://github.com/rhintz42/Learning-Python-fix-errors
50e6ae9498b3510788773aefba308214d51b9482
ae4ce9ae2727c038653554ee09026e55249e086c
refs/heads/master
2021-01-20T12:37:29.561094
2014-05-24T22:32:21
2014-05-24T22:32:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def bar(l):
    """Append the string "Hello" to list ``l`` and return the same list.

    Bug fix: the original literal had mismatched quotes (``"Hello'``),
    which is a syntax error.
    """
    l.append("Hello")
    return l


def foo():
    """Return a fresh list containing only "Hello"."""
    return bar([])


print("Should print out a list with 'Hello' in it")
print(foo())
UTF-8
Python
false
false
2,014
6,433,861,021,431
be504405d7db1315aee5c5bdc74998446a0f9262
5669fbf3015a5c83d268e2dcc85bc8fc74c6ed2d
/resource_wagon/web/urls.py
9617021b883ebc01e875cbc9e3f659ed088781a8
[]
no_license
technomicssolutions/resource_wagon
https://github.com/technomicssolutions/resource_wagon
7e32a26d2bd1bab7b1be704f04a20dbd8527fa4b
8a61aff833ea0bc1785a1b15bb051b61f591e443
refs/heads/master
2016-08-07T17:15:22.752259
2014-10-27T07:47:45
2014-10-27T07:47:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
# Bug fix: MissionStatement, ResourcesWagon, WagonDrivers, CandidatePreparation,
# CompetencyAnalysis and RecruitmentDivisions were each imported twice from
# web.views; the duplicate names are removed (same module, so no behavior change).
from web.views import (Login, Logout, Home, ResetPassword, RequestView, ReplyEmployer, \
    DeleteRequest, ForgotPassword, Aboutus, Dashboard, Companies, Company, PremiumEmployer, \
    MissionStatement, ResourcesWagon, WagonDrivers, CandidatePreparation, CompetencyAnalysis, \
    Contact, RecruitmentDivisions, DeleteEmployer, DeleteJobseeker, \
    RequestCV, TermsAndConditions, ViewTestmonials)

# URL routes for the public site and the admin dashboard.
# Views that require authentication are wrapped in login_required with an
# explicit login_url.
# NOTE(review): the spellings 'candiadte_preparation' and 'testmonials' in the
# route strings below look like typos, but they are live URLs — changing them
# would break published links, so they are kept as-is.
urlpatterns = patterns('',
    url(r'login/$', Login.as_view(), name="login"),
    url(r'logout/$', Logout.as_view(), name="logout"),
    url(r'^$', Home.as_view(), name="home"),
    url(r'^admin_dashboard/$', login_required(Dashboard.as_view(), login_url="/login/"), name="admin_dashboard"),
    url(r'^forgot_password/$', ForgotPassword.as_view(), name='forgot_password'),
    url(r'^reset_password/(?P<user_id>\d+)/$', login_required(ResetPassword.as_view(), login_url="/login/"), name="reset_password"),
    url(r'^request/$', login_required(RequestView.as_view(), login_url="/login/"), name="request"),
    url(r'^reply/(?P<request_id>\d+)/$', login_required(ReplyEmployer.as_view(), login_url="/login/"), name="reply"),
    url(r'^delete_request/(?P<request_id>\d+)/$', login_required(DeleteRequest.as_view(), login_url="/login/"), name='delete_request'),
    url(r'^aboutus/$', Aboutus.as_view(), name='aboutus'),
    url(r'^contact/$', Contact.as_view(), name='contact'),
    url(r'^cv_request/$', RequestCV.as_view(), name='cv_request'),
    url(r'^aboutus/mission_statement/$', MissionStatement.as_view(), name='mission_statement'),
    url(r'^aboutus/resources_wagon/$', ResourcesWagon.as_view(), name='resources_wagon'),
    url(r'^aboutus/wagon_drivers/$', WagonDrivers.as_view(), name='wagon_drivers'),
    url(r'^employers/recruitment_divisions/$', RecruitmentDivisions.as_view(), name='recruitment_divisions'),
    url(r'^employers/competency_analysis/$', CompetencyAnalysis.as_view(), name='competency_analysis'),
    url(r'^candidates/candiadte_preparation/$', CandidatePreparation.as_view(), name='candidate_preparation'),
    url(r'^company/(?P<company_id>\d+)/$', Company.as_view(), name='company'),
    url(r'^companies/$', Companies.as_view(), name='companies'),
    url(r'^delete_employer/(?P<recruiter_id>\d+)/$', DeleteEmployer.as_view(), name='delete_employer'),
    url(r'^delete_jobseeker/(?P<jobseeker_id>\d+)/$', DeleteJobseeker.as_view(), name='delete_jobseeker'),
    url(r'^save_premium_employer/$', PremiumEmployer.as_view(), name='save_premium_employer'),
    url(r'^terms_conditions/$', TermsAndConditions.as_view(), name='terms_conditions'),
    url(r'^testmonials/$', ViewTestmonials.as_view(), name='testmonials')
)
UTF-8
Python
false
false
2,014
15,857,019,269,271
226ddb4aa5d1c8792c8dc96e7c521049f451fa52
28b71261dc65c4d777951e20588c81ac45aef0d5
/inheritence-quiz/main.py
b3b2bd23a601eab00cebd3d44019728d408c5ba5
[]
no_license
DOCarroll/DPW1405
https://github.com/DOCarroll/DPW1405
91a562c50094839f9de74638a4e5d8e93f9ff943
bad34aeff2763bbe67d1c21fde29e850c012c6f4
refs/heads/master
2021-01-22T03:13:53.456017
2014-05-22T03:30:06
2014-05-22T03:30:06
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#Daniel O'Carroll
#DPW1405
#May 19th, 2014
import webapp2


class MainHandler(webapp2.RequestHandler):
    """Root handler: answers GET with a plain-text greeting."""

    def get(self):
        self.response.write('Hello world!')


class Dog(object):
    """A dog with a (private) sound and a favorite game.

    Bug fix: ``__init__`` previously assigned plain local variables, so no
    instance state was ever stored; the values are now bound to ``self``.
    """

    def __init__(self):
        self.__dog_sound = 'Bark!'
        self.favorite_game = 'Fetch'

    def getsound(self):
        # Bug fix: previously took a required argument and simply echoed it
        # back; now returns the instance's own sound.
        return self.__dog_sound

    def getgame(self):
        # Bug fix: same echo problem as getsound().
        return self.favorite_game


class Husky(Dog):
    """A husky is just a Dog.

    Bug fix: the original class body contained ``Dog.__init__(self):`` as a
    bare statement (a syntax error); the base initializer is now called from
    a proper ``__init__``.
    """

    def __init__(self):
        Dog.__init__(self)


# Bug fix: the original had a dangling ``husky = Dog()`` / ``husky`` inside
# the broken class body; presumably a Husky instance was intended — TODO confirm.
husky = Husky()

app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
UTF-8
Python
false
false
2,014
1,640,677,546,162
124279c3b63700a8fbc351915c51efdb070bb8f3
637088bf9f54b75e50d20c801e62b9fb4c237256
/views/register.py
03b9c085ff0610a7267b346344cc78ec1773dc7a
[]
no_license
ehsansh84/Nafis_Project
https://github.com/ehsansh84/Nafis_Project
aa9e8450425843592660840f8271c9b0708b951f
a04a7293076a5a28f5ac8238dd84e03aea8247e6
refs/heads/master
2015-08-13T10:15:37.744739
2014-09-19T18:08:16
2014-09-19T18:08:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web

from views.data import *
from models.user import *


class Register(tornado.web.RequestHandler):
    """Serves the registration form and creates a User on submission."""

    def get(self, *args, **kwargs):
        # Show the empty registration form.
        self.render('register.html')

    def post(self, *args, **kwargs):
        # Read the submitted fields (get_argument raises if one is missing);
        # the tuple fixes the same retrieval order as before.
        fields = {key: self.get_argument(key)
                  for key in ('name', 'family', 'age', 'username', 'password')}
        # Copy onto a fresh User in the original assignment order, then persist.
        obj = User()
        for key in ('name', 'username', 'family', 'age', 'password'):
            setattr(obj, key, fields[key])
        obj.save()
UTF-8
Python
false
false
2,014
9,869,834,871,328
93ad2be37013f3b427772713b422c7683e336f59
1696542c9d76fbd9c3da46ac3ab895c879e5a142
/scripts/ml_benchmark.py
d58ff6618b2dfdd67441f92022a9d0c755be0b19
[]
no_license
ryeakle/DigitClassifier
https://github.com/ryeakle/DigitClassifier
a17fffca9158481621cbcd0c803b36ac6165f342
5680d58b91cc206be207f0c645a66995c6e10bd9
refs/heads/master
2020-07-02T03:04:59.823295
2013-08-19T03:15:55
2013-08-19T03:15:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python import time import scipy import matplotlib.pyplot as plt import matplotlib.cm as cm import sklearn as sk import sklearn.neighbors import sklearn.ensemble import numpy as np import Image import sys IMG_SIZE = 28*28 TRAIN_CSV_PATH = "../data/train.csv" # Load images and labels train_labels = np.loadtxt(open(TRAIN_CSV_PATH, "rb"), delimiter=",", skiprows=1, usecols=[0]) img_train = np.loadtxt(open(TRAIN_CSV_PATH, "rb"), delimiter=",", skiprows=1, usecols=range(1, IMG_SIZE + 1)) set_size = np.size(train_labels) print "Total Set Size: " + str(set_size) train_size = (int) (set_size * 0.5) test_size = set_size - train_size train_range = range(0, train_size) test_range = range(train_size, set_size) def showTrainImage(test_image): img = test_image.reshape(28,28) plt.imshow(img, cmap = cm.Greys_r) plt.show() #benchmark K Nearest Neighbors def benchmarkKNN(train_imgs, train_labels, test_imgs, test_labels): knn = sk.neighbors.KNeighborsClassifier(n_neighbors = 5, weights='uniform', algorithm='auto', leaf_size=30, warn_on_equidistant=True, p=2) train_size = len(train_labels) test_size = len(test_labels) train_range = range(0, train_size) test_range = range(0, test_size) start_t = time.time() knn.fit(img_train[train_range, :], train_labels[train_range]) train_t = time.time() - start_t; print "KNN train time: " + str(train_t) print "KNN training complete, beginning test set:" error_count = 0 start_t = time.time() for i in test_range: if knn.predict(img_train[i, :]) != train_labels[i]: error_count = error_count + 1 if i % 100 == 0: sys.stdout.write("\r") sys.stdout.write(str((100.0*(i)/len(test_labels))) + "% complete (KNN)") error_rate = 100.0*error_count/len(test_labels) test_t = time.time() - start_t print "Test time = " + str(test_t) return error_rate, train_t, test_t # Benchmark Random Forest Classifier def benchmarkRF(train_imgs, train_labels, test_imgs, test_labels): rf = sk.ensemble.RandomForestClassifier() start_t = time.time() rf.fit(train_imgs, 
train_labels) train_t = time.time() - start_t error_count = 0 print "Test Size = " + str(len(test_labels)) start_t = time.time() for i in range(0, len(test_labels)): if rf.predict(test_imgs[i, :]) != test_labels[i]: error_count = error_count + 1 if (i % 100) == 0: sys.stdout.write("\r") sys.stdout.write(str((100.0*(i)/len(test_labels))) + "% complete (RF)") test_t = time.time() - start_t error_rate = 100.0 * error_count/(len(test_labels)) return error_rate, train_t, test_t err_rf, train_t_rf, test_t_rf = benchmarkRF(img_train[train_range, :], train_labels[train_range], img_train[test_range, :], train_labels[test_range]) err_knn, train_t_knn, test_t_knn = benchmarkKNN(img_train[train_range, :], train_labels[train_range], img_train[test_range, :], train_labels[test_range]) print "RF Performance [Error %, train_t, test_t]: " print err_rf print train_t_rf print test_t_rf print "KNN Performance [Error %, train_t, test_t]: " print err_knn print train_t_knn print test_t_knn
UTF-8
Python
false
false
2,013
15,917,148,829,991
feef6d40f8ceae7981a241f84c6c5bc2157704bd
e5150b8862688eff630a542df68fc4f634c88349
/views.py
fa8051a5fa4154cee756e102aee3eb6433694291
[]
no_license
derekzhang79/outofthedarj.com
https://github.com/derekzhang79/outofthedarj.com
265bbd765fdad691de9384d756fd2853da69f00c
2b4463548f7ef8869c4d2a51735336bf6ef07ec5
refs/heads/master
2021-01-18T06:59:21.208366
2010-05-15T13:07:29
2010-05-15T13:07:29
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response


def coming_soon(request):
    """Render the static 'coming soon' placeholder page."""
    return render_to_response('coming_soon.html')


def base_template(request):
    """Render the bare base template (handy for layout checks)."""
    return render_to_response('base_template.html')
UTF-8
Python
false
false
2,010
9,302,899,183,259
321a692c776bc9ff7b0d9f112d4a505ff9f45257
6cde2f21ba42762b10bd0092ccff262ea0d62e53
/src/mastersproject/CMPE295B_Submission/sentimentAnalyzer.py
7819b279338fde696e73bd78aec3f95c17dd75b5
[]
no_license
abhimanyusalokhe/DSA
https://github.com/abhimanyusalokhe/DSA
ca63d8861bf203ee4554e14525a496f8481a6df3
d0620fffabca5df0fa3c8b5fcb6efe01ee6a87a6
refs/heads/master
2016-09-05T19:55:19.931272
2014-02-23T05:13:17
2014-02-23T05:13:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""Twitter sentiment analyzer: classifies unprocessed tweets from MySQL with a
pre-trained (pickled) NLTK Naive Bayes classifier and writes the sentiment back.

Pipeline: normalize raw tweet text -> tokenize into a feature vector (stop
words removed, emoticons mapped to words) -> bag-of-words features over a fixed
feature list -> classify -> UPDATE the row. Runs forever, polling every 100 s.
"""
import re
import csv
import pprint
import nltk.classify
import MySQLdb as mdb
import sys
import time
import pickle

#Load emoticons from file (CSV lines of "emoticon,replacement word")
fp = open('emoticons.txt', "r")
emoticons = {}
for line in fp:
    row = line.rstrip().split(',')
    emoticons[str(row[0])] = row[1]
fp.close()


#replace repeated characters
def replaceRepeatedLetters(s):
    """Collapse any run of 3+ identical characters down to exactly two
    (e.g. "coooool" -> "cool" stays "cool"-length-2 run)."""
    #look for 2 or more repetitions of character
    pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
    return pattern.sub(r"\1\1", s)
#end


#load StopWords from given file
def readStopWordList(stopWordFile):
    """Read one stop word per line; also treat the URL / AT_USER
    placeholders inserted by processRawTweet() as stop words."""
    stopWords = []
    fp = open(stopWordFile, 'r')
    line = fp.readline()
    while line:
        word = line.strip()
        stopWords.append(word)
        line = fp.readline()
    stopWords.append('URL')
    stopWords.append('AT_USER')
    fp.close()
    return stopWords
#end


#start process_tweet
def processRawTweet(tweet):
    """Normalize raw tweet text: lowercase, replace links/mentions with
    placeholders, squeeze whitespace, strip hashtag markers and quotes."""
    # process the tweets
    #lower case conversion
    tweet = tweet.lower()
    #Replace www.* or https?://* by URL
    tweet = re.sub('((www\.[\s]+)|(https?://[^\s]+))','URL',tweet)
    #Replace @username by AT_USER
    tweet = re.sub('@[^\s]+','AT_USER',tweet)
    #strip extra white spaces
    tweet = re.sub('[\s]+', ' ', tweet)
    #Convert #word to word
    tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
    tweet = tweet.strip('\'"')
    return tweet
#end


#start getFeaturesInVector
def getFeaturesInVector(tweet, stopWords):
    """Tokenize a processed tweet into a list of cleaned feature words.

    Drops stop words and non-alphanumeric tokens; maps emoticons to their
    replacement words via the module-level ``emoticons`` table.
    """
    FeatureVectoryContainer = []
    words = tweet.split()
    for w in words:
        #replace two or more with two occurrences
        w = replaceRepeatedLetters(w)
        #strip punctuation
        w = w.strip('\'"?,.')
        #check if it consists of only words (letters/digits, 2+ letters)
        val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$", w)
        #replace emoticons
        word_to_replace= emoticons.get(w)
        if (word_to_replace is not None):
            w = word_to_replace
        #ignore if it is a stopWord
        if(w in stopWords or val is None):
            continue
        else:
            FeatureVectoryContainer.append(w.lower())
    return FeatureVectoryContainer
#end


#start getTotalFeatureList
def getTotalFeatureList(fileName):
    """Read the full feature vocabulary, one word per line."""
    fp = open(fileName, 'r')
    line = fp.readline()
    featureListContainer = []
    while line:
        line = line.strip()
        featureListContainer.append(line)
        line = fp.readline()
    fp.close()
    return featureListContainer
#end


#Feature set extraction from tweet
def extractFeaturesFromTweet(tweet):
    """Build the NLTK bag-of-words dict {'contains(word)': bool} over the
    module-level featureListContainer vocabulary."""
    tweet_words = set(tweet)
    features = {}
    for word in featureListContainer:
        features['contains(%s)' % word] = (word in tweet_words)
    return features
#end


#Read the tweets one by one and process it
input_Tweets = csv.reader(open('data/feature_list/full_training_dataset.csv', 'rb'), delimiter=',', quotechar='|')
# NOTE(review): ``st`` is opened but never used or closed below.
st = open('data/feature_list/stopwords.txt', 'r')
stopWords = readStopWordList('data/feature_list/stopwords.txt')
featureListContainer = getTotalFeatureList('data/feature_list/feature_list.txt')
pp = pprint.PrettyPrinter()
count = 0;
tweets = []
for row in input_Tweets:
    sentiment = row[0]
    tweet = row[1]
    processedTweet = processRawTweet(tweet)
    FeatureVectoryContainer = getFeaturesInVector(processedTweet, stopWords)
    tweets.append((FeatureVectoryContainer, sentiment));
#end loop

# One-time training path, intentionally disabled: the classifier is trained
# once, pickled, and then only loaded below.
#training_set = nltk.classify.util.apply_features(extractFeaturesFromTweet, tweets)
#pp.pprint(training_set)
# Train and save the Naive Bayes classifier
#NBClassifier = nltk.NaiveBayesClassifier.train(training_set)
#fc = open('my_classifier.pickle', 'wb')
#pickle.dump(NBClassifier, fc)
#fc.close()

# Load the Naive Bayes classifier
f= open('my_classifier.pickle')
classifier = pickle.load(f)
f.close()

#Infinite loop to read and process tweets
var = 1
while var == 1 :
    #Connect to database and fetch tweets:
    # NOTE(review): credentials are hardcoded; a fresh connection is opened
    # on every polling cycle.
    conn = None
    try:
        conn = mdb.connect(host="localhost", user="root", passwd="nazya", db="mydb")
        cursor = conn.cursor()
        # status 'u' marks tweets not yet processed.
        query = "Select id, TweetText, Location from tweets where status='u'"
        cursor.execute(query)
        rows = cursor.fetchall()
        for row in rows:
            newTweet = row[1]
            t_id = row[0]
            processedNewTweet = processRawTweet(newTweet)
            # default overwritten immediately below by the classifier output
            sentiment = "neutral"
            #Using trained classifier to extract sentiment
            sentiment = classifier.classify(extractFeaturesFromTweet(getFeaturesInVector(processedNewTweet, stopWords)))
            print "newTweet = %s, sentiment = %s\n" % (newTweet, sentiment)
            #updateQuery="update tweets set status = 'p', sentiment= where TweetText != 'effective but too-tepid biopic';"
            # Parameterized update: mark the row processed ('p') and store
            # the predicted sentiment.
            cursor.execute("UPDATE tweets SET Sentiment = %s, status = %s WHERE id = %s", ( sentiment, 'p', t_id))
            conn.commit()
            print "Number of rows updated: %d" % cursor.rowcount
    except mdb.Error, e:
        print "Error %d: %s" % (e.args[0],e.args[1])
        sys.exit(1)
    finally:
        if conn:
            cursor.close()
            conn.close()
    print "Sleeping of 100 seconds!!!"
    time.sleep(100)
#end db
else :
    # while/else: only reached if the loop condition ever becomes false.
    print "Good Bye!!!"
UTF-8
Python
false
false
2,014
17,875,653,896,027
4ff989abedc4b3a9c111ac78e8f64af83d45c637
80a338adee13ea767f3b85b68d7bf4decdb05755
/tailseq/tailseq/logger.py
424385635c029b4b0d1c250e86ffc8250768611e
[]
no_license
hbc/daley_tailseq
https://github.com/hbc/daley_tailseq
8f612f0d4dc01f7985b5c7323a5852f230b0f63c
58d04357fe620f04d663ad1283f609ea33bd39e1
refs/heads/master
2016-09-08T01:58:56.108941
2014-11-16T01:01:11
2014-11-16T01:01:11
19,383,744
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging

# Configure the root logger once at import time so the whole pipeline gets
# DEBUG-level console output.
# NOTE(review): basicConfig() here affects every logger in the process;
# confirm this is intended if this module is reused as a library.
logging.basicConfig(level=logging.DEBUG)

# Shared project logger; other modules import `my_logger` from here.
my_logger = logging.getLogger('Tail-Seq')
UTF-8
Python
false
false
2,014
2,946,347,576,916
ff3485aa0a2788d11638b16680890556c9532cca
7a64e3dd6a74dfa2853e0bc10e5ec934badb36a2
/csv_transformer.py
2fec12c00fdb2b21b158a25d2b550b5fc406a555
[]
no_license
siteshen/taobaospider
https://github.com/siteshen/taobaospider
b3d4dd70d1f69201921a42ee858ebc3e6f31327c
82ed6c6eb9b8a93a179b1f8b7be04caafddd8724
refs/heads/master
2021-01-18T12:28:06.582169
2013-09-26T10:24:46
2013-09-26T10:24:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import csv
import types
import sys


class Table(object):
    """Maps rows of a comma-separated file into dict records and generates
    MySQL-flavoured CREATE TABLE / INSERT statements for them.

    Subclasses override getfields() to declare (name, column index, cast,
    [sql type]) tuples; see Taobao below.
    """

    def __init__(self, tablename):
        self.tablename = tablename
        # parsed rows, each a {field name: value} dict
        self.records = []

    def __len__(self):
        return len(self.records)

    def getrecords(self, filename, skipheader=True, n=None):
        """Parse up to ``n`` rows of ``filename`` into self.records."""
        self.readfile(filename, self.getfields(), skipheader, n)
        print "Len of records: %d" % len(self)

    def create_and_insert(self, conn, batch=100):
        """Create the table and insert every record, committing every
        ``batch`` rows (plus a final commit)."""
        cnt = 0
        conn.execute(self.table_create_sql())
        conn.commit()
        print "total: %d" % len(self)
        for record in self.records:
            print "cnt: %d" % cnt
            conn.execute(self.insertrecord_sql(record))
            cnt += 1
            if cnt % batch == 0:
                conn.commit()
        conn.commit()

    def getfields(self, fields=[]):
        """Normalize field specs to 4-tuples, padding a missing SQL type
        with None.

        NOTE(review): the mutable default ``fields=[]`` is only iterated,
        never mutated, so it is harmless here — but fragile if edited.
        """
        newfields = []
        for field in fields:
            l = list(field)
            if len(field) == 3:
                l.append(None)
            newfields.append(l)
        return newfields

    def readfile(self, data_file, fields, skipheader=True, n=None):
        """Stream ``data_file`` line by line, converting each into a record.

        Stops after line index ``n`` (None = read everything); line 0 is
        skipped when ``skipheader`` is true.
        """
        print "Read file: %s" % data_file
        fp = open(data_file)
        for i, line in enumerate(fp):
            if i == 0 and skipheader == True:
                continue
            if i == n:
                break
            record = self.makerecord(line, fields)
            self.addrecord(record)
        fp.close()

    def makerecord(self, line, fields):
        """Split one CSV line and cast each declared column; a missing or
        uncastable column becomes None (the error is printed, not raised)."""
        obj = {}
        rawdata = line.split(',')
        for field, index, cast, typename in fields:
            try:
                v = rawdata[index]
                v = cast(v)
            except Exception as ex:
                print ex, field, index
                v = None
            obj[field] = v
        return obj

    def addrecord(self, record):
        self.records.append(record)

    def table_create_sql(self):
        """Build the CREATE TABLE statement; columns without an explicit SQL
        type are derived from their Python cast (str -> text, int -> int)."""
        mapper = { "str": "text", "int": "int" }
        sql_template = "create table IF NOT EXISTS %s ( id int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY, %s ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci"
        values = []
        for field, idx, cast, typename in self.getfields():
            if typename:
                values.append("%s %s" % (field, typename))
            else:
                values.append("%s %s" % (field, mapper[cast.__name__]))
        return sql_template % (self.tablename, ", ".join(values))

    def insertrecord_sql(self, record):
        """ Generate the sql according to the tablename and fields

        NOTE(review): values are interpolated into the SQL string without
        escaping — fine for this trusted local dataset, but vulnerable to
        SQL injection if ever fed untrusted input.
        """
        kvs = [[field, record[field]] for field, idx, cast, typename in self.getfields()]
        def postprocess(v):
            # None maps to SQL NULL; everything else is quoted verbatim.
            if v == None:
                return 'NULL'
            else:
                return "'%s'" % str(v)
        return "insert into %s (%s) values (%s)" % \
            (self.tablename,
             ','.join([kv[0] for kv in kvs]),
             ','.join([postprocess(kv[1]) for kv in kvs]))


class Taobao(Table):
    """Field layout for the Taobao click-log dump."""

    def __init__(self, tablename):
        super(Taobao, self).__init__(tablename)
        self.tablename = tablename

    def getfields(self):
        # (field name, source column index, python cast[, sql type])
        fields = [
            ('uid', 0, str),
            ('ip', 1, str),
            ('agent', 2, str),
            ('url', 5, str),
            ('site', 6, str),
            ('domain', 7, str),
            ('referurl', 8, str),
            ('date', 11, str, 'datetime'),
            ('staytime', 12, int),
            ('url_kw', 15, str),
            ('refer_kw', 16, str),
            ('raw_id', 18, int),
            ('gener', 21, int),
            ('age', 22, int),
            ('city', 23, int),
            ('income_pre', 24, int),
            ('income_fml', 25, int),
            ('education', 26, int),
            ('job', 27, int),
            ('industry', 28, int),
            ('birth', 29, str)
        ]
        return super(Taobao, self).getfields(fields)


def main(filename):
    """Smoke test: parse ``filename`` and print the generated SQL."""
    taobao = Taobao('taobao')
    print taobao.table_create_sql()
    taobao.getrecords(filename, skipheader=True)
    print len(taobao)
    print taobao.records[0]
    record = taobao.records[0]
    record['birth'] = None
    print taobao.insertrecord_sql(record)
    # import sqlite3
    # conn = sqlite3.connect('test.db')
    # taobao.create_and_insert(conn)


if __name__ == '__main__':
    print sys.argv
    main(*sys.argv[1:])
UTF-8
Python
false
false
2,013
5,162,550,708,927
60ae3ac4a181c40439a15857e0080fcd9b7ccee8
ecc1e8a020e5328a8fdda17513ad56c11dd8993f
/007.py
cc4435db99e19f67fe14485793426e4d06bb00f2
[]
no_license
masato-mi/NLP_100
https://github.com/masato-mi/NLP_100
ffe8338ac11b452d47c5c5f343db9dd047b3f703
2e3b0465f2d25131aab673e0a72e8a44e1e7fb3a
refs/heads/master
2016-09-11T03:01:00.283957
2014-05-19T11:10:14
2014-05-19T11:10:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding : UTF-8 file = open("address.txt", "r") new_set = set() for line in file.readlines(): item = line.split(" ") #1コラム目と2コラム目を1スペースで分けておく. new_set.add(item[0]) #1コラム目を集合(new_set)に加える.これより重複がなくなる. print len(new_set), file.close()
UTF-8
Python
false
false
2,014
1,116,691,531,771
ef009f4230fae4701ae02d2cbc4d363def3795e9
982f68f5ad8972523913eb01a322d2ba1cd02e92
/source/Input/KeyboardInputSource.py
d07b7740003b79b2711d9ba6aed1ae15cbaa2614
[]
no_license
blakeohare/pyweek-soulshenanigans
https://github.com/blakeohare/pyweek-soulshenanigans
e96d279b9f672631255e0b5aececa94cb8be38ee
c007d14299901ca3cc72ebf1fcde03a075fcc274
refs/heads/master
2016-09-06T00:23:26.837501
2010-04-04T00:05:38
2010-04-04T00:05:38
38,605,569
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class KeyboardInputSource:
    """Translates pygame keyboard events into the game's virtual gamepad
    button events.

    NOTE(review): this file references pygame constants (K_UP, KEYDOWN, ...)
    and InputEvent without importing them; presumably they reach this module
    via a star-import at load time — confirm before moving or testing it
    standalone.
    """

    def __init__(self):
        # Physical key -> virtual (SNES-style) button name.
        self.keymap = {
            K_UP : 'up',
            K_DOWN : 'down',
            K_LEFT : 'left',
            K_RIGHT : 'right',
            K_SPACE : 'B',
            K_z : 'Y',
            K_x : 'A',
            K_m : 'X',
            K_RETURN : 'start',
            K_1 : 'L',
            K_2 : 'R'
        }

    def get_input_type(self):
        # Identifies this source kind to the input manager.
        return 'keyboard'

    def process_events(self, pygame_events):
        """Return a list of InputEvents for every mapped KEYDOWN/KEYUP in
        ``pygame_events``; unmapped keys and other event types are ignored.
        The second InputEvent argument is True for press, False for release."""
        events = []
        for event in pygame_events:
            if event.type == KEYDOWN or event.type == KEYUP:
                if event.key in self.keymap.keys():
                    events.append(InputEvent(self.keymap[event.key], event.type == KEYDOWN))
        return events

    def get_name(self):
        # Human-readable label for configuration menus.
        return "Keyboard"

    def configure_key(self, key):
        pass #TODO: configure keyboard keys
UTF-8
Python
false
false
2,010
1,640,677,556,588
37ca807a6625d370fbd4f3f36ebc65de94b7af52
1a191e99d978691d184441bac1ad8e69f74c33ad
/src/weapon.py
607920ee5728d5293fd1563e54590f6d534377ce
[]
no_license
GregorStocks/mobster
https://github.com/GregorStocks/mobster
503ba41842065d4278b0dcb1e71bf5cb439e799a
bcdb9b2c40f5ae6e1d32028b9b3dbca48ab41c83
refs/heads/master
2020-06-05T02:12:21.830116
2011-01-18T22:22:11
2011-01-18T22:22:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python


class Weapon(object):
    """A weapon, characterised only by its attack range."""

    def __init__(self, range):
        # The parameter name shadows the builtin `range`; kept as-is so
        # keyword callers (range=...) keep working.
        self.range = range
UTF-8
Python
false
false
2,011
4,741,643,933,105
a232121eaa789c6639bb5844ac2bb9cf5ba1af3f
d5a35ee0546b79bb1ca602a84d93567901fc22a6
/blog/context_processors.py
904e1a22b3998a6108dacd14fe882d65ef78711d
[ "GPL-3.0-only" ]
non_permissive
brsyuksel/how-to
https://github.com/brsyuksel/how-to
9270591da8fd97d5036f3971963820c2f9dde2dc
0b2a3cd0e5f19b0b3ba76844167641a92f28b835
refs/heads/master
2020-04-14T06:52:03.703455
2014-03-24T11:54:12
2014-03-24T11:54:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from .utils import get_settings


def howto_settings(request):
    """Template context processor: exposes the blog settings as `blog`."""
    return {'blog': get_settings()}
UTF-8
Python
false
false
2,014
12,369,505,845,252
691c60ac581479c653144d6e1774c663872195fc
d3136f125eb8b7f57cf73275696588b2f723d409
/geburtstag.py
a786ceae40ab2d0c142b5bbdf8329b2b4da7c5a0
[]
no_license
tom111/NullenEinsen
https://github.com/tom111/NullenEinsen
0ab1c94a883a7c55cd97e09d4ad412437ad1fe6e
ef3e97143d90e15e56ee9483642307e68289ee78
refs/heads/master
2021-01-02T04:53:19.552764
2014-05-16T13:35:15
2014-05-16T13:35:15
19,422,760
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def geburtstags_wahrscheinlichkeit (n):
    """Return the probability that among ``n`` people at least two share a
    birthday (classic birthday problem; 365 equally likely days).

    Accumulates the complementary probability ``pbar`` that all ``n``
    birthdays are distinct and returns ``1 - pbar``.
    """
    pbar = 1.0
    for i in range (n):
        # person i must avoid the i birthdays already taken
        pbar = pbar * (1 - float(i)/365)
    return (1 - pbar)


# Print (n, probability) pairs for n = 0..49.
# Bug fix: the original used the Python-2-only ``print [...]`` statement;
# the parenthesised single-argument form below produces identical output
# under both Python 2 and Python 3.
print([(n, geburtstags_wahrscheinlichkeit(n)) for n in range (50)])
UTF-8
Python
false
false
2,014
687,194,788,752
76342996a6f3d2be25ee1557015c9e772a55ad85
c65aa011cb879dc77d74ed3b111dc66508c50f15
/server_controller.py
831a6e969f253ba0db12bcbc29ce095b5db0eac9
[]
no_license
nolifelover/Football-Info
https://github.com/nolifelover/Football-Info
f288d8fcfe8d9b738f451d3a249888843a47c9e5
258d14f690d96594a9a32321b2f1f485a3f2652a
refs/heads/master
2021-01-18T07:33:10.557972
2010-06-10T13:21:02
2010-06-10T13:21:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/env python #! coding: utf-8 # pylint: disable-msg=W0311 ## @server_controller - May 31, 2010 # Remote Controller for Football Info 2010 # # - Login via SSH with private_key # - Use rsync to move local files to server # from lib import ssh from sys import argv private_key = "/home/Workspace/FootballInfo2010/id_rsa" remote = ssh.Connection('203.128.246.60', 'root', private_key) help = """Sử dụng: python server_controller.py update | rollback """ def update(): command = "ls -l" remote.execute(command) def rollback(): pass def connect(): pass def sync(): pass if __name__ == '__main__': try: command = argv[1] except IndexError: command = None if command == "update": remote.execute("update command") elif command == "rollback": remote.execute("rollback command") else: remote.execute("ls -l /home") # print help
UTF-8
Python
false
false
2,010
5,806,795,800,676
b64d45496e866970edc88e3e45061f0f99a786bd
1461c2d47cebc3ae4570ef3c97e10762e6e72af7
/dlc-database/gendb.py
5680989b2987de90fe0a243a66391917539cbe44
[ "GPL-3.0-only" ]
non_permissive
rossengeorgiev/sg-enhancement-addon
https://github.com/rossengeorgiev/sg-enhancement-addon
5194314fa751cde2e96deafa31b779dad32673a2
6db1836d00336a5207ec67ca59a56275d7ae6665
refs/heads/master
2021-01-13T01:25:55.184333
2013-10-26T05:46:41
2013-10-26T05:46:41
6,637,494
3
2
null
false
2013-10-26T05:46:42
2012-11-11T10:44:20
2013-10-26T05:46:41
2013-10-26T05:46:41
3,639
null
4
1
JavaScript
null
null
#!/usr/bin/python -u
"""
DLC Database generator - extracts all active DLCs from store.steampowered.com
Copyright (C) 2012 Rossen Georgiev

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import json
import urllib2
import re
import sys
import threading

# global vars
page = 1
# Steam store search for the DLC category, paginated via &page=N.
s_url = "http://store.steampowered.com/search/?term=%20&category1=21&sort_order=ASC&page="
# base game name -> list of its DLC names, filled in by worker threads
dlc = {}
output_filename = "sge_dlc_database.json"
max_threads = 5

def flush_db():
    """Dump the collected dlc mapping to JSON and close the log file."""
    #output to json
    print "\nWritting to %s" % output_filename
    out = open(output_filename, 'w')
    out.write(json.dumps(dlc,indent=4).replace("    ",'\t'))
    # close files
    out.close()
    log.close()

def worker(url,name):
    """Thread body: fetch one DLC page, scrape its base game name and record
    the (game -> DLC) pair.

    NOTE(review): the bare ``except`` swallows every failure (logged only),
    and the ``dlc`` / ``thread_n`` updates are not lock-protected — presumably
    tolerated because CPython's GIL makes the individual operations atomic
    enough for this script; confirm before extending.
    """
    global dlc, thread_n, log
    try:
        r = bro.open(url).read()
        # the store page links back to its base game: '... base game <a>NAME</a> on Steam'
        game = re.findall(r"base game.*?>(.*?)<\/a> on Steam", r)[0]
        if not dlc.has_key(game):
            dlc[game] = []
        dlc[game].append(name)
    except:
        log.write("Failed to parse: %s\n" % url)
    thread_n -= 1 # thread is finished

bro = urllib2.build_opener()
bro.addheaders.append(('Cookie', 'birthtime=-1735660799')) # to pass steam age check
thread_n = 0
log = open('error.log', 'w')

try:
    while 1:
        # print progress
        print "\rParsing page %d... " % page, " "*10,
        # flatten the page so the multi-line result rows regex below can match
        r = re.sub("[\n\r]", "", bro.open(s_url + str(page)).read())
        # each search result row yields (dlc page url, dlc title)
        d = re.findall(r"href=\"(.{,150})\" class=\"search_result_row.*?<h4>(.*?)<\/h4>", r)

        # we've reached the end, stop
        if len(d) == 0: break;

        print "\n",
        # this is 2012, parallel is king
        # Spawn up to max_threads workers; busy-wait (x not advanced)
        # while the pool is full.
        x = 0
        thread_n = 0
        while x < len(d):
            if thread_n < max_threads:
                dlc_url = d[x][0]
                dlc_name = d[x][1]
                threading.Thread(target=worker, args=(dlc_url,dlc_name,)).start()
                thread_n += 1
                # update progress
                x += 1
                print "\rParsing page %d..." % page, "%d/%d" % (x,len(d)),

        # move to next page
        page += 1
except KeyboardInterrupt:
    # Ctrl-C: save whatever was collected so far before exiting.
    print "\nInterrupted, stopping...\n",
    flush_db()
    sys.exit()

flush_db()
UTF-8
Python
false
false
2,013
9,715,216,041,193
80b8e71e03fdc33f73e9ba022a32bdf6601bfb44
5b78dba6e26e513823da9187daf998022aa16de4
/Console.py
38037063c6025ae05650f4756c9a46dbea3e649a
[]
no_license
rinrinne/mkaproc
https://github.com/rinrinne/mkaproc
3aaab7dd5fb25927d3f8b2aed74577d851f157d7
a083fe420abe64b57a3b4ace7d4599b574f34637
refs/heads/master
2021-01-22T19:21:50.795913
2009-12-29T17:49:43
2009-12-29T17:49:43
139,993
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

import sys, codecs, os, os.path
from subprocess import *

class Console:
    """Console helper: echoes/loggs messages and runs shell commands with an
    augmented PATH, encoding all text with a configurable system charset.

    Flags:
      writable   - when True, write() echoes to a stream.
      executable - when False, execute() is a no-op returning -1.
      logging    - when True, messages (and command output) are kept in
                   self.log, and execute() captures subprocess output.
    """

    def __init__(self, charset = 'iso8859-1'):
        self.writable = False
        self.executable = True
        self.logging = False
        self.syscharset = charset
        self.paths = []        # list of PATH-entry lists, see appendpath()
        self.current = None    # working directory for execute()
        self.log = []          # accumulated messages when logging is on

    def __repr__(self):
        """String representation."""
        return '<%r: writable=%r, executable=%r, logging=%r, syscharset=%r, paths=%r, current=%r, log=%r>' % (
            self.__class__.__name__,
            self.writable, self.executable, self.logging,
            self.syscharset, self.paths, self.current, self.log )

    def root(self, current):
        """Set (and create if missing) the working directory for execute()."""
        if not os.path.isdir(current):
            os.mkdir(current)
        self.current = current

    def write(self, line, prefix = u'[WRITE]: ', fd=None):
        """Echo ``prefix + line`` (encoded) to ``fd`` (default: stdout) when
        writable, and append it to the in-memory log when logging.

        Bug fix: the fd check was inverted — a caller-supplied stream went to
        stdout, and the default fd=None crashed on ``None.write``.
        """
        if self.writable:
            if fd is None:
                out = sys.stdout
            else:
                out = fd
            out.write((prefix + line).encode(self.syscharset))
            out.write('\n')
        if self.logging:
            self.log.append(prefix + line)
            self.log.append('\n')

    def writeerr(self, line, prefix = u'[ERROR]: '):
        """Like write(), but directed at stderr."""
        self.write(line, prefix, sys.stderr)

    def appendpath(self, path):
        """Queue extra PATH entries: a string becomes a one-entry group, a
        list is kept as one group. Anything else is silently ignored."""
        if isinstance(path, str):
            self.paths.append([path])
        elif isinstance(path, list):
            self.paths.append(path)

    def poppath(self, depth=0):
        """Remove and return path groups: depth=0 drains them all (returning
        the whole list), otherwise pops ``depth`` groups from the end."""
        ret = []
        if(depth==0):
            ret = self.paths
            self.paths = []
        else:
            for i in range(depth):
                ret.append(self.paths.pop())
        return ret

    def execute(self, cmd):
        """Run ``cmd`` (a sequence of words) through the shell in self.current
        with the queued paths prepended to PATH; returns the exit code, or -1
        when execution is disabled.

        Bug fix: the captured-output branch read the nonexistent attribute
        ``proc.retcode``; it now uses proc.wait(), which also guarantees the
        child has finished before returning.
        """
        cwd = os.getcwd()
        line = ' '.join(cmd)
        self.writeerr(line, u'[EXEC]: ')
        if self.executable:
            # flatten the queued path groups into one list
            pathlist = []
            for path in self.paths:
                pathlist += path
            envmap = os.environ.copy()
            # Python 2: map() returns a list, so concatenation works here.
            envmap['PATH'] = os.pathsep.join(map(lambda x: x.encode(self.syscharset), pathlist) + [envmap['PATH']])
            if self.logging:
                # capture stdout+stderr line by line into write()/the log
                proc = Popen(line.encode(self.syscharset), shell=True, stdout=PIPE, stderr=STDOUT, cwd=self.current, env=envmap)
                for out_line in proc.stdout:
                    self.write(out_line.decode(self.syscharset))
                ret = proc.wait()
            else:
                ret = call(line.encode(self.syscharset), shell=True, cwd=self.current, env=envmap)
            return ret
        else:
            return -1
UTF-8
Python
false
false
2,009
15,547,781,656,112
dc341bd5ec6098c1b33d31b1967e87c1277d6542
d6d9bdf00c73cfc77131a965ecd68f20e9860540
/impact/tests/test_api.py
36bc0dbd2279085ad6c9d81698d3aa9dccd044d7
[]
no_license
AIFDR/riab
https://github.com/AIFDR/riab
aab9b433640464af43817e9658b14620e6b516a3
0bc2cbbef82be588e3568bb2ac8739727fb906c1
refs/heads/master
2016-09-10T02:03:20.271377
2012-03-05T09:16:24
2012-03-05T09:16:24
1,462,033
7
4
null
null
null
null
null
null
null
null
null
null
null
null
null
import unittest
import os
from django.test.client import Client
from django.utils import simplejson as json
from django.conf import settings
from impact.storage.io import save_to_geonode
from geonode.maps.utils import check_geonode_is_up
from geonode.maps.models import Layer
from geonode.maps.utils import get_valid_user
from impact.storage.io import check_layer
from impact.tests.utilities import TESTDATA, INTERNAL_SERVER_URL
from impact.tests.plugins import unspecific_building_impact_model


class Test_HTTP(unittest.TestCase):
    """Test suite for the impact HTTP REST API."""

    def setUp(self):
        """Check geonode and create valid superuser."""
        check_geonode_is_up()
        self.user = get_valid_user()

    def tearDown(self):
        pass

    def test_functions(self):
        """Functions can be retrieved from the HTTP Rest API."""
        c = Client()
        rv = c.get('/impact/api/functions/')
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv['Content-Type'], 'application/json')
        data = json.loads(rv.content)
        msg = ('The api should return a dictionary with at least one item. '
               'The key of that item should be "functions"')
        assert 'functions' in data, msg
        functions = data['functions']
        msg = ('No functions were found in the functions list, '
               'not even the built-in ones')
        assert len(functions) > 0, msg

    def test_layers(self):
        """Layers can be retrieved from the HTTP Rest API."""
        c = Client()
        rv = c.get('/impact/api/layers/')
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv['Content-Type'], 'application/json')
        data = json.loads(rv.content)

    def test_calculate_fatality(self):
        """Earthquake fatalities calculation via the HTTP Rest API is correct."""
        # Upload required data first
        for filename in ['Earthquake_Ground_Shaking.asc',
                         'Population_2010_clip.tif']:
            thefile = os.path.join(TESTDATA, filename)
            uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)
            check_layer(uploaded, full=True)

        # Run calculation through API
        c = Client()
        rv = c.post('/impact/api/calculate/',
                    dict(hazard_server=INTERNAL_SERVER_URL,
                         hazard='geonode:earthquake_ground_shaking',
                         exposure='geonode:population_2010_clip',
                         exposure_server=INTERNAL_SERVER_URL,
                         bbox='99.36,-2.199,102.237,0.00',
                         impact_function='Earthquake Fatality Function',
                         keywords='test,earthquake,fatality'))

        msg = 'Expected status code 200, got %i' % rv.status_code
        # BUGFIX: the message used to sit OUTSIDE the call as
        # ``self.assertEqual(...), msg`` -- a dead tuple expression -- so
        # it was never shown on failure. Pass it as the msg argument.
        self.assertEqual(rv.status_code, 200, msg)

        msg = ('Expected Content-Type "application/json", '
               'got %s' % rv['Content-Type'])
        self.assertEqual(rv['Content-Type'], 'application/json', msg)

        data = json.loads(rv.content)
        if data['stacktrace'] is not None:
            msg = data['stacktrace']
            raise Exception(msg)

        assert 'hazard_layer' in data.keys()
        assert 'exposure_layer' in data.keys()
        assert 'run_duration' in data.keys()
        assert 'run_date' in data.keys()
        assert 'layer' in data.keys()
        assert 'bbox' in data.keys()
        assert 'impact_function' in data.keys()

        layer_uri = data['layer']
        # FIXME: This is not a good way to access the layer name
        typename = layer_uri.split('/')[4]
        name = typename.split(':')[1]

        # Check the autogenerated styles were correctly uploaded
        layer = Layer.objects.get(name=name)
        msg = ('A new style should have been created for layer [%s] '
               'got [%s] style instead.' % (name, layer.default_style.name))
        assert layer.default_style.name == name, msg

    def test_calculate_school_damage(self):
        """Earthquake school damage calculation works via the HTTP REST API."""
        # Upload required data first
        for filename in ['lembang_mmi_hazmap.asc', 'lembang_schools.shp']:
            thefile = os.path.join(TESTDATA, filename)
            uploaded = save_to_geonode(thefile, user=self.user, overwrite=True)
            check_layer(uploaded, full=True)

        # Run calculation through API
        c = Client()
        rv = c.post('/impact/api/calculate/', data=dict(
            hazard_server=INTERNAL_SERVER_URL,
            hazard='geonode:lembang_mmi_hazmap',
            exposure_server=INTERNAL_SERVER_URL,
            exposure='geonode:lembang_schools',
            bbox='105.592,-7.809,110.159,-5.647',
            impact_function='Earthquake Building Damage Function',
            keywords='test,schools,lembang',
        ))

        msg = 'Expected status code 200, got %i' % rv.status_code
        # BUGFIX: msg was previously outside the assertEqual call (dead code).
        self.assertEqual(rv.status_code, 200, msg)

        msg = ('Expected Content-Type "application/json", '
               'got %s' % rv['Content-Type'])
        self.assertEqual(rv['Content-Type'], 'application/json', msg)

        data = json.loads(rv.content)
        if data['stacktrace'] is not None:
            msg = data['stacktrace']
            raise Exception(msg)

        assert 'hazard_layer' in data.keys()
        assert 'exposure_layer' in data.keys()
        assert 'run_duration' in data.keys()
        assert 'run_date' in data.keys()
        assert 'layer' in data.keys()

        # FIXME (Ole): Download result and check.


if __name__ == '__main__':
    suite = unittest.makeSuite(Test_HTTP, 'test')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
UTF-8
Python
false
false
2,012
738,734,417,862
53c925fabc9a230fc5f184c67d732cd4fe5d777e
e4777f957d1ca17c7b99667ed66b0923e216754e
/e/e.py
84606d9308ca1ad76d8e3d4b278e0682eeed67e5
[]
no_license
donce/university
https://github.com/donce/university
c77071a0133620ec5d0b3fbe8acd6d2289fcee5f
fbdb8a8c616fd1305144e944a9e68dce52678d50
refs/heads/master
2021-01-10T20:54:34.666150
2013-03-16T09:01:46
2013-03-16T09:01:46
6,204,675
0
2
null
null
null
null
null
null
null
null
null
null
null
null
null
# Multiprocess computation of e as an exact fraction (Python 2:
# `print` statements, `xrange`, and integer `/` as floor division).

def gcd(a, b):
    # Euclid's algorithm for the greatest common divisor.
    while b != 0:
        t = a
        a = b
        b = t % b
    return a

def fix(frac):
    # Reduce a fraction (numerator, denominator) to lowest terms.
    # NOTE: relies on Python 2 integer division staying exact.
    d = gcd(frac[0], frac[1])
    return frac[0]/d, frac[1]/d

def join(a, b):
    # Fold an interval result b = (add, f) into the running fraction a:
    # a/ad + add/(ad*f) expressed over the common denominator ad*f.
    return (a[0]*b[1] + b[0], a[1] * b[1])

def calcInterval(start, end):
    # Horner-style accumulation over [start, end):
    # f   = start*...*(end-1)
    # add = sum of partial products so that add/f is this interval's
    #       contribution to the series sum of 1/n!.
    f = 1
    add = 0
    for i in xrange(start, end):
        f *= i
        add = i*add+1
    return (add, f)

from multiprocessing import Process, Value, Queue, Lock

# Shared state between worker processes.
results = Queue()            # (job number, (add, f)) tuples from workers
currentJob = Value('L', 0)   # next unclaimed job index
jobLock = Lock()             # guards currentJob
intervalSize = 10            # terms per job; overwritten by calcE()

def getJob():
    # Atomically claim the next job index and derive its half-open
    # term interval [nr*size+1, (nr+1)*size+1).
    jobLock.acquire()
    nr = currentJob.value
    currentJob.value += 1
    jobLock.release()
    interval = nr * intervalSize + 1, (nr+1) * intervalSize + 1
    return (nr, interval)

def calculate():
    # Worker loop: keep claiming jobs and pushing results until the
    # user interrupts with Ctrl-C.
    try:
        while True:
            job = getJob()
            result = calcInterval(*job[1])
            results.put([job[0], result])
    except KeyboardInterrupt:
        pass

def bitSize(size):
    # Render a bit count as a human-readable byte size (B/KB/MB/GB).
    size = float(size) / 8
    letter = ['', 'K', 'M', 'G']
    for l in letter:
        if size < 1024:
            return str(round(size, 2)) + l + 'B'
        size /= 1024
    # Larger than GB: report in the largest unit available.
    return str(round(1024*size, 2)) + letter[len(letter)-1] + 'B'

from sys import stdout

def printFrac(frac, stream=stdout, length=8):
    # Write `length` decimal digits of the fraction via long division.
    print "GCD..."
    frac = fix(frac)
    print "Printing..."
    stream.write(str(frac[0] / frac[1]) + '.')
    bottom = frac[1]
    top = frac[0] % frac[1]
    count = 0
    for i in xrange(length):
        top *= 10
        stream.write(str(top / bottom))
        top %= bottom
        count += 1
        # Progress marker every 1000 digits (only fires for long outputs).
        if count % 1000 == 0:
            print count
    stream.write('\n')

def calcE(threadsNumber, _intervalSize):
    # Spawn workers, then merge their interval results IN ORDER
    # (eDict buffers out-of-order arrivals) until interrupted.
    global intervalSize
    intervalSize = _intervalSize
    threads = []
    for i in range(threadsNumber):
        Process(target=calculate).start()
    e = (1, 1)        # running fraction; starts at 1/1 (the 1/0! term)
    eDict = {}        # out-of-order results, keyed by job number
    eNow = 0          # next job number to merge
    finished = False
    try:
        while True:
            result = results.get()
            eDict[result[0]] = result[1]
            while eNow in eDict:
                e = join(e, eDict[eNow])
                del eDict[eNow]
                eNow += 1
    except KeyboardInterrupt:
        print "Calculation closing."
    print "Iterations done:", eNow * intervalSize
    print bitSize(e[0].bit_length() + e[1].bit_length())
    from math import log10
    digits = int(log10(e[1]))
    print digits
    f = open('output', 'w')
    printFrac(e, f, digits)
    f.close()
    return e

calcE(8, 10000)
print "Done!"
UTF-8
Python
false
false
2,013
15,771,119,947,690
a41fe9ac2ff8a971c858974f46c547766ad1f1a9
4707881af349b5d9ae07417f54c1c341170d1de9
/mledu/algorithm/__init__.py
f9b15b0a5938f53c5adcdbf75eb794c777adea7a
[ "GPL-2.0-only" ]
non_permissive
dreamwalkerrr/mledu
https://github.com/dreamwalkerrr/mledu
e83032ae6e66ba69c86c49c55be05c02984f1035
911d16361cd753d920f2ac2ce4d132d85e30fe68
refs/heads/master
2016-09-01T20:03:01.988928
2013-12-25T08:54:12
2013-12-25T08:54:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Re-export the package's public classes at package level.
from perceptron import Perceptron
from linregression import LinearRegression
from babynn import BabyNN

# BUGFIX: ``from X import Y`` does not bind the module name ``X`` in this
# namespace, so the original ``del perceptron, linregression, babynn``
# raised NameError at import time. Import the submodules explicitly first
# so the namespace cleanup actually works.
import perceptron, linregression, babynn
del perceptron, linregression, babynn
UTF-8
Python
false
false
2,013
6,365,141,534,739
c6035f823ff66418d3ac3c74ef57e97c5ed20177
98c46168e904b48482d7222b8d6b013ed50bfe86
/ssm.py
debc125c2d62ac3cc168abaf9d5dd7c441d9aa16
[]
no_license
bboyjacks/Python-Codes
https://github.com/bboyjacks/Python-Codes
4beae70bcca75c2873621df73fccebf54752656b
46ca5f7c2d46e75c1d69cc72fec8f8c6578e89e1
refs/heads/master
2016-09-06T11:41:47.980675
2014-06-21T04:37:38
2014-06-21T04:37:38
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#-----------STOCHASTIC STATE MACHINE----------------
#
# The following library implements discrete distributions,
# stochastic state machines and state estimators.
#
#
# Scott's Note: This project was not fully completed, as the SSM
# only works without input, since a fully generalized JDist class
# wasn't created. It's only a demonstration, so full testing suites
# and correct modularization have also not been fully implemented
#
# NOTE(review): Python 2 code (print statements, dict.has_key).
#---------------------------------------------------

import sm
import random

#-----Discrete Distribution----------
# The following class stores discrete distributions
# by using a dictionary which associates events and
# their probabilities
#------------------------------------
class DDist():
    def __init__(self, dictionary):
        # dictionary maps event -> probability; assumed to sum to 1.
        self.d = dictionary

    #returns the probability of an event (0 for unknown events)
    def prob(self, event):
        if event in self.d:
            p = self.d[event]
        else:
            p = 0
        return p

    #returns a list of all possible events
    def support(self):
        return [k for k in self.d.keys() if self.prob(k) > 0]

    #draws an element from the distribution by inverse-CDF sampling
    def draw(self):
        r = random.random()
        probSum = 0.0
        for event in self.support():
            probSum += self.prob(event)
            if r < probSum:
                return event
        # Only reachable if probabilities sum to < 1 (or float error).
        print "Error: draw() failed to find an event."
        return None

    def output(self):
        # Pretty-print every event with its probability.
        for k in self.support():
            print k,": ",self.d[k],"\n"

def testSuite_DDist():
    # Smoke test: a fair die and a weighted die.
    chance = 1.0/6.0
    die = DDist({1:chance,2:chance,3:chance,4:chance,5:chance,6:chance})
    weightedDie = DDist({1:chance/2,2:chance/2,3:chance,4:chance,5:chance*2,6:chance*2})
    print die.support()
    for i in range(10):
        print die.draw()
    for i in range(10):
        print weightedDie.draw()

#------JOINT DISTRIBUTION--------------
# This class allows you to create a joint distribution
# given a distribution and a function for calculating
# the conditional distribution given that distribution
#--------------------------------------
class JDist(DDist):
    #Takes a distribution of a random variable and the
    #function which determines the conditional distribution
    def __init__(self, pA, pBgivenA):
        # Events are (a, b) pairs with Pr = Pr(A=a) * Pr(B=b | A=a).
        self.d = {}
        possibleAs = pA.support()
        for a in possibleAs:
            conditional = pBgivenA(a)
            for b in conditional.support():
                self.d[(a,b)] = pA.prob(a)*conditional.prob(b)

    #returns the individual distribution of just one of the
    #two random variable components
    def marginalizeOut(self, variable):
        newD = {}
        for event in self.d.keys():
            newEvent = removeElt(event, variable)
            incrDictEntry(newD, newEvent, self.prob(event))
        return(DDist(newD))

    #returns the distribution of a variable, given the value
    #of the other variable
    def conditionOn(self, variable, value):
        newD = {}
        totalProb = 0.0
        #first construct an incomplete distribution, with only
        #the joint probabilities of valued elements
        for event in self.d.keys():
            if event[variable] == value:
                indivProb = self.d[event]
                totalProb += indivProb
                newEvent = removeElt(event, variable)
                newD[newEvent] = indivProb
        #divide by the total sub-probability to ensure all
        #probabilities sum to 1
        for subEvent in newD.keys():
            newD[subEvent] /= totalProb
        return(DDist(newD))

    def output(self):
        for event in self.d.keys():
            print "Event ",event," has a ",self.prob(event)," probability."

def removeElt(items, i):
    # Drop element i from a tuple; unwrap to a scalar if one element remains.
    result = items[:i] + items[i+1:]
    if len(result)==1:
        return result[0]
    else:
        return result

def incrDictEntry(d, k, v):
    # Add v to d[k], creating the entry if missing (Python 2 has_key).
    if d.has_key(k):
        d[k] += v
    else:
        d[k] = v

def testSuite_JDist():
    # Classic disease/symptom example: build the joint, marginalize,
    # and condition.
    pIll = DDist({'disease':.01, 'no disease':.99})
    def pSymptomGivenIllness(status):
        if status == 'disease':
            return(DDist({'cough':.9, 'none':.1}))
        elif status == 'no disease':
            return(DDist({'cough':.05, 'none':.95}))
    jIllnessSymptoms = JDist(pIll, pSymptomGivenIllness)
    jIllnessSymptoms.output()
    dSymptoms = jIllnessSymptoms.marginalizeOut(0)
    print "Symptoms include: \n", dSymptoms.d
    symptomsGivenIll = jIllnessSymptoms.conditionOn(0,'no disease')
    print "Symptoms given no disease: \n", symptomsGivenIll.d

#===================STOCHASTIC STATE MACHINE===================
class SSM(sm.SM):
    # A state machine whose start state, transitions and outputs are
    # all drawn from discrete distributions.
    def __init__(self, prior, transition, observation):
        self.prior = prior              # DDist over initial states
        self.transition = transition    # input -> (state -> DDist of next states)
        self.observation = observation  # state -> DDist of observations

    def startState(self):
        return self.prior.draw()

    def getNextValues(self, state, inp):
        # Sample the next state and the observation of the CURRENT state.
        return(self.transition(inp)(state).draw(), self.observation(state).draw())

def testSuite_SSM():
    # Copy-machine example: 'good'/'bad' internal state, noisy output.
    prior = DDist({'good':0.9, 'bad':0.1})
    def observationModel(state):
        if state == 'good':
            return DDist({'perfect':0.8, 'smudged':0.1, 'black':0.1})
        else:
            return DDist({'perfect':0.1, 'smudged':0.7, 'black':0.2})
    def transitionModel(input):
        def transitionGivenInput(oldState):
            if oldState == 'good':
                return DDist({'good':0.7, 'bad':0.3})
            else:
                return DDist({'good':0.1, 'bad':0.9})
        return transitionGivenInput
    copyMachine = SSM(prior,transitionModel,observationModel)
    print copyMachine.transduce(['copy']*20)

#==========STOCHASTIC STATE ESTIMATOR==============
class SSE(sm.SM):
    # Bayesian estimator tracking a belief distribution over the
    # internal state of a wrapped SSM.
    def __init__(self, machine):
        self.machine = machine
        self.startState = machine.prior
        self.transitionModel = machine.transition
        self.observationModel = machine.observation

    #Keep in mind for a Stochastic State Estimator the input
    #must be the last observed value and the state is the Bayesian
    #Machine's degree of belief, expressed as a probability distribution
    #over all known internal states belonging to the SSM
    def getNextValues(self, state, inp):
        #First, calculate an updated belief of the last state, given
        #the known output
        #Calculates Pr(S|O = obs)
        belief = JDist(state, self.observationModel).conditionOn(1, inp)
        #Second, run the belief state through the transition model
        #to predict the current state of the machine
        n=0
        partialDist={}
        for possibility in belief.d.keys():
            #go through all states
            partialDist[n] = self.transitionModel(0)(possibility).d
            #figure out what would happen, were you in that state
            for event in partialDist[n].keys():
                partialDist[n][event] *= belief.prob(possibility)
                #multiply by the chance you actually were in that state
            n+=1
        totalDist = partialDist[0]
        for event in partialDist[0].keys():
            for count in range(1, n):
                totalDist[event] += partialDist[count][event]
                #sum up the partial probabilities
        beliefPrime = DDist(totalDist)
        return (beliefPrime, beliefPrime)

def testSuite_SSE():
    # Run the copy machine and the estimator side by side for 20 steps.
    prior = DDist({'good':0.9, 'bad':0.1})
    def observationModel(state):
        if state == 'good':
            return DDist({'perfect':0.8, 'smudged':0.1, 'black':0.1})
        else:
            return DDist({'perfect':0.1, 'smudged':0.7, 'black':0.2})
    def transitionModel(input):
        def transitionGivenInput(oldState):
            if oldState == 'good':
                return DDist({'good':0.7, 'bad':0.3})
            else:
                return DDist({'good':0.1, 'bad':0.9})
        return transitionGivenInput
    copyMachine = SSM(prior,transitionModel,observationModel)
    copyEstimator = SSE(copyMachine)
    copyMachine.start()
    copyEstimator.start()
    for n in range(20):
        observation = copyMachine.step("copy")
        print "Copy machine => ", observation
        belief = copyEstimator.step(observation)
        print "Estimate of copier's status: \n", belief.output(),"\n\n"

#============================MAIN==============================
def main():
    testSuite_SSE()
    print "Program complete."

#This will run the testing suite if the program is run directly
if __name__ == '__main__':
    main()
UTF-8
Python
false
false
2,014
10,728,828,347,847
e8a63bd309e18dba6fc9c3cc8019d6d693b200d1
154108ba1afff2c7d679c3aee47f867f673d03fe
/django_project/changes/views/entry.py
26e5d4676b95a03169af013588e95f3a6e3809c4
[]
no_license
dodobas/visual_changelog
https://github.com/dodobas/visual_changelog
0101a3fab93233879d01fa32532e7a5be9828939
d4b4590252b98a32e8c7993a6ab8f39f1867823f
refs/heads/master
2021-01-17T10:48:19.925270
2013-09-15T15:32:15
2013-09-15T15:32:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging
logger = logging.getLogger(__name__)

# noinspection PyUnresolvedReferences
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import (
    ListView,
    CreateView,
    DeleteView,
    DetailView,
    UpdateView,
    RedirectView,
    TemplateView)
from django.http import HttpResponseRedirect
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
from pure_pagination.mixins import PaginationMixin

from ..models import Project, Category, Version, Entry
from ..forms import ProjectForm, CategoryForm, VersionForm, EntryForm


class EntryMixin(object):
    """Shared model/form configuration for all Entry views."""
    model = Entry  # implies -> queryset = Entry.objects.all()
    form_class = EntryForm


class EntryCreateUpdateMixin(EntryMixin, LoginRequiredMixin):
    """Common behaviour for the create and update views."""

    def get_context_data(self, **kwargs):
        # BUGFIX: super() was called with EntryMixin, skipping this class
        # in the MRO; pass the class itself as Python 2 convention requires.
        context = super(EntryCreateUpdateMixin, self).get_context_data(**kwargs)
        return context

    def form_invalid(self, form):
        return self.render_to_response(self.get_context_data(form=form))


class EntryListView(EntryMixin, PaginationMixin, ListView):
    """Paginated list of approved entries."""
    context_object_name = 'entries'
    template_name = 'entry/list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        context = super(EntryListView, self).get_context_data(**kwargs)
        context['num_entries'] = self.get_queryset().count()
        context['unapproved'] = False
        return context

    def get_queryset(self):
        """Only approved objects are shown."""
        qs = Entry.objects.all()
        return qs


class EntryDetailView(EntryMixin, DetailView):
    """Detail page for a single entry, visible to anyone."""
    context_object_name = 'entry'
    template_name = 'entry/detail.html'

    def get_context_data(self, **kwargs):
        context = super(EntryDetailView, self).get_context_data(**kwargs)
        return context

    def get_queryset(self):
        """Anyone can see any entry."""
        qs = Entry.all_objects.all()
        return qs

    def get_object(self, queryset=None):
        # Attach the requesting user so templates can check permissions.
        obj = super(EntryDetailView, self).get_object(queryset)
        obj.request_user = self.request.user
        return obj


# BUGFIX: access-control mixins must precede the generic view in the base
# list, otherwise their dispatch() never runs and the view is unprotected.
class EntryDeleteView(LoginRequiredMixin, EntryMixin, DeleteView):
    """Delete an entry; staff may delete any, others only their own."""
    context_object_name = 'entry'
    template_name = 'entry/delete.html'

    def get_success_url(self):
        return reverse('entry-list')

    def get_queryset(self):
        qs = Entry.all_objects.all()
        if self.request.user.is_staff:
            return qs
        else:
            # BUGFIX: the filtered queryset was computed but never
            # returned, so non-staff users got None here.
            return qs.filter(creator=self.request.user)


class EntryCreateView(EntryCreateUpdateMixin, CreateView):
    """Create a new (initially unapproved) entry."""
    context_object_name = 'entry'
    template_name = 'entry/create.html'

    def get_success_url(self):
        return reverse('pending-entry-list')

    def form_valid(self, form):
        self.object = form.save(commit=False)
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())


class EntryUpdateView(EntryCreateUpdateMixin, UpdateView):
    """Update an entry; staff may edit any, others only their own."""
    context_object_name = 'entry'
    template_name = 'entry/update.html'

    def get_form_kwargs(self):
        kwargs = super(EntryUpdateView, self).get_form_kwargs()
        return kwargs

    def get_queryset(self):
        qs = Entry.all_objects.all()
        if self.request.user.is_staff:
            return qs
        else:
            return qs.filter(creator=self.request.user)

    def get_success_url(self):
        return reverse('pending-entry-list')


# BUGFIX: StaffuserRequiredMixin moved ahead of the generic view classes
# so its access check actually runs (see note on EntryDeleteView).
class PendingEntryListView(StaffuserRequiredMixin, EntryMixin,
                           PaginationMixin, ListView):
    """List all unapproved entries"""
    context_object_name = 'entries'
    template_name = 'entry/list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        context = super(PendingEntryListView, self).get_context_data(**kwargs)
        context['num_entries'] = self.get_queryset().count()
        context['unapproved'] = True
        return context

    def get_queryset(self):
        qs = Entry.unapproved_objects.all()
        if self.request.user.is_staff:
            return qs
        else:
            return qs.filter(creator=self.request.user)


class ApproveEntryView(EntryMixin, StaffuserRequiredMixin, RedirectView):
    """Mark an unapproved entry as approved, then redirect back."""
    permanent = False
    query_string = True
    pattern_name = 'pending-entry-list'

    def get_redirect_url(self, pk):
        entry_qs = Entry.unapproved_objects.all()
        entry = get_object_or_404(entry_qs, pk=pk)
        entry.approved = True
        entry.save()
        return reverse(self.pattern_name)
UTF-8
Python
false
false
2,013
13,151,189,893,717
cba074364ca99de4801e5bcc1b0fa22e45c40e34
66075efb4a9c0f6e5c7cb1d0024487b5c69f22c4
/Balanced/attack_algorithms.py
1226d503a37995f622fc19b018982555fd0be112
[]
no_license
todayispotato/tron
https://github.com/todayispotato/tron
248d7f96156a55c1f7fe5433ceb58b08c0d1313d
eae88e529d402f19c127f2e59dd7018a54311e3e
refs/heads/master
2020-11-26T19:37:24.505110
2013-05-31T17:01:55
2013-05-31T17:01:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import tron, time
from heapq import heappush, heappop, heapify


class AStar:
    """Time-bounded A* search from my position to the opponent's."""

    def execute(self, board):
        # Returns the shortest path found within ~0.4s, excluding the
        # start cell.
        start = time.clock()
        goal = board.them()
        # CHANGED: was a list -- set membership is O(1) instead of O(n)
        # inside the hot search loop.
        closedSet = set()
        openSet = dict()
        startPath = AStar.Path(board.me(), [], 0,
                               board.distance(board.me(), goal))
        openSet[board.me()] = startPath
        queue = [startPath]
        tron.log("goal: " + str(goal))
        shortestPath = []
        while len(queue) > 0 and time.clock() - start < 0.4:
            path = heappop(queue)
            shortestPath = path.visited
            if path.node == goal:
                break
            closedSet.add(path.node)
            destinations = [dest for dest in board.adjacent(path.node)
                            if board.passable(dest) or dest == goal]
            for dest in destinations:
                if dest in closedSet:
                    continue
                newScore = path.score + 1
                # CHANGED: `dest not in openSet` instead of
                # `openSet.keys()` (O(n) list scan in Python 2).
                if dest not in openSet or openSet[dest] not in queue:
                    newPath = AStar.Path(dest, list(path.visited), newScore,
                                         board.distance(dest, goal))
                    openSet[dest] = newPath
                    heappush(queue, newPath)
                elif newScore < openSet[dest].score and openSet[dest] in queue:
                    # Found a cheaper route to an already-open node.
                    openSet[dest].node = dest
                    openSet[dest].score = newScore
                    newVisited = list(path.visited)
                    newVisited.append(dest)
                    openSet[dest].visited = newVisited
                    openSet[dest].estimate = board.distance(dest, goal)
        tron.log("Shortest path took: " + str(float(time.clock() - start)))
        # Drop the starting cell from the returned path.
        return shortestPath[1:]

    class Path:
        """A node plus the route taken to reach it (f = score + estimate)."""

        def __init__(self, node, visited, score, estimate):
            self.node = node
            self.visited = visited
            self.visited.append(node)
            self.score = score        # g: cost so far
            self.estimate = estimate  # h: heuristic distance to goal

        def __eq__(self, other):
            return self.visited == other.visited

        def __cmp__(self, other):
            # Order by f = g + h for the heap (Python 2 comparison).
            return cmp(self.score + self.estimate,
                       other.score + other.estimate)


class Minimax:
    """Iterative-deepening minimax over my/their alternating moves,
    scored by flood-fill territory difference."""

    def __init__(self):
        self.cachedLevels = []

    def prepareCache(self, board, spaceCount, enemySpaceCount):
        # Reuse the previous search tree: drop the two plies that were
        # just played and re-root at the node matching the actual moves.
        # (Currently unused -- see the commented call in execute().)
        if len(self.cachedLevels) <= 3:
            root = Minimax.TreeNode(None, board.me(), board.them(),
                                    [board.me(), board.them()], True, 0)
            levels = [Minimax.Level([root], True)]
            for dir in board.moves():
                move = board.rel(dir)
                visited = list(root.visited)
                visited.append(move)
                root.addChild(root, move, root.them, visited,
                              spaceCount[dir] - enemySpaceCount[dir])
            self.cachedLevels = levels
            return
        myMove = None
        for node in self.cachedLevels[1].nodes:
            if node.me == board.me():
                myMove = node
                break
        theirMove = None
        for node in myMove.children:
            if node.them == board.them():
                theirMove = node
                break
        theirMove.score = 0
        self.cachedLevels = self.cachedLevels[2:]
        self.cachedLevels[0] = Minimax.Level([theirMove], True)

    def execute(self, board, spaceCount, enemySpaceCount):
        # Map each candidate direction to its minimax score.
        #self.prepareCache(board, spaceCount, enemySpaceCount)
        levels = self.minimax(board, spaceCount, enemySpaceCount)
        #self.cachedLevels = levels
        root = levels[0].nodes[0]
        tron.log("Minimax level: " + str(len(levels)-1))
        minimaxSpaceCount = dict()
        for node in root.children:
            for dir in spaceCount.keys():
                if board.rel(dir) == node.me:
                    minimaxSpaceCount[dir] = node.score
        return minimaxSpaceCount

    def minimax(self, board, spaceCount, enemySpaceCount):
        # Build the root ply from the initial space counts, then expand
        # one ply per iteration until the ~0.85s budget runs out.
        root = Minimax.TreeNode(None, board.me(), board.them(),
                                [board.me(), board.them()], True, 0)
        levels = [Minimax.Level([root], True)]
        for dir in spaceCount.keys():
            move = board.rel(dir, root.me)
            visited = list(root.visited)
            visited.append(move)
            root.addChild(root, move, root.them, visited,
                          spaceCount[dir] - enemySpaceCount[dir])
        outOfTime = False
        while time.clock() - board.startTime < 0.85 and len(levels[len(levels)-1].nodes) > 0:
            currLevel = levels[len(levels)-1]
            for parent in currLevel.nodes:
                if time.clock() - board.startTime > 0.85:
                    outOfTime = True
                    break
                # Only expand children where the game is still live and
                # neither player is boxed in.
                nodeChildren = [child for child in parent.children
                                if child.me != child.them
                                and len(board.adjacentImpassableOrVisited(child.me, child.visited)) < 4
                                and len(board.adjacentImpassableOrVisited(child.them, child.visited)) < 4]
                heapify(nodeChildren)
                while len(nodeChildren) > 0:
                    node = heappop(nodeChildren)
                    if node.myMove:
                        movedFrom = node.me
                        other = node.them
                    else:
                        movedFrom = node.them
                        other = node.me
                    unvisitedMoves = [move for move in board.moveableDestinations(movedFrom)
                                      if move not in node.visited or move == other]
                    newScore = None
                    for move in unvisitedMoves:
                        moveVisited = list(node.visited)
                        moveVisited.append(move)
                        if move == other:
                            # Head-on collision: draw.
                            score = 0
                        elif len(board.adjacentImpassableOrVisited(move, moveVisited)) == 4:
                            # Dead end: worst/best possible score.
                            boardSize = (board.width - 2) * (board.height - 2)
                            if node.myMove:
                                score = -boardSize
                            else:
                                score = boardSize
                        else:
                            # Score by flood-fill territory difference.
                            moveScore = tron.floodfill.floodfillScore(board, move, moveVisited)
                            otherScore = tron.floodfill.floodfillScore(board, other, moveVisited)
                            if node.myMove:
                                score = moveScore - otherScore
                            else:
                                score = otherScore - moveScore
                        if node.myMove:
                            child = node.addChild(node, move, other, moveVisited, score)
                            if newScore is None or score > newScore:
                                newScore = score
                        else:
                            child = node.addChild(node, other, move, moveVisited, score)
                            if newScore is None or score < newScore:
                                newScore = score
                    node.score = newScore
                parent.refineScore()
            if outOfTime:
                break
            # Prune enemy plies down to the moves matching the minimax
            # value before expanding the next level.
            enemyMoveLevels = [level for level in levels if not level.myMove]
            for level in enemyMoveLevels:
                for parent in level.nodes:
                    parent.children = [child for child in parent.children
                                       if child.score == parent.score]
            newLevel = Minimax.Level([], not currLevel.myMove)
            for parent in currLevel.nodes:
                newLevel.nodes.extend(parent.children)
            levels.append(newLevel)
        # NOTE: large blocks of commented-out debug/dead code (level
        # dumps and an alternative addChild) were removed here.
        return levels

    class Level:
        """One ply of the search tree."""

        def __init__(self, nodes, myMove):
            self.nodes = nodes
            self.myMove = myMove

    class TreeNode:
        """A game position: both players' heads, the visited cells, whose
        move produced it, and its (minimax-refined) score."""

        def __init__(self, parent, me, them, visited, myMove, score):
            self.parent = parent
            self.me = me
            self.them = them
            self.visited = visited
            self.myMove = myMove
            self.score = score
            self.children = []

        def addChild(self, node, me, them, moveVisited, score):
            child = Minimax.TreeNode(self, me, them, moveVisited,
                                     not self.myMove, score)
            heappush(self.children, child)
            return child

        def __cmp__(self, other):
            # Reversed: higher scores pop first from the heap (Python 2).
            return cmp(other.score, self.score)

        def refineScore(self):
            # Back-propagate minimax values: max over my moves, min over
            # theirs, recursing up to the root.
            if len(self.children) > 0:
                if self.myMove:
                    maxScore = -100
                    for child in self.children:
                        if child.score > maxScore:
                            maxScore = child.score
                    self.score = maxScore
                else:
                    minScore = 100
                    for child in self.children:
                        if child.score < minScore:
                            minScore = child.score
                    self.score = minScore
            if self.parent is not None:
                self.parent.refineScore()
UTF-8
Python
false
false
2,013
17,059,610,128,852
444c81faa8218ee40cff7d4504ea3ccd436a0086
6de138140a350a3439e4d998d76092fe985ae499
/pydj/maui/map/helpers.py
8e153485c9305083776902f2ca55fe90fd533faf
[]
no_license
zarcoder/maui
https://github.com/zarcoder/maui
3e4c8be6fe745cb9edfbcaf7483970116cd60ed7
2c7df426716e374220583f6c2c92a1d5c2115f20
refs/heads/master
2021-01-21T04:11:25.935298
2013-01-16T16:27:05
2013-01-16T16:27:05
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from settings import MAPHOSTS,TEMPLATE_DIRS
import re,logging
from django.template import Context, Template
import libssh2
import os
import scp_upload
import ssh_exec
import sftp_listdir

logger=logging.getLogger(__name__)


def listjobsSlurm(session):
    """List jobs on a SLURM host via squeue, skipping the header line."""
    listres = parseoutput(session,"squeue",2,-1,["id","queue","name" , "user", "state", "timeUse" , "nodes" , "nodeList"])
    return listres


def listjobsTorque(session):
    """List jobs on a Torque host via qstat (2 header lines skipped)."""
    listres = parseoutput(session,"qstat",3,0,["id","name", "user", "timeUse", "state", "queue"])
    return listres


def listresSlurm(session):
    """List SLURM nodes via ``sinfo -N -l``."""
    listres = parseoutput(session, "sinfo -N -l", 1, -1, ["NODELIST", "NODES", "PARTITION" , "STATE", "CPUS" , "SCT", "MEMORY", "TMP_DISK", "WEIGHT", "FEATURES", "REASON"])
    return listres


def regexpf(key,field):
    # Build a (key, regex) pair that extracts "field=value" tokens from
    # the one-line-per-node output of "scontrol show ... -o".
    return (key , re.compile("^" + field + "=(\S+)") )


def listnodesSlurm(session):
    """List SLURM node details via ``scontrol show nodes -o``."""
    #NodeName=drago0 Arch=x86_64 CoresPerSocket=10 CPUAlloc=0 CPUErr=0 CPUTot=40 Features=(null) Gres=(null) NodeAddr=drago0 NodeHostName=drago0 OS=Linux RealMemory=1 Sockets=4 State=IDLE ThreadsPerCore=1 TmpDisk=0 Weight=1 BootTime=2012-02-09T10:21:15 SlurmdStartTime=2012-02-09T12:02:43 Reason=(null)
    listres = parseoutput(session, "scontrol show nodes -o", -1, -1, [
        regexpf('id', 'NodeName'),
        regexpf('Arch', 'Arch'),
        regexpf('CoresPerSocket', 'CoresPerSocket'),
        regexpf('CPUAlloc', 'CPUAlloc'),
        regexpf('CPUErr', 'CPUErr'),
        regexpf('CPUTot', 'CPUTot'),
        regexpf('Features', 'Features'),
        regexpf('Gres', 'Gres'),
        regexpf('NodeAddr', 'NodeAddr'),
        regexpf('NodeHostName', 'NodeHostName'),
        regexpf('OS', 'OS'),
        regexpf('RealMemory', 'RealMemory'),
        regexpf('Sockets', 'Sockets'),
        regexpf('State', 'State'),
        regexpf('ThreadsPerCore', 'ThreadsPerCore'),
        regexpf('TmpDisk', 'TmpDisk'),
        regexpf('Weight', 'Weight'),
        regexpf('BootTime', 'BootTime'),
        regexpf('SlurmdStartTime', 'SlurmdStartTime'),
        regexpf('Reason', 'Reason')
        ])
    return listres


def listnodes(session):
    """List Moab nodes via ``diagnose -n``, sorted by node id."""
    # NOTE(review): the " Network" / " features" keys keep their original
    # leading spaces — callers appear to depend on these exact keys.
    listres = sorted(parseoutput(session, "diagnose -n", 4, 4,["id","state","procs","memory","disk", "Swap" , "Speed" , "Opsys", "Arch", "Par" , "load", "Res" ,"classes"," Network"," features"]), key=lambda node: node['id'] )
    return listres


def getgangliaurl(sessionhost):
    """Return the Ganglia graph URL prefix for a host, or None if disabled.

    The caller is expected to append a host name to the returned string
    (it ends with "&h=").
    """
    if(MAPHOSTS[sessionhost]['enable_ganglia']):
        if MAPHOSTS[sessionhost].has_key("ganglia_cluster_name"):
            cn = MAPHOSTS[sessionhost]["ganglia_cluster_name"]
        else:
            cn = sessionhost
        return MAPHOSTS[sessionhost]['ganglia_url'] + 'graph.php?m=load_one&z=small&c=' + cn + '&x=0&g=load_report&n=0&r=hour&h='
    return None


def listres(session):
    """List reservations via ``showres -n``."""
    return parseoutput(session,"showres -n",5,1,["nodeid", "type" , "resid" , "jobState", "task" , "start" , "duration" , "startTimeDayOfWeek" , "startTimeMonth" , "startTimeDayOfMonth", "startTime" ])


def liststats(session):
    """List per-user usage statistics via ``showstats -u``."""
    return parseoutput(session,"showstats -u",5,2,[ "user" ,"jobs", "procs" , "procHours" , "jobs" , "jobsprc", "phReq" , "phReqPrc" , "phDed" , "phDedPrc" , "fsTgt" , "avgXF" , "maxXF" , "avgQH" , "effic" , "wCAcc" ])


def getqueuesfromcfg(sessionhost):
    """Return the configured queues as a list of (value, label) pairs."""
    queues = MAPHOSTS[sessionhost]['queues']
    if isinstance(queues,tuple):
        return map(lambda x: (x,x), MAPHOSTS[sessionhost]['queues'])
    else:
        return [(queues, queues)]


def getjoboutSlurm(session,jobid):
    return re.split("\n",execcommand(session,"scontrol show job "+ jobid))


def getjobout(session,jobid):
    return re.split("\n",execcommand(session,"checkjob -v "+ jobid))


def getjoboutTorque(session,jobid):
    return re.split("\n",execcommand(session,"qstat -f "+ jobid))


def getnodeoutSlurm(session,nodeid):
    return re.split("\n",execcommand(session,"scontrol show node "+ nodeid))


def getnodeout(session,nodeid):
    return re.split("\n",execcommand(session,"checknode -v "+ nodeid))


def getScriptContent(h, rmtype):
    """Render the job-script template for the given resource-manager type.

    h is the template context dict; rmtype selects script_<rmtype>.sh.
    """
    f = open (TEMPLATE_DIRS + "/script_" + rmtype +".sh", "r")
    try:
        # Fix: close the template file even if read()/Template() raises;
        # the original leaked the handle on error.
        t = Template(f.read())
    finally:
        f.close()
    c = Context(h)
    return t.render(c)


def submitJob(session, scriptFilename):
    """Submit a job script with sbatch/qsub; return the job id or None."""
    rmtype = MAPHOSTS[session["host"]]['rmtype']
    if rmtype == "slurm":
        submitcommand = "sbatch"
        resregexp = re.compile("^Submitted batch job (\S+)$")
    else:
        submitcommand = "qsub"
        resregexp = re.compile("^\s*(\S+).+")
    res = execcommand(session, submitcommand + " "+scriptFilename )
    m = resregexp.match(res)
    if m:
        jobid = m.group(1)
        return jobid
    return None


def parseoutput(session,cmd,tailplus,headminus,fields):
    """Run cmd remotely and parse its whitespace-separated output.

    tailplus / headminus (-1 = disabled) trim header/footer lines via
    ``tail -n +N`` / ``head -n -N``.  Each field may be a plain key or a
    (key, regex) pair from regexpf(); lines that do not yield a value for
    every field are logged and dropped.  Returns a list of dicts.
    """
    cmdstr = cmd
    if tailplus!=-1:
        cmdstr += " | tail -n +"+ str(tailplus)
    if headminus!=-1:
        cmdstr += " | head -n -" + str(headminus)
    out = execcommand(session,cmdstr)
    lines = re.split("\n", out)
    res = []
    for line in lines:
        m = re.split("\s+",line)
        o = {}
        #if string starts with \s+ split will put "" in the first elem
        if m[0]=="":
            m.pop(0)
        i=0
        while i<len(fields) and i<len(m):
            f = fields[i]
            # como en scontrol show nodes -o
            #output NodeName=drago0 ...
            # ('nodeName' , re.compile('^NodeName=(\S+)'))
            if isinstance(f,tuple):
                regexp = f[1]
                field = f[0]
                rematch = regexp.match(m[i])
                if rematch:
                    value = rematch.group(1)
                else:
                    value=""
            else:
                field = f
                value = m[i]
            o[field] = value
            i+=1
        if i==len(fields):
            res.append(o)
        else:
            logger.debug("LINE " + str(i) +":" +line + "END")
    return res


def remoteListdir(session, dirname="."):
    """List a remote directory over SFTP.

    Returns sorted (name, is_directory) pairs; a ".." entry is prepended
    when not listing the session root.
    """
    host = session['host']
    listdirdata = sftp_listdir.MySFTPClient(hostname=MAPHOSTS[host]['host'], username=session['username'], password=session['password'], port=MAPHOSTS[host]['port'] ).listdir(dirname)
    filenames = []
    for data in listdirdata:
        if len(data)>=2 and data[1]:
            if data[0]!='.' and data[0]!="..":
                # data[1][0] == 4096 marks a directory entry — presumably the
                # SFTP attribute flag; TODO confirm against MySFTPClient.
                filenames.append( (data[0], data[1][0] == 4096) )
    filenames = sorted(filenames, key=lambda fn: fn[0])
    if dirname!=".":
        filenames.insert(0,("..",True))
    return filenames


def authenticate(host,username,password):
    """Try an SSH login with the given credentials; return the result."""
    src = ssh_exec.SSHRemoteClient(MAPHOSTS[host]['host'], username, password, MAPHOSTS[host]['port'])
    out = src.execute()
    return out


def execcommand(session,cmd):
    """Execute cmd over SSH using the credentials stored in session."""
    host = session['host']
    src = ssh_exec.SSHRemoteClient(MAPHOSTS[host]['host'], session['username'], session['password'], MAPHOSTS[host]['port'])
    res = src.execute(cmd)
    return res


def uploadfile(session,filename,remotefilename):
    """Upload a local file to the session host via SCP."""
    host = session['host']
    myscp = scp_upload.MySCPClient(hostname=MAPHOSTS[host]['host'], username=session['username'], password=session['password'], port=MAPHOSTS[host]['port'])
    myscp.send(filename,remotefilename)


def downloadfile(session,remotefilename):
    """Download a remote file from the session host via SCP."""
    host = session['host']
    myscp = scp_upload.MySCPClient(hostname=MAPHOSTS[host]['host'], username=session['username'], password=session['password'], port=MAPHOSTS[host]['port'])
    return myscp.recv(remotefilename)
UTF-8
Python
false
false
2,013
14,577,119,010,580
967e548d6af3d65b64af9de37c6b534077e8480e
c0f87f93063502169e335037422e24d8dd0b7a20
/kaggle/criteo-display-ad-challenge/sample_generate.py
d5c725229e5fd2a3f4b1944e4e35a49c4c26d9dd
[]
no_license
dataninjas/challenges
https://github.com/dataninjas/challenges
4c9d3bc086137274962c0b682cfe8fc3a4046897
7f9bb1e289aa7f79e2a7a93b0fe71473692c0025
refs/heads/master
2021-01-22T08:49:04.517747
2014-09-17T09:23:26
2014-09-17T09:23:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Generate the samples
# TODO: There should be a single-pass way to generate all samples at once
import sample_util

# Total line count of train.csv, precomputed so we do not rescan the
# (very large) file on every run.
#file_size = file_len('train.csv')
file_size = 45840618

# One sample file is written per size, named train_sample_<size>.csv.
# (The original duplicated the generate_sample call once per size.)
for sample_size in (20000, 100000):
    sample_util.generate_sample('train.csv', file_size, sample_size,
                                "train_sample_%s.csv" % sample_size, True)
UTF-8
Python
false
false
2,014
15,771,119,955,461
21099f7bec84e9827b33d452e3331964a64788fe
8b95a52d00ef8445fd5e1f404e7f35f17008fd1d
/splitwarning.py
31e0fdd2d26f23eac7d9293582b9d381ae32af01
[ "MIT", "CC-BY-SA-3.0", "LicenseRef-scancode-mit-old-style", "Python-2.0" ]
non_permissive
h4ck3rm1k3/pywikipediabot
https://github.com/h4ck3rm1k3/pywikipediabot
77b81e852da96b730a1dbf1d4bf777270164d102
489b55b7a71a5c55d9ddd917a27302fa2b74458c
refs/heads/master
2021-01-15T22:23:40.846621
2012-08-01T18:04:12
2012-08-01T18:04:12
4,473,407
5
2
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """Splits a interwiki.log file into chunks of warnings separated by language. The following parameter is supported: -folder: The target folder to save warning files, if given. Otherwise use the /logs/ folder. """ # # (C) Rob W.W. Hooft, 2003 # (C) Pywikipedia bot team, 2004-2012 # # Distributed under the terms of the MIT license. # __version__ = '$Id: splitwarning.py 10023 2012-03-17 18:03:32Z xqt $' # import wikipedia as pywikibot import codecs import re def splitwarning(folder): files={} count={} fn = pywikibot.config.datafilepath("logs", "interwiki.log") logFile = codecs.open(fn, 'r', 'utf-8') rWarning = re.compile('WARNING: (?P<family>.+?): \[\[(?P<code>.+?):.*') for line in logFile: m = rWarning.match(line) if m: family = m.group('family') code = m.group('code') if code in pywikibot.getSite().languages(): if not code in files: files[code] = codecs.open( pywikibot.config.datafilepath( folder, 'warning-%s-%s.log' % (family, code)), 'w', 'utf-8') count[code] = 0 files[code].write(line) count[code] += 1 for code in files.keys(): print '* %s (%d)' % (code, count[code]) def main(*args): folder = 'logs' for arg in pywikibot.handleArgs(*args): if arg.startswith("-folder"): folder = arg[len('-folder:'):] splitwarning(folder) if __name__ == "__main__": # No need to have me on the stack - I don't contact the wiki pywikibot.stopme() main()
UTF-8
Python
false
false
2,012
403,726,935,714
f779e143e6f9aff675b790bac4e133bcbd0a1c09
28827f3e60166e9f9b5c179ccea6f4a16a6729e0
/errandboy/fileio.py
4be425cbae3672c8643c08e131aff5dae836c15b
[]
no_license
BobRoss/Lindon-Test-Framework-
https://github.com/BobRoss/Lindon-Test-Framework-
89581434a478605a52a7f71de5621a2603cc7eb5
76442ae3917f99c9b01f9f044a1a74a000a89d31
refs/heads/master
2016-08-07T06:30:27.593027
2013-11-08T16:20:03
2013-11-08T16:20:03
14,237,855
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/bin/python from os import path from functools import partial from sys import stdin, stderr, argv from flow import firstOrNone, pipeline from diagnostics import reportyFunc, test def attempt(f): # rig:3 '''Functions decorated as "attempts" will consider a return value of None to denote failures and will print their own doc strings as error messages.''' def _do(*things, **etc): def measure(outcome): if outcome is not None: return outcome if "-v" in argv: stderr.write("%s(%s):\n\t%s\n"%(f.func_name, ", ".join(map(repr, things)), f.__doc__)) return None return measure(f(*things, **etc)) return _do def mchain(o, *methods): return [getattr(o, m, lambda: None)() for m in methods] @attempt def fpth(filePath): # rig:3 '''File does not exist.''' return filePath and path.exists(filePath) and filePath or None @attempt def loadf(mode, filePath): # rig:3 '''Can't load specified file.''' return fpth(filePath) and open(filePath, mode) or None @attempt def goodStream(stream): '''Not a valid byte stream.''' return getattr(stream, "readlines", None) and stream @attempt def firstGood(*streams): '''Did not receive any good byte streams.''' return firstOrNone(*filter(goodStream, streams)) @attempt def linesOf(stream): # rig:3 '''Could not read lines out of stream.''' return firstOrNone(*mchain(stream, "readlines", "close")) @attempt def goodLines(linesrc): # rig:3 '''Bad line list.''' return hasattr(linesrc, "__iter__") or None @attempt def _cleanLines(cleanWith, lines): '''Can not clean the lines list with method specified.''' if goodLines(lines) and callable(cleanWith): return map(cleanWith, lines) return None test(_cleanLines, ["",""], lambda x: "", ["aoeu", "ntehu"]) test(_cleanLines, ["aoeu","ueoa"], lambda x: x.strip(), [" aoeu ", "\nueoa\t"]) test(_cleanLines, [3,4], lambda x: x+2, [1,2]) @attempt def bytesOf(stream): '''Could not read data from stream.''' return firstOrNone(*mchain(stream, "read", "close")) @attempt def readFile(filePath, mode, *etc): # rig:3 '''Failed to 
load file''' return pipeline(filePath, fpth, partial(loadf, mode), *etc) @attempt def readFromFile(filePath, alternate, mode, *etc): # rig:3 '''Failed to open file.''' return pipeline(firstGood(readFile(filePath, mode), alternate), *etc) @attempt def readLines(filePath, alternate=stdin, cleaner=str.strip): # rig:3 '''See above for failure information.''' return readFromFile(filePath, alternate, "rt", linesOf, partial(_cleanLines, cleaner)) @attempt def readBytes(filePath, alternate=stdin, mode="rt"): # rig:3 '''See above for failure information.''' return readFromFile(filePath, alternate, mode, bytesOf) @attempt def writeTo(lines, stream): # rig:3 '''Unable to write to stream.''' return goodStream(stream) and firstOrNone(stream.writelines(lines) is None, stream.close()) @attempt def writeLines(filePath, *lines): # rig:3 return pipeline(filePath, fpth, partial(loadf, "wt"), partial(writeTo, lines)) @reportyFunc def test_firstGood(): class DummyGoodStream: def readlines(self, *aoeu): return True def test_1(): return firstGood(None, DummyGoodStream(), None).readlines() return test_1() @reportyFunc def test_goodStream(): class DummyGoodStream: def readlines(self, *aoeu): return True def test_1(): return goodStream(DummyGoodStream()) is not None def test_2(): return goodStream(None) is None return test_1() and test_2() @reportyFunc def test_mchain(): class DummyObject: def first(self, *received): return 1 def second(self, *received): return 2 def test_1(): return mchain(DummyObject(), "first", "second")==[1,2] def test_2(): return mchain(DummyObject(), "first", "aoeu", "second")==[1,None,2] return test_1() and test_2() @reportyFunc def test_bytesOf(): class ByteSource(): def close(self, *etc): return True def read(self, *etc): return "Hooray" def test_1(): return bytesOf(ByteSource())=="Hooray" def test_2(): return bytesOf(None) is None return test_1() and test_2() def testModule(): return [test_firstGood, test_mchain, test_goodStream, test_bytesOf] if 
__name__=="__main__": if all(map(apply, testModule())): print "OK" else: print "FAILURES!"
UTF-8
Python
false
false
2,013
10,625,749,108,008
77f5b1e4e74788f304c5296a06700453efebe08f
150d783ac685ebf61e4a2d4f9558b90ead7ab629
/bin/oncall.py
80a253300e06d3619b4cb3c338ea01f5167db1cf
[]
no_license
magicaltrevor/Oncall
https://github.com/magicaltrevor/Oncall
1391ca7cfebaa2da75a571781e76ae9e2ce09163
e0f45a4d3b6b7b28e9bff961ffcf7bca754a9544
refs/heads/master
2020-12-25T02:01:40.383350
2012-03-10T20:48:12
2012-03-10T20:48:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python import os import sys import logging from optparse import OptionParser # add this file location to sys.path cmd_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if cmd_folder not in sys.path: sys.path.insert(-1, cmd_folder) sys.path.insert(-1, cmd_folder + "/classes") import mysql_layer as mysql import twilio_layer as twilio import user_layer as User import alert_layer as Alert import util_layer as Util conf = Util.load_conf() Util.init_logging("client") def user(): ''' This function handles the rest of the command as it pertains to a user(s). ''' # Parse the command line parser = OptionParser() parser.add_option('-i', '--id', dest='id', help='User id', type='int', default=0) parser.add_option('-n', '--name', dest='name', help='User name', type='string', default='') parser.add_option('-p', '--phone', dest='phone', help='Phone number', type='string', default='') parser.add_option('-e', '--email', dest='email', help='Email address', type='string', default='') parser.add_option('-t', '--team', dest='team', help='Team', type='string', default='') # default is set to 100 as an easy means of figuring out if an state is inputted by user parser.add_option('-s', '--state', dest='state', help='State (0 = in rotation, 3 = off rotation, 9 = global entity)', type='int', default=100) parser.add_option('-d', '--delete', dest='delete', help='Delete result of user list query', action="store_true", default=False) parser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='') parser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action="store_true", default=False) (opts, args) = parser.parse_args() user_usage=''' oncall.py user create (options) oncall.py user list (options) oncall.py user edit -i <id> (options) ''' if (len(sys.argv) > 2) and sys.argv[2] in ['create', 'list', 'edit']: mode = 
sys.argv[2] if mode == "create": o = user_create(opts) if mode == "list": o = user_list(opts) if mode == "edit": o = user_edit(opts) return o else: return user_usage def user_create(opts): ''' Create a new user in the db. ''' try: if opts.name == '': return "User name is not set (-n)" if opts.email == '': return "User email is not set (-e)" if opts.phone == '' and opts.state != 9: return "User phone is not set (-p)" if "@" not in opts.email or "." not in opts.email: return "Invalid email address, try again" if (opts.phone.startswith("+") and len(opts.phone) == 12): pass else: if opts.state != 9: return "Invalid phone number format. Must be like '+12225558888' (no quotes)" if opts.team == '': opts.team = "default" if opts.state == 100: opts.state = 0 newuser = User.User() newuser.name = opts.name newuser.email = opts.email newuser.phone = opts.phone if opts.team != '': newuser.team = opts.team if opts.state != 100: newuser.state = opts.state newuser.save_user() # validate the phone number with twilio if opts.state != 9: valid_code = twilio.validate_phone(newuser) if valid_code == False: logging.error("Unable to get a validation code for new phone number") return newuser.print_user(opts.mobile) + "\nUnable to get a validation code. Please verify new phone number through Twilio website" elif valid_code == True: return newuser.print_user(opts.mobile) + "\nPhone has already been verified with Twilio" else: return newuser.print_user(opts.mobile) + "\nValidation Code: %s" % (valid_code) else: return newuser.print_user(opts.mobile) except Exception, e: logging.error("Failed to create new user: %s" % (e)) return "Failed to create user: %s" % (e.__str__()) def user_list(opts): ''' List users. Filter with options. 
''' all_users = User.all_users() users = [] # init these variables with value True (id, name, phone, email, team, state) = [True] * 6 # filter users with options given for u in all_users: if opts.id != 0 and u.id != opts.id: id = False if opts.name != '' and u.name != opts.name: name = False if opts.phone != '' and u.phone != opts.phone: phone = False if opts.email != '' and u.email != opts.email: email = False if opts.team != '' and u.team != opts.team: team = False if opts.state != 100 and u.state != opts.state: state = False # see if all values given match attributes for user object if id == True and name == True and phone == True and email == True and team == True and state == True: users.append(u) if len(users) == 0: return "No users." if opts.delete == True: output = "Deleting users...\n" else: output = '' for u in users: output=output + "%s" % (u.print_user(opts.mobile)) if opts.delete == True: u.delete_user() return output def user_edit(opts): ''' Making changes to a user account with options inputted. ''' if opts.id == '' or opts.id == 0: return "User id is not set (-i)" user = User.User(opts.id) if opts.name != '': user.name = opts.name if opts.phone != '': user.phone = opts.phone if opts.email != '': user.email = opts.email if opts.team != '': user.team = opts.team if opts.state != '' and opts.state != 100: user.state = opts.state user.save_user() return user.print_user(opts.mobile) def alert(): ''' This function handles the rest of the command as it pertains to an alert(s). 
''' # Parse the command line parser = OptionParser() parser.add_option('-i', '--id', dest='id', help='Alert id', type='int', default=0) parser.add_option('-t', '--team', dest='team', help='The team you want to send the message to', type='string', default='default') parser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='') parser.add_option('-a', '--ack', dest='ack', help='Ack the results of alert list query', action="store_true", default=False) parser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action="store_true", default=False) (opts, args) = parser.parse_args() user_usage=''' oncall.py alert status -t <team> -a oncall.py alert ack -i <id> -f <phone number> ''' if (len(sys.argv) > 2) and sys.argv[2] in ['status', 'ack']: mode = sys.argv[2] #if mode == "create": o = alert_create(opts) if mode == "status": o = alert_status(opts) #if mode == "acked": o = alert_acked(opts) #if mode == "all": o = alert_all(opts) if mode == "ack": o = alert_ack(opts) return o else: return user_usage def alert_create(opts): ''' Creating a new alert. Currently not in use. ''' try: if opts.subject == '': return "Subject is not set (-s)" if opts.message == '': return "Message is not set (-m)" if opts.team == '': opts.team = "default" newalert = Alert.Alert() newalert.subject = opts.subject newalert.message = opts.message if opts.team != '': newalert.team = opts.team newalert.save_alert() return newalert.print_alert(opts.mobile) except Exception, e: return "Failed to create alert: %s" % (e.__str__()) def alert_status(opts): ''' Printing out alerts that haven't been acked. If -a is given, will ack them. ''' user = None alerts = Alert.status() if len(alerts) == 0: return "No active alerts." 
if opts.ack == True: if opts._from == '': return "Must use option -f to ack alerts" else: user = User.get_user_by_phone(opts._from) output = "Acking alerts as %s...\n" % (u.name) else: output = '' for a in alerts: output=output + "%s" % (a.print_alert(opts.mobile)) if user != None: a.ack_alert(user) return output def alert_acked(opts): ''' Printing out alerts acked. Currently not in use. ''' alerts = Alert.acked() if len(alerts) == 0: return "No acked alerts." output = '' for a in alerts: output=output + "%s" % (a.print_alert(opts.mobile)) return output def alert_all(opts): ''' Printing out all alerts. Currently not in use. ''' alerts = Alert.all_alerts() if len(alerts) == 0: return "No alerts." output = '' for a in alerts: output=output + "%s" % (a.print_alert(opts.mobile)) return output def alert_ack(opts): ''' Acking a specific alert. Assumes the last alert to be sent to user if not given. ''' user = None if opts._from == '': return "Must use option -f to go on/off call" user = User.get_user_by_phone(opts._from) if user == False: return "No user ends with that phone number (-f)" output = "Acking alerts as %s...\n" % (user.name) if opts.id > 0: alert = Alert.Alert(opts.id) alert.ack_alert(user) return "Acknowledged" if user.lastAlert > 0: alert = Alert.Alert(user.lastAlert) alert.ack_alert(user) return "Acknowledged" else: return "No alert associated with your user" def oncall(): # Parse the command line parser = OptionParser() parser.add_option('-s', '--state', dest='state', help='On call stage (1 = primary, 2= secondary, etc)', type='int', default=1) parser.add_option('-t', '--team', dest='team', help='A team name', type='string', default='default') parser.add_option('-f', '--from', dest='_from', help='The phone number of the person using oncall (for sms identication purposes)', type='string', default='') parser.add_option('-m', '--mobile', dest='mobile', help='Flag as mobile device, format output for it.', action="store_true", default=False) (opts, args) = 
parser.parse_args() user_usage=''' oncall.py oncall on -s <state> -f <phone> oncall.py oncall off -f <phone> oncall.py oncall status -t <team> ''' if (len(sys.argv) > 2) and sys.argv[2] in ['on', 'off', 'status']: mode = sys.argv[2] if mode == "off": opts.state = 0 if mode == "on" or mode == "off": o = oncall_change(opts) if mode == "status": o = oncall_status(opts) return o else: return user_usage def oncall_change(opts): ''' Change your own oncall status ''' user = None if opts._from == '': return "Must use option -f to go on/off call" user = User.get_user_by_phone(opts._from) if user == False: return "No user ends with that phone number (-f)" user.print_user(opts.mobile) user.state = opts.state user.save_user() if user.state > 0: return "You, %s, are now on call" % user.name else: return "You, %s, are now off call" % user.name def oncall_status(opts): ''' Get a list of people oncall for a specific team ''' users = User.on_call(opts.team) oncall_users = [] for u in users: if u.state > 0 and u.state < 9: oncall_users.append(u) if len(oncall_users) == 0: return "No one is on call on the %s team." 
% (opts.team) output = '' for user in oncall_users: output=output + "%s" % (user.print_user(opts.mobile)) return output def run(args): ''' This gets run from oncall-server to execute the Oncall CLI ''' # convert argsuments into input params sys.argv = args.split() # gotta pad the arguments because usually sys.argv[0] is the python file name sys.argv.insert(0, 'spacer') return main() def main(): usage = ''' oncall.py user create (options) oncall.py user list (options) oncall.py user edit -i <id> (options) oncall.py alert status -t <team> -a oncall.py alert ack -i <id> -f <phone number> oncall.py oncall on -s <state> -f <phone> oncall.py oncall off -f <phone> oncall.py oncall status -t <team> ''' # converting all parameters to be lowercase to remove any case sensitivity sys.argv = map(lambda x:x.lower(),sys.argv) if (len(sys.argv) > 1) and sys.argv[1] in ['user', 'users', 'status', 'alert', 'alerts', 'ack', 'rotation', 'oncall']: mode = sys.argv[1] if mode == "user" or mode == 'users': o = user() if mode == "alert" or mode == 'alerts': o = alert() if mode == "status": sys.argv.insert(1, "alert") o = alert() if mode == "ack": sys.argv.insert(1, "alert") o = alert() if mode == "oncall": o = oncall() #if mode == "rotation": o = rotation() logging.info("Oncall.py output: %s" % o) return o else: return usage if __name__ == "__main__": print main()
UTF-8
Python
false
false
2,012
14,955,076,149,244
905e6e08c5a85084fc83c01090a3a230529c3cd8
10ba33b74911be372eff006f3f63c2f42b3f97ec
/calc_ambiguity.py
4a42f36945f2837f1fe60a12a72618a26bc225ec
[ "MIT" ]
permissive
HenryMarshall/softKey
https://github.com/HenryMarshall/softKey
36f5e29e2cf040c8cdb9d680fdca396e32fa0bef
7adae326acdef794ed0f31a7538f437651aa912f
refs/heads/master
2021-05-28T05:17:42.770610
2014-05-05T10:37:16
2014-05-05T10:37:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import argparse
import pickle

import calc_vectors
import calc_scores

# establish corpus from cli
parser = argparse.ArgumentParser(
    description="""Calculate the ambiguity of word pairings. Accepts words,
    and files prefixed with '@' (e.g., @corpus.txt) with \\n delimited
    words.""",
    fromfile_prefix_chars='@')
parser.add_argument('words', metavar='N', type=str, nargs='+',
                    help='enter a series of words')
corpus = parser.parse_args()

# establish layouts
# with open('character_positions.pickle', 'rb') as handle:
with open('layouts/random_layouts_computer.pickle', 'rb') as handle:
    layouts = pickle.load(handle)

# Ambiguity thresholds 0.00 .. 1.00 in steps of 0.01, used both for the
# CSV header row and for looking up per-threshold counts below.
THRESHOLD_STEPS = range(101)

# calculate the vector path for each word on each layout
paths = {}
for layout_name, layout in layouts.iteritems():
    paths[layout_name] = {}
    for word in corpus.words:
        paths[layout_name][word] = calc_vectors.calc_word_vector(word, layout)

# Calculate and save the ambiguity of the corpus on each layout.  The CSV
# is managed by a context manager so the handle is flushed and closed even
# if scoring raises (the original open()/close() pair leaked on error).
with open("results/layout_results.csv", "w") as ghetto_csv:
    # create label row
    label_lst = ["layout"] + [str(i / 100.0) for i in THRESHOLD_STEPS]
    ghetto_csv.write(",".join(label_lst))
    ghetto_csv.write("\n")

    for layout_name, layout_paths in paths.iteritems():
        ambiguity_results = calc_scores.calc_layout_results(layout_paths)
        results_lst = [str(layout_name)]
        for i in THRESHOLD_STEPS:
            results_lst.append(str(ambiguity_results[i / 100.0]))
        ghetto_csv.write(",".join(results_lst))
        ghetto_csv.write("\n")
UTF-8
Python
false
false
2,014
13,735,305,422,269
be4e6f42388a18abbbb0cac02d4bfd697fc3800f
0b650d4db5064e27cad3993b617499c55d0a0b8a
/webpages_fetcher.py
6d1bc5d91dcee70088a822564277a0b763752459
[]
no_license
ultimatecoder/webpage_downloader
https://github.com/ultimatecoder/webpage_downloader
195b13b4244056ae4df0cd9f8120aeb39647b05c
02ae3d833f6d431a6c92020a73dddf609ee622bf
refs/heads/master
2016-09-07T10:54:48.285983
2014-10-16T09:12:36
2014-10-16T09:12:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import urllib2
from sys import exit, argv
from os import path


class Fetcher:
    """Downloads every URL listed in a file into numbered .html files."""

    def _geturls(self, file):
        # One URL per whitespace-separated token in the input file.
        return file.read().split()

    def __init__(self, ifpath, ofpath):
        """ifpath: path of the URL list file; ofpath: output directory."""
        # Fix: read the URL file inside a context manager; the original
        # opened it and never closed the handle.  The shared mutable class
        # attributes (_urls = [], _output = '') were also dropped in favour
        # of plain instance attributes.
        with open(ifpath, 'r') as f:
            self._urls = self._geturls(f)
        self._output = ofpath

    def _getwebpage(self, url):
        response = urllib2.urlopen(url)
        print("fetching : {}".format(url))
        return response.read()

    def _writewebpage(self, webpage, fname):
        # path.join instead of string concatenation builds the output path
        # portably; "with" guarantees the file is flushed and closed.
        with open(path.join(self._output, fname), 'w') as f:
            f.write(webpage)

    def start(self):
        """Fetch every URL, saving the i-th page as <i>.html."""
        for i, url in enumerate(self._urls):
            self._writewebpage(self._getwebpage(url), str(i) + '.html')


def main():
    usage = 'usage: [--urlFile] url.txt webpages-dir-path'
    args = argv[1:]
    if not args:
        print(usage)
        exit(1)
    if args[0] != '--urlFile':
        print(usage)
        return
    try:
        urls = args[1]
        webpages = args[2]
    except IndexError:
        print("ERROR: Please provide enough arguments")
        print(usage)
        exit(1)
    if path.exists(urls) and path.isdir(webpages):
        fetcher = Fetcher(urls, webpages)
        fetcher.start()
        print("Fetcher Task completed !")
    else:
        # Fix: "out put" typo in the user-facing error message.
        print("ERROR : In url file path or output webpages dir.")
        exit(1)


if __name__ == '__main__':
    main()
UTF-8
Python
false
false
2,014
11,854,109,783,179
00824038ba9b7664e1bd26e4b0d576535e4f4ce0
35277c644d4a2189aff624b7813107a35c234a93
/rethinkORM/rethinkCollection.py
a5927b62eda05ab67638ad0030818eda2243a943
[ "GPL-3.0-only" ]
non_permissive
JoshAshby/pyRethinkORM
https://github.com/JoshAshby/pyRethinkORM
f29393c649b013ed7f2f8af3336d8100d5b36c06
92158d146dea6cfe9022d7de2537403f5f2c1e02
refs/heads/master
2022-10-20T18:22:36.339599
2014-05-06T16:15:43
2014-05-06T16:15:43
11,209,106
8
3
GPL-3.0
false
2023-06-28T12:15:32
2013-07-05T21:50:23
2023-05-03T20:37:12
2022-09-23T21:08:21
158
17
9
4
Python
false
false
#!/usr/bin/env python
"""
Quick way to get groupings of RethinkModels objects matching the given
criteria
"""
import rethinkdb as r


class RethinkCollection(object):
    """
    A way to fetch groupings of documents that meet a criteria and have them
    in an iterable storage object, with each document represented by
    `RethinkModel` objects
    """
    # Class-level names kept for backwards compatibility with any external
    # readers; all mutable state is (re)created per instance in __init__ so
    # collections never share lists/dicts through the class object.
    documents = []
    table = ""
    _model = None
    _query = None
    _filter = {}
    _join = None
    _joinedField = None

    def __init__(self, model, filter=None):
        """
        Instantiates a new collection, using the given models table, and
        wrapping all documents with the given model.

        Filter can be a dictionary or lambda, similar to the filters for the
        RethinkDB drivers filters.
        """
        # Bug fix: _documents was previously assigned only inside fetch(),
        # so iterating a collection before calling fetch() raised
        # AttributeError.  Initialise all per-instance state here.
        self._documents = []
        self._filter = {}
        self._join = None
        self._joinedField = None
        self._model = model
        self._query = r.table(self._model.table)
        if filter:
            self._filter = filter
            self._query = self._query.filter(self._filter)

    def joinOn(self, model, onIndex):
        """
        Performs an eqJoin on with the given model. The resulting join will
        be accessible through the models name.
        """
        return self._joinOnAsPriv(model, onIndex, model.__name__)

    def joinOnAs(self, model, onIndex, whatAs):
        """
        Like `joinOn` but allows setting the joined results name to access
        it from.

        Performs an eqJoin on with the given model. The resulting join will
        be accessible through the given name.
        """
        return self._joinOnAsPriv(model, onIndex, whatAs)

    def _joinOnAsPriv(self, model, onIndex, whatAs):
        """
        Private method for handling joins.
        """
        if self._join:
            raise Exception("Already joined with a table!")
        self._join = model
        self._joinedField = whatAs
        table = model.table
        self._query = self._query.eq_join(onIndex, r.table(table))
        return self

    def orderBy(self, field, direct="desc"):
        """
        Allows for the results to be ordered by a specific field. If given,
        direction can be set with passing an additional argument in the
        form of "asc" or "desc"
        """
        if direct == "desc":
            self._query = self._query.order_by(r.desc(field))
        else:
            self._query = self._query.order_by(r.asc(field))
        return self

    def __iter__(self):
        # Safe even before fetch(): _documents starts as an empty list.
        for doc in self._documents:
            yield doc

    def offset(self, value):
        """
        Allows for skipping a specified number of results in query. Useful
        for pagination.
        """
        self._query = self._query.skip(value)
        return self

    def limit(self, value):
        """
        Allows for limiting number of results returned for query. Useful
        for pagination.
        """
        self._query = self._query.limit(value)
        return self

    def fetch(self):
        """
        Fetches the query and then tries to wrap the data in the model,
        joining as needed, if applicable.
        """
        returnResults = []
        results = self._query.run()
        for result in results:
            if self._join:
                # Because we can tell the models to ignore certian fields,
                # through the protectedItems blacklist, we can nest models by
                # name and have each one act normal and not accidentally
                # store extra data from other models
                item = self._model.fromRawEntry(**result["left"])
                joined = self._join.fromRawEntry(**result["right"])
                item.protectedItems = self._joinedField
                item[self._joinedField] = joined
            else:
                item = self._model.fromRawEntry(**result)
            returnResults.append(item)
        self._documents = returnResults
        return self._documents
UTF-8
Python
false
false
2,014
3,444,563,819,594
626fc0af8b8cc2d192bebf56a4662066cfb4d399
20a872331e80f6ad11752fa2d9d63864c2812b10
/test/base.py
c9a0dd429ed3dec77bd950859a24dba02b944903
[ "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference", "AGPL-3.0-or-later" ]
non_permissive
smurfix/pybble
https://github.com/smurfix/pybble
1ee535c74ae73605bc725a1ae1a41ef83190d000
305ba81d4600abb4d575b39926abc76992696c17
refs/heads/master
2021-01-22T23:53:05.310683
2014-07-23T16:41:10
2014-07-23T16:41:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP

# Shared unittest base classes for Pybble: TC sets up a Flask app with a
# request context and a test site; WebTC additionally installs
# wsgi_intercept hooks so HTTP clients talk to the in-process app.

import sys,os
sys.path.insert(0,os.pardir)

from pybble import ROOT_SITE_NAME
import unittest
import datetime
import flask
from wsgi_intercept import WSGI_HTTPConnection,WSGI_HTTPSConnection
from pybble.manager.main import SubdomainDispatcher
from pybble.core.db import init_db
from pybble.core import config as pybble_config

# The WSGI app served to intercepted connections; assigned by
# WebTC.setupRest() before any interceptor is installed.
main_app = None

class Fake_HTTPConnection(WSGI_HTTPConnection):
    # Route intercepted HTTP requests to the module-global test app.
    def get_app(self, host, port):
        return main_app,""

class Fake_HTTPSConnection(WSGI_HTTPSConnection):
    # Route intercepted HTTPS requests to the module-global test app.
    def get_app(self, host, port):
        return main_app,""

# Each wsgi_intercept backend is optional: probe for it, remember
# whether it is missing in a skip_* flag, and point its interceptor
# mixin at our fake connection classes.
try:
    from wsgi_intercept import http_client_intercept
except ImportError:
    skip_httpclient = True
else:
    skip_httpclient = False
    http_client_intercept.HTTPInterceptorMixin = Fake_HTTPConnection
    http_client_intercept.HTTPSInterceptorMixin = Fake_HTTPSConnection

try:
    from wsgi_intercept import httplib2_intercept
except ImportError:
    skip_httplib2 = True
else:
    skip_httplib2 = False
    httplib2_intercept.InterceptorMixin = Fake_HTTPConnection

try:
    from wsgi_intercept import requests_intercept
except ImportError:
    skip_requests = True
else:
    skip_requests = False
    requests_intercept.InterceptorMixin = Fake_HTTPConnection

try:
    from wsgi_intercept import urllib_intercept
except ImportError:
    skip_urllib = True
else:
    skip_urllib = False
    urllib_intercept.HTTPInterceptorMixin = Fake_HTTPConnection
    urllib_intercept.HTTPSInterceptorMixin = Fake_HTTPSConnection

from pybble.core.db import db
from pybble.core.models.site import Site,Blueprint
from pybble.core.models.config import ConfigVar,SiteConfigVar

# Procs already executed through TC.once(); shared for the whole process.
did_once=set()

class TC(unittest.TestCase):
    # Base test case: builds a Flask app configured from this class plus
    # the "TEST" config object, initializes the DB, and pushes a request
    # context for the duration of each test.
    TESTING = True  # picked up by app.config.from_object(self)
    app_class = flask.Flask
    # Name of the site to test against; None selects the root site.
    testsite=None

    def once(self,proc):
        # Run `proc` at most once per process; repeated calls are no-ops.
        if proc in did_once:
            return
        did_once.add(proc)
        return proc()

    def clear_db(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def setUp(self):
        super(TC,self).setUp()
        app = self.app_class(__name__)
        app.config = pybble_config
        app.config.from_object(self)
        app.config.from_object("TEST")
        init_db(app)
        self.app = app
        self.ctx = app.test_request_context()
        self.ctx.push()
        self.cleanData()
        if self.testsite:
            # Look up the named test site, creating it on first use.
            try:
                s = Site.q.get_by(name=self.testsite)
            except NoData:
                # NOTE(review): NoData is not imported in this module —
                # confirm where it is expected to come from.
                s = Site.new(name=self.testsite, domain=self.testsite)
                db.session.flush()
            flask.current_app.site = s
        else:
            flask.current_app.site = Site.q.get_by(name=ROOT_SITE_NAME)
        self.setupData()
        self.setupRest()

    def cleanData(self):
        # Hook: runs before the site is selected; override to wipe state.
        pass

    def setupData(self):
        # Hook: runs after the site is selected; override to seed data.
        pass

    def setupRest(self):
        # Hook: runs last in setUp; WebTC uses it to install interceptors.
        pass

    def tearDown(self):
        self.ctx.pop()
        super(TC,self).tearDown()

class WebTC(TC):
    # Test case that serves the Pybble app through wsgi_intercept so
    # ordinary HTTP client libraries hit the in-process application.
    def setupRest(self):
        from pybble.app import make_cfg_app
        super(WebTC,self).setupRest()
        global main_app
        app = make_cfg_app()
        main_app = SubdomainDispatcher(app)
        # Install only the interceptors whose backend imported cleanly.
        if not skip_httpclient:
            http_client_intercept.install()
        if not skip_httplib2:
            httplib2_intercept.install()
        if not skip_requests:
            requests_intercept.install()
        if not skip_urllib:
            urllib_intercept.install_opener()

    def tearDown(self):
        # NOTE(review): this does not call super().tearDown(), so TC's
        # context pop never runs for WebTC — confirm whether intentional.
        if not skip_httpclient:
            http_client_intercept.uninstall()
        if not skip_httplib2:
            httplib2_intercept.uninstall()
        if not skip_requests:
            requests_intercept.uninstall()
        if not skip_urllib:
            urllib_intercept.uninstall_opener()
UTF-8
Python
false
false
2,014
13,572,096,660,684
57e55ff8917ca9c673bfe88ba1234e11e54404b8
05f05d576d5aa29190cf83f0b4b9a1eea2fd555a
/src/pyPA/__init__.py
26e38f5fb2fa3e6951d463b1d27a58bcd58635c7
[]
no_license
barronh/pypa
https://github.com/barronh/pypa
476caab2704f545cd578894f3e2f4534952fc2c1
5e10eecd5bfed19f95dd92f1f1667fd1ae45d3eb
refs/heads/master
2016-09-05T15:08:30.103421
2014-02-19T04:07:23
2014-02-19T04:07:23
32,350,405
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
__doc__ = r"""
.. _pyPA

:mod:`pyPA` -- Python-based Process Analysis
============================================

.. module:: pyPA
   :platform: Unix, Windows
   :synopsis: Provides tools for analyzing Air Quality Model Process
              Analysis data
.. moduleauthor:: Barron Henderson <[email protected]>
"""

# FIX: `cmaq` is imported below but was missing from __all__, so it was
# silently excluded from `from pyPA import *`; list it with the others.
__all__ = ['utils', 'pappt', 'cmaq', 'test']

if __name__ != '__main__':
    # NOTE(review): these are Python-2 implicit relative imports; under
    # Python 3 they would need to be `from . import utils`, etc.
    import utils
    import pappt
    import cmaq
    from test import run as test
else:
    # Executed as a script: run the package's main entry point.
    from pyPA.main import run
    run()
UTF-8
Python
false
false
2,014
16,707,422,817,896
9da29893e98c43df5036050f97db105176341aea
ee232ec9f522eb06996ece7417712bfaa8ee2c2a
/setup.py
b4edec4c7ee5336e52b634163a5c9ec859a89ce7
[]
no_license
thet/elevate.cynin.fixes
https://github.com/thet/elevate.cynin.fixes
d048097111f8ba25fa7495a6a854231051c67e48
18fedff42227e7f943c666fbba1872a805a48109
refs/heads/master
2020-12-24T16:49:40.741030
2012-07-06T14:36:49
2012-07-06T14:36:49
1,587,032
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from setuptools import setup, find_packages
import os

# Release version of elevate.cynin.fixes.
version = '1.0'

# Long description shown on PyPI: the README followed by the change
# history kept under docs/HISTORY.txt.
readme_text = open("README.txt").read()
history_text = open(os.path.join("docs", "HISTORY.txt")).read()

setup(
    name='elevate.cynin.fixes',
    version=version,
    description="Cynin fixes for elevate",
    long_description=readme_text + "\n" + history_text,
    # Get more strings from
    # http://pypi.python.org/pypi?:action=list_classifiers
    classifiers=[
        "Framework :: Plone",
        "Programming Language :: Python",
    ],
    keywords='cynin, plone',
    author='Johannes Raggam',
    author_email='[email protected]',
    url='https://github.com/thet/elevate.cynin.fixes',
    license='GPL',
    packages=find_packages(exclude=['ez_setup']),
    namespace_packages=['elevate', 'elevate.cynin'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'setuptools',
        # -*- Extra requirements: -*-
        'collective.monkeypatcher',
        'collective.autolinks',
        'Products.CacheSetup',
        'plone.contentratings',
    ],
)
UTF-8
Python
false
false
2,012
18,408,229,841,258
4e3321d90b6ba7a29b2977d2f847fad8f643e485
479a2fdc36191b6f5514d24b1a19307c7a7157b8
/hitime/md_io.py
0e8be5904955af956a9c00423bd8325aa9f93f1d
[ "BSD-3-Clause" ]
permissive
bjpop/HiTIME
https://github.com/bjpop/HiTIME
eaf73e81f9432bb746938edb44e4b431cac5edb1
c0671709bde7167305dd7ce2d440c1e370ff54ac
refs/heads/master
2021-01-17T13:56:01.072200
2014-10-06T05:02:42
2014-10-06T05:02:42
24,574,268
2
3
null
false
2015-02-20T03:39:19
2014-09-29T00:29:12
2014-09-29T00:53:31
2014-10-06T05:02:52
140
0
1
1
Python
null
null
#!/bin/env python
# Reads mass-spectrometry data (mzData XML or mzML) into Spectrum
# objects and writes scored results out as CSV-ish text.
# NOTE: this module is Python 2 code (`print >> stream`, integer `/`).
from lxml import etree
import sys
import resource
import base64
import struct
import numpy as np
from itertools import *
import math
import csv
import logging
import os
import os.path
import pymzml
from collections import deque
import resource

# add dir of this (and following) file to path
sys.path.append(os.path.realpath(__file__))
import md_filter

# helper funtion for memory profiling
def memory_usage_resource():
    import resource
    rusage_denom = 1024
    if sys.platform == 'darwin':
        # ... it seems that in OSX the output is different units ...
        rusage_denom = rusage_denom * rusage_denom
    mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / float(rusage_denom)
    return mem

# Struct format characters keyed by the attributes found in mzData files.
endianMap = { 'little': '<', 'big': '>' }
precisionMap = { '64' : 'd', '32': 'f' }

# convert the base64 encoded data into an array of floating point values
# given the endianness and precision of the raw data
def interpretBinary(data, endian, precision):
    precisionChar = precisionMap[precision]
    endianChar = endianMap[endian]
    decoded = base64.b64decode(data)
    # NOTE(review): Python-2 integer division; under Python 3 this `/`
    # would produce a float and break struct.unpack.
    count = len(decoded) / struct.calcsize(endianChar + precisionChar)
    return struct.unpack(endianChar + precisionChar * count, decoded[0:len(decoded)])

# find the binary data, endianness and precision of a single spectrum element
def getMZDATASpectrum(spectrum, tag):
    # Returns the decoded tuple of values, or None implicitly when no
    # matching <data> child is found.
    for child in spectrum:
        if child.tag == tag:
            for binaryChild in child:
                if binaryChild.tag == 'data':
                    endian = binaryChild.get('endian')
                    precision = binaryChild.get('precision')
                    binary = binaryChild.text
                    return interpretBinary(binary, endian, precision)

# find the timestamp of a spectrum, returns None if no timestamp was found.
def getSpectrumTime(spectrum):
    desc = spectrum.find('spectrumDesc')
    settings = desc.find('spectrumSettings')
    instrument = settings.find('spectrumInstrument')
    for param in instrument.iter('cvParam'):
        if param.get('name') == 'TimeInMinutes':
            return param.get('value')
    return None

# an encapsulation of a single Spectrum element containing its
# identity, decoded mz data and decoded intensity data
class Spectrum(object):
    def __init__(self, id, time, mzs, intensities):
        self.time = time
        self.mzs = mzs
        self.intensities = intensities
        self.id = int(id)

def parseMZDATA(options):
    # Parse an entire mzData file into a list of Spectrum objects.
    filename = options.inputFile
    result = []
    # parse the XML document
    tree = etree.parse(filename)
    # get the root element
    root = tree.getroot()
    # iterate over the spectrum elements
    for spectrum in root.iter('spectrum'):
        # get the mz data for the spectrum
        mzData = getMZDATASpectrum(spectrum, 'mzArrayBinary')
        # get the intensity data for the spectrum
        intData = getMZDATASpectrum(spectrum, 'intenArrayBinary')
        time = getSpectrumTime(spectrum)
        result.append(Spectrum(spectrum.get('id'), time, mzData, intData))
    return result

def writeResults(stream, spectrum, scores=None):
    # Write one line per (mz, intensity[, scores]) point; when scores
    # are given, only points whose first score is positive are emitted.
    if scores is not None:
        rt = spectrum.time
        for mz, amp, val in zip(spectrum.mzs, spectrum.intensities, scores):
            # if val > 0.0:
            #     print >> stream, '{}, {}, {}, {}'.format(rt, mz, amp, val)
            if val[0] > 0.0:
                print >> stream, '{}, {}, {}, {}'.format(rt, mz, amp, ', '.join([str(v) for v in val]))
    else:
        rt = spectrum.time
        for mz, amp in zip(spectrum.mzs, spectrum.intensities):
            print >> stream, '{}, {}, {}'.format(rt, mz, amp)

def MZMLtoSpectrum(options):
    # Generator over an mzML file, yielding one Spectrum per scan.
    # Tracks a running mean of inter-scan time deltas so scans without a
    # timestamp (KeyError on MS:1000016) get an estimated time.
    filename = options.inputFile
    delta_time = 0
    time_prev = 0
    points = 0
    mean = 0
    time = 0
    msrun = pymzml.run.Reader(filename)
    for n,spectrum in enumerate(msrun):
        mzData = np.array(spectrum.mz, dtype="float32")
        intData = np.array(spectrum.i, dtype="uint64")
        points += len(intData)
        mean += sum(intData)
        try:
            time = spectrum['MS:1000016']
            delta_time += (time - time_prev - delta_time)/(n+1) # incremental update to mean delta_time
            time_prev = time
        except KeyError:
            # No timestamp on this scan: extrapolate from the running
            # mean delta, or step by 1.0 if no delta is known yet.
            time_prev = time
            if delta_time > 0:
                time += delta_time
            else:
                time += 1.0
        yield Spectrum(n, time, mzData, intData)
    if points > 0:
        mean /= float(points)
    else:
        exit("Zero spectra read from mz data file, did you specify the wrong input format?")
    logging.info('mzdata input file parsed, {0} ({1}) spectra (data points) read in'.format(n+1, points))
    logging.info('time delta: %g, mean signal: %g' % (delta_time, mean))

def nextWindow(reader, options, half_window):
    ''' Use iterators to serve up data when needed

    Yields sliding windows of 2*half_window+1 spectra, padded with
    zero-spectra at both ends so every real spectrum appears at the
    window centre once.
    '''
    pad_front = repeat(Spectrum(0, 0.0, [0.0], [0.0]), half_window + 1) # extra one at start that gets ignored
    pad_back = repeat(Spectrum(0, 0.0, [0.0], [0.0]), half_window)
    items = chain(pad_front, reader(options), pad_back)
    # 1st window
    data = list(islice(items, 0, 2 * half_window + 1 ))
    data_deque = deque(data)
    # rest -- the extra front pad is popped on the first iteration, so
    # the first yielded window is already the true first window.
    for i, scan in enumerate(items):
        data_deque.popleft()
        data_deque.append(scan)
        yield list(data_deque)
UTF-8
Python
false
false
2,014
13,615,046,367,660
ce7e5c49d3211e620982b309b3c0c110971ea428
df74cad3e64e7f86cc1b13211f9a46551a2c8599
/object_proxy/_lambda_relations.py
9bf02e8eab6aec6b46016296f8ca7b25e9553728
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
non_permissive
Montegasppa/ObjectProxy
https://github.com/Montegasppa/ObjectProxy
2e5c73661ad39d6f1ff30399b7a4d0ff886393f6
e4cd247dc16b3533ea0d77767000d8c35953e9f6
refs/heads/master
2016-09-06T16:10:06.088428
2013-12-16T21:49:47
2013-12-16T21:49:47
15,196,196
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: UTF-8 from __future__ import absolute_import, division, print_function, unicode_literals # @copyright ©2013, Rodrigo Cacilhας <[email protected]> from contextlib import wraps __all__ = ['method_map'] @apply def method_map(): def catch(wrapped, default=None): @wraps(wrapped) def wrapper(*args, **kwargs): try: return wrapped(*args, **kwargs) except: return default return wrapper return ( ('__abs__', abs), ('__add__', lambda target, o: target + o), ('__and__', lambda target, o: target & o), ('__bool__', catch(bool, False)), ('__call__', lambda target, *args, **kwargs: target(*args, **kwargs)), ('__cmp__', cmp), ('__coerce__', coerce), ('__contains__', lambda target, item: item in target), ('__delattr__', delattr), ('__dir__', catch(dir, [])), ('__divmod__', divmod), ('__float__', float), ('__floordiv__', lambda target, o: target // o), ('__eq__', lambda target, o: target == o), ('__ge__', lambda target, o: target >= o), ('__getattr__', getattr), ('__getitem__', lambda target, key: target[key]), ('__getslice__', lambda target, i, j: target[i:j]), ('__gt__', lambda target, o: target > o), ('__hash__', hash), ('__hex__', hex), ('__instancecheck__', lambda target, instance: isinstance(instance, target)), ('__int__', int), ('__invert__', lambda target: ~(target)), ('__iter__', iter), ('__le__', lambda target, o: target <= o), ('__len__', len), ('__long__', long), ('__lshift__', lambda target, o: target << o), ('__lt__', lambda target, o: target < o), ('__mod__', lambda target, o: target % o), ('__mul__', lambda target, o: target * o), ('__ne__', lambda target, o: target != o), ('__neg__', lambda target: -(target)), ('__oct__', oct), ('__or__', lambda target, o: target | o), ('__pos__', lambda target: +(target)), ('__pow__', lambda target, o: target ** o), ('__radd__', lambda target, o: o + target), ('__rand__', lambda target, o: o & target), ('__rcmp__', lambda target, o: cmp(o, target)), ('__rdiv__', lambda target, o: o.__div__(target)), ('__reversed__', 
reversed), ('__rfloordiv__', lambda target, o: o // target), ('__rlshift__', lambda target, o: o << target), ('__rmod__', lambda target, o: o % target), ('__rmul__', lambda target, o: o * target), ('__ror__', lambda target, o: o | target), ('__rpow__', lambda target, o: o ** target), ('__rrshift__', lambda target, o: o >> target), ('__rshift__', lambda target, o: target >> o), ('__rsub__', lambda target, o: o - target), ('__rtruediv__', lambda target, o: o / target), ('__rxor__', lambda target, o: o ^ target), ('__str__', bytes), ('__sub__', lambda target, o: target - o), ('__truediv__', lambda target, o: target / o), ('__unicode__', unicode), ('__xor__', lambda target, o: target ^ o), )
UTF-8
Python
false
false
2,013
13,735,305,438,882
87d654564f346ecf8e02f724db4034b5ef30edd5
3060a1174aeddec4147e22a21f828b65bcb16854
/main.py
dc958042538f368fde68907d320d6b0f1493928e
[]
no_license
chinnsane/wedding
https://github.com/chinnsane/wedding
818ef4b20d3ab2ff856b3c19548448212d2619fa
03834a5810f9cf247b43fe2b69188cfc7da1e679
refs/heads/master
2016-09-07T00:41:00.221764
2014-09-23T04:41:36
2014-09-23T04:41:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import jinja2, os, webapp2, json
from random import shuffle

from models.attendee import Attendee
from models.question import Question, QuestionOption
from models.rsvp import RSVP

JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])

# URL paths for the application's routes.
ROOT_PATH = '/'
RSVP_RESPONSE_PATH = '/rsv-response'
DATA_VIWER_PATH = '/data-viewer'
QUIZ_PATH = '/quiz'
QUIZ_RESULTS_PATH = '/'.join([QUIZ_PATH, 'results'])
QUIZ_FORM_PATH = '/'.join([QUIZ_PATH, 'form'])
QUIZ_SUBMIT_PATH = '/'.join([QUIZ_PATH, 'form', 'submit'])


class MainPage(webapp2.RequestHandler):
    """Landing page with the RSVP form."""

    def get(self):
        template_values = {
            'rsvp_form_action': RSVP_RESPONSE_PATH
        }
        util = Util(self.response)
        return util.render_template('index.html', template_values)


class RsvpResponse(webapp2.RequestHandler):
    """Validates and stores a posted RSVP; responds with JSON."""

    def post(self):
        first_name = self.request.get('firstName')
        last_name = self.request.get('lastName')
        email = self.request.get('email')
        reply = self.request.get('reply')
        data = {
            'status': "error",
            'message': ""
        }
        util = Util(self.response)
        # See if someone's already replied
        if RSVP().get_by_email(email)[0]:
            data['message'] = "E-mail address already used."
            return util.send_json_response(data, 400)
        # Check for empty field
        if not first_name or not last_name or not email or not reply:
            data['message'] = "Missing required fields"
            return util.send_json_response(data, 400)
        rsvp = RSVP(first_name=first_name, last_name=last_name, email=email)
        if int(reply) == 0:
            rsvp.reply = False
        else:
            num_adults = int(self.request.get('numAdults'))
            num_children = int(self.request.get('numChildren', 0))
            # Check if some how they didn't tell me how many people are coming
            if num_adults == 0:
                data['message'] = "Missing the number of attending guests"
                return util.send_json_response(data, 400)
            total_attendees = num_adults + num_children
            attendees = []
            # Attendee form fields are 1-indexed: name_1/meal_1, name_2/...
            for i in range(1, total_attendees + 1):
                i = str(i)
                # Some how you forgot your name, the meal you wanted, or both
                if not self.request.get("name_" + i) or self.request.get("meal_" + i) == "none":
                    data['message'] = "Missing a name, type of meal, or both"
                    return util.send_json_response(data, 400)
                attendees.append(Attendee(name=self.request.get("name_" + i),
                                          meal_type=self.request.get("meal_" + i)))
            rsvp.reply = True
            rsvp.num_adults = num_adults
            rsvp.num_children = num_children
            rsvp.attendees = attendees
        rsvp.put()
        data['status'] = "success"
        data['message'] = "Successfully Saved."
        # Massage the data so it is JSON serializable.
        data['data'] = rsvp.to_dict()
        data['data']['created_at'] = str(data['data']['created_at'])
        return util.send_json_response(data)


class DataViewer(webapp2.RequestHandler):
    """Admin-only listing of all RSVP data, gated by a cookie value."""

    def get(self):
        admin_cookie = self.request.cookies.get('lee-chinn-admin', '')
        if admin_cookie != "20140829":
            # BUG FIX: previously the handler fell through after the
            # redirect and still queried and rendered the data; return
            # here so non-admins never reach the listing.
            return self.redirect(ROOT_PATH)
        data = RSVP().get_all()
        template_values = {
            'data': data
        }
        util = Util(self.response)
        return util.render_template('data-viewer.html', template_values)


class Quiz(webapp2.RequestHandler):
    """Serves 5 random quiz questions as JSON embedded in the page."""

    def get(self):
        data = Question().get_all()
        data_as_dicts = []
        shuffle(data)
        truncated_data = data[:5]
        # Make the date usable to JSON serializable
        for question in truncated_data:
            q_dict = question.to_dict()
            q_dict['created_at'] = q_dict['created_at'].isoformat()
            q_dict['id'] = question.key.id()
            for option in q_dict['options']:
                option['created_at'] = option['created_at'].isoformat()
            data_as_dicts.append(q_dict)
        template_values = {
            'data': json.dumps(data_as_dicts)
        }
        util = Util(self.response)
        return util.render_template('quiz/quiz.html', template_values)


class QuizForm(webapp2.RequestHandler):
    """Form for entering new quiz questions."""

    def get(self):
        data = Question().get_all()
        util = Util(self.response)
        template_values = {
            'form_action_url': QUIZ_SUBMIT_PATH,
            'data': data
        }
        return util.render_template('quiz/form.html', template_values)


class QuizSubmit(webapp2.RequestHandler):
    """Stores a submitted quiz question and its options."""

    def post(self):
        question = self.request.get('question')
        num_opts = int(self.request.get('numOpts'))
        answer = int(self.request.get('answer'))
        qo = []
        # Option fields are 1-indexed (opt1, opt2, ...); `answer` is the
        # 1-based index of the correct option.
        for i in range(1, num_opts + 1):
            qo.append(QuestionOption(
                text=self.request.get('opt' + str(i)),
                isAnswer=True if answer == i else False)
            )
        q = Question(
            prompt=question,
            options=qo)
        q.put()
        self.redirect(QUIZ_FORM_PATH)


class QuizResults(webapp2.RequestHandler):
    """Static quiz results page."""

    def get(self):
        util = Util(self.response)
        return util.render_template('quiz/results.html')


class Util(object):
    """Small helper around a webapp2 response for JSON and templates."""

    def __init__(self, response):
        self.response = response

    def send_json_response(self, data, code=200):
        """Serialize `data` as JSON with the given HTTP status code."""
        self.response.headers['Content-Type'] = "application/json"
        self.response.set_status(code)
        return self.response.out.write(json.dumps(data))

    def render_template(self, template_name, template_values=None):
        """Render a template from templates/ into the response.

        BUG FIX: the default used to be a mutable dict shared across all
        calls; use None and build a fresh dict per call instead.
        """
        if template_values is None:
            template_values = {}
        # Side effect of JINJA - doesn't play well with windows, expects *nix path systems
        # and therefore looks for a forward slash. Bad JINJA.
        template = JINJA_ENVIRONMENT.get_template(ROOT_PATH.join(['templates', template_name]))
        return self.response.write(template.render(template_values))


application = webapp2.WSGIApplication([
    (ROOT_PATH, MainPage),
    (RSVP_RESPONSE_PATH, RsvpResponse),
    (DATA_VIWER_PATH, DataViewer),
    (QUIZ_PATH, Quiz),
    (QUIZ_FORM_PATH, QuizForm),
    (QUIZ_SUBMIT_PATH, QuizSubmit),
    (QUIZ_RESULTS_PATH, QuizResults)
], debug=True)
UTF-8
Python
false
false
2,014
3,839,700,781,085
ee17c0d600272496b9570eafce5a53d74fcb060e
618b3aadec53d2d19370df57a87dcd904f9ceeb6
/02_add_fields_split_for_xy.py
ceaf9251bcfe3a1b283e12a838461826246f5e5c
[]
no_license
nygeog/pnetapi
https://github.com/nygeog/pnetapi
8eb34e6501543d54339865a270c6a5196984cda4
728fe8b5f54c3d642ed554d9639f5437bd76fb62
refs/heads/master
2020-12-24T15:05:44.151339
2014-07-30T12:21:45
2014-07-30T12:21:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#add 2 new fields (lat, lng) (similar to loop above and then use split (( for x and then , for y import urllib2 import json import csv import sys, re, time import pandas as pd import glob import random with open('all_shows_working.csv', 'r') as csvinput: with open('all_shows_working_lat.csv', 'w') as csvoutput: writer = csv.writer(csvoutput, lineterminator='\n') reader = csv.reader(csvinput) all = [] row = next(reader) row.append('lat') all.append(row) cnt_cols = len(row) z = cnt_cols for row in reader: try: lat = row[z-2].split('(', 2)[2].split(',',1)[0] lat = float(lat) + 0.0001*random.randint(1, 50) row.append(lat) all.append(row) except: lat = 0 + 0.0001*random.randint(1, 50) row.append(lat) all.append(row) writer.writerows(all) with open('all_shows_working_lat.csv', 'r') as csvinput: with open('all_shows_working_lat_lng.csv', 'w') as csvoutput: writer = csv.writer(csvoutput, lineterminator='\n') reader = csv.reader(csvinput) all = [] row = next(reader) row.append('lng') all.append(row) cnt_cols = len(row) z = cnt_cols for row in reader: try: lng = row[z-3].split('(', 2)[2].split(',',1)[1].strip(')').strip(' ') lng = float(lng) + 0.0001*random.randint(1, 50) row.append(lng) all.append(row) except: lng = 0 + 0.0001*random.randint(1, 50) row.append(lng) all.append(row) writer.writerows(all)
UTF-8
Python
false
false
2,014
1,400,159,351,909
feb379c8ab2669ca109ce0cac226342bcd8b2e42
fcf5f9fa8bbac29562f28b7160902cea873789bb
/pybargain_demo_client/services/nego_db_service.py
8f2904d9ba2c251d8adb4f5b096474ee302da9c3
[ "MIT" ]
permissive
LaurentMT/pybargain_demo_client
https://github.com/LaurentMT/pybargain_demo_client
6c8191a6dd3f0f17d0d34af9d1a7ef0a35402aae
3bf1eac02be0fcedb3a9cca3fb0220ba88a40cf4
refs/heads/master
2020-04-06T07:03:24.920441
2014-08-23T19:02:31
2014-08-23T19:02:31
23,263,043
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
'''
A class simulating a wrapper to access a database storing Negotiations.
For this toy project, we store Negotiations in memory.
'''


class NegoDbService(object):
    '''In-memory stand-in for a negotiation database, keyed by nego id.'''

    def __init__(self):
        # Mapping of negotiation id -> negotiation object.
        self._negos_by_id = dict()

    def create_nego(self, nid, nego):
        '''
        Store a new nego under the given id.

        Parameters:
            nid  = id of the negotiation
            nego = nego object to store in db
        Returns True on success, False when the nego is invalid or the
        id is already taken.
        '''
        if not self._check_nego(nego):
            return False
        if self.get_nego_by_id(nid) is not None:
            return False
        self._negos_by_id[nid] = nego
        return True

    def update_nego(self, nid, nego):
        '''
        Replace the nego stored under the given id.

        Parameters:
            nid  = id of the negotiation
            nego = nego object to update in db
        Returns True on success, False when the nego is invalid or no
        nego exists for that id.
        '''
        if not self._check_nego(nego):
            return False
        if self.get_nego_by_id(nid) is None:
            return False
        self._negos_by_id[nid] = nego
        return True

    def delete_nego(self, nid):
        '''
        Remove the nego stored under the given id.

        Parameters:
            nid = id of the negotiation
        Returns True on success, False for a falsy or unknown id.
        '''
        if not nid:
            return False
        if self.get_nego_by_id(nid) is None:
            return False
        del self._negos_by_id[nid]
        return True

    def get_nego_by_id(self, nid):
        '''
        Return the nego stored under `nid`, or None for a falsy or
        unknown id.
        '''
        if not nid:
            return None
        return self._negos_by_id.get(nid)

    def get_all_negos(self):
        '''Return all stored negotiations.'''
        return self._negos_by_id.values()

    def _check_nego(self, nego):
        # A nego is acceptable as long as it is not None.
        return nego is not None
UTF-8
Python
false
false
2,014
15,590,731,290,087
031a1ee00b6fcb142823ab97b05cd300e35ec806
f389d6857c2fcf7b9132cb64602e2dc571c1f7ef
/longest_consec_seq.py
1fa2d9e055fd5e98fcb3f58282c3c2b3789820d3
[]
no_license
xblh2018/LeetcodePython
https://github.com/xblh2018/LeetcodePython
a6c005b6d07b0d73fc8a82ca0dc04a5d06daf3d0
7c3b65f82fab3405fa8ba097c3c659edcc63a330
refs/heads/master
2020-05-09T16:51:04.660252
2014-10-14T18:25:57
2014-10-14T18:25:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
'''
Leetcode: Longest Consecutive Sequence
Given an unsorted array of integers, find the length of the longest
consecutive elements sequence.
For example, Given [100, 4, 200, 1, 3, 2], The longest consecutive
elements sequence is [1, 2, 3, 4]. Return its length: 4.
Your algorithm should run in O(n) complexity.
'''
# NOTE: the original `from __future__ import division` was dropped — no
# true-division operator appears anywhere in this module, so it was a
# no-op; stray debug prints were also removed.
import random


# Using bitmap: set bit x for every x in L, then scan the bits for the
# longest run of consecutive ones.  Assumes non-negative integers (a
# negative x would make `1 << x` invalid).
def consecutive_seq(L):
    bitmap = 0
    for x in L:
        bitmap |= 1 << x
    max_len = cur_len = 0
    while bitmap > 0:
        bitmap, r = divmod(bitmap, 2)
        if r == 1:
            cur_len += 1
        else:
            max_len = max(max_len, cur_len)
            cur_len = 0
    # BUG FIX: the final run of 1-bits ends exactly when the bitmap
    # reaches 0, so it was never folded into max_len (e.g. [1, 2, 3]
    # returned 0).  Account for it here.
    return max(max_len, cur_len)


# Using extra space to merge seq
# Think of it as cluster merging: a single number is a length-1 cluster.
# `seq` maps each cluster's lowest and highest member to [low, high]; to
# merge two neighbouring clusters only the new endpoints need updating.
# For every x, checking the neighbours x-1 and x+1 is enough.
def merge(seq, x, y):
    # Combine the clusters containing x and y and update the endpoints.
    a, b = min(seq[x][0], seq[y][0]), max(seq[x][1], seq[y][1])
    seq[x] = [a, b]
    seq[y] = [a, b]
    seq[a] = [a, b]
    seq[b] = [a, b]
    return seq


def consecutive_seq2(L):
    seq = {}  # mapping: x -> sequence [a, b] that contains x
    for x in L:
        if x in seq:
            continue  # duplicates add nothing
        seq[x] = [x, x]
        if x - 1 in seq:
            seq = merge(seq, x, x - 1)
        if x + 1 in seq:
            seq = merge(seq, x, x + 1)
    return max([b - a + 1 for a, b in seq.values()])


if __name__ == '__main__':
    print(consecutive_seq2([4, 10, 8, 200, 1, 3, 30, 5, 12, 3, 1, 2, 2, 7, 70, 6, 9, 9, 11, 18, 16, 19]))
UTF-8
Python
false
false
2,014
953,482,742,307
baa4b7ef9b91fa3663e3469155e508704ea5847c
2826031fd655335cf56dd305af324f7e39b44c8c
/scorekeeper/migrations/0003_auto_20141202_2009.py
e5cfe61645c809e9de93213c4b0733d4974cd5c1
[ "MIT" ]
permissive
Treggats/fun-and-games
https://github.com/Treggats/fun-and-games
98eeb181644a0932695dac783660378541de705d
246a55b302c840a82840721b9a3bb0b606d0088b
refs/heads/master
2016-09-05T15:05:26.668624
2014-12-09T13:50:04
2014-12-09T13:50:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('scorekeeper', '0002_auto_20141201_1349'), ] operations = [ migrations.RenameField( model_name='player', old_name='initial_points', new_name='points', ), migrations.RemoveField( model_name='score', name='player', ), migrations.RemoveField( model_name='score', name='score', ), migrations.AddField( model_name='player', name='score', field=models.ForeignKey(default=0, to='scorekeeper.Score'), preserve_default=True, ), migrations.AddField( model_name='score', name='points', field=models.IntegerField(default=0), preserve_default=True, ), ]
UTF-8
Python
false
false
2,014
13,984,413,529,163
eee6e4f7a0a578e914eed520e944fde84a1f3929
b833b76c218505e8d4d3850ad31b948bdce31a7f
/coltrane/views.py
e6ba42ee0488535c0500a5b0b1c05d55685a753c
[]
no_license
yunan/django
https://github.com/yunan/django
0d917bde826e2398fe6e57a0ec343238087eac88
d8163e5bfd49e54a04118bf5ed674688dcdead60
refs/heads/master
2020-12-24T15:49:10.874561
2014-03-16T08:52:45
2014-03-16T08:52:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# BUG FIX: this module imported from "django.sortcuts", a typo that made
# every request fail with ImportError; the module is django.shortcuts.
from django.shortcuts import render_to_response, get_object_or_404
from coltrane.models import Entry


def entries_index(request):
    """Render the list of all entries."""
    return render_to_response('coltrane/entries_index.html',
                              {'entry_list': Entry.objects.all()})


def entry_detail(request, year, month, day, slug):
    """
    Render a single entry looked up by publication date and slug.

    `month` is expected as an abbreviated month name (e.g. "mar"),
    since the concatenated date string is parsed with "%Y%b%d".
    Raises Http404 when no matching entry exists.
    """
    import datetime, time
    date_stamp = time.strptime(year+month+day, "%Y%b%d")
    pub_date = datetime.date(*date_stamp[:3])
    entry = get_object_or_404(Entry,
                              pub_date__year=pub_date.year,
                              pub_date__month=pub_date.month,
                              pub_date__day=pub_date.day,
                              slug=slug)
    return render_to_response('coltrane/entry_detail.html',
                              {'entry': entry})
UTF-8
Python
false
false
2,014
8,392,366,123,752
dc65e90dbf502a22ae411bd02ae86c88717a083b
4b0e168a339b56d6f101f285419b4b5878d9ae28
/new_django_project/main/models.py
a90b148e61fe2730085f81c7f19e99dc61bf4887
[]
no_license
torchingloom/new_django_project
https://github.com/torchingloom/new_django_project
8f372c5e1d5180b7af0013d313d11882f6cc672b
60094f56d00f6e8f417d447f6683f1108155d46c
refs/heads/master
2021-01-01T18:34:17.711703
2014-06-18T12:20:24
2014-06-18T12:20:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: utf-8 from django.conf import settings from django.db import models from mptt.fields import TreeForeignKey from mptt.managers import TreeManager from mptt.models import MPTTModel from new_django_project._lib.models import SoftDeletionModel
UTF-8
Python
false
false
2,014
17,025,250,401,765
45e1844a69b84c5be7cca405a32a4d25133c3642
6c335e403bad1ac6baf12e623687d1bbdc70af3a
/sources/canolibs/lib/canolibs/unittest/cfile-Myunittest.py
f38fe2d1d1b9f32af98ef35a8c8cd2fc79cba7b6
[ "AGPL-3.0-or-later" ]
non_permissive
tito/canopsis
https://github.com/tito/canopsis
98140a2210e48f4b9f7534fb57dfedfb34efb163
ef14bae140cae5226b3c062f82572e6907cde1a4
refs/heads/develop
2020-12-25T06:32:40.492546
2014-10-10T12:43:11
2014-10-10T12:43:11
32,029,140
0
0
null
true
2015-03-11T16:42:49
2015-03-11T16:42:48
2015-03-03T17:09:17
2015-03-11T15:49:00
47,386
0
0
0
null
null
null
#!/usr/bin/env python #-------------------------------- # Copyright (c) 2011 "Capensis" [http://www.capensis.com] # # This file is part of Canopsis. # # Canopsis is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Canopsis is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Canopsis. If not, see <http://www.gnu.org/licenses/>. # --------------------------------- import unittest import logging logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s') from cfile import cfile from cfile import get_cfile from caccount import caccount from cstorage import get_storage from gridfs.errors import NoFile anonymous_account = caccount() root_account = caccount(user="root", group="root") storage = get_storage(account=root_account , namespace='unittest', logging_level=logging.DEBUG) sample_file_path = '/opt/canopsis/var/www/canopsis/themes/canopsis/resources/images/logo_small.png' sample_binary = open(sample_file_path, 'rb').read() sample_binary2 = bin(1234567890123456789) myfile = None class KnownValues(unittest.TestCase): def test_01_Init(self): global myfile myfile = cfile(storage=storage) if myfile.data != {}: raise Exception('Data corruption ...') def test_02_put_data(self): myfile.put_data(sample_binary2) if myfile.binary != sample_binary2: raise Exception('Data corruption ...') def test_03_save_data(self): global meta_id, bin_id meta_id = myfile.save() bin_id = myfile.get_binary_id() print "Meta Id: %s, Binary Id: %s" % (meta_id, bin_id) if not bin_id or not meta_id: raise Exception('Impossible to 
save cfile') def test_04_put_file(self): myfile.put_file(sample_file_path) if myfile.binary != sample_binary: raise Exception('Data corruption ...') def test_05_save_file(self): global meta_id, bin_id meta_id = myfile.save() bin_id = myfile.get_binary_id() if not bin_id or not meta_id: raise Exception('Impossible to save cfile') def test_06_Rights(self): with self.assertRaises(ValueError): storage.put(myfile, account=anonymous_account) with self.assertRaises(ValueError): storage.remove(myfile, account=anonymous_account) def test_07_GetMeta(self): meta = storage.get(meta_id) if not meta: raise Exception('Impossible to get meta data') print "Meta: %s" % meta def test_08_GetBinary(self): binary = storage.get_binary(bin_id) if not binary: raise Exception('Impossible to get binary data') if binary != sample_binary: raise Exception('Data corruption ...') def test_09_RemoveFile(self): myfile.remove() def test_10_CheckFileRemove(self): with self.assertRaises(NoFile): binary = storage.get_binary(bin_id) with self.assertRaises(KeyError): get_cfile(meta_id, storage) if myfile.check(): raise Exception('cfile is not deleted ...') if __name__ == "__main__": unittest.main(verbosity=2)
UTF-8
Python
false
false
2,014
5,669,356,874,273
83905f44d4730f3628f621099bbe592bd2d3348e
eea3fa4e235b9b76bf73aa08370d50714d61604b
/scripts/dump-sizes.py
c3907ab3a6cb71ea4fd5c1f7a9c2038e4d416a8e
[ "Apache-2.0" ]
permissive
conceptslearningmachine-FEIN-85-1759293/jydoop
https://github.com/conceptslearningmachine-FEIN-85-1759293/jydoop
d2ea1272f6f83e9c38c48b6317e3aad60eea88f3
a1ce82f3c6f3d335ba2b0cbc310dac52624a6e0b
refs/heads/master
2021-05-27T11:40:50.313084
2014-09-02T16:11:06
2014-09-02T16:11:06
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import crashstatsutils import jydoop import json from org.python.core.util import StringUtil setupjob = crashstatsutils.dosetupjob([]) def map(k, context): result = context.cx.getCurrentValue() meta_data = StringUtil.fromBytes(result.getValue("meta_data", "json")) meta = json.loads(meta_data) product = meta['ProductName'] version = meta['Version'] ispluginhang = meta.get('PluginHang', None) == "1" err = 0 kv = result.getColumnLatest("raw_data", "dump") if kv is None: err += 1 dumplen = 0 else: dumplen = kv.getValueLength() if "additional_minidumps" in meta: extradumps = meta["additional_minidumps"].split(",") for extradump in extradumps: extrakv = result.getColumnLatest("raw_data", "upload_file_minidump_" + extradump) if extrakv is None: err += 1 else: extralen = extrakv.getValueLength() dumplen += extralen context.write(k, (product, version, ispluginhang, dumplen, err)) output = jydoop.outputWithKey
UTF-8
Python
false
false
2,014
17,626,545,809,632
571da77464f101469faae81c565ad6a77c9ba0d3
12b5f5ca59e6258698b68c4c5874163cb7e6f55e
/hyperspyui/signalwrapper.py
17faed2f06f6ed5263a059bddf7718118f78b4c6
[]
no_license
pburdet/hyperspyUI
https://github.com/pburdet/hyperspyUI
03ff40aae21ead8d71bf8f7fa6f0bb3925c9543b
220458832abcf2e079ef3358f43885a1735565e6
refs/heads/master
2021-01-21T16:54:12.334674
2014-12-30T20:52:55
2014-12-30T20:52:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Oct 24 18:27:15 2014 @author: Vidar Tonaas Fauske """ from util import fig2win from python_qt_binding import QtCore, QtGui from modelwrapper import ModelWrapper from actionable import Actionable import hyperspy.hspy class SignalWrapper(Actionable): closing = QtCore.Signal() model_added = QtCore.Signal(object) model_removed = QtCore.Signal(object) def __init__(self, signal, mainwindow, name): super(SignalWrapper, self).__init__() self.signal = signal if name is None: name = signal.metadata.General.title self.name = name self.figures = [] self.mainwindow = mainwindow self.models = [] self._keep_on_close = 0 self.navigator_plot = None self.signal_plot = None self._nav_geom = None self._sig_geom = None self._replotargs = ((), {}) self._model_id = 1 self.add_action('plot', "&Plot", self.replot) self.add_action('add_model', "Add &model", self.make_model) self.add_separator() self.add_action('close', "&Close", self.close) @property def keep_on_close(self): return self._keep_on_close > 0 @keep_on_close.setter def keep_on_close(self, value): if value: self._keep_on_close += 1 else: if self._keep_on_close > 0: self._keep_on_close -= 1 def plot(self, *args, **kwargs): self.keep_on_close = True self.signal.plot(*args, **kwargs) self.keep_on_close = False self.update_figures() self._replotargs = (args, kwargs) self.mainwindow.main_frame.subWindowActivated.emit( self.mainwindow.main_frame.activeSubWindow()) def replot(self): self.plot(*self._replotargs[0], **self._replotargs[1]) def update(self): if self.navigator_plot is not None: self.navigator_plot.update() if self.signal_plot is not None: self.signal_plot.update() def update_figures(self): old_nav = self.navigator_plot old_sig = self.signal_plot self.remove_figure(old_nav) self.remove_figure(old_sig) self.navigator_plot = None self.signal_plot = None atleast_one_changed = False # If we have a navigator plot if self.signal._plot and self.signal._plot.navigator_plot: # Set internal 
`navigator_plot` to window containing it navi = self.signal._plot.navigator_plot.figure self.navigator_plot = fig2win(navi, self.mainwindow.figures) # Did the window change? if old_nav is not self.navigator_plot: # Process the plot navi.axes[0].set_title("") # remove title # Wire closing event self.navigator_plot.closing.connect(self.nav_closing) # Set a reference on window to self self.navigator_plot.setProperty('hyperspyUI.SignalWrapper', self) # Add to figures list self.add_figure(self.navigator_plot) # Did we have a previous window? if old_nav is not None: # Save geometry of old, and make sure it is closed self._nav_geom = old_nav.saveGeometry() old_nav.closing.disconnect(self.nav_closing) old_nav.close() atleast_one_changed = True # If we have stored geometry, and a valid plot, restore if self._nav_geom is not None and self.navigator_plot is not None: self.navigator_plot.restoreGeometry(self._nav_geom) self._nav_geom = None if self.signal._plot and self.signal._plot.signal_plot is not None: sigp = self.signal._plot.signal_plot.figure self.signal_plot = fig2win(sigp, self.mainwindow.figures) if old_sig is not self.signal_plot: sigp.axes[0].set_title("") self.signal_plot.closing.connect(self.sig_closing) self.signal_plot.setProperty('hyperspyUI.SignalWrapper', self) self.add_figure(self.signal_plot) if old_sig is not None: self._sig_geom = old_sig.saveGeometry() old_sig.closing.disconnect(self.sig_closing) old_sig.close() atleast_one_changed = True if self._sig_geom is not None and self.signal_plot is not None: self.signal_plot.restoreGeometry(self._sig_geom) self._sig_geom = None if atleast_one_changed: self.mainwindow.check_action_selections() def add_figure(self, fig): self.figures.append(fig) def remove_figure(self, fig): if fig in self.figures: self.figures.remove(fig) def as_image(self, axis=(0,1)): self.close() # Store geomtery and close tmp = self._sig_geom self._sig_geom = self._nav_geom self._nav_geom = tmp self.signal = self.signal.as_image(axis) def 
as_spectrum(self, axis=0): self.close() # Store geomtery and close tmp = self._sig_geom self._sig_geom = self._nav_geom self._nav_geom = tmp self.signal = self.signal.as_spectrum(axis) def run_nonblock(self, function, windowtitle): self.keep_on_close = True def on_close(): self.keep_on_close = False self.update_figures() def on_capture(dialog): dialog.destroyed.connect(on_close) dialog.setParent(self.mainwindow, QtCore.Qt.Tool) dialog.show() dialog.activateWindow() # Setup capture self.mainwindow.capture_traits_dialog(on_capture) # Call actual function that triggers dialog function() def make_model(self, *args, **kwargs): m = hyperspy.hspy.create_model(self.signal, *args, **kwargs) # modelname = self.signal.metadata.General.title modelname = "Model %d" % self._model_id self._model_id += 1 mw = ModelWrapper(m, self, modelname) self.add_model(mw) mw.plot() return mw def add_model(self, model): self.models.append(model) self.model_added.emit(model) def remove_model(self, model): self.models.remove(model) self.model_removed.emit(model) self.plot() def nav_closing(self): if self.navigator_plot: self._nav_geom = self.navigator_plot.saveGeometry() self.navigator_plot = None if self.signal_plot is None: self._closed() def sig_closing(self): if self.signal_plot: p = self.signal_plot.pos() # For some reason the position changes -8,-30 on closing, at least # it does on windows 7, Qt4. 
self.signal_plot.move(p.x()+8, p.y()+30) self._sig_geom = self.signal_plot.saveGeometry() if self.navigator_plot is not None: self.navigator_plot.close() self.navigator_plot = None self.signal_plot = None self._closed() def close(self): if self.signal_plot is not None: self.signal_plot.close() self.signal_plot = None if self.navigator_plot is not None: self.navigator_plot.close() self.navigator_plot = None self._closed() def _closed(self): if not self.keep_on_close: self.closing.emit() # TODO: Should probably be with by events for concistency if self in self.mainwindow.signals and not self.keep_on_close: self.mainwindow.signals.remove(self)
UTF-8
Python
false
false
2,014
996,432,459,599
0dda2feadf37c4eeeccf3fa1eda339961f3fbe92
f7330e26658ece327d2c44b8791d1104008fc271
/puntersparadise/app.py
9fb9a86ff0edb58f9653b3d364edc03d96e11a36
[]
no_license
diaakasem/scrap
https://github.com/diaakasem/scrap
7e3acd1970a9335b7a39e24668f3c75a3a9c0c88
4fb84eb2cea4da9e053a327a13d2182acdebae2a
refs/heads/master
2021-01-23T13:29:39.195885
2013-10-08T18:30:02
2013-10-08T18:30:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
############################################################################### # Description: Collects data from punter paradise.com.au # Author : Diaa Kasem # Email : [email protected] # oDesk : https://www.odesk.com/users/~019c6c183545f26f3f # Usage : Alter the config vlues below # Save The script with a <name.py> # execute 'python <name.py>' Where name.py is the script name ############################################################################### from bs4 import BeautifulSoup import urllib2 import csv import time, datetime import re import json import math from dateutil.parser import * config = { "weight-kg" : 0, "horse-age" : 0, "career-win-rate" : 0, "career-place-rate" : 100, "career-prize-money" : 0, "average-prize-money" : 0, "jockey-wins" : 0, "track-wins" : 0, "good-tracks" : 0, "heavy-tracks" : 0, "synthetic-tracks" : 0, "jumps-tracks" : 0, "barrier" : 0, "distance-wins" : 0, "track-distance-wins" : 0, "fast-tracks" : 0, "dead-tracks" : 0, "slow-tracks" : 0 } BASE = "http://www.puntersparadise.com.au" meetings_url = "%s/form-guid/" % BASE meetings_url = 'http://www.puntersparadise.com.au/form-guide' url = BASE + "/%s" data = [] outputFile = 'csvoutput.csv' fields = ['Rcdate', 'Track', 'Rcno', 'Rctime', 'Tab', 'Horse', 'Rat'] pagesCount = 998 def ordinal(n): if 10 <= n % 100 < 20: return str(n) + 'th' else: return str(n) + {1 : 'st', 2 : 'nd', 3 : 'rd'}.get(n % 10, "th") def isToday(ts): """ Check if the Date of race equals today """ n = datetime.datetime.now() d = datetime.datetime.fromtimestamp(ts) # print "Race Date : %s " % d # print "Your current time: %s " % n return d.day == n.day def getMeetings(): """ Collect all race meetings going to happen """ page = urllib2.urlopen(meetings_url).read() soup = BeautifulSoup(page) scripts = soup.findAll('script') script = [ script for script in scripts if 'meeting_events' in str(script) ] jsCode = re.sub('<[^<]+?>', '', str(script[0])) array = re.findall('meeting_events\s*=\s*(.*)\s*;', jsCode) return 
json.loads(array[0]) def extractPage(url): """ Extract Meetings data from page """ page = urllib2.urlopen(url).read() soup = BeautifulSoup(page) fh = soup.find('div' , {'class': 'formHeader'}) h2 = fh.find('h2').text result = [] racedata = h2.split('-') racedate = racedata[0].strip() title = re.findall('(.+)\s*Race.*(\d+)', racedata[1])[0] track = title[0] track = track.strip() racenumber = title[1] racenumber = re.findall('\d+', racedata[1])[0] ts = float(soup.find('abbr', {'class': 'time12'})['data-utime']) otime = datetime.datetime.fromtimestamp(ts) racetime = otime.strftime('%I:%M%p') if not isToday(ts): print "Skipping - Not today : %s " % racedate return horses = [] runners = soup.findAll('div', {'class': 'csRunner'}) def num(string): s = str(float(string)) if len(s) > 4: return float(s[0:4]) return float(s) # A dictionary that when script executes, will hold max value # Important for calculations - DO NOT ALTER maxResults = { "weight-kg" : 0, "horse-age" : 0, "career-win-rate" : 0, "career-place-rate" : 0, "career-prize-money" : 0, "average-prize-money" : 0, "jockey-wins" : 0, "track-wins" : 0, "good-tracks" : 0, "heavy-tracks" : 0, "synthetic-tracks" : 0, "jumps-tracks" : 0, "barrier" : 0, "distance-wins" : 0, "track-distance-wins" : 0, "fast-tracks" : 0, "dead-tracks" : 0, "slow-tracks" : 0 } # Calculating horse rate for runner in runners: obj = { "runner-name" : runner.get('data-runner-name', ''), "runner-title" : runner.get('data-runner-title', ''), "weight-kg" : num(runner.get('data-weight-kg', '0')), "horse-age" : num(runner.get('data-horse-age', '0')), "career-win-rate" : num(runner.get('data-career-win-rate', '0')), "career-place-rate" : num(runner.get('data-career-place-rate', '0')), "career-prize-money" : num(runner.get('data-career-prize-money', '0')), "average-prize-money" : num(runner.get('data-average-prize-money', '0')), "jockey-wins" : num(runner.get('data-jockey-wins', '0')), "track-wins" : num(runner.get('data-track-wins', '0')), 
"good-tracks" : num(runner.get('data-good-tracks', '0')), "heavy-tracks" : num(runner.get('data-heavy-tracks', '0')), "synthetic-tracks" : num(runner.get('data-synthetic-tracks', '0')), "jumps-tracks" : num(runner.get('data-jumps-tracks', '0')), "barrier" : num(runner.get('data-barrier', '0')), "distance-wins" : num(runner.get('data-distance-wins', '0')), "track-distance-wins" : num(runner.get('data-track-distance-wins', '0')), "fast-tracks" : num(runner.get('data-fast-tracks', '0')), "dead-tracks" : num(runner.get('data-dead-tracks', '0')), "slow-tracks" : num(runner.get('data-slow-tracks', '0')) } maxResults = { "weight-kg" : max(obj["weight-kg"], maxResults["weight-kg"]), "horse-age" : max(obj["horse-age"], maxResults["horse-age"]), "career-win-rate" : max(obj["career-win-rate"], maxResults["career-win-rate"]), "career-place-rate" : max(obj["career-place-rate"], maxResults["career-place-rate"]), "career-prize-money" : max(obj["career-prize-money"], maxResults["career-prize-money"]), "average-prize-money" : max(obj["average-prize-money"], maxResults["average-prize-money"]), "jockey-wins" : max(obj["jockey-wins"], maxResults["jockey-wins"]), "track-wins" : max(obj["track-wins"], maxResults["track-wins"]), "good-tracks" : max(obj["good-tracks"], maxResults["good-tracks"]), "heavy-tracks" : max(obj["heavy-tracks"], maxResults["heavy-tracks"]), "synthetic-tracks" : max(obj["synthetic-tracks"], maxResults["synthetic-tracks"]), "jumps-tracks" : max(obj["jumps-tracks"], maxResults["jumps-tracks"]), "barrier" : max(obj["barrier"], maxResults["barrier"]), "distance-wins" : max(obj["distance-wins"], maxResults["distance-wins"]), "track-distance-wins" : max(obj["track-distance-wins"], maxResults["track-distance-wins"]), "fast-tracks" : max(obj["fast-tracks"], maxResults["fast-tracks"]), "dead-tracks" : max(obj["dead-tracks"], maxResults["dead-tracks"]), "slow-tracks" : max(obj["slow-tracks"], maxResults["slow-tracks"]) } horses.append(obj) horsesRates = {} for horse in 
horses: rate = 0 for k in config.iterkeys(): rate += ( horse.get(k, 0) / (maxResults.get(k, 1) or 1) ) * config.get(k, 0) / 100 # SAVING HORSE RATES horsesRates[horse.get('runner-name', '').strip()] = int(round(rate * 100)) data = soup.find('table', {'class': 'formRaceCard'}) data = data.findAll('tr')[1:] for tr in data: number = tr.find('td', {'class': 'horseNumber'}) number = number.find('a').text name = tr.find('td', {'class': 'horseDetails'}) name = name.find('a', {'class': 'hoverTrigger'}).text name = name.strip() # rate = tr.find('td', {'class': 'winPercent'}).text # rate = re.findall('\d+', rate)[0] rate = horsesRates.get(name, 0) obj = { 'Rcdate': racedate, 'Track': track, 'Rctime': racetime, 'Rcno': racenumber, 'Tab': number, 'Horse': name, 'Rat': rate } # Saving Extracted Data result.append(obj) # Sort Records in race by horse Rate result.sort(key=lambda row: row.get('Rat', 0), reverse=True) # print json.dumps(result, sort_keys = False, indent = 4) return result def writeData(outputFile, data): """ Save Data to Desk @param outputFile The path of the file to write to @param data The dictionary of value to save """ with open(outputFile, 'wb') as csvfile: output = csv.DictWriter(csvfile, delimiter=',', fieldnames=fields) output.writeheader() output.writerows(data) def main(): """ Program Starting Point """ result = [] meetings = getMeetings() for meeting in meetings: u = url % meeting['href'] print "Working on : %s" % meeting['href'] data = extractPage(u) if data: result = result + data print "Writing to disk." writeData(outputFile, result) time.sleep(1) print "Done." if __name__ == '__main__': main()
UTF-8
Python
false
false
2,013
807,453,869,173
4c3ddab6e9fb4cc822b1770c0964709f95a06464
efcfdf661a12da240780a2e505394bdf17af0b9c
/core/geometry.py
e3af36465cdcba0b3ea5ed323e4e9fe715a1e00e
[]
no_license
zarch/openriver
https://github.com/zarch/openriver
730a23c67cb4d1ab55f18c743d3e4e89437ddff4
9754f74552dbb9978a209420016196f160aad32c
refs/heads/master
2021-01-20T23:45:30.924509
2010-05-17T08:08:57
2010-05-17T08:08:57
563,674
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- import numpy as np import re from os.path import join as joinpath # section example sec_rectangular=[(0, 0), (0, -2), (10, -2), (10, 0)] sec_rectangular2=sec_rectangular[1:] sec_rectangular2=sec_rectangular.insert(-1, sec_rectangular[-1]) print sec_rectangular print sec_rectangular2 rect=np.array(sec_rectangular) rect2=np.array(sec_rectangular2) rect_y=np.array([i[0] for i in sec_rectangular]) rect_z=np.array([i[1] for i in sec_rectangular]) print rect, rect2 import csv class Section: """ print m.group('sez_name') It defines attributes and methods for a river cross-section. It's possible to define sub-segments of the section, each one with a different roughness. Example of usage: coord = [[0,10],[0,0],[10,0],[20,0],[20,10]] sect = Section(0, coord) sect.addSegment(sect.yzcoord[0:2], 35) sect.addSegment(sect.yzcoord[2:], 40) """ def __init__(self, name=None, data=None, first=0, last=-1, erodible=True, roughness=None, discontinuity=False, subsection=False, watersurface=None, variotype = None, d90=None, variolenght=None, varioexcav=None): self.name = name self.data = np.array(data) self.xcoord = self.data.T[0] self.yzcoord = self.data.T[1:3] self.first = first self.last = last minimum = self.yzcoord[1].argmin() self.min = self.yzcoord[minimum][1] self.erodible = erodible self.roughness = roughness self.discontinuity = discontinuity self.subsection = subsection self.segment = [] # Add this attributs to support vario format self.d90 =d90 self.variotype = variotype self.varioLenght = variolenght self.varioexcav = varioexcav self.watersurf = watersurface def __str__(self): return str(self.name) def addSegment(self, yzcoordSegm=None, roughness=None): self.segment.append(Section(yzcoord=yzcoordSegm, roughness=roughness, subsection=True)) def firstPointAfter_h(self, points, h): """Return index of the first >>> points=np.array([ 742.73, 742.75, 742.77, 742.79, 747.27]) >>> section.firstPointAfter_h(points, 745) 4 >>> section.firstPointAfter_h(points, 
742) Traceback (innermost last): ... ValueError: h outside section h < min >>> section.firstPointAfter_h(points, 748) Traceback (innermost last): ... ValueError: h outside section h > max """ if h > points.max(): raise ValueError("h outside section\n h > max") elif h < points.min(): raise ValueError("h outside section\n h < min") else: #print 'points:', points, h for i, p in enumerate(points): #print i, p if p > h: return i def intersection(self, pn1, pn2, h): """Returnurn intersection between 2 points and height >>> section.intersection((0,5),(0,0),3) (0, 3) (h-y0)/(y1-y0) = (x-x0)/(x1-x0) x=(x1-x0)/(y1-y0)*(h-y0)+x0 return x,h""" #print "intersectionection:", pn1, pn2, h #print "z: ", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0] return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h def getSect(self, h): """Return section only from left intersection to right intersection >>> section.getSect(745) array([[ 4.71227679e-01, 7.45000000e+02], [ 9.30000000e-01, 7.42790000e+02], [ 7.19000000e+00, 7.42770000e+02], [ 1.25900000e+01, 7.42750000e+02], [ 1.80800000e+01, 7.42730000e+02], [ 1.89100000e+01, 7.42730000e+02], [ 1.94887253e+01, 7.45000000e+02]]) """ lefttomin=self.yzcoord[1][:self.min+1] # left point lpnt = self.firstPointAfter_h(lefttomin[::-1], h) # find index of left point l_pnt = self.min - lpnt # find left intersection l_intersect = self.intersection(self.yzcoord.T[l_pnt], self.yzcoord.T[l_pnt+1], h) # right point rpnt = self.firstPointAfter_h(self.yzcoord[1][self.min:], h) # find index of right point r_pnt = self.min + rpnt # find right intersection r_intersect = self.intersection(self.yzcoord.T[r_pnt], self.yzcoord.T[r_pnt-1], h) # make new section geometries sez = self.yzcoord.T[l_pnt+1:r_pnt] # Add left intersection on the top sez=np.insert(sez, [0,], l_intersect,axis=0) # Add rightht intersection on the bottom sez=np.append(sez,[r_intersect],axis=0) return sez def area(self, sez): """Return area given a section take from getSect >>> 
section.area(yzcoordT) 41.448496630204318 """ # find area below water line area_h2o = (sez[-1][0]-sez[0][0])*sez[0][1] # find area bellow section area_sez = np.trapz(sez.T[1],x=sez.T[0]) return area_h2o - area_sez def wetBorder(self, sez): """Calculate web border from a given section >>> section.wetBorder(yzcoordT) 23.557497620999964 """ # calculate with pitagora: sqrt(dx²+dy²) sez1=np.delete(sez, 0, axis=0) sez2=np.delete(sez, -1, axis=0) delta=sez1-sez2 return np.sum(np.sqrt(delta * delta)) def rh(self, h): """Return thee idraulic radius given height >>> section.rh(745) 1.7594609288533762 """ sez=self.getSect(h) area=self.area(sez) wetborder = self.wetBorder(sez) return area/wetborder class Reach: """ It defines the geometric properties of a river reach. It is composed by sections and sections can be subdivided in segments. """ def __init__(self, sections = []): self.sections = sections self.workingpath = None def __str__(self): slist = [] for s in self.sections: separetor = '='*50 sectname = s.name + ': ' + str(len(s.yzcoord)) data = str(s.data) slist.append("\n".join([separetor, sectname, data])) return "\n".join(slist) def recursiveReadVario(self, datalist): """This function append section to reach.sections""" index = 0 # read first line section xcoord,e,d = datalist[index] xcoord = float(xcoord) erodible = True if e == 't' else False discontinuity= False if d == 'f' else True # go to the second line index+=1 # read and trasform str value in integer npoints, nsegments = map(int, datalist[index]) #initialize locals variables yzcoord = [] segmens = [] endpoints=index+npoints+1 endsegments = endpoints+nsegments index+=1 # start a cicle between points for e in datalist[index:endpoints]: # trasform string in float yz=map(float, e[:2]) #print e[:2] # add new coordinates to yzcoordinates list yzcoord.append(yz) # add x column to the data array data = np.ones(shape=(len(yzcoord),1)) data = data * xcoord # transform list in a numpy array because in this way is easier # 
to assign value for ks yzcoord = np.array(yzcoord) #print yzcoord # add yzcoord to the data array data = np.append(data,yzcoord,axis=1) # add roughnes column default it is 0 kscolumn = np.zeros(shape=(len(yzcoord),1)) data = np.append(data,kscolumn,axis=1) # assign KS = 3 to have more readable source KS = 3 for e in datalist[endpoints:endsegments]: # trasform string in integer and assign start end and ks start, end, ks = map(int, e) start -= 1 data.T[KS][start:end] = ks index = endsegments # check if discontinuity == True if discontinuity: [[type], [d90], [l], [excavation]] = datalist[index:index+4] index = index + 4 type, d90, l, excavation= int(type), int(d90), float(l), float(excavation) #print "type: %d, d90: %d, l: %f, excav: %f" % (type, d90, l, excavation) # make new section and append to the reach list self.sections.append(Section(data = data, erodible = erodible, discontinuity = discontinuity, variotype = None if discontinuity == False else type, d90 = None if discontinuity == False else d90, variolenght = None if discontinuity == False else l, varioexcav = None if discontinuity == False else excavation,)) newline = datalist[index] # check if new line is the end of file. if newline == ['-100', '-100', '-100']: print "Finish to import." else: self.recursiveReadVario(datalist[index:]) def importFileVario(self, filename): """ >>> river = Reach() >>> river.importFileVario('../test/importexport/variosection.geo') Finish to import. """ datalist = [] geometryFile = open(filename, "r") # make a list of list from the file. for row in geometryFile: datalist.append(row.split()) self.recursiveReadVario(datalist) def exportFileVario(self, filename): """ Return a vario file of sections >>> river = Reach() >>> river.importFileVario('../test/importexport/variosection.geo') Finish to import. >>> river.exportFileVario('../test/importexport/variosectionTEST.geo') Finish to export. 
""" sectionVarioFile = open(filename, "w") for s in self.sections: # Vario take just one x coordinates so we take the first one x = float(s.xcoord[0]) erod = 't' if s.erodible else 'f' disc = 't' if s.discontinuity else 'f' npoints = int(len(s.data)) kslist = s.data.T[3][:-1] segmentslist = [] index = 0 # initialize segment start and end s_start=0 s_end =1 while s_end != len(kslist): ks = kslist[s_start] ksnext = kslist[s_end] #print ks, ksnext, s_start, s_end if ks == ksnext: s_end += 1 else: segmentslist.append('%d %d %d' % (s_start+1, s_end+1, ks)) s_start = s_end s_end += 1 segmentslist.append('%d %d %d' % (s_start+1, s_end+1, kslist[s_start])) nsegments = int(len(segmentslist)) #print s.yzcoord.T yzcoordstr = "\n".join(["%f %f" % tuple(c) for c in s.yzcoord.T]) segmentstr = "\n".join(segmentslist) # Define the string that will be write in the file for each section variosection = """%f %s %s %d %d %s %s """ % (x, erod, disc, npoints, nsegments, yzcoordstr, segmentstr, ) # check if there are discontinuity if s.discontinuity: dis_str = "%d\n%d\n%f\n%f\n" % (s.variotype, s.d90, s.varioLenght, s.varioexcav) variosection +=dis_str # then write section string to the output file sectionVarioFile.write(variosection) sectionVarioFile.close() print "Finish to export." 
def importFileOri(self, sectionfilename, pointsfilename):
    """Read an Ori geometry (sections + points files) into self.sections.

    section.ori -------------------------
    301
    4 sez0001
    1 100.00000 4 100.00000
    4 sez0002
    1 100.00000 4 100.00000
    4 sez0003
    1 100.00000 4 100.00000

    points.ori -------------------------
    0.00000 10.00000 100.00000 100.00000
    0.00000 10.00000 0.00000 100.00000
    0.00000 50.00000 0.00000 100.00000
    0.00000 50.00000 100.00000 100.00000
    5.00000 10.00000 100.00000 100.00000
    5.00000 10.00000 0.00000 100.00000
    5.00000 50.00000 0.00000 100.00000
    5.00000 50.00000 100.00000 100.00000

    >>> river = Reach()
    >>> river.importFileOri('../test/importexport/sections.ori', '../test/importexport/points.ori')
    >>> len(river.sections)
    301
    """
    sectionFile = open(sectionfilename, "r")
    pointsFile = open(pointsfilename, "r")
    try:
        # define regexp matching two header lines per section:
        # "<npoints> <name>" then "<first> <h> <last> <h>"
        # (note: '[sez]+' is a character class, so it accepts any mix of
        # the letters s/e/z before the digits, not only the word 'sez')
        restr = r"""^\s*(?P<points_num>\d+)\s+(?P<sez_name>[sez]+\d+)\s*\n^\s*(?P<first_point>\d+)\s+(?P<first_point_h>[0-9.]+)\s+(?P<last_point>\d+)\s+(?P<last_point_h>[0-9.]+)\s*\n"""
        regexp = re.compile(restr, re.MULTILINE)
        # find all section informations
        matches = [m.groupdict() for m in regexp.finditer(sectionFile.read())]
        # take all data from points.ori
        allcoord = []
        for row in pointsFile:
            allcoord.append([float(x) for x in row.split()])
    finally:
        # BUG FIX: both file handles were never closed
        sectionFile.close()
        pointsFile.close()
    # make the list of sections; `first`/`last` walk through allcoord as
    # cumulative offsets of each section's point block
    sectionlist = []
    first = 0
    last = 0
    for m in matches:
        first += int(m['first_point']) - 1
        last += int(m['last_point'])
        sectionlist.append(Section(name=m['sez_name'],
                                   data=allcoord[first:last],
                                   first=int(m['first_point']) - 1,
                                   last=int(m['last_point'])))
        first = last
    # assign sections attribute
    self.sections = sectionlist
    return

def exportFileOri(self, sectionfilename, pointsfilename):
    """Write self.sections back to the Ori two-file format.

    >>> river = Reach()
    >>> river.importFileOri('../test/importexport/sections.ori', '../test/importexport/points.ori')
    >>> river.exportFileOri('../test/importexport/sectionsTEST.ori', '../test/importexport/pointsTEST.ori')
    Start writing: ../test/importexport/sectionsTEST.ori
    Start writing: ../test/importexport/pointsTEST.ori
    Finish
    """
    sectionFile = open(sectionfilename, "w")
    print("Start writing: %s" % sectionfilename)
    try:
        sectionFile.write('%s\n' % len(self.sections))
        for sect in self.sections:
            # e.g.:
            # 4 sez0001
            # 1 100.00000 4 100.00000
            rows = '%s %s\n%s %s %s %s\n' % (len(sect.data), sect.name,
                                             sect.first + 1,
                                             sect.data[sect.first][2],
                                             sect.last,
                                             sect.data[sect.last - 1][2])
            sectionFile.write(rows)
    finally:
        # BUG FIX: close even if a section fails to serialize
        sectionFile.close()
    print("Start writing: %s" % pointsfilename)
    pointsFile = open(pointsfilename, "w")
    try:
        for section in self.sections:
            rowlist = []
            for row in section.data:
                rowlist.append(" ".join(['%9.5f' % x for x in row]))
            pointsFile.write('%s\n' % "\n".join([' %s' % r for r in rowlist]))
    finally:
        pointsFile.close()
    print("Finish")

def addSection(self, section=None):
    """Append a section object to the reach."""
    self.sections.append(section)

def length(self, sectlist=None, dim=3):
    """Distance along the first points of consecutive sections.

    Uses self.sections when `sectlist` is not given; `dim` selects how
    many leading coordinates of each point are used (1, 2 or 3).

    >>> river = Reach()
    >>> river.importFileOri('../test/test1/sections.ori', '../test/test1/points.ori')

    to calculate length just only 1D long x
    >>> river.length(dim = 1)
    1500.0

    to calculate length just only 2D long x and y
    >>> river.length(dim = 2)
    1585.0

    to calculate length just only 3D long x, y and z
    >>> river.length(dim = 3)
    1607.1999999999994
    """
    # check input
    if not sectlist:
        sectlist = self.sections
    if dim <= 3:
        dim = int(dim)
    else:
        raise ValueError("dim must be <= 3")
    l = []
    for sez in sectlist:
        data = sez.data[sez.first:sez.last]
        # assumes 4-column rows (x, y, z, h) -- slicing with dim-4 keeps
        # the first `dim` coordinates of the section's FIRST point only
        x = dim - 4
        l.append(data[0][0:x])
    array = np.array(l)
    a1 = np.delete(array, 0, axis=0)
    a2 = np.delete(array, -1, axis=0)
    delta = a2 - a1
    # NOTE(review): np.sqrt(delta*delta) is element-wise |delta|, so for
    # dim > 1 this sums Manhattan (not Euclidean) segment lengths; the
    # doctest constants above were produced by this formula -- confirm
    # this is the intended definition before changing it
    return np.sum(np.sqrt(delta * delta))

def readSimulation(self):
    # not implemented yet
    pass

if __name__ == "__main__":
    import doctest
    # module-level fixtures referenced by the doctests
    yzcoordT = np.array([[4.71227679e-01, 7.45000000e+02],
                         [9.30000000e-01, 7.42790000e+02],
                         [7.19000000e+00, 7.42770000e+02],
                         [1.25900000e+01, 7.42750000e+02],
                         [1.80800000e+01, 7.42730000e+02],
                         [1.89100000e+01, 7.42730000e+02],
                         [1.94887253e+01, 7.45000000e+02]])
    sezdata = np.array([[0., 0., 747.27, 50.],
                        [0., 0.93, 742.79, 50.],
                        [0., 7.19, 742.77, 50.],
                        [0., 12.59, 742.75, 50.],
                        [0., 18.08, 742.73, 50.],
                        [0., 18.91, 742.73, 50.],
                        [0., 20.07, 747.28, 50.]])
    section = Section(data=sezdata)
    doctest.testmod()
UTF-8
Python
false
false
2,010