\n ''' % (js_filename)\n return html_script\n\ndef create_javascript_array(otu_table, use_floats=False):\n \"\"\"Convert the OTU table counts into a javascript array\"\"\"\n # Build up list of strings and concatenate at end, as this string can be\n # very large and have many concatenations.\n js_array = ['var OTU_table=new Array();\\n'\n 'var i=0;\\n'\n 'for (i==0;i<%i;i++) {\\n'\n 'OTU_table[i]=new Array();}\\n' %\n (len(otu_table.SampleIds) + 2)]\n\n #0 ['#OTU ID', 'OTU2', 'OTU3']\n #1 ['Sample1', 1, 2]\n #2 ['Sample2', 5, 4]\n #3 ['Consensus Lineage', 'Archaea', 'Bacteria']\n\n # OTU ids first\n js_array.append(\"OTU_table[0][0]='#OTU ID';\\n\")\n for (idx, otu_id) in enumerate(otu_table.ObservationIds):\n js_array.append(\"OTU_table[0][%i]='%s';\\n\" % (idx+1, otu_id))\n\n # Sample ids and values in the table\n i = 1\n for (sam_val, sam_id, meta) in otu_table.iterSamples():\n js_array.append(\"OTU_table[%i][0]='%s';\\n\" % (i, sam_id))\n for (idx, v) in enumerate(sam_val):\n if use_floats:\n js_array.append(\"OTU_table[%i][%i]=%.4f;\\n\" %\n (i, idx+1, float(v)))\n else:\n # don't quite understand why int(float(v)), rather than int(v)\n js_array.append(\"OTU_table[%i][%i]=%d;\\n\" %\n (i, idx+1, int(float(v))))\n i += 1\n\n # Consensus lineages for each OTU\n last_idx = len(otu_table.SampleIds) + 1\n js_array.append(\"OTU_table[%i][0]='Consensus Lineage';\\n\" % last_idx)\n i = 1\n for (otu_val, otu_id, meta) in otu_table.iterObservations():\n js_array.append(\"OTU_table[%i][%i]='%s';\\n\" %\n (last_idx, i, \";\".join(meta['taxonomy']).strip('\"')))\n i += 1\n\n return ''.join(js_array)\n\ndef filter_by_otu_hits(num_otu_hits, otu_table):\n \"\"\"Filter the OTU table by the number of otus per sample\"\"\"\n # Filter out rows with sum > num_otu_hits\n new_otu_table = filter_otus_from_otu_table(otu_table, otu_table.ObservationIds,\n num_otu_hits, inf,0,inf)\n\n return new_otu_table\n\ndef get_log_transform(otu_table, eps=None):\n \"\"\" This function and the one in make_otu_heatmap.py are essentially the same except\n the non-negative transform at the end of this function. 
Dan Knights suggests this might\n be due to this script not being able to handle negative values, hence the transform.\n \"\"\"\n # explicit conversion to float: transform\n def f(s_v, s_id, s_md):\n return float64(s_v)\n float_otu_table = otu_table.transformSamples(f)\n\n if eps is None:\n # get the minimum among nonzero entries and divide by two\n eps = inf\n for (obs, sam) in float_otu_table.nonzero():\n eps = minimum(eps, float_otu_table.getValueByIds(obs,sam))\n if eps == inf:\n raise ValueError('All values in the OTU table are zero!')\n \n # set zero entries to eps/2 using a transform\n\n def g2(x):\n return [i if i !=0 else eps/2 for i in x]\n\n # do we have map in OTU object?\n g = lambda x : x if (x != 0) else eps/2\n def g_m(s_v, s_id, s_md):\n return asarray(map(g,s_v))\n\n eps_otu_table = float_otu_table.transformSamples(g_m)\n\n # take log of all values with transform\n def h(s_v, s_id, s_md):\n return log(s_v)\n log_otu_table = eps_otu_table.transformSamples(h)\n\n # one more transform\n min_val = inf\n for val in log_otu_table.iterSampleData():\n min_val = minimum(min_val, val.min())\n def i(s_v, s_id, s_md):\n return s_v - min_val\n\n res_otu_table = log_otu_table.transformSamples(i)\n\n return res_otu_table\n\ndef get_otu_counts(fpath):\n \"\"\"Reads the OTU table file into memory\"\"\"\n\n try:\n otu_table = parse_biom_table(open(fpath,'U'))\n except (TypeError, IOError):\n raise MissingFileError, 'OTU table file required for this analysis'\n\n if (otu_table.ObservationMetadata is None or\n otu_table.ObservationMetadata[0]['taxonomy'] is None):\n raise ValueError, '\\n\\nThe lineages are missing from the OTU table. Make sure you included the lineages for the OTUs in your OTU table. \\n'\n\n return otu_table\n\ndef generate_heatmap_plots(num_otu_hits, otu_table, otu_sort, sample_sort, dir_path,\n js_dir_path, filename,fractional_values=False):\n \"\"\"Generate HTML heatmap and javascript array for OTU counts\"\"\"\n\n #Filter by number of OTU hits\n # rows come transposed in the original code\n filtered_otu_table = filter_by_otu_hits(num_otu_hits, otu_table)\n\n if otu_sort:\n # Since the BIOM object comes back with fewer Observation_ids, we need to \n # remove those from the original sort_order\n actual_observations=filtered_otu_table.ObservationIds\n new_otu_sort_order=[]\n for i in otu_sort:\n if i in actual_observations:\n new_otu_sort_order.append(i)\n \n filtered_otu_table = filtered_otu_table.sortObservationOrder(new_otu_sort_order)\n\n # This sorts the samples by the order supplied\n if sample_sort:\n # Since the BIOM object may come back with fewer Sampleids, we need to \n # remove those from the original sample_sort\n actual_samples=filtered_otu_table.SampleIds\n new_sample_sort_order=[]\n for i in sample_sort:\n if i in actual_samples:\n new_sample_sort_order.append(i)\n \n filtered_otu_table = filtered_otu_table.sortSampleOrder(new_sample_sort_order)\n \n #Convert OTU counts into a javascript array\n js_array=create_javascript_array(filtered_otu_table, fractional_values)\n\n #Write otu filter number\n js_otu_cutoff='var otu_num_cutoff=%d;\\n' % num_otu_hits\n \n #Write js array to file\n js_filename=os.path.join(js_dir_path,filename)+'.js'\n jsfile = open(js_filename,'w')\n jsfile.write(js_otu_cutoff)\n jsfile.write(js_array)\n jsfile.close()\n\n #Write html file\n html_filename=os.path.join(dir_path,filename)+'.html'\n js_file_location='js/'+filename+'.js'\n table_html=make_html_doc(js_file_location)\n ofile = open(html_filename,'w')\n 
ofile.write(table_html)\n ofile.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39925,"cells":{"__id__":{"kind":"number","value":15676630664127,"string":"15,676,630,664,127"},"blob_id":{"kind":"string","value":"9ed21372d76cc5cecb095f3e9157eeeab67f62c3"},"directory_id":{"kind":"string","value":"2ca2f655ab504c5240546c403d9c09cba6b0f0ba"},"path":{"kind":"string","value":"/file_sharing_server.py"},"content_id":{"kind":"string","value":"bbe9a1df5037eb22310cf34b1a581e5f9021b5c0"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"caovanmanhqx2/centralised-monitoring-on-a-network"},"repo_url":{"kind":"string","value":"https://github.com/caovanmanhqx2/centralised-monitoring-on-a-network"},"snapshot_id":{"kind":"string","value":"0f0c5f0ad8fa9313e7fff62dd149da277fce1ec5"},"revision_id":{"kind":"string","value":"56bc0fb86e2ac4464c9fd165e04e8bdafc119c2c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-12-02T13:48:20.709981","string":"2021-12-02T13:48:20.709981"},"revision_date":{"kind":"timestamp","value":"2013-10-10T17:49:56","string":"2013-10-10T17:49:56"},"committer_date":{"kind":"timestamp","value":"2013-10-10T17:49:56","string":"2013-10-10T17:49:56"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport os\nimport socket\nimport pickle\nafile=[]\nbfile=[]\nc=len(bfile)\nfor subdir, dirs, files in os.walk('c:/xampp/htdocs/file_transfer/'):\n for file in files:\n if(len(afile)<25):\n file_size=os.path.getsize('c:/xampp/htdocs/file_transfer/'+file)\n afile.append(str(file))\n bfile.append(str(file_size))\n\n#s=socket.socket() # Create a socket object\n#host=socket.gethostname() # Get local machine name\n#port=12345 # Reserve a port for your service.\n#s.bind((host, port))\n\n#s.listen(5)\n\n#while True:\n #c, addr = s.accept() \n #print \"\\n\".join(afile)\n \n #c.send(pickle.dumps(afile))\n #c.close()\n\n#Create custom HTTPRequestHandler class\nclass KodeFunHTTPRequestHandler(BaseHTTPRequestHandler):\n \n #handle GET command\n def do_GET(self):\n rootdir = 'c:/xampp/htdocs/file_transfer/' #file location\n try:\n print self.path\n if self.path.endswith('.html') or self.path.endswith('.txt'):\n f = open(rootdir + self.path) #open requested file\n #file_size=os.path.getsize('rootdir + self.path')\n #print file_size\n self.send_response(200)\n self.send_header('Content-type','text-html')\n self.end_headers()\n self.wfile.write(f.read())\n f.close()\n return\n elif self.path.endswith('.jpg') or self.path.endswith('.jpeg') or 
self.path.endswith('.jpe'):\n f = open(rootdir + self.path,'rb') #open requested file\n self.send_response(200)\n self.send_header('Content-type','image/jpeg')\n self.end_headers()\n self.wfile.write(f.read())\n f.close()\n return\n elif self.path.endswith('.avi'):\n f = open(rootdir + self.path,'rb') #open requested file\n self.send_response(200)\n self.send_header('Content-type','video/x-msvideo')\n self.end_headers()\n self.wfile.write(f.read())\n f.close()\n return\n elif self.path == \"list\":\n self.send_response(200)\n self.send_header('Content-type','text-html')\n self.end_headers()\n self.wfile.write(\"\\n\".join(afile))\n #self.wfile.write(\"\\n\".join(bfile))\n return\n elif self.path == \"update\":\n uafile=\"\"\n for subdir, dirs, files in os.walk('c:/xampp/htdocs/file_transfer/'):\n for file in files: \n file_size=os.path.getsize('c:/xampp/htdocs/file_transfer/'+file)\n uafile = uafile + str(file) + \";\" + str(file_size) + \"\\n\"\n self.send_response(200)\n uafile = uafile.rstrip(\"\\n\")\n #print uafile\t\t \n self.send_header('Content-type','text-html')\n self.end_headers()\n self.wfile.write(uafile)\n return\t\t \n \n except IOError:\n self.send_error(404, 'file not found')\n \ndef run():\n print('http server is starting...')\n\n \n server_address = ('0.0.0.0', 82)\n httpd = HTTPServer(server_address, KodeFunHTTPRequestHandler)\n print('http server is running...')\n httpd.serve_forever()\n \nif __name__ == '__main__':\n run()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39926,"cells":{"__id__":{"kind":"number","value":377957143579,"string":"377,957,143,579"},"blob_id":{"kind":"string","value":"b8437d7a3e9a9e6f490b085cd7dedad8e37e6e7c"},"directory_id":{"kind":"string","value":"bd20e1698dc386e025f88de5e9905243bb9e700e"},"path":{"kind":"string","value":"/plotting/multilineplot.py"},"content_id":{"kind":"string","value":"7b9645d4f988ab42f03ce9722461a9224ec94f25"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"MrOwen/CS106"},"repo_url":{"kind":"string","value":"https://github.com/MrOwen/CS106"},"snapshot_id":{"kind":"string","value":"451dcb2104802d8c3545462debdb183dba7b20da"},"revision_id":{"kind":"string","value":"60eb718b41da959c2a4377efcce1c3f0d31a2520"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T11:06:00.444830","string":"2021-01-20T11:06:00.444830"},"revision_date":{"kind":"timestamp","value":"2013-02-18T20:47:56","string":"2013-02-18T20:47:56"},"committer_date":{"kind":"timestamp","value":"2013-02-18T20:47:56","string":"2013-02-18T20:47:56"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#multilineplot.py\nimport numpy as np\nimport pylab as pl\n\n#Make an array of x values\nx = [1, 2, 3, 4, 5]\n#Define another set of x values\nx2 = [1, 2, 4, 6, 8]\n\n# Make an array of y values\ny = [1, 4, 9, 16, 25]\n#Define another set of x values\ny2 = [2, 4, 8, 12, 16]\n\n# Use pylab to plot x and y points\np1 = pl.plot(x, y, 'ro')\n# Secondary plot\np2 = pl.plot(x2, y2, '*')\n\n# Add a plot legend\nplots = (p1, p2)\n# Lables for legend\nlabels = ('Results of y=x^2', 'Results of y=2x')\npl.legend(plots, labels, 'best', numpoints=1)\n\n# Set the labels and title for the graph\npl.xlabel(\"X-axis\")\npl.ylabel(\"Y-axis\")\npl.title(\"An interesting graph\")\n\n# Set axis limits for the graph\npl.xlim(0, 9)\npl.ylim(0, 30)\n\n# Show the plot on the screen\npl.show()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39927,"cells":{"__id__":{"kind":"number","value":15272903729883,"string":"15,272,903,729,883"},"blob_id":{"kind":"string","value":"388a67e7f4438a0680fcf88945724a545b02da3c"},"directory_id":{"kind":"string","value":"f0e6dab1f65595cddc5f4efda99a3f223450e24c"},"path":{"kind":"string","value":"/tests/api/__init__.py"},"content_id":{"kind":"string","value":"ae7a9bf49813c0ee1d9984eeb518fd4be3a5595c"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"shanzi/code-vilya"},"repo_url":{"kind":"string","value":"https://github.com/shanzi/code-vilya"},"snapshot_id":{"kind":"string","value":"9d448936b6f0c85335b0423ab95070baa35e2749"},"revision_id":{"kind":"string","value":"9e3cde7764f4bfdd0f86e95bec5c7a97458b2689"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-11T14:58:48.128059","string":"2016-09-11T14:58:48.128059"},"revision_date":{"kind":"timestamp","value":"2014-06-28T13:42:14","string":"2014-06-28T13:42:14"},"committer_date":{"kind":"timestamp","value":"2014-06-28T13:42:14","string":"2014-06-28T13:42:14"},"github_id":{"kind":"number","value":18682238,"string":"18,682,238"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2014-06-28T13:42:15","string":"2014-06-28T13:42:15"},"gha_created_at":{"kind":"timestamp","value":"2014-04-11T17:12:28","string":"2014-04-11T17:12:28"},"gha_updated_at":{"kind":"timestamp","value":"2014-06-25T10:13:21","string":"2014-06-25T10:13:21"},"gha_pushed_at":{"kind":"timestamp","value":"2014-06-28T13:42:14","string":"2014-06-28T13:42:14"},"gha_size":{"kind":"number","value":875,"string":"875"},"gha_stargazers_count":{"kind":"number","value":12,"string":"12"},"gha_forks_count":{"kind":"number","value":4,"string":"4"},"gha_open_issues_count":{"kind":"number","value":2,"string":"2"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from vilya.api import create_app\nfrom tests import VilyaAppTestCase\n\nclass VilyaApiTestCase(VilyaAppTestCase):\n \n def _create_app(self, settings):\n return create_app(settings)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39928,"cells":{"__id__":{"kind":"number","value":2534030752784,"string":"2,534,030,752,784"},"blob_id":{"kind":"string","value":"82cc81c42f71c9a2b126a631ac02a56ee2542a18"},"directory_id":{"kind":"string","value":"95e42007ed8df1837191bfda2550391b9da27d5d"},"path":{"kind":"string","value":"/example_paramiko_user.py"},"content_id":{"kind":"string","value":"97be7666e852aa354e9ca8f5cde7ff93abee4b47"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"grhawk/MyQueue"},"repo_url":{"kind":"string","value":"https://github.com/grhawk/MyQueue"},"snapshot_id":{"kind":"string","value":"83c3a74a7cf0dc8f02610bfbbbad3f84ae0a3aeb"},"revision_id":{"kind":"string","value":"c00012a168dbc0da2df4d519747b265c65e1713c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T20:11:13.515854","string":"2016-09-05T20:11:13.515854"},"revision_date":{"kind":"timestamp","value":"2014-10-29T16:53:39","string":"2014-10-29T16:53:39"},"committer_date":{"kind":"timestamp","value":"2014-10-29T16:53:39","string":"2014-10-29T16:53:39"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 15 17:03:54 2014\n\n@author: petragli\n\"\"\"\n\"\"\" analisys:ignore \"\"\"\n\n# adapted from http://stackoverflow.com/questions/3485428/creating-multiple-ssh-connections-at-a-time-using-paramiko\n\nimport signal, sys, threading\nimport paramiko\n\nCMD = 'uname -n'\n\ndef signal_cleanup(_signum, _frame):\n print '\\nCLEANUP\\n'\n sys.exit(0)\n\ndef workon(host):\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(host[0], username=host[1], password=host[2])\n _stdin, stdout, _stderr = ssh.exec_command(CMD)\n\n# for line in stdout:\n# print threading.current_thread().name, line,\n return stdout.readline()\n\ndef main():\n hosts = [['localhost','petragli','06111983'], ['128.178.134.191','riccardo','06111983']]\n\n # exit after a few seconds (see WARNINGs)\n signal.signal(signal.SIGALRM, signal_cleanup)\n signal.alarm(10)\n\n threads = [\n threading.Thread(\n target=workon,\n args=(host,),\n name='host #{}'.format(num+1)\n )\n for num,host in enumerate(hosts)\n ]\n\n\n print 'starting'\n for t in threads:\n # WARNING: daemon=True allows program to exit when main proc\n # does; otherwise we'll wait until all threads complete.\n t.daemon = True\n t.start()\n\n print 'joining'\n for t in threads:\n # WARNING: t.join() is uninterruptible; this while loop allows\n # signals\n # see: http://snakesthatbite.blogspot.com/2010/09/cpython-threading-interrupting.html\n while t.is_alive():\n t.join(timeout=0.1)\n\n print 'done!'\n\nif __name__=='__main__':\n 
main()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39929,"cells":{"__id__":{"kind":"number","value":9311489145256,"string":"9,311,489,145,256"},"blob_id":{"kind":"string","value":"3ab4a81e40f11b7aa559c62d74d8f1ca0549d994"},"directory_id":{"kind":"string","value":"73a61e58674d61d7b8c76bb2f9c60babd18d51ad"},"path":{"kind":"string","value":"/app/py/blbr/wsgis.py"},"content_id":{"kind":"string","value":"686a1eafdd7ac3ee99c6a90fb62def5de3f27d29"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"omo/blbr"},"repo_url":{"kind":"string","value":"https://github.com/omo/blbr"},"snapshot_id":{"kind":"string","value":"6c5e2824d2d05d12170a944be2311acdce394589"},"revision_id":{"kind":"string","value":"1060cf950d17140b317af2a01ef8878f4a8d04d4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-08T00:35:22.986849","string":"2016-09-08T00:35:22.986849"},"revision_date":{"kind":"timestamp","value":"2011-11-27T11:21:43","string":"2011-11-27T11:21:43"},"committer_date":{"kind":"timestamp","value":"2011-11-27T11:21:43","string":"2011-11-27T11:21:43"},"github_id":{"kind":"number","value":2749553,"string":"2,749,553"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nimport functools\nimport webapp2\n\nfrom google.appengine.api import users\n\ndef require_login(**kwargs):\n if users.get_current_user():\n return None\n redirect = kwargs.get('redirect')\n if redirect:\n return webapp2.redirect(users.create_login_url(redirect))\n resp = webapp2.Response()\n resp.status = '400 Bad Request'\n return resp\n \ndef login_required(func, **deco_kwargs):\n @functools.wraps(func)\n def decorated_view(*args, **kwargs):\n return require_login(**deco_kwargs) or func(*args, **kwargs)\n return decorated_view\n\ndef to_application(handler_classes):\n return webapp2.WSGIApplication([webapp2.Route(p.url, handler=p) for p in handler_classes])\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39930,"cells":{"__id__":{"kind":"number","value":5205500385505,"string":"5,205,500,385,505"},"blob_id":{"kind":"string","value":"df1cd414435e9dd058a76e962e2a6f7ae14a5062"},"directory_id":{"kind":"string","value":"7ff333dd18ebea4159160b07c2e281461e021e25"},"path":{"kind":"string","value":"/lib/flows/general/collectors_test.py"},"content_id":{"kind":"string","value":"764a886e420c4ee9595b4cc448150779b011186b"},"detected_licenses":{"kind":"list 
like","value":["Apache-2.0","DOC"],"string":"[\n \"Apache-2.0\",\n \"DOC\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"defaultnamehere/grr"},"repo_url":{"kind":"string","value":"https://github.com/defaultnamehere/grr"},"snapshot_id":{"kind":"string","value":"d768240ea8ffc9d557f5fe2e272937b83398b6e3"},"revision_id":{"kind":"string","value":"ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-21T19:09:18.863900","string":"2021-01-21T19:09:18.863900"},"revision_date":{"kind":"timestamp","value":"2014-12-07T01:49:53","string":"2014-12-07T01:49:53"},"committer_date":{"kind":"timestamp","value":"2014-12-07T01:49:53","string":"2014-12-07T01:49:53"},"github_id":{"kind":"number","value":27655857,"string":"27,655,857"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"Test the collector flows.\"\"\"\n\n\nimport os\n\nfrom grr.client import vfs\nfrom grr.lib import action_mocks\nfrom grr.lib import aff4\nfrom grr.lib import artifact\nfrom grr.lib import artifact_lib\nfrom grr.lib import artifact_test\nfrom grr.lib import flags\nfrom grr.lib import rdfvalue\nfrom grr.lib import test_lib\nfrom grr.lib import utils\nfrom grr.lib.flows.general import collectors\nfrom grr.lib.flows.general import transfer\nfrom grr.test_data import client_fixture\n\n# pylint: mode=test\n\n\nclass CollectorTest(artifact_test.ArtifactTest):\n pass\n\n\nclass TestArtifactCollectors(CollectorTest):\n \"\"\"Test the artifact collection mechanism with fake artifacts.\"\"\"\n\n def setUp(self):\n \"\"\"Make sure things are initialized.\"\"\"\n super(TestArtifactCollectors, self).setUp()\n self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts\n artifact_lib.ArtifactRegistry.ClearRegistry()\n self.LoadTestArtifacts()\n artifact_reg = artifact_lib.ArtifactRegistry.artifacts\n self.fakeartifact = artifact_reg[\"FakeArtifact\"]\n self.fakeartifact2 = artifact_reg[\"FakeArtifact2\"]\n\n self.output_count = 0\n\n with aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\") as fd:\n fd.Set(fd.Schema.SYSTEM(\"Linux\"))\n kb = fd.Schema.KNOWLEDGE_BASE()\n artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)\n fd.Set(kb)\n\n def tearDown(self):\n super(TestArtifactCollectors, self).tearDown()\n artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg\n self.fakeartifact.collectors = [] # Reset any Collectors\n self.fakeartifact.conditions = [] # Reset any Conditions\n\n self.fakeartifact2.collectors = [] # Reset any Collectors\n self.fakeartifact2.conditions = [] # Reset any Conditions\n\n def testInterpolateArgs(self):\n collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)\n\n collect_flow.state.Register(\"knowledge_base\", rdfvalue.KnowledgeBase())\n collect_flow.current_artifact_name = \"blah\"\n collect_flow.state.knowledge_base.MergeOrAddUser(\n 
rdfvalue.KnowledgeBaseUser(username=\"test1\"))\n collect_flow.state.knowledge_base.MergeOrAddUser(\n rdfvalue.KnowledgeBaseUser(username=\"test2\"))\n\n test_rdf = rdfvalue.KnowledgeBase()\n action_args = {\"usernames\": [\"%%users.username%%\", \"%%users.username%%\"],\n \"nointerp\": \"asdfsdf\", \"notastring\": test_rdf}\n kwargs = collect_flow.InterpolateDict(action_args)\n self.assertItemsEqual(kwargs[\"usernames\"],\n [\"test1\", \"test2\", \"test1\", \"test2\"])\n self.assertEqual(kwargs[\"nointerp\"], \"asdfsdf\")\n self.assertEqual(kwargs[\"notastring\"], test_rdf)\n\n # We should be using an array since users.username will expand to multiple\n # values.\n self.assertRaises(ValueError, collect_flow.InterpolateDict,\n {\"bad\": \"%%users.username%%\"})\n\n list_args = collect_flow.InterpolateList([\"%%users.username%%\",\n \"%%users.username%%aa\"])\n self.assertItemsEqual(list_args, [\"test1\", \"test2\", \"test1aa\", \"test2aa\"])\n\n list_args = collect_flow.InterpolateList([\"one\"])\n self.assertEqual(list_args, [\"one\"])\n\n def testGrepRegexCombination(self):\n collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)\n self.assertEqual(collect_flow._CombineRegex([r\"simple\"]),\n \"simple\")\n self.assertEqual(collect_flow._CombineRegex([\"a\", \"b\"]),\n \"(a)|(b)\")\n self.assertEqual(collect_flow._CombineRegex([\"a\", \"b\", \"c\"]),\n \"(a)|(b)|(c)\")\n self.assertEqual(collect_flow._CombineRegex([\"a|b\", \"[^_]b\", \"c|d\"]),\n \"(a|b)|([^_]b)|(c|d)\")\n\n def testGrep(self):\n class MockCallFlow(object):\n\n def CallFlow(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n mock_call_flow = MockCallFlow()\n with utils.Stubber(collectors.ArtifactCollectorFlow, \"CallFlow\",\n mock_call_flow.CallFlow):\n\n collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)\n collect_flow.state.Register(\"knowledge_base\", rdfvalue.KnowledgeBase())\n collect_flow.current_artifact_name = \"blah\"\n collect_flow.state.knowledge_base.MergeOrAddUser(\n rdfvalue.KnowledgeBaseUser(username=\"test1\"))\n collect_flow.state.knowledge_base.MergeOrAddUser(\n rdfvalue.KnowledgeBaseUser(username=\"test2\"))\n\n collector = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.GREP,\n args={\"path_list\": [\"/etc/passwd\"],\n \"content_regex_list\": [r\"^a%%users.username%%b$\"]})\n collect_flow.Grep(collector, rdfvalue.PathSpec.PathType.TSK)\n\n conditions = mock_call_flow.kwargs[\"conditions\"]\n self.assertEqual(len(conditions), 1)\n regexes = conditions[0].contents_regex_match.regex.SerializeToString()\n self.assertItemsEqual(regexes.split(\"|\"), [\"(^atest1b$)\", \"(^atest2b$)\"])\n self.assertEqual(mock_call_flow.kwargs[\"paths\"], [\"/etc/passwd\"])\n\n def testGetArtifact1(self):\n \"\"\"Test we can get a basic artifact.\"\"\"\n\n client_mock = action_mocks.ActionMock(\"TransferBuffer\", \"StatFile\", \"Find\",\n \"FingerprintFile\", \"HashBuffer\")\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Linux\"))\n client.Flush()\n\n # Dynamically add a Collector specifying the base path.\n file_path = os.path.join(self.base_path, \"test_img.dd\")\n coll1 = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.FILE,\n args={\"path_list\": [file_path]})\n self.fakeartifact.collectors.append(coll1)\n\n artifact_list = [\"FakeArtifact\"]\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list, 
use_tsk=False,\n token=self.token, client_id=self.client_id\n ):\n pass\n\n # Test the AFF4 file that was created.\n fd1 = aff4.FACTORY.Open(\"%s/fs/os/%s\" % (self.client_id, file_path),\n token=self.token)\n fd2 = open(file_path)\n fd2.seek(0, 2)\n\n self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))\n\n def testRunGrrClientActionArtifact(self):\n \"\"\"Test we can get a GRR client artifact.\"\"\"\n client_mock = action_mocks.ActionMock(\"ListProcesses\")\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Linux\"))\n client.Flush()\n\n coll1 = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,\n args={\"client_action\": r\"ListProcesses\"})\n self.fakeartifact.collectors.append(coll1)\n artifact_list = [\"FakeArtifact\"]\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list,\n token=self.token, client_id=self.client_id,\n output=\"test_artifact\"\n ):\n pass\n\n # Test the AFF4 file that was created.\n fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(\"test_artifact\"),\n token=self.token)\n self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))\n self.assertTrue(len(fd) > 5)\n\n def testRunGrrClientActionArtifactSplit(self):\n \"\"\"Test that artifacts get split into separate collections.\"\"\"\n client_mock = action_mocks.ActionMock(\"ListProcesses\", \"StatFile\")\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Linux\"))\n client.Flush()\n\n coll1 = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,\n args={\"client_action\": r\"ListProcesses\"})\n self.fakeartifact.collectors.append(coll1)\n self.fakeartifact2.collectors.append(coll1)\n artifact_list = [\"FakeArtifact\", \"FakeArtifact2\"]\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list,\n token=self.token, client_id=self.client_id,\n output=\"test_artifact\",\n split_output_by_artifact=True):\n pass\n\n # Check that we got two separate collections based on artifact name\n fd = aff4.FACTORY.Open(rdfvalue.RDFURN(\n self.client_id).Add(\"test_artifact_FakeArtifact\"),\n token=self.token)\n self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))\n self.assertTrue(len(fd) > 5)\n\n fd = aff4.FACTORY.Open(rdfvalue.RDFURN(\n self.client_id).Add(\"test_artifact_FakeArtifact2\"),\n token=self.token)\n self.assertTrue(len(fd) > 5)\n self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))\n\n def testConditions(self):\n \"\"\"Test we can get a GRR client artifact with conditions.\"\"\"\n # Run with false condition.\n client_mock = action_mocks.ActionMock(\"ListProcesses\")\n coll1 = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,\n args={\"client_action\": \"ListProcesses\"},\n conditions=[\"os == 'Windows'\"])\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"AFF4Volume\")\n\n # Now run with matching or condition.\n coll1.conditions = [\"os == 'Linux' or os == 'Windows'\"]\n self.fakeartifact.collectors = []\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"RDFValueCollection\")\n\n # Now run with impossible or condition.\n coll1.conditions.append(\"os == 
'NotTrue'\")\n self.fakeartifact.collectors = []\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"AFF4Volume\")\n\n def testSupportedOS(self):\n \"\"\"Test supported_os inside the collector object.\"\"\"\n # Run with false condition.\n client_mock = action_mocks.ActionMock(\"ListProcesses\")\n coll1 = rdfvalue.Collector(\n collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,\n args={\"client_action\": \"ListProcesses\"}, supported_os=[\"Windows\"])\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"AFF4Volume\")\n\n # Now run with matching or condition.\n coll1.conditions = []\n coll1.supported_os = [\"Linux\", \"Windows\"]\n self.fakeartifact.collectors = []\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"RDFValueCollection\")\n\n # Now run with impossible or condition.\n coll1.conditions = [\"os == 'Linux' or os == 'Windows'\"]\n coll1.supported_os = [\"NotTrue\"]\n self.fakeartifact.collectors = []\n self.fakeartifact.collectors.append(coll1)\n fd = self._RunClientActionArtifact(client_mock, [\"FakeArtifact\"])\n self.assertEqual(fd.__class__.__name__, \"AFF4Volume\")\n\n def _RunClientActionArtifact(self, client_mock, artifact_list):\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Linux\"))\n client.Flush()\n self.output_count += 1\n output = \"test_artifact_%d\" % self.output_count\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list,\n token=self.token, client_id=self.client_id,\n output=output\n ):\n pass\n\n # Test the AFF4 file was not created, as flow should not have run due to\n # conditions.\n fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(output),\n token=self.token)\n return fd\n\n\nclass TestArtifactCollectorsInteractions(CollectorTest):\n \"\"\"Test the collection of artifacts.\n\n This class loads both real and test artifacts to test the interaction of badly\n defined artifacts with real artifacts.\n \"\"\"\n\n def setUp(self):\n \"\"\"Add test artifacts to existing registry.\"\"\"\n super(TestArtifactCollectorsInteractions, self).setUp()\n self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts\n self.LoadTestArtifacts()\n\n def tearDown(self):\n super(TestArtifactCollectorsInteractions, self).tearDown()\n artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg\n\n def testProcessCollectedArtifacts(self):\n \"\"\"Test downloading files from artifacts.\"\"\"\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Windows\"))\n client.Set(client.Schema.OS_VERSION(\"6.2\"))\n client.Flush()\n\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler\n\n client_mock = action_mocks.ActionMock(\"TransferBuffer\", \"StatFile\", \"Find\",\n \"HashBuffer\", \"FingerprintFile\",\n \"ListDirectory\")\n\n # Get KB initialized\n for _ in test_lib.TestFlowHelper(\n \"KnowledgeBaseInitializationFlow\", client_mock,\n client_id=self.client_id, token=self.token):\n pass\n\n artifact_list = [\"WindowsPersistenceMechanismFiles\"]\n 
with test_lib.Instrument(\n transfer.MultiGetFile, \"Start\") as getfile_instrument:\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list,\n token=self.token,\n client_id=self.client_id,\n output=\"analysis/{p}/{u}-{t}\",\n split_output_by_artifact=True):\n pass\n\n # Check MultiGetFile got called for our runkey files\n # TODO(user): RunKeys for S-1-5-20 are not found because users.sid only\n # expands to users with profiles.\n pathspecs = getfile_instrument.args[0][0].args.pathspecs\n self.assertItemsEqual([x.path for x in pathspecs],\n [u\"C:\\\\Windows\\\\TEMP\\\\A.exe\"])\n\n artifact_list = [\"BadPathspecArtifact\"]\n with test_lib.Instrument(\n transfer.MultiGetFile, \"Start\") as getfile_instrument:\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=artifact_list,\n token=self.token,\n client_id=self.client_id,\n output=\"analysis/{p}/{u}-{t}\",\n split_output_by_artifact=True):\n pass\n\n self.assertFalse(getfile_instrument.args)\n\n\nclass TestArtifactCollectorsRealArtifacts(CollectorTest):\n \"\"\"Test the collection of real artifacts.\"\"\"\n\n def _CheckDriveAndRoot(self):\n client_mock = action_mocks.ActionMock(\"StatFile\", \"ListDirectory\")\n\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=[\n \"SystemDriveEnvironmentVariable\"],\n token=self.token, client_id=self.client_id,\n output=\"testsystemdrive\"):\n pass\n\n fd = aff4.FACTORY.Open(rdfvalue.RDFURN(\n self.client_id).Add(\"testsystemdrive\"), token=self.token)\n self.assertEqual(len(fd), 1)\n self.assertEqual(str(fd[0]), \"C:\")\n\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\", client_mock,\n artifact_list=[\"SystemRoot\"],\n token=self.token, client_id=self.client_id,\n output=\"testsystemroot\"):\n pass\n\n fd = aff4.FACTORY.Open(\n rdfvalue.RDFURN(self.client_id).Add(\"testsystemroot\"), token=self.token)\n self.assertEqual(len(fd), 1)\n # Filesystem gives WINDOWS, registry gives Windows\n self.assertTrue(str(fd[0]) in [r\"C:\\Windows\", r\"C:\\WINDOWS\"])\n\n def testSystemDriveArtifact(self):\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Windows\"))\n client.Set(client.Schema.OS_VERSION(\"6.2\"))\n client.Flush()\n\n class BrokenClientMock(action_mocks.ActionMock):\n\n def StatFile(self, _):\n raise IOError\n\n def ListDirectory(self, _):\n raise IOError\n\n # No registry, broken filesystem, this should just raise.\n with self.assertRaises(RuntimeError):\n for _ in test_lib.TestFlowHelper(\"ArtifactCollectorFlow\",\n BrokenClientMock(), artifact_list=[\n \"SystemDriveEnvironmentVariable\"],\n token=self.token,\n client_id=self.client_id,\n output=\"testsystemdrive\"):\n pass\n\n # No registry, so this should use the fallback flow\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture\n self._CheckDriveAndRoot()\n\n # Registry is present, so this should use the regular artifact collection\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler\n self._CheckDriveAndRoot()\n\n def testRunWMIComputerSystemProductArtifact(self):\n\n class WMIActionMock(action_mocks.ActionMock):\n\n def WmiQuery(self, _):\n return client_fixture.WMI_CMP_SYS_PRD\n\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Windows\"))\n client.Set(client.Schema.OS_VERSION(\"6.2\"))\n client.Flush()\n\n 
client_mock = WMIActionMock()\n for _ in test_lib.TestFlowHelper(\n \"ArtifactCollectorFlow\", client_mock,\n artifact_list=[\"WMIComputerSystemProduct\"], token=self.token,\n client_id=self.client_id,\n dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,\n store_results_in_aff4=True):\n pass\n\n client = aff4.FACTORY.Open(self.client_id, token=self.token,)\n hardware = client.Get(client.Schema.HARDWARE_INFO)\n self.assertTrue(isinstance(hardware, rdfvalue.HardwareInfo))\n self.assertEqual(str(hardware.serial_number), \"2RXYYZ1\")\n\n def testRunWMIArtifact(self):\n\n class WMIActionMock(action_mocks.ActionMock):\n\n def WmiQuery(self, _):\n return client_fixture.WMI_SAMPLE\n\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Windows\"))\n client.Set(client.Schema.OS_VERSION(\"6.2\"))\n client.Flush()\n\n client_mock = WMIActionMock()\n for _ in test_lib.TestFlowHelper(\n \"ArtifactCollectorFlow\", client_mock, artifact_list=[\"WMILogicalDisks\"],\n token=self.token, client_id=self.client_id,\n dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,\n store_results_in_aff4=True):\n pass\n\n # Test that we set the client VOLUMES attribute\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n volumes = client.Get(client.Schema.VOLUMES)\n self.assertEqual(len(volumes), 2)\n for result in volumes:\n self.assertTrue(isinstance(result, rdfvalue.Volume))\n self.assertTrue(result.windows.drive_letter in [\"Z:\", \"C:\"])\n if result.windows.drive_letter == \"C:\":\n self.assertAlmostEqual(result.FreeSpacePercent(), 76.142, delta=0.001)\n self.assertEqual(result.Name(), \"C:\")\n elif result.windows.drive_letter == \"Z:\":\n self.assertEqual(result.Name(), \"homefileshare$\")\n self.assertAlmostEqual(result.FreeSpacePercent(), 58.823, delta=0.001)\n\n def testRetrieveDependencies(self):\n \"\"\"Test getting an artifact without a KB using retrieve_depdendencies.\"\"\"\n client = aff4.FACTORY.Open(self.client_id, token=self.token, mode=\"rw\")\n client.Set(client.Schema.SYSTEM(\"Windows\"))\n client.Set(client.Schema.OS_VERSION(\"6.2\"))\n client.Flush()\n\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler\n vfs.VFS_HANDLERS[\n rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler\n\n client_mock = action_mocks.ActionMock(\"TransferBuffer\", \"StatFile\", \"Find\",\n \"HashBuffer\", \"FingerprintFile\",\n \"ListDirectory\")\n\n artifact_list = [\"WinDirEnvironmentVariable\"]\n for _ in test_lib.TestFlowHelper(\n \"ArtifactCollectorFlow\", client_mock, artifact_list=artifact_list,\n token=self.token, client_id=self.client_id,\n dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW,\n output=\"testRetrieveDependencies\"):\n pass\n\n output = aff4.FACTORY.Open(self.client_id.Add(\"testRetrieveDependencies\"),\n token=self.token)\n self.assertEqual(len(output), 1)\n self.assertEqual(output[0], r\"C:\\Windows\")\n\n\ndef main(argv):\n # Run the full test suite\n test_lib.GrrTestProgram(argv=argv)\n\nif __name__ == \"__main__\":\n 
flags.StartMain(main)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39931,"cells":{"__id__":{"kind":"number","value":2851858308265,"string":"2,851,858,308,265"},"blob_id":{"kind":"string","value":"deff41835049343dd22af89b6e777e63d64898d2"},"directory_id":{"kind":"string","value":"8d5aa8c52ced487031c6eb3bc9df1ac007aa50f0"},"path":{"kind":"string","value":"/pybloomfire/bloomfire.py"},"content_id":{"kind":"string","value":"afc08947d92d5a96e04507a9f30055a5068d25d4"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Sendside/pybloomfire"},"repo_url":{"kind":"string","value":"https://github.com/Sendside/pybloomfire"},"snapshot_id":{"kind":"string","value":"960da2b8110c3aefff990b472e2d0a2b7db0b99e"},"revision_id":{"kind":"string","value":"9ec43485916d9f1de346bac33e6197c47982144e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-28T02:25:51.010906","string":"2021-05-28T02:25:51.010906"},"revision_date":{"kind":"timestamp","value":"2013-01-22T03:47:01","string":"2013-01-22T03:47:01"},"committer_date":{"kind":"timestamp","value":"2013-01-22T03:47:01","string":"2013-01-22T03:47:01"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import json\nfrom urllib import urlencode\nimport urllib2\n\nclass API(object):\n \"\"\"\n API object that provides a Python binding to the Bloomfire REST API: http://bloomfire.com\n \"\"\"\n \n def __init__(self, subdomain, api_key, auth_email, auth_password):\n self._api_key = api_key\n self._endpoint_prefix = 'https://%s.bloomfire.com/api/' % subdomain\n self._authenticate(auth_email, auth_password)\n \n def _authenticate(self, auth_email, auth_password):\n passman = urllib2.HTTPPasswordMgrWithDefaultRealm()\n passman.add_password(None, self._endpoint_prefix, auth_email, auth_password)\n proxyhandler = urllib2.ProxyHandler()\n authhandler = urllib2.HTTPBasicAuthHandler(passman)\n opener = urllib2.build_opener(authhandler, proxyhandler)\n urllib2.install_opener(opener)\n \n def get(self, api_name, kwargs=None):\n if kwargs:\n endpoint_query = '?%s' % urlencode(kwargs)\n else:\n endpoint_query = ''\n return self._call('%s%s.json%s' % (self._endpoint_prefix, api_name, endpoint_query))\n \n def post(self, api_name, kwargs=None):\n req = urllib2.Request(url='%s%s.json' % (self._endpoint_prefix, api_name), data=urlencode(kwargs))\n return self._call(req)\n \n def _call(self, request):\n \n try:\n result = urllib2.urlopen(request)\n response_dict = json.loads(result.read())\n except urllib2.HTTPError as http_error:\n response_dict = json.loads(http_error.read())\n return 
response_dict\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39932,"cells":{"__id__":{"kind":"number","value":6124623397321,"string":"6,124,623,397,321"},"blob_id":{"kind":"string","value":"ba03f91b56c24492d042310729f4fb140efce3a1"},"directory_id":{"kind":"string","value":"ee7925f2d461a075aac8f2b29dfa32a30561bc7a"},"path":{"kind":"string","value":"/catch_web.py"},"content_id":{"kind":"string","value":"1d4b63a153c532d9e63c68a7d0bc405eff6d1ab6"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"qq40660/weixinGetPostDemo"},"repo_url":{"kind":"string","value":"https://github.com/qq40660/weixinGetPostDemo"},"snapshot_id":{"kind":"string","value":"d01576d4a7aa971bcad0ba2ba73d2ba31b090837"},"revision_id":{"kind":"string","value":"f6cc8894dd45c9c68bb99847bb023b97051b286c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T23:58:31.113535","string":"2021-01-17T23:58:31.113535"},"revision_date":{"kind":"timestamp","value":"2013-03-10T01:36:33","string":"2013-03-10T01:36:33"},"committer_date":{"kind":"timestamp","value":"2013-03-10T01:36:33","string":"2013-03-10T01:36:33"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\n\nimport urllib2\nfrom bs4 import BeautifulSoup\n\n\ndef catch_url( url ):\n content = urllib2.urlopen( url )\n soup = BeautifulSoup( content )\n return soup"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39933,"cells":{"__id__":{"kind":"number","value":7550552544500,"string":"7,550,552,544,500"},"blob_id":{"kind":"string","value":"eda253ff69e275dd710031146742bc06aa2f495d"},"directory_id":{"kind":"string","value":"2de99713a3abf6e7780a0d9ea3aec0b1de51e242"},"path":{"kind":"string","value":"/nxtdemo.py"},"content_id":{"kind":"string","value":"305969aa7e5472df60125511486fd8497582214a"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pepijndevos/nxtwrite"},"repo_url":{"kind":"string","value":"https://github.com/pepijndevos/nxtwrite"},"snapshot_id":{"kind":"string","value":"e518d0f8b98168c465c4bb37cf75d6690a51ddd0"},"revision_id":{"kind":"string","value":"e3b99d7387427e558b6ac109d062a29623a1114b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-16T10:29:19.539805","string":"2016-09-16T10:29:19.539805"},"revision_date":{"kind":"timestamp","value":"2011-12-12T13:18:07","string":"2011-12-12T13:18:07"},"committer_date":{"kind":"timestamp","value":"2011-12-12T13:18:07","string":"2011-12-12T13:18:07"},"github_id":{"kind":"number","value":2916047,"string":"2,916,047"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from nxt import locator, motor\nimport nxtprinter\nb = locator.find_one_brick()\np = nxtprinter.NxtPrinter(b, motor.PORT_B, motor.PORT_A, motor.PORT_C)\np.write(raw_input(\"print: \"), 15)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39934,"cells":{"__id__":{"kind":"number","value":6287832137724,"string":"6,287,832,137,724"},"blob_id":{"kind":"string","value":"228c73504d3ee0b8d536f166de6b7d93cc6f0ac6"},"directory_id":{"kind":"string","value":"63c89d672cb4df85e61d3ba9433f4c3ca39810c8"},"path":{"kind":"string","value":"/python/testdata/launchpad/lib/lp/services/librarianserver/tests/test_db_outage.py"},"content_id":{"kind":"string","value":"833f61d25a7b5a5ed82190a0c68ee76f0fe87cbe"},"detected_licenses":{"kind":"list like","value":["AGPL-3.0-only","AGPL-3.0-or-later"],"string":"[\n \"AGPL-3.0-only\",\n 
\"AGPL-3.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"abramhindle/UnnaturalCodeFork"},"repo_url":{"kind":"string","value":"https://github.com/abramhindle/UnnaturalCodeFork"},"snapshot_id":{"kind":"string","value":"de32d2f31ed90519fd4918a48ce94310cef4be97"},"revision_id":{"kind":"string","value":"e205b94b2c66672d264a08a10bb7d94820c9c5ca"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T10:21:36.093911","string":"2021-01-19T10:21:36.093911"},"revision_date":{"kind":"timestamp","value":"2014-03-13T02:37:14","string":"2014-03-13T02:37:14"},"committer_date":{"kind":"timestamp","value":"2014-03-13T02:37:14","string":"2014-03-13T02:37:14"},"github_id":{"kind":"number","value":17692378,"string":"17,692,378"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"string","value":"AGPL-3.0"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2020-07-24T05:39:10","string":"2020-07-24T05:39:10"},"gha_created_at":{"kind":"timestamp","value":"2014-03-13T02:52:20","string":"2014-03-13T02:52:20"},"gha_updated_at":{"kind":"timestamp","value":"2018-01-05T07:03:31","string":"2018-01-05T07:03:31"},"gha_pushed_at":{"kind":"timestamp","value":"2014-03-13T02:53:59","string":"2014-03-13T02:53:59"},"gha_size":{"kind":"number","value":24904,"string":"24,904"},"gha_stargazers_count":{"kind":"number","value":0,"string":"0"},"gha_forks_count":{"kind":"number","value":3,"string":"3"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"bool","value":false,"string":"false"},"gha_disabled":{"kind":"bool","value":false,"string":"false"},"content":{"kind":"string","value":"# Copyright 2011 Canonical Ltd. 
This software is licensed under the\n# GNU Affero General Public License version 3 (see the file LICENSE).\n\n\"\"\"Test behavior of the Librarian during a database outage.\n\nDatabase outages happen by accident and during fastdowntime deployments.\"\"\"\n\n__metaclass__ = type\n\nfrom cStringIO import StringIO\nimport urllib2\n\nfrom fixtures import Fixture\n\nfrom lp.services.librarian.client import LibrarianClient\nfrom lp.services.librarianserver.testing.server import LibrarianServerFixture\nfrom lp.testing import TestCase\nfrom lp.testing.fixture import PGBouncerFixture\nfrom lp.testing.layers import (\n BaseLayer,\n DatabaseFunctionalLayer,\n )\n\n\nclass PGBouncerLibrarianLayer(DatabaseFunctionalLayer):\n \"\"\"Custom layer for TestLibrarianDBOutage.\n\n We are using a custom layer instead of standard setUp/tearDown to\n avoid the lengthy Librarian startup time, and to cope with undoing\n changes made to BaseLayer.config_fixture to allow access to the\n Librarian we just started up.\n \"\"\"\n pgbouncer_fixture = None\n librarian_fixture = None\n\n @classmethod\n def setUp(cls):\n # Fixture to hold other fixtures.\n cls._fixture = Fixture()\n cls._fixture.setUp()\n\n cls.pgbouncer_fixture = PGBouncerFixture()\n # Install the PGBouncer fixture so we shut it down to\n # create database outages.\n cls._fixture.useFixture(cls.pgbouncer_fixture)\n\n # Bring up the Librarian, which will be connecting via\n # pgbouncer.\n cls.librarian_fixture = LibrarianServerFixture(\n BaseLayer.config_fixture)\n cls._fixture.useFixture(cls.librarian_fixture)\n\n @classmethod\n def tearDown(cls):\n cls.pgbouncer_fixture = None\n cls.librarian_fixture = None\n cls._fixture.cleanUp()\n\n @classmethod\n def testSetUp(cls):\n cls.pgbouncer_fixture.start()\n\n\nclass TestLibrarianDBOutage(TestCase):\n layer = PGBouncerLibrarianLayer\n\n def setUp(self):\n super(TestLibrarianDBOutage, self).setUp()\n self.pgbouncer = PGBouncerLibrarianLayer.pgbouncer_fixture\n self.client = LibrarianClient()\n\n # Add a file to the Librarian so we can download it.\n self.url = self._makeLibraryFileUrl()\n\n def _makeLibraryFileUrl(self):\n data = 'whatever'\n return self.client.remoteAddFile(\n 'foo.txt', len(data), StringIO(data), 'text/plain')\n\n def getErrorCode(self):\n # We need to talk to every Librarian thread to ensure all the\n # Librarian database connections are in a known state.\n # XXX StuartBishop 2011-09-01 bug=840046: 20 might be overkill\n # for the test run, but we have no real way of knowing how many\n # connections are in use.\n num_librarian_threads = 20\n codes = set()\n for count in range(num_librarian_threads):\n try:\n urllib2.urlopen(self.url).read()\n codes.add(200)\n except urllib2.HTTPError as error:\n codes.add(error.code)\n self.assertTrue(len(codes) == 1, 'Mixed responses: %s' % str(codes))\n return codes.pop()\n\n def test_outage(self):\n # Everything should be working fine to start with.\n self.assertEqual(self.getErrorCode(), 200)\n\n # When the outage kicks in, we start getting 503 responses\n # instead of 200 and 404s.\n self.pgbouncer.stop()\n self.assertEqual(self.getErrorCode(), 503)\n\n # When the outage is over, things are back to normal.\n self.pgbouncer.start()\n self.assertEqual(self.getErrorCode(), 
200)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39935,"cells":{"__id__":{"kind":"number","value":12902081805088,"string":"12,902,081,805,088"},"blob_id":{"kind":"string","value":"716ca55f82e717938798a239ef7c79ec72c9feff"},"directory_id":{"kind":"string","value":"acf8fe77e599f8372adf4fc971012394715795d6"},"path":{"kind":"string","value":"/flask/lib/python2.7/site-packages/egginst/utils.py"},"content_id":{"kind":"string","value":"cc97bb0514962a5dffd4961cc61c7e5602623998"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"shaheershantk/Blog-Engine-Using-Flask"},"repo_url":{"kind":"string","value":"https://github.com/shaheershantk/Blog-Engine-Using-Flask"},"snapshot_id":{"kind":"string","value":"3e2f1457a59f282c336bbb63ff48171f938f5108"},"revision_id":{"kind":"string","value":"450e76a8bde0bd702d995fa7bb746ed920917f98"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T19:42:03.401554","string":"2021-01-01T19:42:03.401554"},"revision_date":{"kind":"timestamp","value":"2014-11-10T15:01:08","string":"2014-11-10T15:01:08"},"committer_date":{"kind":"timestamp","value":"2014-11-10T15:01:08","string":"2014-11-10T15:01:08"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import errno\nimport sys\nimport os\nimport shutil\nimport stat\nimport tempfile\nimport zipfile\n\nfrom os.path import basename, isdir, isfile, islink, join\n\nif sys.version_info[:2] < (2, 7):\n class ZipFile(zipfile.ZipFile):\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\nelse:\n ZipFile = zipfile.ZipFile\n\non_win = bool(sys.platform == 'win32')\n\nif on_win:\n bin_dir_name = 'Scripts'\n rel_site_packages = r'Lib\\site-packages'\nelse:\n bin_dir_name = 'bin'\n rel_site_packages = 'lib/python%i.%i/site-packages' % sys.version_info[:2]\n\nZIP_SOFTLINK_ATTRIBUTE_MAGIC = 0xA1ED0000L\n\ndef rm_empty_dir(path):\n \"\"\"\n Remove the directory `path` if it is a directory and empty.\n If the directory does not exist or is not empty, do nothing.\n \"\"\"\n try:\n os.rmdir(path)\n except OSError: # directory might not exist or not be empty\n pass\n\n\ndef rm_rf(path, verbose=False):\n if not on_win and islink(path):\n # Note that we have to check if the destination is a link because\n # exists('/path/to/dead-link') will return False, although\n # islink('/path/to/dead-link') is True.\n if verbose:\n print \"Removing: %r (link)\" % path\n os.unlink(path)\n\n elif isfile(path):\n if verbose:\n print \"Removing: %r (file)\" % path\n if on_win:\n try:\n os.unlink(path)\n except (WindowsError, IOError):\n os.rename(path, 
join(tempfile.mkdtemp(), basename(path)))\n else:\n os.unlink(path)\n\n elif isdir(path):\n if verbose:\n print \"Removing: %r (directory)\" % path\n if on_win:\n try:\n shutil.rmtree(path)\n except (WindowsError, IOError):\n os.rename(path, join(tempfile.mkdtemp(), basename(path)))\n else:\n shutil.rmtree(path)\n\n\ndef get_executable(prefix):\n if on_win:\n paths = [prefix, join(prefix, bin_dir_name)]\n for path in paths:\n executable = join(path, 'python.exe')\n if isfile(executable):\n return executable\n else:\n path = join(prefix, bin_dir_name, 'python')\n if isfile(path):\n from subprocess import Popen, PIPE\n cmd = [path, '-c', 'import sys;print sys.executable']\n p = Popen(cmd, stdout=PIPE)\n return p.communicate()[0].strip()\n return sys.executable\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%i B' % n\n k = (n - 1) / 1024 + 1\n if k < 1024:\n return '%i KB' % k\n return '%.2f MB' % (float(n) / (2**20))\n\ndef makedirs(path):\n \"\"\"Recursive directory creation function that does not fail if the\n directory already exists.\"\"\"\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\ndef ensure_dir(path):\n \"\"\"\n Create the parent directory of the give path, recursively is necessary.\n \"\"\"\n makedirs(os.path.dirname(path))\n\ndef is_zipinfo_symlink(zip_info):\n \"\"\"Return True if the given zip_info instance refers to a symbolic link.\"\"\"\n return zip_info.external_attr == ZIP_SOFTLINK_ATTRIBUTE_MAGIC\n\ndef is_zipinfo_dir(zip_info):\n \"\"\"Returns True if the given zip_info refers to a directory.\"\"\"\n return stat.S_ISDIR(zip_info.external_attr >> 16)\n\ndef zip_write_symlink(fp, link_name, source):\n \"\"\"Add to the zipfile the given link_name as a softlink to source\n\n Parameters\n ----------\n fp: file object\n ZipFile instance\n link_name: str\n Path of the symlink\n source: str\n Path the symlink points to (the output of os.readlink)\n \"\"\"\n zip_info = zipfile.ZipInfo(link_name)\n zip_info.create_system = 3\n zip_info.external_attr = ZIP_SOFTLINK_ATTRIBUTE_MAGIC\n fp.writestr(zip_info, source)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39936,"cells":{"__id__":{"kind":"number","value":3427383908791,"string":"3,427,383,908,791"},"blob_id":{"kind":"string","value":"1c9110db73e8870ac65a6faa18e3c87d906b8b6e"},"directory_id":{"kind":"string","value":"16f95a43b0c0bf86b9a0c50f5bf2639bdb8b07d0"},"path":{"kind":"string","value":"/KWebLoc/atualizadores/anbima/anbima_MAPEAMENTO.py"},"content_id":{"kind":"string","value":"c234a86d17bc39c2190cdccc3371cd220e05a747"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tigum27/KondorPythonProjects"},"repo_url":{"kind":"string","value":"https://github.com/tigum27/KondorPythonProjects"},"snapshot_id":{"kind":"string","value":"33432b8d263f20d96d0ef678aaa12dfacbb300c4"},"revision_id":{"kind":"string","value":"132dd7e50d987c0aa8f5aa60ca0ccefb7c7fefe0"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-07-09T12:10:35","string":"2015-07-09T12:10:35"},"revision_date":{"kind":"timestamp","value":"2014-09-03T20:11:48","string":"2014-09-03T20:11:48"},"committer_date":{"kind":"timestamp","value":"2014-09-03T20:11:48","string":"2014-09-03T20:11:48"},"github_id":{"kind":"number","value":23467645,"string":"23,467,645"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: cp1252 -*-\nipcaBase = 1614.62\nigpmBase = 1000\nfrom QuantLib import Schedule,Date,Brazil,Period,Months,Following,DateGeneration,Business252\nimport MySQLdb\n\nfrom sympy import Symbol\nfrom sympy.solvers import solve\nimport inspect as IS\nclass SqlQuery:\n def __init__(self):\n self.db = MySQLdb.connect(host=\"192.168.180.249\",user=\"root\",passwd=\"marisco\",db=\"posicoes\")\n \n def execute(self,query):\n cur = self.db.cursor()\n cur.execute(query)\n return cur\nclass mySchedule:\n def __init__(self,settlementDate,maturityDate,per,calendar = Brazil()):\n self.schedule = Schedule(settlementDate,maturityDate,Period(per,Months),calendar,Following,Following,DateGeneration.Backward,False)\n self.cupons = [self.schedule.__getitem__(x) for x in range(self.schedule.__len__())]\n self.cupons.pop(0)\nclass QLDate:\n @staticmethod\n def toString(date = Date(),sep = '-'):\n day = date.dayOfMonth()\n year = date.year()\n month = int(date.month())\n return str(year)+sep+str(month) +sep+str(day) \n @staticmethod\n def fromString(date,sep = '-',ordem = 'ymd'):\n \n list1 = date.split(sep)\n day = int(list1[ordem.find('d')])\n month = int(list1[ordem.find('m')])\n year = int(list1[ordem.find('y')])\n return Date(day,month,year)\n \ndef GoalSeek(fction,pfction,vari,target,lextreme=0.00001,hextreme=100000,erro=0.00000001,maxit=100):\n argos=IS.getargspec(fction) \n nargos=[pfction[x] for x in argos[0]] \n vl=lextreme\n vh=hextreme\n vm=(vl+vh)/2\n pfction[vari]=vl\n nargos=[pfction[x] for x in argos[0]]\n plow=fction(*nargos)-target\n pfction[vari]=vh\n nargos=[pfction[x] for x in argos[0]]\n phigh=fction(*nargos)-target\n pfction[vari]=vm\n nargos=[pfction[x] for x in argos[0]]\n pmid=fction(*nargos)-target\n i=0\n while abs(pmid)>=erro and i<=maxit:\n if pmid*phigh>0:\n vh=vm\n vm=(vl+vh)/2\n else:\n vl=vm\n vm=(vl+vh)/2\n pfction[vari]=vl\n nargos=[pfction[x] for x in argos[0]]\n plow=fction(*nargos)-target\n pfction[vari]=vh\n nargos=[pfction[x] for x in argos[0]]\n phigh=fction(*nargos)-target\n pfction[vari]=vm\n nargos=[pfction[x] for x in argos[0]]\n pmid=fction(*nargos)-target\n i=+1\n if 
i>=maxit and abs(pmid)>=erro:\n return 0\n else:\n return vm\n \ndef interpolar(x,y,i):\n n = 0 \n xx = x[n] \n while(i>xx):\n n = n+1\n xx =x[n]\n A = ((1+y[n-1]/100)**(x[n-1]/252.0))\n B = ((1+y[n]/100)**(x[n]/252.0))\n Z = A*((B/A)**(float(i-x[n-1])/float(x[n]-x[n-1])))\n return Z**(252.0/i)-1\ndef f(xx,str1):\n X=xx\n return eval(str1)\n\n \n\ndef cupom(tabela,titulo):\n \n mtm = [float(u.replace('.','').replace(',','.')) for u in [tabela['col6'][str(x)] for x in sorted([int(key) for key in tabela['col6'].keys()])][5:]]\n VCTOs = [tabela['col2'][str(x)] for x in sorted([int(key) for key in tabela['col2'].keys()])][5:] \n yields = [float(u.replace(',','.')) for u in [tabela['col5'][str(x)] for x in sorted([int(key) for key in tabela['col5'].keys()])][5:]]\n schedules=[mySchedule(QLDate.fromString(data),QLDate.fromString(x,'/','dmy'),6) for x in VCTOs]\n sql = SqlQuery()\n if titulo[-1].lower() != 'f':\n ipca = float(sql.execute('select posicoes.vna(\"%s\",\"2014-02-28\")'%('IPCA' if titulo[-1].lower() == 'b' else 'IPCA')).fetchall()[0][0])\n \n fatorAcum = ipca/(ipcaBase if titulo[-1].lower() == 'b' else igpmBase)\n notional = 1000*fatorAcum\n else:\n notional = 1000\n dc = Business252()\n today = QLDate.fromString(data)\n curva = {}\n \n def bootstrapping(n = 0):\n \n if n == 0:\n for vt in schedules[n].cupons:\n curva.update({dc.dayCount(today,vt):yields[n]})\n else:\n ultimoVtConhecido = sorted(curva.keys())[len(curva)-1]\n vtDesejados =[]\n vtDesejados2={}\n vtx = schedules[n].cupons[len(schedules[n].cupons)-1]\n vtx = dc.dayCount(today,vtx)\n eq = '- %2.9f'%mtm[n]\n cupom = 1.06**0.5-1\n for vt in schedules[n].cupons:\n vt = dc.dayCount(today,vt)\n if vt in curva:\n Cupom = notional*cupom/(1+curva[vt]/100)**(vt/252.0) \n eq = eq + ' + %2.9f'%(Cupom)\n \n else:\n if vt < ultimoVtConhecido:\n sort = sorted(curva.keys())\n if vt 0:\n i = 0\n for vt in curva:\n\n curva[vt] = ((1 + curva[vt]/100)/(1 + pre[0][i]) - 1)*100\n\n i = i + 1\n\n\n return curva\n\n\narvoreSaida= []\ndepara = {'ntn-f':'curva_ntnf','ntn-b':'curva_ntnb','ntn-c':'curva_ntnc','lft':'curva_lft','ltn':'curva_ltn','lspread':'spread_ltndi'}\nentrada['lspread'] = entrada['ltn']\nfor titulo in entrada.keys():\n print 'iniciando bootstrapping ' + titulo\n curva = {}\n if titulo[0].lower() == 'l':\n\n curva = bullet(entrada[titulo])\n\n if titulo == 'lspread':\n curva = spreadLtnDi(curva)\n \n else:\n curva = cupom(entrada[titulo],titulo)\n \n \n for vt in curva.keys():\n valor= curva[vt]/100.0\n \n arvoreSaida.append(data+\"|\"+depara[titulo]+\"|\"+str(vt)+\"|\"+'0'+\"|\"+str(valor))\n print 'finalizando ' + titulo\n\n \n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39937,"cells":{"__id__":{"kind":"number","value":11785390280879,"string":"11,785,390,280,879"},"blob_id":{"kind":"string","value":"2ade252f6fffc6374133cebd46c37c1c834c2247"},"directory_id":{"kind":"string","value":"21306ff756b6bb49fc51f5a8d89f4924c3f5caf5"},"path":{"kind":"string","value":"/obsolete/demo/vor/__init__.py"},"content_id":{"kind":"string","value":"a5c56657a754ea5cac6755f275d783f7e5129a8e"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"MaxTyutyunnikov/lino"},"repo_url":{"kind":"string","value":"https://github.com/MaxTyutyunnikov/lino"},"snapshot_id":{"kind":"string","value":"d5d0392eb76c7fc3858c9589243e3f3743daa9d4"},"revision_id":{"kind":"string","value":"9a6322272d36b8c747e06b6b9eb0889a2e0d27a1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T16:50:17.870568","string":"2021-01-19T16:50:17.870568"},"revision_date":{"kind":"timestamp","value":"2013-11-23T03:47:26","string":"2013-11-23T03:47:26"},"committer_date":{"kind":"timestamp","value":"2013-11-23T03:47:26","string":"2013-11-23T03:47:26"},"github_id":{"kind":"number","value":39275252,"string":"39,275,252"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding: latin1\n\nlabel = 'Prototyp einer Homepage für den V.O.R.'\n\ndef populate(db):\n\timport vor1\n\tvor1.populate(db)\n\n"},"src_encoding":{"kind":"string","value":"ISO-8859-1"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39938,"cells":{"__id__":{"kind":"number","value":12103217860902,"string":"12,103,217,860,902"},"blob_id":{"kind":"string","value":"392d648d6017431db3bd7ea741a6622b1c4d8313"},"directory_id":{"kind":"string","value":"1065414f55761f1ce0fc80c85eebc078f8a18f91"},"path":{"kind":"string","value":"/apps/payouts/admin_utils.py"},"content_id":{"kind":"string","value":"ffc9dd1d559cf8793146942cb295bc8c73375b00"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause","BSD-3-Clause"],"string":"[\n \"BSD-2-Clause\",\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"gannetson/onepercentclub-site"},"repo_url":{"kind":"string","value":"https://github.com/gannetson/onepercentclub-site"},"snapshot_id":{"kind":"string","value":"69d7e4fa93b28b71ff5b5d0e02e0795621d233f3"},"revision_id":{"kind":"string","value":"fb0d14beac1bbb1e477c8521fc9e77a1509fcf1d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T18:59:16.973559","string":"2021-01-15T18:59:16.973559"},"revision_date":{"kind":"timestamp","value":"2014-07-01T06:45:14","string":"2014-07-01T06:45:14"},"committer_date":{"kind":"timestamp","value":"2014-07-01T06:45:14","string":"2014-07-01T06:45:14"},"github_id":{"kind":"number","value":8796961,"string":"8,796,961"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":true,"string":"true"},"gha_event_created_at":{"kind":"timestamp","value":"2014-07-14T12:18:28","string":"2014-07-14T12:18:28"},"gha_created_at":{"kind":"timestamp","value":"2013-03-15T10:40:04","string":"2013-03-15T10:40:04"},"gha_updated_at":{"kind":"timestamp","value":"2014-07-07T07:56:39","string":"2014-07-07T07:56:39"},"gha_pushed_at":{"kind":"timestamp","value":"2014-07-10T13:41:13","string":"2014-07-10T13:41:13"},"gha_size":{"kind":"number","value":67782,"string":"67,782"},"gha_stargazers_count":{"kind":"number","value":0,"string":"0"},"gha_forks_count":{"kind":"number","value":0,"string":"0"},"gha_open_issues_count":{"kind":"number","value":2,"string":"2"},"gha_language":{"kind":"string","value":"Shell"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import urllib\n\nfrom django.core.urlresolvers import reverse\n\n\ndef link_to(value, url_name, view_args=(), view_kwargs={}, query={}, short_description=None):\n \"\"\"\n Return admin field with link to named view with view_args/view_kwargs\n or view_[kw]args(obj) methods and HTTP GET parameters.\n\n Parameters:\n\n * value: function(object) or string for object proeprty name\n * url_name: name used to reverse() view\n * view_args: () or function(object) -> () returing view params\n * view_kwargs: {} or function(object) -> {} returing view params\n * query: {} or function(object) -> {} returning HTTP GET params\n * short_description: string with description, defaults to\n 'value'/property name\n \"\"\"\n\n def prop(self, obj):\n # Replace view_args methods by result of function callss\n if callable(view_args):\n args = view_args(obj)\n else:\n args = view_args\n\n if callable(view_kwargs):\n kwargs = view_kwargs(obj)\n else:\n kwargs = view_kwargs\n\n # Construct URL\n url = reverse(url_name, args=args, kwargs=kwargs)\n\n if callable(query):\n params = query(obj)\n else:\n params = query\n\n # Append query parameters\n if params:\n url += '?' 
+ urllib.urlencode(params)\n\n # Get value\n if callable(value):\n # Call value getter\n new_value = value(obj)\n else:\n # String, assume object property\n assert isinstance(value, basestring)\n new_value = getattr(obj, value)\n\n return u'<a href=\"{0}\">{1}</a>'.format(url, new_value)\n\n # Decorate function\n prop.allow_tags = True\n\n if not short_description:\n # No short_description set, use property name\n assert isinstance(value, basestring)\n short_description = value\n prop.short_description = short_description\n\n return prop\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39939,"cells":{"__id__":{"kind":"number","value":15513421885001,"string":"15,513,421,885,001"},"blob_id":{"kind":"string","value":"e0dceed2587b5c11e305770e88f815b78a0a4366"},"directory_id":{"kind":"string","value":"79f5290060ead677fc35a238dc6f9b9621b3f35c"},"path":{"kind":"string","value":"/lyrics_searcher/tools/fetchers/musixmatch_fetcher.py"},"content_id":{"kind":"string","value":"18c795bf090c7fd6bdb011ece58e1b1aaccfe07f"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rrader/nowLyrics"},"repo_url":{"kind":"string","value":"https://github.com/rrader/nowLyrics"},"snapshot_id":{"kind":"string","value":"8c67c97bc809187c71de237f44c50c56ed55d2f3"},"revision_id":{"kind":"string","value":"2ce91707dbc649b01e3b1880f6e469c196acab8e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-07-04T04:42:49.088625","string":"2020-07-04T04:42:49.088625"},"revision_date":{"kind":"timestamp","value":"2011-12-28T12:35:02","string":"2011-12-28T12:35:02"},"committer_date":{"kind":"timestamp","value":"2011-12-28T12:35:02","string":"2011-12-28T12:35:02"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding:utf8 -*-\n\nMM_API_KEY = 'a453f8a71d8e47edf061cc7d53b375d6'\n\nimport os\nos.environ['MUSIXMATCH_API_KEY'] = MM_API_KEY\n\nfrom musixmatch.track import search as mm_search\nfrom BaseFetcher import BaseFetcher\n\nclass Fetcher(BaseFetcher):\n def _do_fetch(self, title, artist):\n sr = mm_search(q_track=title, q_artist=artist)\n sr = filter(lambda x: x.lyrics_id != 0, sr)\n sr = map(lambda x: x.lyrics(), sr[:3])\n sr = map(lambda x: u\"%s \\n%s (c)\\nSource: MusiXmatch.com\" % (x['lyrics_body'],x['lyrics_copyright']), sr)\n return 
sr\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39940,"cells":{"__id__":{"kind":"number","value":5583457493049,"string":"5,583,457,493,049"},"blob_id":{"kind":"string","value":"026bf32126d2d2dcd855f9f227378735296baa25"},"directory_id":{"kind":"string","value":"abb71ed66f53a40d28be79825417e69c926bd1a4"},"path":{"kind":"string","value":"/src/python/example3-1.py"},"content_id":{"kind":"string","value":"005919b028290ade40c8844945a82a7fb3dfd2af"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"yarox/libxml-examples"},"repo_url":{"kind":"string","value":"https://github.com/yarox/libxml-examples"},"snapshot_id":{"kind":"string","value":"957a221642f5ce3de6579e9822d1fcf434f700ae"},"revision_id":{"kind":"string","value":"40189d6f06db8ef474d0e29d332e03067de89c39"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-21T13:21:58.824973","string":"2020-05-21T13:21:58.824973"},"revision_date":{"kind":"timestamp","value":"2012-07-25T09:50:52","string":"2012-07-25T09:50:52"},"committer_date":{"kind":"timestamp","value":"2012-07-25T09:50:52","string":"2012-07-25T09:50:52"},"github_id":{"kind":"number","value":4985667,"string":"4,985,667"},"star_events_count":{"kind":"number","value":10,"string":"10"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''\nexample3-1.py\nFind elements in a simple XML document.\n'''\n\nfrom lxml import etree\n\n\n\ndoc = etree.parse('../../data/simple.xml')\n\n# Retrieve all room elements and print tag, 'id' property, and number of \n# children\nrooms = doc.xpath('/house/room')\nfor room in rooms:\n print '{0}: a {1} with {2} objects'.format(room.tag, room.attrib['id'], \n len(room.getchildren()))\n\n# Retrieve all red chairs ('chair' nodes with property 'color' = 'red') and \n# print color, name, and room\nred_chairs = doc.xpath('//chair[@color=\"red\"]')\nfor chair in red_chairs:\n print 'a {1} {2} from the {0}'.format(chair.getparent().attrib['id'], \n chair.attrib['color'], chair.tag)\n \n# List nodes whith text, if any\nfor element in doc.iter(): \n if element.text and element.text.strip():\n print '{0}: {1}'.format(element.tag, 
element.text)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39941,"cells":{"__id__":{"kind":"number","value":14121852480552,"string":"14,121,852,480,552"},"blob_id":{"kind":"string","value":"bf02272c8ff5d65ad8db17a7f5b3c211e04e587d"},"directory_id":{"kind":"string","value":"44f0da4da4bde82c6df6c806680f524b200e0e5b"},"path":{"kind":"string","value":"/nonmainprograms/generate_light_curve.py"},"content_id":{"kind":"string","value":"a88470cbe48862a41091625a1edd82298c031266"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"davidwhogg/exoplanet-research"},"repo_url":{"kind":"string","value":"https://github.com/davidwhogg/exoplanet-research"},"snapshot_id":{"kind":"string","value":"51b747a3c817fe779fadeb922a507e6fcfbf9185"},"revision_id":{"kind":"string","value":"5b040ab523f5f0e49d806088684b7b32dc2ea9a0"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T11:37:36.204830","string":"2021-01-22T11:37:36.204830"},"revision_date":{"kind":"timestamp","value":"2014-10-21T02:27:43","string":"2014-10-21T02:27:43"},"committer_date":{"kind":"timestamp","value":"2014-10-21T02:27:43","string":"2014-10-21T02:27:43"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#This code generates the light curve of a given kepler object.\n#Many parts of the code were written with the help from http://yourlocaltautologist.blogspot.com/2012/08/viewing-kepler-light-curves-via.html\n\nimport os\nimport pyfits\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#This short segment of code allows the user to input the kepler ID\n#and change the working directory to the proper one. 
(Given that the proper local data exists)\n\nkplr_id = raw_input('Input kepler id:')\npath = '/Users/SHattori/.kplr/data/lightcurves/%s' %kplr_id\nos.chdir(path)\n\n# print path\n# print os.getcwd()\n\n#Code to allow the user to decide which FITS format file to generate light curve.\nfilename = raw_input('Input FITS file to use: ')\n\nFITSfile = pyfits.open(filename)\n\n#FITSfile.info()\n\ndataheader = FITSfile[1].header\ntopheader = FITSfile[0].header\n\njdadj = str(dataheader[\"BJDREFI\"])\nobsobject = str(dataheader[\"OBJECT\"])\n\nlightdata = FITSfile[1].data\n\n#Convert the data to fractional relative flux\n#Currently this code is written to generate graphs for PDCSAP_FLUX.\nflux = lightdata.field(\"PDCSAP_FLUX\")\ntime = lightdata.field('TIME')\nprint time\nassert 0\nmedian = np.median(flux)\n# for i, e in enumerate(flux):\n# \t#fractional relative flux\n# \tflux[i] = ((e - median) / median)\n\ntime = lightdata.field(\"TIME\") #Barycenter corrected Julian date\nfig1 = plt.figure()\nsub1 = fig1.add_subplot(111)\nsub1.plot(time,flux, color=\"black\", marker=\",\", linestyle = 'None')\n\n#The following code is to set the labels and title\nxlab = \"Time (days, Kepler Barycentric Julian date - %s)\"%jdadj\nsub1.set_xlabel(xlab)\nsub1.set_ylabel(\"Relative Brightness (electron flux)\")\nplottitle=\"Light Curve for %s\"%obsobject\nsub1.set_title(plottitle)\n\nplt.show()\n\nFITSfile.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39942,"cells":{"__id__":{"kind":"number","value":3547642990851,"string":"3,547,642,990,851"},"blob_id":{"kind":"string","value":"2f936e7a7da6c99259e924f78846142921d05f03"},"directory_id":{"kind":"string","value":"86e27a49d54a807902c5dec48da3e597c1bcf7da"},"path":{"kind":"string","value":"/importers/delicious-xml.py"},"content_id":{"kind":"string","value":"3d27635536555788931d0539b7f9d04c768d2a89"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pombredanne/selficious"},"repo_url":{"kind":"string","value":"https://github.com/pombredanne/selficious"},"snapshot_id":{"kind":"string","value":"9b8d497b055f13be34ea39ac6f5c4724ce5e1c7e"},"revision_id":{"kind":"string","value":"4f57f00a36b40c71ffab031bfc41b2a17a7b939f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2017-12-02T07:54:35.585125","string":"2017-12-02T07:54:35.585125"},"revision_date":{"kind":"timestamp","value":"2010-12-24T14:07:49","string":"2010-12-24T14:07:49"},"committer_date":{"kind":"timestamp","value":"2010-12-24T14:07:49","string":"2010-12-24T14:07:49"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python \n# -*- coding: 
utf-8 -*- \n#\n# This is SELFICIOUS by Yuuta\n# UPDATED: 2010-12-23 20:53:41\n\nimport urllib2\nimport base64\nimport hashlib\nfrom xml.dom import minidom\nimport dateutil.parser\nfrom importers import BaseImporter\n\nclass DeliciousLocalXMLImporter(BaseImporter):\n \"\"\"\n Imports bookmarks from an XML file saved from delicious. To get this kind of\n files, visit http://api.del.icio.us/v1/posts/all\n (if you're an old delicious user) \n \"\"\"\n service_name = 'delicious-xml'\n service_verbose_name = \"Local XML file saved from delicious\"\n form = \"\"\"\n

\n \n \n

\n \"\"\"\n\n def __init__(self, tornado_handler):\n try:\n uploaded_file = tornado_handler.request.files['xmlfile'][0]\n self.data = uploaded_file['body']\n self.success = True\n except:\n self.success = False\n self.error = 'fetch'\n super(DeliciousLocalXMLImporter, self).__init__(tornado_handler)\n\n def posts(self):\n if self.success:\n posts = []\n dom = minidom.parseString(self.data)\n h = hashlib.sha1()\n for node in dom.getElementsByTagName('post'):\n h.update(node.getAttribute('href'))\n posts.append({\n 'hash':h.hexdigest(),\n 'url':node.getAttribute('href'),\n 'title':node.getAttribute('description'),\n 'description':node.getAttribute('extended'),\n 'tags':node.getAttribute('tag').split(' '),\n 'time':dateutil.parser.parse(node.getAttribute('time'))\n })\n return posts\n else:\n return []\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":39943,"cells":{"__id__":{"kind":"number","value":5428838676443,"string":"5,428,838,676,443"},"blob_id":{"kind":"string","value":"893e00b67bc3b9c29c5f7b76b1d9bc805fa93f8c"},"directory_id":{"kind":"string","value":"5c9530bab0210bbd3a47992ca11e895a8f9836c5"},"path":{"kind":"string","value":"/scikits/learn/naive_bayes/__init__.py"},"content_id":{"kind":"string","value":"0f632219df385b18fbe29257be8ed9af720c47f9"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"amitibo/scikit-learn"},"repo_url":{"kind":"string","value":"https://github.com/amitibo/scikit-learn"},"snapshot_id":{"kind":"string","value":"4b94e8d307102290f1329608a88c9e3dfa184434"},"revision_id":{"kind":"string","value":"3dd1d00532c70cc3f5ebf2db916e072138afa0f1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-30T19:22:28.287688","string":"2020-12-30T19:22:28.287688"},"revision_date":{"kind":"timestamp","value":"2011-05-11T16:20:43","string":"2011-05-11T16:20:43"},"committer_date":{"kind":"timestamp","value":"2011-05-11T16:20:43","string":"2011-05-11T16:20:43"},"github_id":{"kind":"number","value":1502609,"string":"1,502,609"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nNaive Bayes models\n==================\n\nNaive Bayes algorithms are a set of supervised learning methods based on\napplying Baye`s theorem with strong (naive) independence assumptions. \n\nSee http://scikit-learn.sourceforge.net/modules/naive_bayes.html for\ncomplete documentation.\n\"\"\"\n\nfrom .naive_bayes import GNB, MultinomialNB\n\nfrom . 
import sparse\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39944,"cells":{"__id__":{"kind":"number","value":8572754758203,"string":"8,572,754,758,203"},"blob_id":{"kind":"string","value":"ff7dea97b2df357a903bf2052097ca471e936164"},"directory_id":{"kind":"string","value":"fcd2148474f1e8cd7c426e1d5c48559f320fdbbc"},"path":{"kind":"string","value":"/playlists.py"},"content_id":{"kind":"string","value":"ff3d2e9d477af133673907157f19be2587db30e5"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"keroserene/mens-amplio"},"repo_url":{"kind":"string","value":"https://github.com/keroserene/mens-amplio"},"snapshot_id":{"kind":"string","value":"f0125c749a5217bd0bab00c463cbb7c62ac42fb0"},"revision_id":{"kind":"string","value":"e8c05c85742c3624cf8aed8149857e931a566333"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T22:40:27.262447","string":"2021-01-20T22:40:27.262447"},"revision_date":{"kind":"timestamp","value":"2013-08-16T09:46:51","string":"2013-08-16T09:46:51"},"committer_date":{"kind":"timestamp","value":"2013-08-16T09:46:51","string":"2013-08-16T09:46:51"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Defines full set of lighting effect playlists to use when actually running piece \n\nfrom led.effects.base import (\n EffectParameters, SnowstormLayer, TechnicolorSnowstormLayer, WhiteOutLayer, NoDataLayer)\nfrom led.effects.digital_rain import DigitalRainLayer\nfrom led.effects.drifters import *\nfrom led.effects.firefly_swarm import FireflySwarmLayer\nfrom led.effects.impulses import *\nfrom led.effects.lightning_storm import LightningStormLayer\nfrom led.effects.plasma import PlasmaLayer\nfrom led.effects.waves import WavesLayer\nfrom led.renderer import Playlist\n\nheadsetOn = Playlist([\n [\n ImpulseLayer2(),\n WavesLayer(color=(1,0,.2)),\n PlasmaLayer(color=(.1,.1,.1)),\n ],\n [\n WavesLayer(),\n LightningStormLayer(),\n ]\n])\n \nheadsetOff = Playlist([\n [\n TreeColorDrifterLayer([ (1,0,1), (.5,.5,1), (0,0,1) ], 5),\n PlasmaLayer(),\n ],\n [\n OutwardColorDrifterLayer([ (1,0,0), (.7,.3,0), (.7,0,.3) ], 10),\n PlasmaLayer(),\n ]\n])\n \ntransition = Playlist([\n [WhiteOutLayer()],\n [SnowstormLayer()]\n 
])"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39945,"cells":{"__id__":{"kind":"number","value":10471130281730,"string":"10,471,130,281,730"},"blob_id":{"kind":"string","value":"7171d40572b1d6b72403a98559d7c092ae2df004"},"directory_id":{"kind":"string","value":"fe89fbe21a9e7b3b43e27e39bdf1e81e2f6c839f"},"path":{"kind":"string","value":"/test/test_all.py"},"content_id":{"kind":"string","value":"76da3836c7ce2f17998c17ac1688fd69354d23f1"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"pbulsink/turbocontrol"},"repo_url":{"kind":"string","value":"https://github.com/pbulsink/turbocontrol"},"snapshot_id":{"kind":"string","value":"dc2d88a2578baa413ee59b5ba7929476f8ebc218"},"revision_id":{"kind":"string","value":"67f036e520bc03e76f71e5ca41d2455b2712b2e7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-03-25T07:39:24.761624","string":"2020-03-25T07:39:24.761624"},"revision_date":{"kind":"timestamp","value":"2014-12-24T02:08:58","string":"2014-12-24T02:08:58"},"committer_date":{"kind":"timestamp","value":"2014-12-24T02:08:58","string":"2014-12-24T02:08:58"},"github_id":{"kind":"number","value":15980284,"string":"15,980,284"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2014-08-08T01:48:23","string":"2014-08-08T01:48:23"},"gha_created_at":{"kind":"timestamp","value":"2014-01-16T20:39:57","string":"2014-01-16T20:39:57"},"gha_updated_at":{"kind":"timestamp","value":"2014-08-08T01:48:07","string":"2014-08-08T01:48:07"},"gha_pushed_at":{"kind":"timestamp","value":"2014-08-08T01:48:23","string":"2014-08-08T01:48:23"},"gha_size":{"kind":"number","value":356,"string":"356"},"gha_stargazers_count":{"kind":"number","value":1,"string":"1"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nTurbogo Main Test Suite\n\nRuns all test files\n\n\"\"\"\n\nfrom unittest import TestLoader, TextTestRunner, TestSuite\nfrom test_turbogo import TestControlEdit, TestJob, TestSetup\nfrom test_turbogo import TestWriteCoord, TestSubmitScriptPrep\nfrom test_turbogo_helpers import TestArgs, TestChSpin\nfrom test_turbogo_helpers import TestControlMods, TestGeom\nfrom test_turbogo_helpers import TestRoute, TestSimpleFuncs\nfrom test_turbocontrol import TestJobset, TestFindInputs\nfrom test_turbocontrol import TestJobChecker, TestWriteStats, TestWriteFreeh\nfrom test_def_op import TestDefine\nfrom test_screwer_op import TestScrewer\nfrom test_freeh_op import TestFreeh\nfrom test_cosmo_op import TestCosmo\n\nif __name__ == \"__main__\":\n loader = TestLoader()\n suite = TestSuite((\n loader.loadTestsFromTestCase(TestControlEdit),\n loader.loadTestsFromTestCase(TestJob),\n loader.loadTestsFromTestCase(TestSetup),\n 
loader.loadTestsFromTestCase(TestWriteCoord),\n loader.loadTestsFromTestCase(TestSubmitScriptPrep),\n loader.loadTestsFromTestCase(TestArgs),\n loader.loadTestsFromTestCase(TestChSpin),\n loader.loadTestsFromTestCase(TestControlMods),\n loader.loadTestsFromTestCase(TestGeom),\n loader.loadTestsFromTestCase(TestRoute),\n loader.loadTestsFromTestCase(TestSimpleFuncs),\n loader.loadTestsFromTestCase(TestJobset),\n loader.loadTestsFromTestCase(TestFindInputs),\n loader.loadTestsFromTestCase(TestJobChecker),\n loader.loadTestsFromTestCase(TestWriteStats),\n loader.loadTestsFromTestCase(TestDefine),\n loader.loadTestsFromTestCase(TestScrewer),\n loader.loadTestsFromTestCase(TestFreeh),\n loader.loadTestsFromTestCase(TestCosmo),\n loader.loadTestsFromTestCase(TestWriteFreeh),\n ))\n\n runner = TextTestRunner(verbosity = 2)\n runner.run(suite)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39946,"cells":{"__id__":{"kind":"number","value":19335942780279,"string":"19,335,942,780,279"},"blob_id":{"kind":"string","value":"57bf1e3fe5b66559a0aed584b46bf764ad4b2f63"},"directory_id":{"kind":"string","value":"f42f06849f549f1781c2d5a5455bb1593a56b8c4"},"path":{"kind":"string","value":"/sitemap.py"},"content_id":{"kind":"string","value":"de62ebf3a440f54377d05c949eef19addb158cbe"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"freshy969/Lonava"},"repo_url":{"kind":"string","value":"https://github.com/freshy969/Lonava"},"snapshot_id":{"kind":"string","value":"6e700de0198db83653f6f4c2f78fb603db91e31d"},"revision_id":{"kind":"string","value":"30906235597b0617789f4d7e1a6c3b24c5415137"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-03-23T04:21:02.463563","string":"2020-03-23T04:21:02.463563"},"revision_date":{"kind":"timestamp","value":"2013-03-08T07:28:01","string":"2013-03-08T07:28:01"},"committer_date":{"kind":"timestamp","value":"2013-03-08T07:28:01","string":"2013-03-08T07:28:01"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\nimport time\nimport datetime\nimport psycopg2\nimport psycopg2.extras\nimport os\n\ndb = psycopg2.connect(\"dbname='lonava' user='lonuser' host='localhost' password='YOURPASS'\")\ncur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nprint \"Generating Count\"\ncur.execute (\"select count(*) as count from (select distinct on (commentgroup) * from storygroup ) as bar\")\ntotalrows = cur.fetchone()['count']\nprint \"There are \" + str(totalrows) + \" stories in Lonava.\"\nlooppasses = 1 + (totalrows / 40000)\nprint \"A total of \" + str(looppasses) + \" story sitemaps\"\ndate1 = 
datetime.datetime.now().isoformat()\ndatenow = date1[0:date1.find(\".\")] + \"+00:00\"\nprint datenow\n\n\nstartat = 0 \nsitemapindex = open('/usr/local/lonava/static/sitemap_index.xml', 'w')\nsitemapindex.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sitemapindex xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\n<sitemap>\n<loc>http://lonava.com/static/sitemap-0.xml</loc>\n<lastmod>\"\"\" + datenow + \"\"\"</lastmod>\n</sitemap>\n\"\"\")\n\n \nfor i in range(looppasses):\n sitemap_path = \"/usr/local/lonava/static/sitemap-\" + str(i + 1) + \".xml\"\n sitemap = open(sitemap_path, 'w') \n sitemapindex.write(\"<sitemap>\")\n sitemapindex.write(\"<loc>http://lonava.com/static/sitemap-\" + str(i + 1) + \".xml</loc>\\n\")\n sitemapindex.write(\"<lastmod>\" + datenow + \"</lastmod>\\n\")\n sitemapindex.write(\"</sitemap>\\n\")\n cur.execute(\"create temporary table mystories (like stories);\")\n cur.execute(\"alter table mystories add column ord bigserial;\")\n cur.execute(\"alter table mystories add column cachedreplycount bigint;\")\n cur.execute(\"insert into mystories(lastedit,pimgurl,imgurl,usr,storytime,title,url,text,name,score,commentgroup,storyid,location,cachedreplycount) select lastedit,pimgurl,imgurl,usr,storytime,title,url,text,name,score,commentgroup,storyid,location,cachedreplycount from storygroup where location in (select subbedchan from usrsubs) order by storyid asc;\",)\n cur.execute(\"select * from (select distinct on (commentgroup) * from (select *,(1.0 + score + (cachedreplycount / 10)) / (1.0 + (select count(*) from mystories) - ord) as rank from mystories) as foo) as bar order by storyid asc offset %s limit %s;\",[startat,39999])\n\n\n sitemap.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\"\"\")\n sitemap.write(\"\"\"<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\\n\"\"\")\n print \"Starting file \" + str(i) + \"; startat = \" + str(startat) + \" ; endat \" + str(startat + 39999)\n startat = startat + 40000\n rows = cur.fetchall()\n cur.execute(\"drop table mystories\")\n print \"Writing \" + str(len(rows)) + \" rows to \" + sitemap_path\n for row in rows:\n url = 'http://lonava.com/stories/' + str(row['storyid'])\n date = row['storytime'] + datetime.timedelta(days=2)\n datestr = date.isoformat()\n datecur = datestr[0:datestr.find(\".\")] + \"+00:00\"\n sitemap.write(\"<url>\\n\")\n sitemap.write(\"<loc>\" + url + \"</loc>\\n\")\n sitemap.write(\"<lastmod>\" + datecur + \"</lastmod>\\n\")\n sitemap.write(\"<changefreq> monthly </changefreq>\\n\") \n sitemap.write(\"<priority> .5 </priority>\\n\")\n sitemap.write(\"</url>\\n\")\n sitemap.write(\"</urlset>\\n\")\n sitemap.close()\n\n#### DONE WITH STORIES, now do USERS\n\ncur.execute (\"select count(*) from usrs as count\")\ntotalrows = cur.fetchone()['count']\nprint \"There are \" + str(totalrows) + \" usrs of Lonava.\"\nlooppasses = 1 + (totalrows / 40000)\nprint \"A total of \" + str(looppasses) + \" usr sitemaps\"\ndate1 = datetime.datetime.now().isoformat()\ndatenow = date1[0:date1.find(\".\")] + \"+00:00\"\ni = 0\nstartat = 0\nfor i in range(looppasses):\n sitemap_path = \"/usr/local/lonava/static/sitemap-usr-\" + str(i + 1) + \".xml\"\n sitemap = open(sitemap_path, 'w')\n sitemapindex.write(\"<sitemap>\")\n sitemapindex.write(\"<loc>http://lonava.com/static/sitemap-usr-\" + str(i + 1) + \".xml</loc>\\n\")\n sitemapindex.write(\"<lastmod>\" + datenow + \"</lastmod>\\n\")\n sitemapindex.write(\"</sitemap>\\n\")\n cur.execute (\"select * from usrs limit %s offset %s\",[startat + 39999,startat])\n\n\n sitemap.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\"\"\")\n sitemap.write(\"\"\"<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\\n\"\"\")\n\n startat = startat + 40000\n rows = cur.fetchall()\n for row in rows:\n url = 'http://lonava.com/user/' + str(row['usrid'])\n date = row['regtime'] + datetime.timedelta(days=2)\n datestr = date.isoformat()\n datecur = datestr[0:datestr.find(\".\")] + \"+00:00\"\n sitemap.write(\"<url>\\n\")\n sitemap.write(\"<loc>\" + url + \"</loc>\\n\")\n sitemap.write(\"<lastmod>\" + datecur + 
\"</lastmod>\\n\")\n sitemap.write(\"<changefreq> monthly </changefreq>\\n\")\n sitemap.write(\"<priority> .6 </priority>\\n\")\n sitemap.write(\"</url>\\n\")\n sitemap.write(\"</urlset>\\n\")\n sitemap.close()\n\nsitemapindex.write(\"</sitemapindex>\\n\")\nsitemapindex.close()\nprint \"Notifying Bing\"\ncmd = 'curl http://www.bing.com/webmaster/ping.aspx?siteMap=http://lonava.com/static/sitemap_index.xml > /dev/null'\nos.system(cmd)\nprint \"Notifying Google\"\ncmd = 'curl http://www.google.com/webmasters/sitemaps/ping?sitemap=http://lonava.com/static/sitemap_index.xml > /dev/null'\nos.system(cmd)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39947,"cells":{"__id__":{"kind":"number","value":15006615768946,"string":"15,006,615,768,946"},"blob_id":{"kind":"string","value":"0582d0cb2df257b0e697286d10f4e807f046eae2"},"directory_id":{"kind":"string","value":"270487b95e2309dc6a4e392b241374beb24f2633"},"path":{"kind":"string","value":"/clean_sentences.py"},"content_id":{"kind":"string","value":"d4079e0998c8505c9ade3cc2944758b41fc7add3"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"cdg720/parsing"},"repo_url":{"kind":"string","value":"https://github.com/cdg720/parsing"},"snapshot_id":{"kind":"string","value":"58b85b0ea6013e3e1034fd5a8667e66ebcd7e1b5"},"revision_id":{"kind":"string","value":"9c79a364887395d91a45aa4f8090e26606fa00f7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T18:07:16.278184","string":"2021-01-01T18:07:16.278184"},"revision_date":{"kind":"timestamp","value":"2014-11-13T00:20:18","string":"2014-11-13T00:20:18"},"committer_date":{"kind":"timestamp","value":"2014-11-13T00:20:18","string":"2014-11-13T00:20:18"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import gzip\nimport string\nimport sys\n\n# NYT only. check for other coropra.\ndef clean_sentences():\n\tif len(sys.argv) == 1:\n\t\tprint 'usage: python clean_sentences.py file1 file2 ... 
\t\t\tif count2 > 0 and count * 100 / count2 >= 90: # go back to 90\n\t\t\t\tx[0] += 1\n\t\t\t\ty[0] += 1\n\t\t\t\tif i % doc_size == 0: # new file\n\t\t\t\t\tif g:\n\t\t\t\t\t\tg.flush()\n\t\t\t\t\t\tg.close()\n\t\t\t\t\tg = gzip.open(out + '/' + str(i / doc_size + 1) + '.gz', 'wb')\n\t\t\t\tg.write(line + '\\n')\n\t\t\t\ti += 1\n\t\tprint doc, y, x\n\ndef clean_sentences2():\n\tif len(sys.argv) == 1:\n\t\tprint 'usage: python clean_sentences.py file1 file2 ... out'\n\t\tprint 'e.g.: python clean_sentences.py ~/data/gigaword/nyt/*.gz /pro/dpg/dc65/data/gigaword/chunks/nyt'\n\t\tsys.exit(0)\n\n\tdoc_size = 500\n\tout = sys.argv[-1]\n\ti = 0\n\tg = None\n\n\tx = [0,] * 2\n\tfor doc in sys.argv[1:-1]:\n\t\tf = gzip.open(doc, 'rb')\n\t\ty = [0,] * 2\n\t\tfor line in f.read().splitlines():\n\t\t\tx[1] += 1\n\t\t\ty[1] += 1\n\t\t\tx[0] += 1\n\t\t\ty[0] += 1\n\t\t\tif i % doc_size == 0: # new file\n\t\t\t\tif g:\n\t\t\t\t\tg.flush()\n\t\t\t\t\tg.close()\n\t\t\t\tg = gzip.open(out + '/' + str(i / doc_size + 1) + '.gz', 'wb')\n\t\t\tg.write(line + '\\n')\n\t\t\ti += 1\n\t\tprint doc, y, x\n\nclean_sentences2()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39948,"cells":{"__id__":{"kind":"number","value":15040975485713,"string":"15,040,975,485,713"},"blob_id":{"kind":"string","value":"28d814caf31ab9754206adfd6da704bb184b56a2"},"directory_id":{"kind":"string","value":"c0b744ca35ef8791c0f7cc22a66a3754a0d33493"},"path":{"kind":"string","value":"/interface.py"},"content_id":{"kind":"string","value":"9ff898f25ff6630b299b49b988925198a0bd61f7"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"Cjreynol/Python-Monster-Zoo"},"repo_url":{"kind":"string","value":"https://github.com/Cjreynol/Python-Monster-Zoo"},"snapshot_id":{"kind":"string","value":"fc91207f04fbb52a8adccb2f1a0025f1df7fb5ac"},"revision_id":{"kind":"string","value":"811055206ef1ce994677e208606b16964baf1e42"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-16T02:25:35.809134","string":"2021-05-16T02:25:35.809134"},"revision_date":{"kind":"timestamp","value":"2013-12-27T19:15:21","string":"2013-12-27T19:15:21"},"committer_date":{"kind":"timestamp","value":"2013-12-27T19:15:21","string":"2013-12-27T19:15:21"},"github_id":{"kind":"number","value":15458914,"string":"15,458,914"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Chad Reynolds\n# 12/24/13\n# Creates an interface to hold multiple managers\n\nfrom tkinter import *\nfrom manager import Name_Window, Manager\n\nclass Interface():\n\t\"\"\"Main Window to add and hold multiple monsters.\"\"\"\n\tdef __init__(self):\n\t\tself.root = Tk()\n\t\tself.root.title(\"Monster Zoo\")\n\n\t\tself.interface_frame = Frame(self.root)\n\t\tself.interface_frame.pack()\n\n\t\tself.button = Button(self.interface_frame, text = \"Add Monster\", command = self.add_monster)\n\t\tself.button.pack(side = LEFT)\n\n\t\tself.monster_total = IntVar()\n\t\tself.monster_total.set(0)\n\t\tself.total = Label(self.interface_frame, textvariable = self.monster_total)\n\t\tself.total.pack(side = RIGHT)\n\n\t\tLabel(self.interface_frame, text = \"Monster Total: \").pack(side = RIGHT)\t\t\n\n\tdef add_monster(self):\n\t\t\"\"\"Creates a name menu to add a new monster.\"\"\"\n\t\tnew = Name_Window(self.root, self.monster_total)\t\n\n\tdef mainloop(self):\n\t\t\"\"\"Starts the root's mainloop.\"\"\"\n\t\tself.root.mainloop()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39949,"cells":{"__id__":{"kind":"number","value":13125420098678,"string":"13,125,420,098,678"},"blob_id":{"kind":"string","value":"69b866147be5708fa38154c0e012fdded7312f68"},"directory_id":{"kind":"string","value":"430293e7a0a2dd89e43103cb76eded423454834a"},"path":{"kind":"string","value":"/models/models.py"},"content_id":{"kind":"string","value":"bcebf7808210e761b2692e8a20468dd8a9b11c95"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"dezkareid/bicitacora"},"repo_url":{"kind":"string","value":"https://github.com/dezkareid/bicitacora"},"snapshot_id":{"kind":"string","value":"b2db8e24a6a513408a384554a6feff35e6a24441"},"revision_id":{"kind":"string","value":"b10e943382596d8a60d9ccbf7e68e12d573e5089"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T21:16:40.370738","string":"2021-01-10T21:16:40.370738"},"revision_date":{"kind":"timestamp","value":"2013-12-27T21:46:45","string":"2013-12-27T21:46:45"},"committer_date":{"kind":"timestamp","value":"2013-12-27T21:46:45","string":"2013-12-27T21:46:45"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from google.appengine.ext import db\nfrom google.appengine.api import users\n\nclass Ciclista(db.Model):\n\t\"\"\"Entidad para administrar a los Ciclistas que usen la aplicacion\"\"\"\n\tnombre = db.StringProperty(required=True)\n\tfacebook = db.LinkProperty(required=True)\n\ttwitter = db.StringProperty(required=True)\n\tes_hombre = db.BooleanProperty(default=True)\n\tfecha_nacimiento = db.DateProperty()\n\tfecha_registro = db.DateProperty(auto_now_add=True)\n\tusuario = db.UserProperty()\n\nclass Ruta(db.Model):\n\t\"\"\"Entidad para guardar una Ruta\"\"\"\n\tnombre = db.StringProperty(required=True)\n\tpuntos = db.ListProperty(db.GeoPt,default=[])\n\tinicio = db.DateTimeProperty(required=True,auto_now_add=True)\n\ttermino = db.DateTimeProperty(required=True,auto_now_add=True)\n\tciclista = db.ReferenceProperty(Ciclista)\n\nclass Tipo_Lugar(db.Model):\n\t\"\"\"Entidad para guardar los tipos de lugares\"\"\"\n\ttipo = db.StringProperty(required=True)\n\nclass Lugar(db.Model):\n\t\"\"\"Entidad para guardar lugares de interes para los ciclistas: talleres, biciestacionamientos,etc.\"\"\"\n\tnombre = db.StringProperty(required=True)\n\ttipo = db.ReferenceProperty(Tipo_Lugar)\n\tdescripcion = db.TextProperty(required=True)\n\tdireccion = db.StringProperty(required=True)\n\talta = db.DateProperty(auto_now_add=True)\n\tubicacion = db.GeoPtProperty()\n\nclass Tipo_Suceso(db.Model):\n\t\"\"\"Entidad que identifica a los tipos de sucesos: Manifestaciones, bloqueos, composturas de calle\"\"\"\n\ttipo = db.StringProperty(required=True)\n\tduracion = db.IntegerProperty(required=True)\n\nclass Suceso(db.Model):\n\t\"\"\"Entidad para reportar los tipos de sucesos que pueden haber en la ruta del ciclista\"\"\"\n\ttipo = db.ReferenceProperty(Tipo_Suceso)\n\tdescripcion = db.TextProperty(required=True)\n\talta = db.DateTimeProperty(auto_now_add=True)\n\tubicacion = 
class Tipo_Suceso(db.Model):\n\t\"\"\"Entity identifying the types of events: demonstrations, road blocks, street repairs\"\"\"\n\ttipo = db.StringProperty(required=True)\n\tduracion = db.IntegerProperty(required=True)\n\nclass Suceso(db.Model):\n\t\"\"\"Entity for reporting the kinds of events that can occur along the cyclist's route\"\"\"\n\ttipo = db.ReferenceProperty(Tipo_Suceso)\n\tdescripcion = db.TextProperty(required=True)\n\talta = db.DateTimeProperty(auto_now_add=True)\n\tubicacion = db.GeoPtProperty()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39950,"cells":{"__id__":{"kind":"number","value":11355893570767,"string":"11,355,893,570,767"},"blob_id":{"kind":"string","value":"7c0b39c8798eecf916c539238eff6381aaad4597"},"directory_id":{"kind":"string","value":"2e908072b624c46240ee9e7fcb993b2f21aee0da"},"path":{"kind":"string","value":"/plugins/presence_handler.py"},"content_id":{"kind":"string","value":"c2925081028c10ab791ed3a189ee2168c10a7546"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"bogobog/steerage"},"repo_url":{"kind":"string","value":"https://github.com/bogobog/steerage"},"snapshot_id":{"kind":"string","value":"d7e562c5d7cf8a730b0de5e8176c78cb7ecd1147"},"revision_id":{"kind":"string","value":"ce3d0e39efb26615ad114195d9675401e9cac99f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T13:50:15.830892","string":"2016-09-06T13:50:15.830892"},"revision_date":{"kind":"timestamp","value":"2014-01-08T05:15:56","string":"2014-01-08T05:15:56"},"committer_date":{"kind":"timestamp","value":"2014-01-08T05:15:56","string":"2014-01-08T05:15:56"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from twisted.python import log\nfrom wokkel import xmppim\nimport logging\n\nfrom common import CommonClientManager\n\nclass CommonPresenceHandler( xmppim.PresenceClientProtocol ):\n    received_statuses = {}\n\n    def __init__( self, client ):\n        super( CommonPresenceHandler, self ).__init__()\n\n        self.my_client = client\n\n    def connectionInitialized(self):\n        self.available()\n\n    def subscribedReceived(self, entity):\n        log.msg( 'subscribedReceived', level = logging.DEBUG )\n\n    def subscribeReceived(self, subscriber):\n\n        rtr_client = CommonClientManager.getHandler( 'roster', self.my_client )\n\n        def rosterAddResponse( response ):\n            log.msg( 'rosterAddResponse', level = logging.DEBUG )\n\n            self.subscribed( subscriber )\n\n            # subscribe to subscriber\n            user_status = rtr_client.getUserStatus( subscriber )\n            if not user_status or ( not user_status == 'both' and not user_status == 'to' ):\n                rtr_client.addItem( subscriber.userhostJID() ).addCallback( addSubscriberToRosterResponse ).addErrback( log.err )\n\n        def addSubscriberToRosterResponse( response ):\n            log.msg( 'addSubscriberToRosterResponse', level = logging.DEBUG )\n\n            if response.attributes['type'] == 'result':\n                self.subscribe( subscriber.userhostJID() )\n\n        rtr_client.addItem( subscriber ).addCallback( rosterAddResponse )\n\n
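    # availableReceived records peers that announce presence; our own echoed\n    # presence is skipped below.\n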
    def availableReceived(self, entity, show=None, statuses=None, priority=0):\n        log.msg( 'availableReceived', level = logging.DEBUG )\n\n        # ignore if self\n        if entity.full() == self.my_client.jid.full():\n            return\n\n        self.received_statuses[ entity.full() ] = 'available'\n\nCommonClientManager.addHandler( 'presence', CommonPresenceHandler )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39951,"cells":{"__id__":{"kind":"number","value":14886356680695,"string":"14,886,356,680,695"},"blob_id":{"kind":"string","value":"59dcd9d9438d0446307ea9c88e48d6f3b2b8d8b8"},"directory_id":{"kind":"string","value":"e9cfb8cec8d62b02136e6f2efe793c9f62a1c96f"},"path":{"kind":"string","value":"/src/main.py"},"content_id":{"kind":"string","value":"2368a096fc2e97fa71aac9ec0997ffca2dfadb45"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jleake/pytetris"},"repo_url":{"kind":"string","value":"https://github.com/jleake/pytetris"},"snapshot_id":{"kind":"string","value":"d6558340fd9ff074c80e013d2003649ae1a98da4"},"revision_id":{"kind":"string","value":"c481c8d6fac6db054fdf78245b2cd5b1062ff738"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T10:39:10.850861","string":"2020-05-18T10:39:10.850861"},"revision_date":{"kind":"timestamp","value":"2013-10-03T01:06:28","string":"2013-10-03T01:06:28"},"committer_date":{"kind":"timestamp","value":"2013-10-03T01:06:28","string":"2013-10-03T01:06:28"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from wrapper import CursesGraphics, curses_wrapper\nfrom game import Block\n\ndef main(cg):\n\tfor n in range(1, 30):\n\t\tcg.draw('#', (CursesGraphics.RED, CursesGraphics.BLACK), [(n, 1), (n, 2), (n, 3), (n+1, 3)])\n\t\tcg.refresh()\n\t\tcg.wait_for_key(1000)\n\t\tcg.erase([(n, 1), (n, 2), (n, 3), (n+1, 3)])\n\n# Call the main curses wrapper function to start things off. The parameter refers to the main function to be run after curses initialization. 
(In this case, this is the above \"main\" function.)\ncurses_wrapper(main)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39952,"cells":{"__id__":{"kind":"number","value":6476810712219,"string":"6,476,810,712,219"},"blob_id":{"kind":"string","value":"90a9a0dcd8b6dabb1a051002af273bc9edf51685"},"directory_id":{"kind":"string","value":"c3da8ec01fbdf0dc2040afa12c9a3b3d13940f7a"},"path":{"kind":"string","value":"/regression/test_cross_validation.py"},"content_id":{"kind":"string","value":"81b87782a2bbdb273e62a5f2d5c96e8779263680"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jjakeman/pyheat"},"repo_url":{"kind":"string","value":"https://github.com/jjakeman/pyheat"},"snapshot_id":{"kind":"string","value":"18ae60174dcd09028ae274e7b0401ce09ce6f677"},"revision_id":{"kind":"string","value":"2050e4fbe70503f04e54c3436276ba5903145dcd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T11:19:56.409206","string":"2016-09-06T11:19:56.409206"},"revision_date":{"kind":"timestamp","value":"2013-08-09T04:19:28","string":"2013-08-09T04:19:28"},"committer_date":{"kind":"timestamp","value":"2013-08-09T04:19:28","string":"2013-08-09T04:19:28"},"github_id":{"kind":"number","value":7924075,"string":"7,924,075"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import unittest\nimport numpy\nfrom regression.compressed_sensing import *\nfrom regression.gaussian_process import *\nfrom utilities.quadrature_rules import clenshaw_curtis\nfrom utilities.math_utils import cartesian_product, hypercube_map\nfrom utilities import visualisation as plot\nfrom matplotlib import cm\nfrom scipy import stats\nfrom cross_validation import *\n\nfrom regression.compressed_sensing import *\nfrom utilities.tensor_product_domain import *\n\nfrom interpolation.least_interpolation import least_factorization\nfrom polynomial_chaos.orthogonal_polynomial import LegendrePolynomial1D, \\\n    JacobiPolynomial1D\nfrom polynomial_chaos.polynomial_chaos_expansion import \\\n    PolynomialChaosExpansion as PCE\nfrom utilities.tensor_product_basis import *\nfrom examples.test_functions import matlab_peaks\nfrom utilities.math_utils import ridge_regression\n\nclass TestCrossValidation(unittest.TestCase):\n\n    def setUp( self ):\n        self.eps = 20*numpy.finfo( numpy.float ).eps\n        self.verbosity = 0\n\n    def xtest_grid_search_cross_validation( self ):\n\n        f_1d = lambda x: x**10\n\n        build_pts = numpy.linspace(-.85,.9,14)\n        build_pts = numpy.atleast_2d( build_pts )\n        build_vals = f_1d( build_pts ).T\n\n        # Test grid search cross validation when applied to Gaussian Process\n        num_dims = 1\n        func_domain = TensorProductDomain( num_dims, [[-1,1]] )\n        GP = GaussianProcess()\n        GP.set_verbosity( 0 )\n        GP.function_domain( func_domain )\n\n
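        # leave-one-out CV: one fold per build point; residual i must equal the\n        # prediction error of a model refit with point i held out, which the\n        # loop below verifies by rebuilding the GP without that point\n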
        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        CV = GridSearchCrossValidation( loo_cv_iterator, GP )\n        CV.run( build_pts, build_vals )\n\n        I = numpy.arange( build_pts.shape[1] )\n        for i in xrange( build_pts.shape[1] ):\n            if i == 0 : J = I[1:]\n            elif i == build_pts.shape[1]-1 : J = I[:-1]\n            else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n            train_pts = build_pts[:,J]\n            train_vals = build_vals[J,:]\n            GP.build( train_pts, train_vals )\n            pred_vals = GP.evaluate_set( build_pts )\n            assert numpy.allclose( build_vals[i,0]-pred_vals[i],\n                                   CV.residuals[0][i] )\n\n        # Test grid search cross validation when applied to polynomial chaos\n        # expansions that are built using ridge regression\n        # The vandermonde matrix is built from scratch every time by the pce\n        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce )\n        CV.run( build_pts, build_vals )\n\n        I = numpy.arange( build_pts.shape[1] )\n        V = pce.vandermonde( build_pts ).T\n        for i in xrange( V.shape[0] ):\n            if i == 0 : J = I[1:]\n            elif i == build_pts.shape[1]-1 : J = I[:-1]\n            else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n            A = V[J,:]\n            b = build_vals[J,:]\n            x = ridge_regression( A, b )\n            assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],\n                                   CV.residuals[0][i] )\n\n        # Test grid search cross validation when applied to polynomial chaos\n        # expansions that are built using ridge regression\n        # Specifying parse_cross_validation_data = True will ensure that\n        # the vandermonde matrix is not built from scratch every time by\n        # the pce\n        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce,\n                                        use_predictor_cross_validation = True)\n        CV.run( build_pts, build_vals )\n\n        I = numpy.arange( build_pts.shape[1] )\n        V = pce.vandermonde( build_pts ).T\n        for i in xrange( V.shape[0] ):\n            if i == 0 : J = I[1:]\n            elif i == build_pts.shape[1]-1 : J = I[:-1]\n            else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n            A = V[J,:]\n            b = build_vals[J,:]\n            x = ridge_regression( A, b )\n            assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],\n                                   CV.residuals[0][i] )\n\n        # Test grid search cross validation when applied to polynomial chaos\n        # expansions that are built using ridge regression\n        # A closed form for the cross validation residual is used\n
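        # for ridge regression the LOO residual has a closed form, so no refit\n        # is needed: with H = V (V^T V + lamda*I)^{-1} V^T, the held-out\n        # residual is e_i = ( y_i - yhat_i ) / ( 1 - H_ii )\n        # (standard hat-matrix identity, noted here for reference; it is not\n        # quoted from this repository)\n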
        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce,\n                                        use_predictor_cross_validation = True,\n                                        use_fast_predictor_cross_validation = True )\n        CV.run( build_pts, build_vals )\n\n        I = numpy.arange( build_pts.shape[1] )\n        V = pce.vandermonde( build_pts ).T\n        for i in xrange( V.shape[0] ):\n            if i == 0 : J = I[1:]\n            elif i == build_pts.shape[1]-1 : J = I[:-1]\n            else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n            A = V[J,:]\n            b = build_vals[J,:]\n            x = ridge_regression( A, b )\n            assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],\n                                   CV.residuals[0][i] )\n\n        # Test grid search cross validation when applied to polynomial chaos\n        # expansions that are built using ridge regression\n        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        max_order = build_pts.shape[1]\n        orders = numpy.arange( 1, max_order )\n        lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )\n        # note cartesian product takes type from first array in 1d sets\n        # so if I use orders first lamda will be rounded to 0\n        cv_params_grid_array = cartesian_product( [lamda,orders] )\n\n        cv_params_grid = []\n        for i in xrange( cv_params_grid_array.shape[0] ):\n            cv_params = {}\n            cv_params['lambda'] = cv_params_grid_array[i,0]\n            cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )\n            cv_params_grid.append( cv_params )\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce,\n                                        use_predictor_cross_validation = True,\n                                        use_fast_predictor_cross_validation = False )\n        CV.run( build_pts, build_vals, cv_params_grid )\n\n        k = 0\n        I = numpy.arange( build_pts.shape[1] )\n        for cv_params in cv_params_grid:\n            order = cv_params['order']\n            lamda = cv_params['lambda']\n            pce.set_order( order )\n            V = pce.vandermonde( build_pts ).T\n            for i in xrange( V.shape[0] ):\n                if i == 0 : J = I[1:]\n                elif i == build_pts.shape[1]-1 : J = I[:-1]\n                else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n                A = V[J,:]\n                b = build_vals[J,:]\n                x = ridge_regression( A, b, lamda = lamda )\n                assert numpy.allclose( ( build_vals[i,0]-\n                                         numpy.dot( V, x ) )[i],\n                                       CV.residuals[k][i] )\n            k += 1\n\n        print 'best',CV.best_cv_params\n\n        # Test grid search cross validation when applied to\n        # expansions that are built using a step based method\n        # ( LARS )\n
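        # LARS builds the coefficient path one step at a time; here the grid\n        # only varies the order, with the solver pinned to LARS through\n        # cv_params['solver'] = 4\n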
        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        max_order = build_pts.shape[1]\n        orders = numpy.arange( 1, max_order )\n        lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )\n        # note cartesian product takes type from first array in 1d sets\n        # so if I use orders first lamda will be rounded to 0\n        cv_params_grid_array = cartesian_product( [lamda,orders] )\n\n        cv_params_grid = []\n        for i in xrange( cv_params_grid_array.shape[0] ):\n            cv_params = {}\n            cv_params['solver'] = 4 # LARS\n            cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )\n            cv_params_grid.append( cv_params )\n\n        print cv_params_grid\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        #loo_cv_iterator = KFoldCrossValidationIterator( 3 )\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce,\n                                        use_predictor_cross_validation = True,\n                                        use_fast_predictor_cross_validation = False )\n        CV.run( build_pts, build_vals, cv_params_grid )\n\n        k = 0\n        I = numpy.arange( build_pts.shape[1] )\n        for cv_params in cv_params_grid:\n            order = cv_params['order']\n            pce.set_order( order )\n            V = pce.vandermonde( build_pts ).T\n            for i in xrange( V.shape[0] ):\n                if i == 0 : J = I[1:]\n                elif i == build_pts.shape[1]-1 : J = I[:-1]\n                else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n                A = V[J,:]\n                b = build_vals[J,:]\n                b = b.reshape( b.shape[0] )\n                x, metrics = least_angle_regression( A, b, 0., 4, 0., 1000,\n                                                     0 )\n                assert numpy.allclose( ( build_vals[i,0]-\n                                         numpy.dot( V, x ) )[i],\n                                       CV.residuals[k][i] )\n            k += 1\n\n        #for i in xrange( len( CV.cv_params_set ) ):\n        #    print CV.cv_params_set[i], CV.scores[i]\n\n        print 'best param', CV.best_cv_params\n        print 'best score', CV.best_score\n        print build_pts.shape[1]\n\n        # ( OMP )\n        num_dims = 1\n        order = 3\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n        max_order = build_pts.shape[1]\n        orders = numpy.arange( 1, max_order )\n        lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )\n        # note cartesian product takes type from first array in 1d sets\n        # so if I use orders first lamda will be rounded to 0\n        cv_params_grid_array = cartesian_product( [lamda,orders] )\n\n        cv_params_grid = []\n        for i in xrange( cv_params_grid_array.shape[0] ):\n            cv_params = {}\n            cv_params['solver'] = 2 # OMP\n            cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )\n            cv_params_grid.append( cv_params )\n\n        print cv_params_grid\n\n        loo_cv_iterator = LeaveOneOutCrossValidationIterator()\n        #loo_cv_iterator = KFoldCrossValidationIterator( 3 )\n        CV = GridSearchCrossValidation( loo_cv_iterator, pce,\n                                        use_predictor_cross_validation = True,\n                                        use_fast_predictor_cross_validation = False )\n        CV.run( build_pts, build_vals, cv_params_grid )\n\n        k = 0\n        I = numpy.arange( build_pts.shape[1] )\n        for cv_params in cv_params_grid:\n            order = cv_params['order']\n            pce.set_order( order )\n            V = pce.vandermonde( build_pts ).T\n            for i in xrange( V.shape[0] ):\n                if i == 0 : J = I[1:]\n                elif i == build_pts.shape[1]-1 : J = I[:-1]\n                else: J = numpy.hstack( ( I[:i], I[i+1:] ) )\n                A = V[J,:]\n                b = build_vals[J,:]\n                b = b.reshape( b.shape[0] )\n                x, metrics = orthogonal_matching_pursuit( A, b, 0., 1000, 0 )\n                assert numpy.allclose( ( build_vals[i,0]-\n                                         numpy.dot( V, x ) )[i],\n                                       CV.residuals[k][i] )\n            k += 1\n\n        #for i in xrange( len( CV.cv_params_set ) ):\n        #    print CV.cv_params_set[i], CV.scores[i]\n\n        print 'best param', CV.best_cv_params\n        print 'best score', CV.best_score\n        print build_pts.shape[1]\n\n    def test_omp_choloesky( self ):\n\n        f_1d = lambda x: x**10\n\n        num_dims = 1\n        order = 20\n        func_domain = TensorProductDomain( num_dims, [[-1,1]] )\n        build_pts = numpy.linspace(-.85,.9,14)\n        build_pts = numpy.atleast_2d( build_pts )\n        build_vals = f_1d( build_pts ).T\n        poly_1d = [ LegendrePolynomial1D() ]\n        basis = TensorProductBasis( num_dims, poly_1d )\n        pce = PCE( num_dims, basis, order, func_domain )\n\n\n        all_train_indices = []\n        all_validation_indices = []\n        cv_iterator = LeaveOneOutCrossValidationIterator( build_pts.shape[1] )\n        for train_indices, validation_indices in cv_iterator:\n            all_train_indices.append( train_indices )\n            all_validation_indices.append( validation_indices )\n\n        vandermonde = pce.vandermonde( build_pts ).T\n        out = orthogonal_matching_pursuit_cholesky( vandermonde,\n                                                    build_vals.squeeze(),\n                                                    all_train_indices,\n                                                    all_validation_indices,\n                                                    0.0, 1000, 0 )\n\n        num_steps = out[1].shape[1]\n        # use num_steps-1 because leave one out cross validation is\n        # invalid when V is underdetermined, which happens when i = num_steps.\n        for i in xrange( num_steps-1 ):\n            I = numpy.asarray( out[1][1,:i+1], dtype = numpy.int32 )\n            V = vandermonde[:,I]\n            for j in xrange( len( all_validation_indices ) ):\n                J = all_train_indices[j]\n                K = all_validation_indices[j]\n                A = V[J,:]\n                b = build_vals[J,:]\n                x = ridge_regression( A, b )\n                assert numpy.allclose( ( build_vals[K,0] -\n                                         numpy.dot( V, x )[K,0] ),\n                                       out[2][i][j] )\n\n        all_train_indices = []\n
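        # same consistency check, now with 5-fold cross validation instead of\n        # leave-one-out; with larger folds V can become underdetermined sooner,\n        # hence the len(I) <= len(J) guard below\n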
        all_validation_indices = []\n        num_folds = 5\n        cv_iterator = KFoldCrossValidationIterator( num_folds,\n                                                    build_pts.shape[1] )\n        for train_indices, validation_indices in cv_iterator:\n            all_train_indices.append( train_indices )\n            all_validation_indices.append( validation_indices )\n\n        vandermonde = pce.vandermonde( build_pts ).T\n        out = orthogonal_matching_pursuit_cholesky( vandermonde,\n                                                    build_vals.squeeze(),\n                                                    all_train_indices,\n                                                    all_validation_indices,\n                                                    0.0, 1000, 0 )\n\n        num_steps = out[1].shape[1]\n        for i in xrange( num_steps-1 ):\n            I = numpy.asarray( out[1][1,:i+1], dtype = numpy.int32 )\n            V = vandermonde[:,I]\n            for j in xrange( len( all_validation_indices ) ):\n                J = all_train_indices[j]\n                K = all_validation_indices[j]\n                A = V[J,:]\n                b = build_vals[J,:]\n                x = ridge_regression( A, b )\n                if ( len( I ) <= len( J ) ):\n                    assert numpy.allclose( ( build_vals[K,0] -\n                                             numpy.dot( V, x )[K,0] ),\n                                           out[2][i][j] )\n\nif __name__ == '__main__':\n    unittest.main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39953,"cells":{"__id__":{"kind":"number","value":7344394114408,"string":"7,344,394,114,408"},"blob_id":{"kind":"string","value":"ba6a4e2d0926f9a0db7650ca6e863e7606a7cc9d"},"directory_id":{"kind":"string","value":"04da89950f839b2ccc10522061e058dfe6afd4a8"},"path":{"kind":"string","value":"/Packer.py"},"content_id":{"kind":"string","value":"c4b1d3c44587ab739446dec0a14110030d6eaf01"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Cocobug/MINI"},"repo_url":{"kind":"string","value":"https://github.com/Cocobug/MINI"},"snapshot_id":{"kind":"string","value":"8c9bf90b0ed29f6d8955490f488e0731d5247d57"},"revision_id":{"kind":"string","value":"9b193d0d6b40aaf46079247695c1f71932ef0144"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-03T06:31:19.653880","string":"2016-09-03T06:31:19.653880"},"revision_date":{"kind":"timestamp","value":"2011-11-15T11:07:55","string":"2011-11-15T11:07:55"},"committer_date":{"kind":"timestamp","value":"2011-11-15T11:07:55","string":"2011-11-15T11:07:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding:latin-1 -*-\n\n#####################################\n# Date: 05/04/10 #\n# Author: Rigaut Maximilien #\n# Name: Packer #\n# Version: 1.0 #\n# Copyright 2010: Rigaut Maximilien #\n#####################################\n# This file is part of YASMS.\n#\n# YASMS is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# 
YASMS is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with YASMS. If not, see <http://www.gnu.org/licenses/>.\n#################################\n\nfrom tarfile import open\nimport os\n\nnom=\"YASMS\"\nv=raw_input(\"Program version: \")\nnom=nom+'_All '+v+'.tar.gz'\ntFile= open(nom,'w:gz')\nfichiers=os.listdir('.')\nfichiers.pop(fichiers.index('Archives'))\nfor fichier in fichiers:\n    tFile.add(fichier,fichier,True)\ntFile.close()\nos.rename(nom,'./Archives/'+nom)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39954,"cells":{"__id__":{"kind":"number","value":2989297241708,"string":"2,989,297,241,708"},"blob_id":{"kind":"string","value":"fb702299473de4bcae86d59aef083a5014a0adad"},"directory_id":{"kind":"string","value":"42a28c247f89a594144aa8369773e43d8bb098d0"},"path":{"kind":"string","value":"/src_daemon/test_dictate.py"},"content_id":{"kind":"string","value":"3dfbc92c4939c9e7806262bfd8fbffa64f0b0a07"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"gabadie/yotta"},"repo_url":{"kind":"string","value":"https://github.com/gabadie/yotta"},"snapshot_id":{"kind":"string","value":"633345350b03bfc77c85d726fe08181af6647780"},"revision_id":{"kind":"string","value":"fb61b9fb2be087d9e9b9ea0307e6d4b27bebd945"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-01-23T05:10:30.548338","string":"2020-01-23T05:10:30.548338"},"revision_date":{"kind":"timestamp","value":"2012-02-20T10:17:00","string":"2012-02-20T10:17:00"},"committer_date":{"kind":"timestamp","value":"2012-02-20T10:17:00","string":"2012-02-20T10:17:00"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nimport struct\n\nfrom daemon import Daemon\nimport dictate\n\n\ndef test_frame_error():\n    msg = 'hello world'\n\n    frame = dictate.frame_error(msg)\n\n    theoric_frame = ''\n    theoric_frame += '\\x00\\x00'\n    theoric_frame += '\\x0B\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n    theoric_frame += struct.pack('{}s'.format(len(msg)), msg)\n\n    assert len(theoric_frame) == 2 + 8 + len(msg)\n    assert frame == theoric_frame\n\ndef test_deamon_info():\n    daemon = Daemon()\n    daemon.computers = 3\n    daemon.threads = 7\n\n    frame = dictate.deamon_info(daemon)\n\n    theoric_frame = ''\n    theoric_frame += '\\x00\\x10'\n    theoric_frame += '\\x10\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n    theoric_frame += '\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n    theoric_frame += 
'\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n assert frame == theoric_frame\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39955,"cells":{"__id__":{"kind":"number","value":3238405389919,"string":"3,238,405,389,919"},"blob_id":{"kind":"string","value":"ad302ee60c67cc56b1db462c9f5464b316dce74a"},"directory_id":{"kind":"string","value":"6d531163482af8876b79be727f8eb2fdfab73112"},"path":{"kind":"string","value":"/ex33.py"},"content_id":{"kind":"string","value":"460d6d18cf844d88281651d356bc8a963a3618fb"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"joetait/learnpythonthehardway"},"repo_url":{"kind":"string","value":"https://github.com/joetait/learnpythonthehardway"},"snapshot_id":{"kind":"string","value":"ffe9c85fd834a3130396e049ac394a0f9c6ed537"},"revision_id":{"kind":"string","value":"436718a30e6a0e4d2dee80e6b7a348be4f261371"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-04T19:09:04.203151","string":"2020-06-04T19:09:04.203151"},"revision_date":{"kind":"timestamp","value":"2014-07-27T20:38:30","string":"2014-07-27T20:38:30"},"committer_date":{"kind":"timestamp","value":"2014-07-27T20:38:30","string":"2014-07-27T20:38:30"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\ndef list_numbers(length, inc):\n numbers = []\n for i in range (0,length,inc):\n print \"At the top i is %d\" % i\n numbers.append(i) \n print \"Numbers now: \", numbers\n print \"At the bottom i is %d\" % i\n \n return numbers\n\nnumbers = list_numbers(20,3)\n\n\n#\n#def list_numbers(length, inc):\n# i = 0\n# j = inc\n# numbers = []\n# while i < length:\n# print \"At the top i is %d\" % i\n# numbers.append(i)\n# \n# i = i + j\n# print \"Numbers now: \", numbers\n# print \"At the bottom i is %d\" % i\n#\n# return numbers\n# \n#numbers = list_numbers(10,2)\n\nprint \"The numbers: \"\n\nfor num in numbers:\n print num\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39956,"cells":{"__id__":{"kind":"number","value":18683107741556,"string":"18,683,107,741,556"},"blob_id":{"kind":"string","value":"c8d0a19855709973c906ae4db51f632d7397479c"},"directory_id":{"kind":"string","value":"31b90992af2285159c32e1e389d37efae482563e"},"path":{"kind":"string","value":"/application/logic/web_handlers/add_manager_form_handler.py"},"content_id":{"kind":"string","value":"b06ce60cc4b41342fd60d151f2a346d2289c4327"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"mkorenkov/Pragmatiq"},"repo_url":{"kind":"string","value":"https://github.com/mkorenkov/Pragmatiq"},"snapshot_id":{"kind":"string","value":"633345350b03bfc77c85d726fe08181af6647780"},"revision_id":{"kind":"string","value":"fb61b9fb2be087d9e9b9ea0307e6d4b27bebd945"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-01-23T05:10:30.548338","string":"2020-01-23T05:10:30.548338"},"revision_date":{"kind":"timestamp","value":"2012-02-20T10:17:00","string":"2012-02-20T10:17:00"},"committer_date":{"kind":"timestamp","value":"2012-02-20T10:17:00","string":"2012-02-20T10:17:00"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from logic.func import get_prev_pr\nfrom logic.models import Salary, Grade, Position, Conclusion\nfrom logic.web_handlers.add_form_handler import AddEmployeeForm\n\n\nclass AddManagerForm(AddEmployeeForm):\n\n type = \"manager\"\n form = None\n flag = 0\n\n def get(self, key):\n\n super(AddManagerForm, self).get(key)\n\n if self.form.type == 'manager':\n prev_pr = get_prev_pr(self.form.pr)\n try:\n prev_form = prev_pr.forms.filter('type', self.form.type).get()\n\n prev_salary = prev_form.get_all_data['salary']\n prev_grade = prev_form.get_all_data['grade']\n except AttributeError:\n prev_salary = None\n prev_grade = None\n\n prev_position = self.form.pr.employee.position\n\n if prev_salary:\n salary = Salary(value=prev_salary.value,\n form=self.form)\n else:\n salary = Salary(value='N/A',\n form=self.form)\n salary.put()\n\n if prev_grade:\n grade = Grade(value=prev_grade.value,\n form=self.form)\n else:\n grade = Grade(value='N/A',\n form=self.form)\n grade.put()\n\n if prev_position:\n position = Position(value=prev_position,\n form=self.form)\n else:\n position = Position(value='N/A',\n form=self.form)\n position.put()\n\n conclusion = Conclusion(value='meet expectations',\n form=self.form)\n conclusion.put()\n\n self.redirect('/manager/pr/get/%(type)s/%(key)s'\n % {'type': self.type,\n 'key': self.form.pr.key()})\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39957,"cells":{"__id__":{"kind":"number","value":17549236380589,"string":"17,549,236,380,589"},"blob_id":{"kind":"string","value":"c6a278e80e241b534aa8a48d594277bede06609a"},"directory_id":{"kind":"string","value":"a6b3a096090672d46f754217987fd10deb6780c3"},"path":{"kind":"string","value":"/rSpider_multiThread/testRenrenBrowser.py"},"content_id":{"kind":"string","value":"7fb417d2d977424943875331101d652e9aab01b0"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"jinmfeng/dataBang"},"repo_url":{"kind":"string","value":"https://github.com/jinmfeng/dataBang"},"snapshot_id":{"kind":"string","value":"7c1e0b8b15952373898247eb0d17641bcfa3896f"},"revision_id":{"kind":"string","value":"20eaa17a498b34fea5431bd899a20fda1f896f6a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T21:26:16.500908","string":"2021-01-20T21:26:16.500908"},"revision_date":{"kind":"timestamp","value":"2013-03-13T08:38:26","string":"2013-03-13T08:38:26"},"committer_date":{"kind":"timestamp","value":"2013-03-13T08:38:26","string":"2013-03-13T08:38:26"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import shutil\nimport os\nimport unittest\n\nfrom renrenBrowser import *\n\nclass TestRenrenBrowser(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.pwdRoot='./testBrowser'\n\t\tself.pwdSave=self.pwdRoot+'/renrenData/pages'\n\t\tif os.path.exists(self.pwdSave)==True:\n\t\t\tshutil.rmtree(self.pwdSave)\n\n\t\tself.browser=RenrenBrowser(path=self.pwdRoot)\n\t\t#self.browser.login()\n\n\tdef tearDown(self):\n#\t\tself.browser.dispose()\n\t\tself.browser=None\n\t\tpass\n\n\tdef testProfile(self):\n\t\trenrenIds={'233330059','230760442','223981104','410941086','285060168'}\n\t\t\t\t\t\t\t#myself,timeline ok/unavailable,old style ok/unavailable\n\t\tself.browser.setLogLevel(10)#debug\n\n\t\tself.browser.localSave(False)\n\t\tfor renrenId in renrenIds:\n\t\t\tself.assertNotEqual(self.browser.profile(renrenId),'timeout')\n\t\tself.assertFalse(os.path.exists(self.pwdSave))#path not exist\n\n\t\tself.browser.localSave(True)\n\t\tfor renrenId in renrenIds:\n\t\t\tself.assertNotEqual(self.browser.profile(renrenId),'timeout')\n\t\tself.assertEqual(len(os.listdir(self.pwdSave)),len(renrenIds))\n\n\tdef testFriendList(self):\n\t\t#target 1 page and check htmlStr not 'timeout'\n\t\trenrenIds={'233330059','410941086','267654044','285060168','240303471'}\n\t\t\t\t\t\t\t#myself,3+pages/2pages/1page/unavailable\n\t\tself.browser.setLogLevel(10)#debug\n\t\t#target page=0,1,2,3\n\t\tself.browser.localSave(False)\n\t\tfor targetPage in range(0,3):\n\t\t\tfor renrenId in renrenIds:\n\t\t\t\tself.assertNotEqual(self.browser.friendList(renrenId,targetPage),'timeout')\n\t\t\tself.assertFalse(os.path.exists(self.pwdSave))#path not exist\n\t\tself.browser.localSave(True)\n\t\tfor targetPage in range(0,3):\n\t\t\tfor renrenId in renrenIds:\n\t\t\t\tself.assertNotEqual(self.browser.friendList(renrenId,targetPage),'timeout')\n\t\t\tself.assertEqual(len(os.listdir(self.pwdSave)),len(renrenIds)*(targetPage+1))\n\n\t\t#target all pages and check len(set)\n\t\tflist={'232639310':35,'242543024':152,'285060168':5}\n\t\tfor item in flist.items():\n\t\t\tself.assertEqual(len(self.browser.friendList(item[0])),item[1])\n\nif 
__name__=='__main__':\n\tsuite=unittest.TestSuite()\n\tsuite.addTest(TestRenrenBrowser('testProfile'))\n\tsuite.addTest(TestRenrenBrowser('testFriendList'))\n\trunner=unittest.TextTestRunner()\n\trunner.run(suite)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39958,"cells":{"__id__":{"kind":"number","value":7782480754530,"string":"7,782,480,754,530"},"blob_id":{"kind":"string","value":"afaed1e84daf4f393f387a0c81a382690fb6037b"},"directory_id":{"kind":"string","value":"f9905ef34784f332deb22da395df037c28c9ef76"},"path":{"kind":"string","value":"/haicheng/cost_model/select.py"},"content_id":{"kind":"string","value":"82f53a71f6e942a3927e1974a4272b0e77b4bc8d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"batvph00561/harmonyruntime"},"repo_url":{"kind":"string","value":"https://github.com/batvph00561/harmonyruntime"},"snapshot_id":{"kind":"string","value":"c76fb06eaf3697c351ac538ccfe98c4b7f5a4087"},"revision_id":{"kind":"string","value":"b4df7a045109d19c1133f2225822a3e5f94a0d15"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-15T20:54:27.082826","string":"2016-09-15T20:54:27.082826"},"revision_date":{"kind":"timestamp","value":"2012-04-18T20:37:01","string":"2012-04-18T20:37:01"},"committer_date":{"kind":"timestamp","value":"2012-04-18T20:37:01","string":"2012-04-18T20:37:01"},"github_id":{"kind":"number","value":33104889,"string":"33,104,889"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#! 
/usr/bin/python\n\nimport os\nimport numpy\nimport sys\nimport subprocess\n\n#check arguments\nif len(sys.argv) > 1 and len(sys.argv) != 4 :\n print \"arguments error, usage: ./select.py element_num cta_num thread_num\"\n exit()\n\n#read file\n#f = open(\"select_v13.cu\")\n\n#read beginning lines\n#tmpHeader=[]\n#for i in range(1,19):\n# tmpHeader.append(f.readline())\n\n#ctaLine = f.readline()\n#threadLine = f.readline()\n#\n#tmpTail = f.readlines()\n#f.close()\n\n\n#output\nrmCmd = \"rm -rf select.csv\"\nprint rmCmd\nos.system(rmCmd)\nprint \"Writing result to select.csv\"\noutput = open(\"select.csv\", \"w\")\noutput.close()\n\n#modify cta number and thread number\n#assert ctaLine[0:13] == \"#define CTAS \"\n#assert threadLine[0:16] == \"#define threads \"\n\n#eleNumPowEnd = 18 #256K*1024\neleNumTimeEnd = 40\nctaNumPowEnd = 15 #32K\nthreadNumTimeEnd = 16 #16*64\n\n#eleNumArray = numpy.power(2, range(0,eleNumPowEnd+1)) * 1024\n#eleNumArray = numpy.append(eleNumArray, 400*1024*1024)\neleNumArray = 100*1024*1024 * numpy.arange(5, eleNumTimeEnd + 1)\n\nfirstTime = True\n\n#choose start element\nif len(sys.argv) > 1:\n eleStart = numpy.where(eleNumArray == int(sys.argv[1]))[0][0]\nelse:\n eleStart = 0\n\n\n#modify element number\nfor eleNum in eleNumArray[eleStart:]:\n \n# ctaNumArray = numpy.power(2, range(1,ctaNumPowEnd+1))\n#\n# if len(sys.argv) > 1 and firstTime:\n# ctaStart = numpy.where(ctaNumArray == int(sys.argv[2]))[0][0]\n# else:\n# ctaStart = 0\n#\n# for ctaNum in ctaNumArray[ctaStart:]:\n#\n# if ctaNum > eleNum: #should be in range\n# continue\n# \n# threadNumArray = 64 * numpy.arange(1, threadNumTimeEnd+1)\n#\n# if len(sys.argv) > 1 and firstTime:\n# threadStart = numpy.where(threadNumArray == int(sys.argv[3]))[0][0]\n# firstTime = False\n# else:\n# threadStart = 0\n# for threadNum in threadNumArray[threadStart:]:\n#\n# if ctaNum * threadNum > eleNum: #should be in range\n# break\n#\n# ctaLine = \"#define CTAS \" + str(ctaNum) + \"\\n\"\n# threadLine = \"#define threads \" + str(threadNum) + \"\\n\"\n#\n# outf = open(\"tmp.cu\", \"w\")\n# outf.writelines(tmpHeader)\n# outf.write(ctaLine)\n# outf.write(threadLine)\n# outf.writelines(tmpTail)\n# outf.close()\n# \n# \n# #compile and execute\n# print \"CTAS\", ctaNum\n# print \"threads\", threadNum\n # compileCmd = \"nvcc tmp.cu -o tmp -arch sm_23 -O3 -lpthread\"\n # print compileCmd\n # os.system(compileCmd)\n exeCmd = \"./select_v13 \" + str(eleNum)\n print exeCmd\n out = subprocess.Popen([\"./select_v13\", str(eleNum)], stdout = subprocess.PIPE)\n output = open(\"select.csv\", \"a\")\n output.write(out.stdout.read())\n output.close()\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39959,"cells":{"__id__":{"kind":"number","value":17085379943846,"string":"17,085,379,943,846"},"blob_id":{"kind":"string","value":"b5292b53361c522e246830f8415851d9c2f5784c"},"directory_id":{"kind":"string","value":"f48a2fc8152ebd290c0f96358811087617d0f499"},"path":{"kind":"string","value":"/python_side/Door_serverv0.6.py"},"content_id":{"kind":"string","value":"4e8f1078593613c0e50ff63fef8b1e8a0e4e22a4"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"kism3t1/Door-LCD"},"repo_url":{"kind":"string","value":"https://github.com/kism3t1/Door-LCD"},"snapshot_id":{"kind":"string","value":"0038ea74decb1478f74e2eaa85e9d0e37a8a0929"},"revision_id":{"kind":"string","value":"93c19015fac048d7a4bf6af2fd73153d300379ad"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T08:56:06.611141","string":"2021-01-19T08:56:06.611141"},"revision_date":{"kind":"timestamp","value":"2012-03-12T18:05:08","string":"2012-03-12T18:05:08"},"committer_date":{"kind":"timestamp","value":"2012-03-12T18:05:08","string":"2012-03-12T18:05:08"},"github_id":{"kind":"number","value":3400896,"string":"3,400,896"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# TCP server example\nimport socket\nimport serial\nimport time\nimport datetime\nimport sys\nimport argparse\n\nthe_Time = datetime.datetime.now()\n\nif len(sys.argv) != 2:\n print (\"[+] usage: ./Door_serverv0.5.py \")\n sys.exit(1)\nPORT = int(sys.argv[1])\n#HOST = \" \"\nprint (\"The chosen port is \") + str(PORT)\n\nser = serial.Serial ('/dev/ttyACM0')\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#server_socket.bind((\"\", 5000))\nserver_socket.bind((\"\", PORT))\nserver_socket.listen(5)\n\nprint (\"TCPServer Waiting for client on port \") + str(PORT)\n\nwhile 1:\n client_socket, address = server_socket.accept()\n print \"I got a connection from \", address\n while 1:\n data = client_socket.recv(512)\n if ( data == 'q' or data == 'Q'):\n client_socket.close()\n f = open('doorlog.ian', 'a')\n f.write(\"Exiting Server @ \" + (str(the_Time) + \"/n\"))\n f.close\n exit()\n break;\n elif ( data == 'out'):\n print \"RECIEVED:\" , data\n ser.write(\"o\")\n f = open('doorlog.ian', 'a')\n f.write(\"Out of Office @ \" + (str(the_Time) + \"/n\"))\n f.close\n elif ( data == 'in'):\n print \"RECIEVED:\" , data\n ser.write(\"i\")\n f = open('doorlog.ian', 'a')\n f.write(\"In Office @ \" + (str(the_Time) + \"/n\"))\n f.close\n elif ( data == 'pub'):\n print \"RECIEVED:\" , data\n ser.write(\"p\")\n f = open('doorlog.ian', 'a')\n f.write(\"Down the Pub @ \" + (str(the_Time) + \"/n\"))\n f.close\n elif ( data == 'yes'):\n print \"RECIEVED:\" , data\n ser.write(\"y\")\n f = open('doorlog.ian', 'a')\n f.write(\"Come in! @ \" + (str(the_Time) + \"/n\"))\n f.close\n elif ( data == 'meeting'):\n print \"RECIEVED:\" , data\n ser.write(\"m\")\n f = open('doorlog.ian', 'a')\n f.write(\"Sorry in a Meeting @ \" + (str(the_Time) + \"/n\"))\n f.close\n elif ( data == 'face'):\n print \"RECIEVED:\" , data\n ser.write(\"f\")\n f = open('doorlog.ian', 'a')\n f.write(\"Displaying the FACE! @ \" + (str(the_Time) + \"/n\"))\n f.close\n\t\telif (data == 'close'):\n\t\t\tprint \"RECIEVED:\" , data\n\t\t\tclient_socket.close()\n\t\t\tf = open('doorlog.ian', 'a')\n\t\t\tf.write(\"Retarting Connection... 
@ \" + (str(the_Time) + \"/n\"))\n\t\t\tf.close\n\t\t\tprint \"Restarting connection:\"\n\t\t\t#server_socket.bind((\"\", 5000))\n\t\t\tserver_socket.listen(5)\n\t\t\tprint \"Door LCD: Waiting for Ian's PC on port 5000\"\n\n else:\n print(\"*\")\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39960,"cells":{"__id__":{"kind":"number","value":14164802159443,"string":"14,164,802,159,443"},"blob_id":{"kind":"string","value":"0a56fd023775cd0fa3026345a84f267d0048a909"},"directory_id":{"kind":"string","value":"daf95a08aa12d2251f3991fbe2b678ed910a60ee"},"path":{"kind":"string","value":"/weather.py"},"content_id":{"kind":"string","value":"ba01d8b1fefc24bada5c916716fe2a3863575a29"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"stuycs-softdev-fall-2013/proj2-pd6-06-GoodMorning"},"repo_url":{"kind":"string","value":"https://github.com/stuycs-softdev-fall-2013/proj2-pd6-06-GoodMorning"},"snapshot_id":{"kind":"string","value":"90843d9fd554aecf56bf8d847414d1e852c5d1c2"},"revision_id":{"kind":"string","value":"0707cbddefce390e3b183cc415b8476fe3d4bd15"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T11:33:34.232421","string":"2021-01-23T11:33:34.232421"},"revision_date":{"kind":"timestamp","value":"2013-12-06T09:00:50","string":"2013-12-06T09:00:50"},"committer_date":{"kind":"timestamp","value":"2013-12-06T09:00:50","string":"2013-12-06T09:00:50"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import urllib2\nimport json\n\ndef url():\n f = urllib2.urlopen('http://api.wunderground.com/api/987077b70105ec11/hourly/q/NY/New_York_City.json')\n return f\n#-----------------USING WUNDERGROUND (weather underground/weather channel) API\n\ndef getWeather():\n f = url()\n json_string = f.read()\n parsed_json = json.loads(json_string)\n weather = parsed_json['hourly_forecast'][0]['condition']\n f.close()\n#-----------------returns what the weather is predicted to be (ex: cloudy, rainy, etc.)\n return weather\n\ndef getTemp():\n f = url()\n json_string = f.read()\n parsed_json = json.loads(json_string)\n temp_f = parsed_json['hourly_forecast'][0]['temp']['english']\n f.close()\n#-----------------returns the expected temperature in fahrenheit. 
# =============================================================================
# fherrmannsdoerfer/masterArbeit :: scriptsForPictures/scatterplot200and2000frames.py
# (Python, 2013)  https://github.com/fherrmannsdoerfer/masterArbeit
# =============================================================================
import matplotlib.pyplot as plot

# Scatter data for the 200-frame and the 2000-frame runs. Each of the four
# arrays holds several hundred hard-coded floating-point values; the data is
# unchanged in the repository file and only the first entries are shown here.
y200 = [681.743, 662.12, 931.126, 1214.01]    # ... full data in the source file
x200 = [476.955, 481.235, 490.79, 490.805]    # ...
y2000 = [463.568, 511.752, 567.601, 601.309]  # ...
x2000 = [467.944, 470.328, 474.58, 477.983]   # ...

plot.scatter(x200, y200)
plot.scatter(x2000, y2000, color=[1, 0, 0])
plot.show()
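# The y values span several orders of magnitude, so a variant with a log
# y-axis and a legend reads better; the series labels are assumptions:
plot.scatter(x200, y200, label='200 frames')
plot.scatter(x2000, y2000, color=[1, 0, 0], label='2000 frames')
plot.yscale('log')
plot.legend()
plot.show()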
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"adam139/plonesrc"},"repo_url":{"kind":"string","value":"https://github.com/adam139/plonesrc"},"snapshot_id":{"kind":"string","value":"58f48e7cdfc8fbed7398011c40649f095df10066"},"revision_id":{"kind":"string","value":"cbf20045d31d13cf09d0a0b2a4fb78b96c464d20"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T21:36:44.014240","string":"2021-01-10T21:36:44.014240"},"revision_date":{"kind":"timestamp","value":"2014-09-09T04:28:04","string":"2014-09-09T04:28:04"},"committer_date":{"kind":"timestamp","value":"2014-09-09T04:28:04","string":"2014-09-09T04:28:04"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from OFS.Image import Image\n\n\nfrom zope.app.form.browser.textwidgets import FileWidget\nfrom zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile\n\n\nclass ImageWidget(FileWidget):\n \"\"\"\n The standard FileWidget returns a string instead of an IFile inst,\n which means it will always fail schema validation in formlib.\n \"\"\"\n\n template = ViewPageTemplateFile('inputwidget.pt')\n displayWidth = 30\n\n def __call__(self):\n value=self._getFormValue() or None\n return self.template(name=self.context.__name__, value=value)\n\n def _toFieldValue(self, input):\n value=super(ImageWidget, self)._toFieldValue(input)\n if value is not self.context.missing_value:\n value=Image('image','image', value)\n return value\n\n def hasInput(self):\n return ((self.name+\".used\" in self.request.form)\n or\n (self.name in self.request.form)\n ) and not self.request.form.get(self.name+\".nochange\", '')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39963,"cells":{"__id__":{"kind":"number","value":11905649352884,"string":"11,905,649,352,884"},"blob_id":{"kind":"string","value":"e7a7e3510dbfc1f059e426d451ec22a185b27b01"},"directory_id":{"kind":"string","value":"a297132838c5a6b436d940db63286896b8e7c829"},"path":{"kind":"string","value":"/configure"},"content_id":{"kind":"string","value":"cb29f5d380041ca043510ccf56f7c8134a960ecc"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"hotgloupi/cof"},"repo_url":{"kind":"string","value":"https://github.com/hotgloupi/cof"},"snapshot_id":{"kind":"string","value":"799ba000548959e64e00ec27065afa36d1cb1b31"},"revision_id":{"kind":"string","value":"fa6193fe8ea7b9f5c7318ed5b84059785bc718d3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-23T12:23:42.257051","string":"2020-04-23T12:23:42.257051"},"revision_date":{"kind":"timestamp","value":"2013-10-22T09:50:07","string":"2013-10-22T09:50:07"},"committer_date":{"kind":"timestamp","value":"2013-10-22T09:50:07","string":"2013-10-22T09:50:07"},"github_id":{"kind":"number","value":171166390,"string":"171,166,390"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nimport argparse\nimport os\nimport pipes\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport cgitb\ncgitb.enable(format = 'text')\n\ndef cleanpath(p, **kwargs):\n p = os.path.normpath(p)\n if p.startswith('./'):\n p = p[2:]\n if kwargs.get('replace_home'):\n p = os.path.join(\n '~',\n os.path.relpath(p, start=os.path.expanduser('~')),\n )\n return p.replace('\\\\', '/')\n\ndef cleanabspath(p, **kwargs):\n return cleanpath(os.path.abspath(p), **kwargs)\n\ndef cleanjoin(*args, **kwargs):\n return cleanpath(os.path.join(*args), **kwargs)\n\nFATAL = \"[ cfg ] FATAL\"\nERROR = \"[ cfg ] ERROR\"\nSTATUS = \"[ cfg ]\"\n\ndef err(*args, **kwargs):\n kwargs['file'] = sys.stderr\n return print(*args, **kwargs)\n\ndef status(*args, **kwargs):\n return print(STATUS, *args, **kwargs)\n\ndef error(*args, **kwargs):\n return err(ERROR, *args, **kwargs)\n\ndef fatal(*args, **kwargs):\n try:\n err(FATAL, *args, **kwargs)\n finally:\n sys.exit(1)\n\ndef which(binary):\n paths = os.environ['PATH'].split(os.path.pathsep)\n for dir_ in paths:\n path = os.path.join(dir_, binary)\n if os.path.exists(path) and os.stat(path)[stat.ST_MODE] & stat.S_IXUSR:\n return path\n if sys.platform =='win32' and not binary.lower().endswith('.exe'):\n return which(binary + '.exe')\n return None\n\ndef cmd(cmd, stdin = b'', cwd = None, env=None):\n sys.stderr.flush()\n p = subprocess.Popen(cmd,\n cwd = cwd,\n stdin = subprocess.PIPE,\n shell = False,\n env = env)\n p.stdin.write(stdin)\n p.stdin.close()\n p.wait()\n if p.returncode != 0:\n raise Exception(\"Command failed\")\n\nDEBUG = True\nVERBOSE = True\nROOT_DIR = cleanpath(os.path.dirname(__file__))\nHOME_URL = \"http://hotgloupi.fr/tupcfg.html\"\nTUP_HOME_URL = \"http://gittup.org\"\nPROJECT_CONFIG_DIR_NAME = \".config\"\nPROJECT_CONFIG_DIR = cleanjoin(ROOT_DIR, PROJECT_CONFIG_DIR_NAME)\nPROJECT_CONFIG_FILENAME = \"project.py\"\nTUP_INSTALL_DIR = cleanjoin(PROJECT_CONFIG_DIR, 'tup')\nTUP_GIT_URL = \"git://github.com/gittup/tup.git\"\nTUP_WINDOWS_URL = \"http://gittup.org/tup/win32/tup-latest.zip\"\nTUPCFG_INSTALL_DIR = 
cleanjoin(PROJECT_CONFIG_DIR, 'tupcfg-install')\nTUPCFG_GIT_URL = \"git://github.com/hotgloupi/tupcfg.git\"\nTUPCFG_GENERATORS = ['Tup', 'Makefile']\n\ndef self_install(args):\n status(\"Installing tupcfg in\", TUPCFG_INSTALL_DIR)\n if not os.path.exists(TUPCFG_INSTALL_DIR):\n os.makedirs(TUPCFG_INSTALL_DIR)\n status(\"Getting tup from\", TUPCFG_GIT_URL)\n cmd(['git', 'clone', TUPCFG_GIT_URL, TUPCFG_INSTALL_DIR])\n else:\n status(\"Updating tupcfg\")\n cmd(['git', 'pull'], cwd=TUPCFG_INSTALL_DIR)\n shutil.rmtree(os.path.join(PROJECT_CONFIG_DIR, 'tupcfg'), ignore_errors=True)\n shutil.copytree(\n os.path.join(TUPCFG_INSTALL_DIR, 'src/tupcfg'),\n os.path.join(PROJECT_CONFIG_DIR, 'tupcfg')\n )\n\n\ndef tup_install(args):\n from tupcfg import platform\n if platform.IS_WINDOWS:\n tup_install_windows(args)\n else:\n tup_install_git(args)\n from tupcfg import tools\n print(\"Tup installed in\", tools.which('tup'))\n\n\ndef tup_install_windows(args):\n import urllib.request as r\n req = r.urlopen(TUP_WINDOWS_URL)\n if not os.path.exists(TUP_INSTALL_DIR):\n os.makedirs(TUP_INSTALL_DIR)\n tarball = os.path.join(TUP_INSTALL_DIR, 'tup.zip')\n with open(tarball, 'wb') as f:\n while True:\n data = req.read(4096)\n if not data:\n break\n f.write(data)\n import zipfile\n with zipfile.ZipFile(tarball) as f:\n f.extractall(TUP_INSTALL_DIR)\n\n\ndef tup_install_git(args):\n from tupcfg import path\n status(\"Installing tup in\", TUP_INSTALL_DIR)\n if not path.exists(TUP_INSTALL_DIR):\n os.makedirs(TUP_INSTALL_DIR)\n status(\"Getting tup from\", TUP_GIT_URL)\n cmd(['git', 'clone', TUP_GIT_URL, TUP_INSTALL_DIR])\n else:\n status(\"Updating tup\")\n cmd(['git', 'pull'], cwd=TUP_INSTALL_DIR)\n\n tup_shell_bin = path.join(TUP_INSTALL_DIR, \"build\", \"tup\")\n if not path.exists(TUP_INSTALL_DIR, \"build\", \"tup\"):\n cmd(['sh', 'build.sh'], cwd=TUP_INSTALL_DIR)\n else:\n status(\"Found shell version of tup at\", tup_shell_bin)\n\n tup_dir = path.join(ROOT_DIR, '.tup')\n if os.path.exists(tup_dir):\n os.rename(tup_dir, tup_dir + '.bak')\n\n try:\n if not path.exists(TUP_INSTALL_DIR, '.tup'):\n cmd(['./build/tup', 'init'], cwd=TUP_INSTALL_DIR)\n cmd(['./build/tup', 'upd'], cwd=TUP_INSTALL_DIR)\n finally:\n if path.exists(tup_dir + '.bak'):\n os.rename(tup_dir + '.bak', tup_dir)\n\n\ndef prepare_build(args, defines, exports):\n import tupcfg # Should work at this point\n from tupcfg.path import exists, join, absolute\n\n build_dir = args.build_dir\n try:\n project = tupcfg.Project(\n ROOT_DIR,\n PROJECT_CONFIG_DIR,\n config_filename = PROJECT_CONFIG_FILENAME,\n new_project_vars = exports,\n )\n\n env_build_dirs = list(\n d for d in project.env.get('BUILD_DIRECTORIES', [])\n if exists(d, '.tupcfg_build')\n )\n tupcfg.tools.verbose(\"Found build directories:\", ' '.join(env_build_dirs))\n build_dirs = []\n if build_dir is not None:\n build_dirs = [build_dir]\n tup_build_marker = join(build_dir, '.tupcfg_build')\n if not exists(build_dir):\n os.makedirs(build_dir)\n with open(tup_build_marker, 'w') as f:\n pass\n elif not exists(tup_build_marker):\n fatal('\\n'.join([\n \"'%(build_dir)s' doest not seem to be a tup build directory:\",\n \"\\t* Remove this directory\",\n \"\\t* Touch the file %(tup_build_marker)s\",\n ]) % locals())\n else:\n build_dirs = env_build_dirs\n\n if not build_dirs:\n fatal(\"No build directory specified on command line. 
(try -h switch)\")\n\n project.env.project_set(\n 'BUILD_DIRECTORIES',\n list(set(build_dirs + env_build_dirs))\n )\n\n if args.dump_vars:\n status(\"Project variables:\")\n for k, v in project.env.project_vars.items():\n status(\"\\t - %s = %s\" % (k, v))\n\n generators = []\n if args.generator:\n generators.append(args.generator)\n with project:\n for build_dir in build_dirs:\n with project.configure(build_dir, defines, generators) as build:\n if args.dump_vars:\n status(\"Build variables for directory '%s':\" % build_dir)\n build_vars = project.env.build_vars\n keys = sorted(build_vars.keys())\n for k in keys:\n status(\"\\t - %s = %s\" % (k, build_vars[k]))\n continue\n\n if args.dump_build:\n build.dump(project)\n else:\n build.execute(project)\n\n except tupcfg.Project.NeedUserEdit:\n print(\n \"Please edit %s and re-run the configure script\" % join(\n PROJECT_CONFIG_DIR,\n PROJECT_CONFIG_FILENAME,\n replace_home=True,\n )\n )\n sys.exit(0)\n\n\ndef parse_args():\n def Dir(s):\n if not os.path.isdir(s):\n raise argparse.ArgumentTypeError\n return s\n parser = argparse.ArgumentParser(\n description=\"Configure your project for tup\"\n )\n parser.add_argument('build_dir', action=\"store\",\n help=\"Where to build your project\", nargs='?')\n parser.add_argument('-D', '--define', action='append',\n help=\"Define build specific variables\", default=[])\n parser.add_argument('-E', '--export', action='append',\n help=\"Define project specific variables\", default=[])\n parser.add_argument('-v', '--verbose', action='store_true', help=\"verbose mode\")\n parser.add_argument('-d', '--debug', action='store_true', help=\"debug mode\")\n parser.add_argument('--dump-vars', action='store_true', help=\"dump variables\")\n parser.add_argument('--dump-build', action='store_true', help=\"dump commands that would be executed\")\n parser.add_argument('--install', action='store_true', help=\"install when needed\")\n parser.add_argument('--self-install', action='store_true', help=\"install (or update) tupcfg\")\n parser.add_argument('--tup-install', action='store_true', help=\"install (or update) tup\")\n parser.add_argument('--generator', '-G', default = None,\n help = \"Generate build rules for another build system\",\n choices = TUPCFG_GENERATORS)\n\n return parser, parser.parse_args()\n\ndef parse_cmdline_variables(args):\n res = {}\n for arg in args:\n arg = arg.strip()\n if '=' not in arg:\n fatal(\"'=' not found in define: use %s=true to define a boolean variable\" % arg)\n parts = arg.split('=')\n k = parts[0].strip()\n v = '='.join(parts[1:]).strip()\n op = '='\n for p in ['+', ':']:\n if k.endswith(p):\n op = p + '='\n k = k[:-1]\n break\n if v.lower() in ['1', 'true']:\n v = True\n elif v.lower() in ['0', 'false']:\n v = False\n elif v.startswith('['):\n if not v.endswith(']'):\n fatal(\"Missing ']' when defining %s\" % k)\n v = [e.strip() for e in v[1:-1].split(',')]\n res[k] = {\n 'op': op,\n 'value': v,\n }\n return res\n\ndef main():\n parser, args = parse_args()\n\n DEBUG = args.debug\n VERBOSE = args.verbose\n\n from os.path import exists, join\n\n sys.path.insert(0, os.path.join(PROJECT_CONFIG_DIR,'tupcfg/src'))\n sys.path.insert(0, PROJECT_CONFIG_DIR)\n\n have_tupcfg = False\n try:\n import tupcfg\n have_tupcfg = True\n except: pass\n\n if args.self_install or (args.install and not have_tupcfg):\n self_install(args)\n try:\n import imp\n file_, pathname, descr = imp.find_module(\"tupcfg\", [PROJECT_CONFIG_DIR])\n tupcfg = imp.load_module(\"tupcfg\", file_, pathname, descr)\n except 
Exception as e:\n fatal(\"Sorry, tupcfg installation failed for some reason:\", e)\n\n try:\n import tupcfg\n\n # Tupcfg will use these functions to log\n tupcfg.tools.status = status\n tupcfg.tools.error = error\n tupcfg.tools.fatal = fatal\n\n tupcfg.tools.DEBUG = DEBUG\n tupcfg.tools.VERBOSE = VERBOSE or DEBUG\n except ImportError as e:\n if DEBUG is True:\n raise e\n\n fatal(\n '\\n'.join([\n \"Cannot find tupcfg module, your options are:\",\n \"\\t* Just use the --self-install flag (installed in %(config_dir)s/tupcfg)\",\n \"\\t* Add it as a submodule: `git submodule add git@github.com:hotgloupi/tupcfg.git %(config_dir)s/tupcfg`\",\n \"\\t* Install it somewhere (see %(home_url)s)\",\n ]) % {\n 'config_dir': cleanabspath(PROJECT_CONFIG_DIR, replace_home=True),\n 'home_url': HOME_URL\n }\n )\n\n tupcfg.tools.PATH.insert(0, tupcfg.path.absolute(TUP_INSTALL_DIR))\n\n if args.tup_install or (args.install and not tupcfg.tools.which('tup')):\n tup_install(args)\n\n if 'Tup' == args.generator and not tupcfg.tools.which('tup'):\n fatal(\n '\\n'.join([\n \"Cannot find tup binary, your options are:\",\n \"\\t* Just use the --tup-install flag (installed in %(config_dir)s/tup)\",\n \"\\t* Install it somewhere (see %(home_url)s)\",\n ]) % {\n 'config_dir': cleanabspath(PROJECT_CONFIG_DIR, replace_home=True),\n 'home_url': TUP_HOME_URL\n }\n )\n\n try:\n defines = parse_cmdline_variables(args.define)\n exports = parse_cmdline_variables(args.export)\n\n prepare_build(args, defines, exports)\n\n except tupcfg.Env.VariableNotFound as e:\n fatal('\\n'.join([\n \"Couldn't find any variable '%s', do one of the following:\" % e.name,\n \"\\t* Export it with: `%s=something ./configure`\" % e.name,\n \"\\t* Define it with: `./configure -D%s=something`\" % e.name,\n \"\\t* Define it in your project config file (%s)\" % cleanjoin(PROJECT_CONFIG_DIR, PROJECT_CONFIG_FILENAME),\n ]))\n\n\nif __name__ == '__main__':\n main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":true,"string":"true"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39964,"cells":{"__id__":{"kind":"number","value":9156870294364,"string":"9,156,870,294,364"},"blob_id":{"kind":"string","value":"5c67cf78ac17398d66078fcc069ec58af0439e39"},"directory_id":{"kind":"string","value":"8d257076e970099c978d9859c48bb63c8b0460ca"},"path":{"kind":"string","value":"/CPAC/anatpreproc/tests/anatpreproc_test.py"},"content_id":{"kind":"string","value":"5c7aadece74a812129e6ce7d7bb4d78ff6ae560e"},"detected_licenses":{"kind":"list 
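# What parse_cmdline_variables() produces for typical -D/-E arguments,
# following the rules in the function above (the variable names here are
# only examples):
>>> parse_cmdline_variables(['CXX=clang++', 'WITH_TESTS=true', 'FLAGS+=[a, b]'])
{'CXX': {'op': '=', 'value': 'clang++'},
 'WITH_TESTS': {'op': '=', 'value': True},
 'FLAGS': {'op': '+=', 'value': ['a', 'b']}}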
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"theresaanna/C-PAC"},"repo_url":{"kind":"string","value":"https://github.com/theresaanna/C-PAC"},"snapshot_id":{"kind":"string","value":"5e8061ec9626fa8c36675b938ab0aaf60c72cdd6"},"revision_id":{"kind":"string","value":"661dba65bffd6481e096d0da11d6d742d7ab4a55"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-17T21:49:35.293968","string":"2021-01-17T21:49:35.293968"},"revision_date":{"kind":"timestamp","value":"2012-06-27T16:43:27","string":"2012-06-27T16:43:27"},"committer_date":{"kind":"timestamp","value":"2012-06-27T16:43:27","string":"2012-06-27T16:43:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\nimport nibabel as nib\n\n\n\"\"\"\nAnatomical Workflow Sanity Checks\n================================\n\n**Class anatPreprocTest**\n-------------------------\n\n\"\"\"\n\nclass anatPreprocTest(object):\n \n \"\"\"\n This class defines all the quantitative checks done to the inputs and outputs of\n Anatomical Preprocessing\n \"\"\"\n \n def __init__(self, preproc, base_dir, input_anat):\n \"\"\"\n Constructor to initialize the workflow\n \n **Parameters**\n \n *preproc* : (Object)\n Anatomical workflow object\n *base_dir* : (String) \n path of output workflow directory\n *input_anat* : (file)\n Input image to the anatomical workflow\n \n \"\"\"\n \n \"\"\"\n Setup workflow object\n ---------------------\n \"\"\"\n \n def setup(self):\n \"\"\" \n Set up the Workflow and run\n This method is run once before _each_ test method is executed\n \n **Example**\n \n >>> self.preproc.inputs.anat = self.input_anat\n >>> self.preproc.base_dir = self.base_dir \n >>> self.preproc.run()\n \n \"\"\"\n \n \"\"\"\n Delete workflow object\n -----------------------\n \"\"\"\n \n def teardown(self):\n \"\"\"\n Delete The Workflow Object\n This method is run once after _each_ test method is executed\n \"\"\"\n \n \"\"\"\n Test to verify inputs\n ---------------------\n \"\"\" \n \n def inputs_test(self):\n assert False\n \"\"\"\n Method to check if the input file \n is T1 image and is in right format\n \n **Returns**\n \n *TRUE* : (boolean)\n if all the tests pass\n *Error* : (Exception)\n raise if any of the tests fail \n \n **Tests**\n \n - input_anat image should be a nifti file with extension nii.gz or nii\n - input_anat image should be a 3D image, i.e only one volume\n \"\"\"\n \n \"\"\"\n Test to verify deoblique Image\n ------------------------------\n \"\"\"\n \n def anat_deoblique_test(self, deoblique_img):\n assert False\n \"\"\"\n method to check if the input file\n is deobliqued correctly or not\n \n **Parameters**\n \n *deoblique_img* : (nifti file) \n De-obliqued mprage file\n \n **Returns**\n \n Same as above method\n \n **Tests**\n \n - Compare the headers of input_anat and deoblique_img,\n the voxel offset values will be unequal, if 
the original\n image is obliqued. All other values should remain unchanged.\n - Compare the Image data of input_anat and deoblique_img, they\n should remain same\n \n \"\"\" \n \n \"\"\"\n Test to verify re-orientation\n -----------------------------\n \"\"\"\n \n def anat_reorient_test(self, standard_img, rpi_img):\n assert False\n \"\"\"\n method to check if the output reorient is coorectly\n in RPI orientation or not\n \n **Parameters**\n \n *standard_img* : (nifti file) \n Standard T1 RPI image in MNI space\n *rpi_img* : (nifti file)\n RPI output mprage file\n \n **Returns**\n \n Same as above method\n \n **Tests**\n \n - Compute Spatial Correlation between standard_img and rpi_img\n For this, first convert the RPI output image into MNI space\n and if necessary resample it to match the voxel dimensions\n of the standard image\n - Compare the Image data of rpi_img and input_anat. It should be \n same.\n \n \"\"\"\n \n \"\"\"\n Test to verify skulStrip image with normalized/scaled intensities\n -----------------------------------------------------------------\n \"\"\"\n \n def anat_skullstrip_test(self, skullstrip_img):\n assert False \n \"\"\"\n method to check if the skull stripped image is \n correct or not\n \n **Parameters**\n \n *skullstrip_img* : (nifti file)\n Skullstrip output image with normalized/scaled intensities\n \n **Returns**\n \n Same as above method\n \n **Tests**\n \n - Since afni scales/normalizes the intensity values\n its hard to test it.Can be tested in the next step\n \"\"\"\n \n \"\"\"\n Test to verify skullstrip image with original intensity values\n --------------------------------------------------------------\n \"\"\"\n \n def anat_brain_test(self, rpi_img, brian_img):\n assert False\n \"\"\"\n method to check if the input file\n is skull stripped and the intensity values are unchanged\n \n **Parameters**\n \n *rpi_img* : (nifti file)\n RPI output mprage file\n *brian_img* : (nifti file)\n Skull stripped Brain only file\n \n **Returns**\n \n Same as above method\n \n **Tests**\n \n - Subtract (infile_a - infile_b), this should return a matrix\n with all zero values in brain area and non zero values around \n the edge. From the origin, choose a sphere of resonable diameter\n and check the intensity values should be zero. 
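# A sketch of the deoblique check described in anat_deoblique_test above,
# reusing the numpy/nibabel imports already in the file. The function and
# argument names are assumptions, and nibabel's pre-2.0 get_data() /
# get_header() API is assumed (consistent with the file's 2012 vintage):
def check_deoblique(input_anat_path, deoblique_path):
    a = nib.load(input_anat_path)
    b = nib.load(deoblique_path)
    # 1. The image data must be untouched by deobliquing.
    assert np.array_equal(a.get_data(), b.get_data())
    # 2. Shape and voxel sizes must be unchanged; only the oblique
    #    transform/offsets in the header may legitimately differ.
    assert a.shape == b.shape
    assert a.get_header().get_zooms() == b.get_header().get_zooms()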
# =============================================================================
# Russ93/dpw :: Lecture/day3/getters-and-setters-2/main.py (Python, 2014)
# https://github.com/Russ93/dpw
# =============================================================================
# Russell Schlup
# January 13, 2013

import webapp2

class Transcript(object):
    def __init__(self):
        # Private attributes get a double-underscore prefix by convention.
        self.__grade1 = 90
        # Note: the list captures the *value* of __grade1 at construction
        # time, so later assignments to grade1 do not change the average.
        self.__grades = [self.__grade1, 80, 60]

    @property
    def grade1(self):
        # The getter is what associates the public name grade1 (no
        # underscores, by convention) with the private __grade1.
        return self.__grade1

    @grade1.setter
    def grade1(self, value):
        self.__grade1 = value

    def calc_average(self):
        total = 0
        for g in self.__grades:
            total = g + total
        return total / len(self.__grades)

    def print_num(self):
        return self.__grade1

class MainHandler(webapp2.RequestHandler):
    def get(self):
        russell = Transcript()
        russell.grade1 = 20
        # List the grades, calculate the average, show the result.
        self.response.write(str(russell.print_num()))

app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
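# The property machinery in action, without webapp2; the values here are
# illustrative:
t = Transcript()
t.grade1 = 95           # routed through the @grade1.setter
print t.grade1          # 95, via the @property getter
print t.calc_average()  # still averages the grades captured in __init__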
just tradition\n\t\tpass\n#\t\t#this is what tells computer to associate grade1 with __grade1\n#\t\treturn self.__grade1\n\t@grade1.setter\n\tdef grade1(self, value):\n\t\tself.__grade1 = value\n\t\treturn self.__grade1\n\tdef calc_average(self):\n\t\tsum = 0\n\t\tfor g in self.__grades:\n\t\t\tsum = g+sum\n\t\tavg = sum/len(self.__grades)\n\t\treturn avg\n\tdef print_num(self):\n\t\treturn self.__grade1"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39966,"cells":{"__id__":{"kind":"number","value":13606456436632,"string":"13,606,456,436,632"},"blob_id":{"kind":"string","value":"4d566e0d128d11e5db19b3568178097ad7a10077"},"directory_id":{"kind":"string","value":"e567e8955467117db85a5d34446ba9d6f8fc35f9"},"path":{"kind":"string","value":"/src/wrangler/queue/fifo.py"},"content_id":{"kind":"string","value":"1853c1af885193baf78a8bd00bb37cda1442059d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"HyFrmn/Wrangler"},"repo_url":{"kind":"string","value":"https://github.com/HyFrmn/Wrangler"},"snapshot_id":{"kind":"string","value":"9d20431e23a746a884842fbd0dbf77135951f3c0"},"revision_id":{"kind":"string","value":"88b2f0f45f7e9621c3818d3288523880670e407f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T00:54:14.386594","string":"2021-01-10T00:54:14.386594"},"revision_date":{"kind":"timestamp","value":"2009-11-10T15:27:27","string":"2009-11-10T15:27:27"},"committer_date":{"kind":"timestamp","value":"2009-11-10T15:27:27","string":"2009-11-10T15:27:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom wrangler.queue.interface import WranglerQueueInterface\n\nclass FIFOQueue(WranglerQueueInterface):\n def __init__(self):\n self.queue = []\n \n def queue_task(self, task, priority=500):\n self.queue.append(task)\n\n def next_task(self):\n try:\n task = self.queue.pop(0)\n except IndexError:\n task = -1\n return task\n\n def remove_task(self, task):\n try:\n self.queue.remove(task)\n return True\n except ValueError:\n return False\n\n def list(self):\n return 
self.queue"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2009,"string":"2,009"}}},{"rowIdx":39967,"cells":{"__id__":{"kind":"number","value":18726057444437,"string":"18,726,057,444,437"},"blob_id":{"kind":"string","value":"7891b7ded381e088da4d5afc1269a55ea40ba5cd"},"directory_id":{"kind":"string","value":"49d127ffd824ebe5cc3dc453cdeff8cd9f83ad4d"},"path":{"kind":"string","value":"/ui/helper/ms_leaves_table.py"},"content_id":{"kind":"string","value":"c3493f8dbfff9d1dbb64a0a015499a5f3d18b6a0"},"detected_licenses":{"kind":"list like","value":["AGPL-3.0-or-later","AGPL-3.0-only"],"string":"[\n \"AGPL-3.0-or-later\",\n \"AGPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"suw/sidd"},"repo_url":{"kind":"string","value":"https://github.com/suw/sidd"},"snapshot_id":{"kind":"string","value":"554358a77dbd22fe97a261c871624940188f8d20"},"revision_id":{"kind":"string","value":"c3b28ea89bb88d14988bed89ff6b03603a16d699"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-30T09:50:47.728723","string":"2020-12-30T09:50:47.728723"},"revision_date":{"kind":"timestamp","value":"2013-12-01T06:29:07","string":"2013-12-01T06:29:07"},"committer_date":{"kind":"timestamp","value":"2013-12-01T06:29:07","string":"2013-12-01T06:29:07"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Copyright (c) 2011-2013, ImageCat Inc.\n#\n# This program is free software: you can redistribute it and/or modify \n# it under the terms of the GNU Affero General Public License as published by \n# the Free Software Foundation, either version 3 of the License, or \n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, \n# but WITHOUT ANY WARRANTY; without even the implied warranty of \n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License \n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\r\n#\r\n\"\"\"\r\ndialog for editing mapping scheme branches\r\n\"\"\"\r\nfrom PyQt4.QtCore import Qt, QVariant, QString, QAbstractTableModel, QModelIndex\r\nfrom ui.helper.common import build_attribute_tooltip\r\n\r\nclass MSLeavesTableModel(QAbstractTableModel):\r\n \"\"\"\r\n table model supporting visualization of node in mapping scheme tree\r\n \"\"\"\r\n def __init__(self, values, headers, formats, parser, valid_codes):\r\n \"\"\" constructor \"\"\"\r\n super(MSLeavesTableModel, self).__init__()\r\n\r\n self.headers = headers\r\n self.formats = formats\r\n self.parser = parser\r\n self.valid_codes = valid_codes\r\n self.values = values\r\n self.do_sort(sortIndex=0)\r\n \r\n def columnCount(self, parent):\r\n \"\"\" number of columns equals the number of header entries \"\"\"\r\n return len(self.headers)\r\n\r\n def rowCount(self, parent):\r\n \"\"\" number of rows same as number of siblings \"\"\"\r\n return len(self.values)\r\n\r\n def headerData(self, section, orientation, role):\r\n \"\"\" return data to display for header row \"\"\"\r\n if role == Qt.DisplayRole:\r\n if orientation == Qt.Horizontal:\r\n return QString(self.headers[section][0])\r\n else:\r\n # no vertical header\r\n return QVariant()\r\n elif role == Qt.ToolTipRole:\r\n return QString(self.headers[section][1])\r\n else:\r\n return QVariant()\r\n \r\n def data(self, index, role):\r\n \"\"\" return data to be displayed in a cell \"\"\"\r\n row, col = index.row(), index.column()\r\n value = self.values[row][col]\r\n if role == Qt.DisplayRole:\r\n if value is not None:\r\n return QString(self.formats[col] % value)\r\n else:\r\n return QVariant(\"\")\r\n elif role == Qt.ToolTipRole:\r\n # construct data for display in tooltip\r\n if (index.column() == 0):\r\n if value is not None:\r\n return build_attribute_tooltip(self.valid_codes, self.parser.parse(value))\r\n else:\r\n return QVariant(\"\")\r\n elif role == Qt.UserRole:\r\n return index.internalPointer()\r\n else:\r\n return QVariant()\r\n \r\n def index(self, row, column, parent):\r\n \"\"\" provide index to data given a cell \"\"\"\r\n try:\r\n node = self.values[row][len(self.headers)]\r\n return self.createIndex(row, column, node)\r\n except:\r\n return QModelIndex()\r\n \r\n def flags(self, index):\r\n \"\"\" cell condition flag \"\"\"\r\n # NOTE:\r\n # ItemIsEditable also requires data() and setData() functions\r\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\r\n\r\n def sort(self, ncol, order):\r\n \"\"\" sort table \"\"\"\r\n if ncol < 0 or ncol >= len(self.headers):\r\n return\r\n self.layoutAboutToBeChanged.emit()\r\n self.do_sort(sortIndex=ncol, reverse_sort=order==Qt.DescendingOrder)\r\n self.layoutChanged.emit()\r\n\r\n # internal helper methods\r\n ###############################\r\n def do_sort(self, sortIndex = 0, reverse_sort=False):\r\n def sort_key(row):\r\n return row[sortIndex]\r\n self.values.sort(key=sort_key, reverse=reverse_sort)\r\n 
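The do_sort helper above rebuilds a key closure on every call; operator.itemgetter expresses the same column key directly, and because Python's sort is stable, successive single-column sorts compose predictably. A standalone sketch, not part of the model class:

import operator

def do_sort(rows, sort_index=0, reverse_sort=False):
    # Equivalent to the nested sort_key closure in the record above.
    rows.sort(key=operator.itemgetter(sort_index), reverse=reverse_sort)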
"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39968,"cells":{"__id__":{"kind":"number","value":3393024190373,"string":"3,393,024,190,373"},"blob_id":{"kind":"string","value":"dc9d9ec25d80b1585dea678ec22eaa402e387afe"},"directory_id":{"kind":"string","value":"3dd31b5da5358d455e607af46610786093c7cb6b"},"path":{"kind":"string","value":"/bin/dump_mongo.py"},"content_id":{"kind":"string","value":"23f191842fef3b5bfd1e8933d295fbd6ea0e5982"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"sunlightlabs/upwardly"},"repo_url":{"kind":"string","value":"https://github.com/sunlightlabs/upwardly"},"snapshot_id":{"kind":"string","value":"1de31a3b817bb89acc356099499d2c2b018f1c24"},"revision_id":{"kind":"string","value":"d34986de3836968f5fddb1be21f45ec6ca94d28c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-25T06:36:53.492438","string":"2021-01-25T06:36:53.492438"},"revision_date":{"kind":"timestamp","value":"2014-05-01T19:22:31","string":"2014-05-01T19:22:31"},"committer_date":{"kind":"timestamp","value":"2014-05-01T19:22:31","string":"2014-05-01T19:22:31"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import itertools\n\nfrom pymongo import Connection\nfrom saucebrush import emitters, filters, sources, stats, run_recipe, Recipe\nimport os\nimport re\nimport settings\n\nPRIMARY_STATE_RE = re.compile('^(.*), ([A-Z]{2})$')\n\nclass LocationSource(object):\n \n fields = (\n 'code', 'name', 'primary_state', 'occupation',\n 'ffiec_low', 'ffiec_high', 'ffiec_avg',\n 'nces_schools',\n 'oes_median', 'oes_mean',\n )\n \n def __init__(self):\n self._locations = Connection()[settings.MONGO_DATABASE]['locations']\n \n def __iter__(self):\n \n for location in self._locations.find({}):\n \n if 'code' not in location:\n continue\n \n record = dict((key, None) for key in self.fields)\n record.update({\n 'code': location['code'],\n 'name': location['name'],\n 'occupation': '00-0000',\n })\n \n primary_state = location.get('primary_state', None)\n if primary_state is None:\n m = PRIMARY_STATE_RE.match(location['name'])\n if m is not None:\n primary_state = m.groups()[1]\n record['primary_state'] = primary_state\n \n if 'ffiec' in location:\n record['ffiec_low'] = location['ffiec']['low']\n record['ffiec_high'] = location['ffiec']['high']\n record['ffiec_avg'] = location['ffiec']['avg']\n \n if 'nces' in location:\n record['nces_schools'] = location['nces']['schools']\n \n yield record\n \n if 'oes' in location:\n \n for occupation_id, oes in location['oes'].iteritems():\n \n record['occupation'] = occupation_id\n record['oes_median'] = oes['median']\n 
record['oes_mean'] = oes['mean']\n \n yield record\n \n\n\nif __name__ == '__main__':\n \n csv_path = os.path.join(settings.DATA_DIR, 'k2.csv')\n db_path = os.path.join(settings.DATA_DIR, 'k2.db')\n \n # if os.path.exists(db_path):\n # os.unlink(db_path)\n # \n # run_recipe(\n # LocationSource(),\n # #emitters.CSVEmitter(open(csv_path, 'w'), fieldnames=LocationSource.fields),\n # emitters.SqliteEmitter(db_path, 'locations', fieldnames=LocationSource.fields),\n # #emitters.MongoDBEmitter(settings.MONGO_DATABASE, 'movingup', port=settings.MONGO_PORT)\n # #emitters.DebugEmitter(),\n # )\n \n def to_float(s):\n if s is not None:\n try:\n return float(s)\n except ValueError:\n pass\n \n def fieldnames_iter(fieldnames):\n yield 'occupation'\n for f in fieldnames:\n yield \"%s_stddev\" % f\n yield \"%s_mean\" % f\n \n STATS_FIELDS = ('ffiec_low','ffiec_high','ffiec_avg','nces_schools','oes_median','oes_mean')\n \n class StatsGenerator(filters.Filter):\n def process_record(self, record):\n \n occ = record['occupation']\n \n stats_filters = {}\n \n for fieldname in STATS_FIELDS:\n stats_filters[fieldname] = stats.StandardDeviation(fieldname)\n \n run_recipe(\n sources.SqliteSource(db_path, \"\"\"SELECT * FROM locations WHERE occupation = ?\"\"\", (occ,)),\n filters.FieldModifier(STATS_FIELDS, to_float),\n Recipe(*stats_filters.values()),\n error_stream = emitters.DebugEmitter(),\n )\n \n for fieldname, stats_filter in stats_filters.iteritems():\n record['%s_stddev' % fieldname] = stats_filter.value()[0]\n record['%s_mean' % fieldname] = stats_filter.average()\n \n return record\n \n stats_path = os.path.join(settings.DATA_DIR, 'k2-stats.csv')\n \n run_recipe(\n sources.SqliteSource(db_path, \"\"\"SELECT DISTINCT occupation FROM locations\"\"\"),\n StatsGenerator(),\n emitters.CSVEmitter(\n open(stats_path, 'w'),\n fieldnames=[f for f in fieldnames_iter(STATS_FIELDS)],\n ),\n emitters.DebugEmitter(),\n error_stream = emitters.DebugEmitter(),\n )"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39969,"cells":{"__id__":{"kind":"number","value":4398046552665,"string":"4,398,046,552,665"},"blob_id":{"kind":"string","value":"88c15a04dddc59823bb05d2eaf38bae4230f9412"},"directory_id":{"kind":"string","value":"a62adccc82ee6ef7ebd94d710b5f75056626057f"},"path":{"kind":"string","value":"/account_tgt/wizard/account_balance.py"},"content_id":{"kind":"string","value":"7c00321fa7883dcf0679f5d4d92a3bd5c90b07f7"},"detected_licenses":{"kind":"list 
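The StatsGenerator in the record above re-runs a saucebrush recipe per occupation just to collect a mean and standard deviation. The same statistics can be computed in a single pass with Welford's algorithm; a self-contained sketch (population standard deviation assumed, since the saucebrush filters are not shown here):

import math

def running_mean_stddev(values):
    # Welford's one-pass algorithm for mean and standard deviation.
    n, mean, m2 = 0, 0.0, 0.0
    for x in values:
        n += 1
        delta = x - mean
        mean += delta / n
        m2 += delta * (x - mean)
    if n == 0:
        return None, None
    return mean, math.sqrt(m2 / n)

# e.g. running_mean_stddev([1.0, 2.0, 4.0]) -> (2.333..., 1.247...)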
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Lunchik/openerp_custom_modules"},"repo_url":{"kind":"string","value":"https://github.com/Lunchik/openerp_custom_modules"},"snapshot_id":{"kind":"string","value":"614b2fa6ccf1aaaaa2b99210bcd72aea301079d0"},"revision_id":{"kind":"string","value":"b7707762d66743dad139727b3903063393f0da93"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-19T13:58:50.328789","string":"2020-05-19T13:58:50.328789"},"revision_date":{"kind":"timestamp","value":"2014-12-01T10:36:39","string":"2014-12-01T10:36:39"},"committer_date":{"kind":"timestamp","value":"2014-12-01T10:36:39","string":"2014-12-01T10:36:39"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from openerp.osv import fields, osv\n\nfrom ..report.account_balance_report import TrialBalanceReport\n\nimport base64\n\nclass account_balance_report(osv.osv_memory):\n _inherit = \"account.balance.report\"\n _name = 'account.balance.report.xcel'\n _description = 'Trial Balance Report XCEL'\n\n def _get_company_ids(self, cr, uid, context=None):\n return self.pool.get('res.company').search(cr, uid, [], context=context)\n\n _columns = {\n 'company_ids': fields.many2many('res.company', 'trial_company_rel', 'trial_id', 'company_id', string=\"TGT Entities\"),\n }\n \n _defaults = {\n 'company_ids': _get_company_ids,\n }\n\n def _print_report(self, cr, uid, ids, data, context=None):\n company_ids = self.read(cr, uid, ids, ['company_ids'], context=context)\n data = self.pre_print_report(cr, uid, ids, data, context=context)\n data['form']['company_ids'] = company_ids[0]['company_ids']\n combined = TrialBalanceReport(cr, uid, self.pool, '', context)\n combined.set_context([], data, [])\n report = combined.generate()\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'accounting.report.xcel.download',\n 'view_mode': 'form',\n 'target': 'new',\n 'name': 'Trial Balance Report',\n 'datas': data,\n 'context': {'r_file': base64.encodestring(report.read()),},\n }\n return {'type': 'ir.actions.report.xml', 'report_name': 'account.account.balance', 'datas': data}\n\naccount_balance_report()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39970,"cells":{"__id__":{"kind":"number","value":6545530198648,"string":"6,545,530,198,648"},"blob_id":{"kind":"string","value":"7a4fb763a635a3db7cd9b5d51c17f7ae5525828a"},"directory_id":{"kind":"string","value":"067754dd40a742efe306fd95056211485047994c"},"path":{"kind":"string","value":"/challenge.py"},"content_id":{"kind":"string","value":"af07923db4af97fae2932fe37416e7251dcd6abc"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"yoland68/email-prediction-challenge"},"repo_url":{"kind":"string","value":"https://github.com/yoland68/email-prediction-challenge"},"snapshot_id":{"kind":"string","value":"fce37beb6f7edd826052af40a8a07bdf9211b590"},"revision_id":{"kind":"string","value":"9c0bf04b436027d621ad3d1fdf1d0bfc5076c1de"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-03T00:24:48.408018","string":"2016-08-03T00:24:48.408018"},"revision_date":{"kind":"timestamp","value":"2014-11-21T16:10:22","string":"2014-11-21T16:10:22"},"committer_date":{"kind":"timestamp","value":"2014-11-21T16:10:22","string":"2014-11-21T16:10:22"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# coding=utf-8\n\n\"\"\"\nUSAGE:\n\n python challenge.py [existing email file name] [prediting email file name]\n\nEXAMPLES:\n python challenge.py db_input.txt prediction_input.txt\n\n\"\"\"\n\nimport json\nimport os\nfrom company_class import Company, print_company_pattern, print_company_dict\nfrom employee_class import Employee\nfrom pattern_class import Pattern\nimport pdb\n\ndef main():\n \"\"\" Run the program and get result\"\"\"\n\n ### Reading and parsing the first file (existing email addresses)\n try:\n file = os.sys.argv[1]\n with open(file, 'r') as f:\n inp = f.read();\n dict = None\n try:\n dict = json.loads(inp) # dump string into dict {[person name]: [email]}\n except:\n inp = convert_to_json_string(inp)\n try:\n dict = json.loads(inp)\n except:\n print(\"Input invalid\")\n exit(1) # exit code 1 for error input\n except:\n print(\"First Input in invalid\")\n print(__doc__)\n exit(1)\n\n ### Loop through the dictionary and add each item as an Employee, and each Employee to a Company\n for name, email in dict.iteritems():\n # pdb.set_trace()\n em = Employee(name, email)\n em.join_company() # this function put Employee into a Company's list and update their Pattern\n\n # print_company_pattern() # Print all the patterns in all the Companies\n # print_company_dict() # Print all the Companies\n\n ## Next get input value for that emails user wants to predict\n file2 = None\n try:\n file2 = os.sys.argv[2]\n except:\n print(\"Second Input in invalid\")\n print(__doc__)\n exit(1)\n\n\n with open(file2, 'r') as f2:\n to_be_predicted = f2.readlines()\n for i in to_be_predicted:\n # print(i)\n i = i.strip()\n i = i.replace('\"', '')\n name = i.split(', ')[0].lower()\n company_name = i.split(', ')[1].lower()\n result = predict(name, company_name) #output the Employee's possible email(s) \n print(\"Name: {0}, \\nEmail: {1}\\n\".format(name, result))\n\n\n\ndef convert_to_json_string(inp):\n \"\"\"Convert the db_input (the ones with name and emails) to valid json string\"\"\"\n return inp.replace(\" =>\", ':')\n\ndef predict(name, company_name):\n \"\"\"\n Input: Employee's name and Company's 
name\n Output: Employee's potential email address using Company.company_dict\n \"\"\"\n comp = Company.company_dict.get(company_name)\n if comp: # if the Company is found in master list, use its Pattern list to make predictions\n return comp.predict(name)\n else:\n return \"Domain {0} not found in existing database, can not predict\".format(company_name)\n\n\n## Testing area\n\nif __name__ == \"__main__\":\n main()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39971,"cells":{"__id__":{"kind":"number","value":17222818871274,"string":"17,222,818,871,274"},"blob_id":{"kind":"string","value":"60f8d538da22d1a6c1dfd1556f3677d9168856be"},"directory_id":{"kind":"string","value":"85b099b67fbbc4f172a62f7ce922cd667cd70998"},"path":{"kind":"string","value":"/2013/Crypto/1-[CSAWpad - 100 Points]/dec.py"},"content_id":{"kind":"string","value":"d11dfc0055ef31f03d5d6cda9316c3de5df6f9c8"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"b1acktrac3/CSAW-ctf"},"repo_url":{"kind":"string","value":"https://github.com/b1acktrac3/CSAW-ctf"},"snapshot_id":{"kind":"string","value":"2ac9f98955b0ba3b8d65919fe84de1b1a405764c"},"revision_id":{"kind":"string","value":"a3af80b67403fd47c326e9ead25f2b1fc7db88d9"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T07:25:03.436800","string":"2021-01-19T07:25:03.436800"},"revision_date":{"kind":"timestamp","value":"2013-10-25T10:57:22","string":"2013-10-25T10:57:22"},"committer_date":{"kind":"timestamp","value":"2013-10-25T10:57:33","string":"2013-10-25T10:57:33"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import string\nimport os\nfrom hashlib import sha512\nfrom binascii import hexlify\n\n#generates s box and sinverse box, called f and g respectively, using\n#sha 512 as a deterministic random number generator\ndef genTables(seed=\"Well one day i'll be a big boy just like manhell\"):\n fSub={}\n gSub={}\n i=0\n prng=sha512()\n prng.update(seed)\n seed=prng.digest()\n for el in xrange(256):\n cSeed=\"\"\n for x in xrange(4):\n cSeed+=prng.digest()\n prng.update(str(x))\n prng.update(cSeed)\n fCharSub=[0]*256\n gCharSub=[0]*256\n unused=range(256)\n for toUpdate in xrange(256):\n i+=1\n curInd=ord(cSeed[toUpdate])%len(unused)\n toDo=unused[curInd]\n del unused[curInd]\n fSub[(el,toUpdate)]=toDo\n gSub[(el,toDo )]=toUpdate\n return fSub,gSub\nf,g=genTables()\n\ndef encrypt(pad, ptext):\n assert(len(ptext)<=len(pad))#if pad < plaintext bail\n ctext = []\n if type(ptext)==type(\"\"):\n ptext=map(ord,ptext)\n if type(pad)==type(\"\"):\n pad=map(ord,pad)\n for padByte,ptextByte in zip(pad,ptext):\n 
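The genTables routine above builds the substitution tables so that g is the per-pad-byte inverse of f; encrypt and decrypt depend entirely on that property. A small property check, editorial and assuming the record's f and g dictionaries are in scope:

import random

def check_inverse(f, g, trials=1000):
    # For every pad byte k and plaintext byte p: g[k, f[k, p]] == p.
    for _ in range(trials):
        k = random.randrange(256)
        p = random.randrange(256)
        assert g[k, f[k, p]] == p
    return True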
ctext.append(f[padByte,ptextByte])\n return \"\".join(map(chr,ctext))\ndef decrypt(pad, ciphertext):\n assert(len(ciphertext)<=len(pad))#if pad < ciphertext bail\n ptext = []\n if type(ciphertext)==type(\"\"):\n ciphertext=map(ord,ciphertext)\n if type(pad)==type(\"\"):\n pad=map(ord,pad)\n for padByte,ctextByte in zip(pad,ciphertext):\n ptext.append(g[padByte,ctextByte])\n \n return \"\".join(map(chr,ptext))\n\n\nstrings = [ '794d630169441dbdb788337d40fe245daa63c30e6c80151d4b055c18499a8ac3e5f3b3a8752e95cb36a90f477eb8d7aa7809427dde0f00dc11ab1f78cdf64da55cb75924a2b837d7a239639d89fe2b7bc1415f3542dba748dd40',\n '14a60bb3afbca7da0e8e337de5a3a47ae763a20e8e18695f39450353a2c6a26a6d8635694cbdc34b7d1a543af546b94b6671e67d0c5a8b64db12fe32e275',\n '250d83a7ed103faaca9d786f23a82e8e4473a5938eabd9bd03c3393b812643ea5df835b14c8e5a4b36cdcfd210a82e2c3c71d27d3c47091bdb391f2952b261fde94a4b23238137a4897d1631b4e18d63',\n '68a90beb191f13b621747ab46321a491e71c536b71800b8f5f08996bb433838fe56587f171a759cf1c160b4733a3465f5509ad7d1a89d4b41f631f3c600347a8762141095dad3714027dfc7c894d69fd896b810313259b1a0e941ecb43d6ae1857a465b4ddcdf102b7297763acb0281144b0598c326e871c3a1ad047ad4fea2093a1b734d589b8998175b3',\n '0fc304048469137d0e2f3a71885a5a78e749145510cf2d56157939548bfd5dd7e59dcebc75b678cfeac4cf408fce5dda32c9bfcbfd578bdcb801df32ebf64da365df4b285d5068975137990134bd69991695989b322b0849',\n '254c0bb31453badaca9d060ce5faa45fa66378a6716915473579d3743e315dbedf4d8cf78b93c3267d579247e32c8c7cd3e71e7dda6138a2ab015166fa03f2ce6ab74b89ce561eb16a65990189e169f1c457d9af622ba119a66acedb108fae18825bf3efc0428b9dae250791cb0ea018966e257d601a87f9914d646026eeab5c45cbaedd27e4c47643ab4e25193aa64f79',\n '41cd1c01c62883b2ca71e671dce57e5f96b1610e29507b6c03c38211653284576d4d8cdc967764147d1a0578102cb05f32a73065f11009041fa3cc5f60b24d8c7098598627df37322f814525966acabc99be5303c2322b43ecf358ac8b8541bd82214d1cc042cac3869c54e2964fa376229c2563ba3fd03e2d4d4d441721c60b6d817e034965be28b7d463cf2b97baebfe2729ed2aa41ffe',\n '68c50bd5197bfdbdfa887883783d2455a673a685436915bd72d1af74dffdd2b89df335daee93c36d5f57e147e9a35913d3b3bf33']\nprintable = string.printable[:-5]\n\nprint \"\".join(string.printable[:-5])\nprint \"\".join(string.printable[:-3])\nprint \"\".join(string.printable[:-2])\nprint \"\".join(string.printable[:-1])\nprint \"\".join(string.printable[:0])\nprint \"\".join(string.printable)\n\n\n\nstrings = map(lambda s: s.decode(\"hex\"), strings)\nmaxl = max(map(len, strings))\npad = [0,]*maxl\n# pos: (string, char)\nguess = {0:(1,\"G\"),\n\t14: (0, \" \"),\n\t16: (0, \"e\"),\n\t17: (0, \"t\"),\n\t20: (0, \"e\"),\n\t22: (0, \" \"),\n\t24: (0, \"t\"),\n\t34: (0, \"n\"),\n\t38: (0, \"e\"),\n\t39: (0, \"n\"),\n\t43: (0, \" \"),\n\t46: (0, \" \"),\n\t51: (0, \" \")}\n\nfor pos in range(maxl):\n\tpossible = []\n\tfor i in range(256):\n\t\tok = True\n\t\tx = []\n\t\tfor string in strings:\n\t\t\tif len(string) <= pos:\n\t\t\t\tcontinue\n\t\t\tc = chr(g[i,ord(string[pos])])\n\t\t\tif c not in printable:\n\t\t\t\tok = False\n\t\t\t\tbreak\n\t\t\tx.append(c)\n\t\tif ok and len(x) > 0:\n\t\t\tpossible.append((i,x))\n\tif len(possible) == 1:\n\t\tpad[pos] = possible[0][0]\n\telif pad[pos] == 0:\n\t\tgotone = False\n\t\tif pos in guess:\n\t\t\tfor padval, solutions in possible:\n\t\t\t\tif guess[pos][1] in solutions[guess[pos][0]]:\n\t\t\t\t\tpad[pos] = padval\n\t\t\t\t\tgotone = True\n\t\t\t\t\tbreak\n \n\t\tif not gotone:\n\t\t\tpad[pos] = possible[0][0]\n\nfor string in strings:\n\tprint decrypt(pad, 
string)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39972,"cells":{"__id__":{"kind":"number","value":2611340144531,"string":"2,611,340,144,531"},"blob_id":{"kind":"string","value":"f0ad6dd46f30c24dbf54c76f747bc37b79450274"},"directory_id":{"kind":"string","value":"d29c68fded4a93b09e862b46ae6128dc3c29bf54"},"path":{"kind":"string","value":"/test/tests/600.bdm-performance/highest-perf.py"},"content_id":{"kind":"string","value":"62730fe6581c00e7cf9b14f3395d75b17da15488"},"detected_licenses":{"kind":"list like","value":["LicenseRef-scancode-generic-cla","LGPL-2.1-or-later","LicenseRef-scancode-other-copyleft","LGPL-2.1-only","LicenseRef-scancode-unknown-license-reference"],"string":"[\n \"LicenseRef-scancode-generic-cla\",\n \"LGPL-2.1-or-later\",\n \"LicenseRef-scancode-other-copyleft\",\n \"LGPL-2.1-only\",\n \"LicenseRef-scancode-unknown-license-reference\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"ldm5180/hammerhead"},"repo_url":{"kind":"string","value":"https://github.com/ldm5180/hammerhead"},"snapshot_id":{"kind":"string","value":"2fb52eb379f4311891627e1b6e9a286a3fd169a7"},"revision_id":{"kind":"string","value":"9a17d28027c9c55c3943209c837d2520e2156d8f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T08:42:12.076826","string":"2020-12-24T08:42:12.076826"},"revision_date":{"kind":"timestamp","value":"2014-05-28T21:27:57","string":"2014-05-28T21:27:57"},"committer_date":{"kind":"timestamp","value":"2014-05-28T21:27:57","string":"2014-05-28T21:27:57"},"github_id":{"kind":"number","value":3656864,"string":"3,656,864"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Copyright (c) 2010, Regents of the University of Colorado.\n# This work was supported by NASA contracts NNJ05HE10G, NNC06CB40C, and\n# NNC07CB47C.\n#\n\nimport sys\nimport optparse\nimport logging\nfrom select import select\nimport time\n\n# parse options \nparser = optparse.OptionParser()\nparser.add_option(\"-r\", \"--resource\", \"--resources\", dest=\"resource_name\",\n help=\"Subscribe to a Resource list.\", \n metavar=\"HAB-Type.HAB-ID.Node-ID:Resource-ID\")\nparser.add_option(\"-s\", \"--security-dir\", dest=\"security_dir\",\n help=\"Directory containing security certificates.\",\n metavar=\"dir\", default=None)\nparser.add_option(\"-e\", \"--require-security\", dest=\"require_security\",\n help=\"Require secured connections.\",\n action=\"store_true\", default=False)\nparser.add_option(\"-t\", \"--time-to-run\", dest=\"time_to_run\",\n help=\"Duration to check\", default=\"60\")\n\n(options, args) = parser.parse_args()\n\nlogger = logging.getLogger(\"Bionet Highest BDM 
Performance\")\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nfrom bionet import *\n\nglobal highest\nhighest = 0\n\n#bionet callbacks\ndef cb_lost_hab(hab):\n None\n\n\ndef cb_new_hab(hab):\n None\n\n\ndef cb_new_node(node):\n global highest\n hab = bionet_node_get_hab(node)\n if (bionet_node_get_num_resources(node)):\n for i in range(bionet_node_get_num_resources(node)):\n resource = bionet_node_get_resource_by_index(node, i)\n datapoint = bionet_resource_get_datapoint_by_index(resource, 0)\n value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint))\n val = int(value_str);\n if (val > highest):\n highest = val\n\ndef cb_lost_node(node):\n None\n\ndef cb_datapoint(datapoint):\n global highest\n value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint))\n val = int(value_str);\n if (val > highest):\n highest = val\n\nif (options.security_dir != None):\n bionet_init_security(options.security_dir, options.require_security)\n\nbionet_fd = -1\nbionet_fd = bionet_connect()\nif (0 > bionet_fd):\n logger.error(\"error connecting to Bionet\")\n exit(1)\nelse:\n # register Bionet callbacks\n pybionet_register_callback_new_hab(cb_new_hab)\n pybionet_register_callback_lost_hab(cb_lost_hab);\n pybionet_register_callback_new_node(cb_new_node);\n pybionet_register_callback_lost_node(cb_lost_node);\n pybionet_register_callback_datapoint(cb_datapoint);\n\nif (None == options.resource_name):\n logger.error(\"Resource name needs to be specified.\")\n exit(1)\n\nbionet_subscribe_datapoints_by_name(options.resource_name)\n\nfd_list = []\nif (bionet_fd != -1):\n fd_list.append(bionet_fd)\n\nstarted = time.time()\nwhile(1):\n (rr, wr, er) = select(fd_list, [], [], 1)\n for fd in rr:\n if (fd == bionet_fd):\n bionet_read()\n if ((time.time() - started) >= float(options.time_to_run)):\n print \"Bionet Data Manager max speed:\", highest, \"datapoints/sec\"\n exit(0)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39973,"cells":{"__id__":{"kind":"number","value":17609365928429,"string":"17,609,365,928,429"},"blob_id":{"kind":"string","value":"081e1ccd91d4b415eac4faa87169a21291516a26"},"directory_id":{"kind":"string","value":"0a80fb72ea403c5fc5ed3c55b8f16490c21d8c60"},"path":{"kind":"string","value":"/teststratum.py"},"content_id":{"kind":"string","value":"3a2048d17a3afbc535b75f5a047427767592a2d9"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dkoh/Stratum-Project"},"repo_url":{"kind":"string","value":"https://github.com/dkoh/Stratum-Project"},"snapshot_id":{"kind":"string","value":"287ed1d7c126873f07a300665c2ff6872ac6258d"},"revision_id":{"kind":"string","value":"9f29521fd1b8fc77519719b0478e19c13e74ceaa"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-02T08:57:00.611786","string":"2021-01-02T08:57:00.611786"},"revision_date":{"kind":"timestamp","value":"2012-06-24T23:16:20","string":"2012-06-24T23:16:20"},"committer_date":{"kind":"timestamp","value":"2012-06-24T23:16:20","string":"2012-06-24T23:16:20"},"github_id":{"kind":"number","value":2050189,"string":"2,050,189"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nteststratum.py\nCreated by Derek Koh on 2011-07-07.\nCopyright (c) 2011 __MyCompanyName__. All rights reserved.\n\"\"\"\n\nimport random, Forest1, math, clusters\nfrom copy import deepcopy \nimport rpy2.robjects as robjects\nrandom.seed(2222222)\n\n\ninputdict={'readDataset':(\"vanillapaireddata.csv\",0,-99),'number_of_trees':10}\nblah=Forest1.ConditionalRandomForest(**inputdict)\n#\n#def logisticregressionR(data):\n#\tdata1=zip(*data)\n#\tfeatures=['col{0}'.format(i) for i in xrange(len(data[0]))]\n#\tcolumns=[robjects.FloatVector(col) for col in data1]\n#\tRdata = robjects.r['data.frame'](**dict(zip(features,columns)))\n#\tRformula = robjects.r['as.formula']('{0} ~ {1} -1'.format(features[-1],reduce(lambda x,y: x + '+' + y, features[:-1] )))\n#\trpart_params = {'formula' : Rformula, 'data' : Rdata, 'family' : \"binomial\"}\n#\tmodel=robjects.r.glm(**rpart_params)\n#\treturn model.rx('aic')[0][0],model.rx('deviance')[0][0]\n#\n###This function transform stratum data to its independent variables\n#def transformstratum(data, clrformat=0): \n#\treturndata = deepcopy(data) #create a copy of input data\n#\tcolumn_count=len(data[0])\n#\tcolumn_count_half=column_count/2\n#\tif clrformat ==0:\n#\t\tfor row in returndata:\n#\t\t\tfor i in range(column_count_half):\n#\t\t\t\tif row[i] < row[i+column_count_half]: \n#\t\t\t\t\trow[i+column_count_half]=1\n#\t\t\t\telse: row[i+column_count_half]=0\n#\telse:\n#\t\tfor row in returndata:\n#\t\t\tfor i in range(column_count_half):\n#\t\t\t\trow[i+column_count_half]=row[i+column_count_half]-float(row[i])\t\t\t\t\n#\t\treturndata=[row[column_count_half:] + [1] for row in returndata]\n#\treturn returndata\n#\n#logisticdata= Forest1.read_data(\"vanillapaireddata.csv\",0,-99)[0]\n#logisticdata=transformstratum(logisticdata,1)\n#import csv \n#outputwriter=csv.writer(open('Rlogisticdata.csv', 'wb'))\n#for row in logisticdata:\n#\toutputwriter.writerow(row)\n#print logisticregressionR(logisticdata)\n#print \"done\"\n##This code does plain CLR on a dataset and gets back the aic and deviance\n#def 
logisticregressionR(data):\n#\tdata1=zip(*data)\n#\tfeatures=['col{0}'.format(i) for i in xrange(len(data[0]))]\n#\tcolumns=[robjects.FloatVector(col) for col in data1]\n#\tRdata = robjects.r['data.frame'](**dict(zip(features,columns)))\n#\tRformula = robjects.r['as.formula']('{0} ~ {1} -1'.format(features[-1],reduce(lambda x,y: x + '+' + y, features[:-1] )))\n#\trpart_params = {'formula' : Rformula, 'data' : Rdata, 'family' : \"binomial\"}\n#\tmodel=robjects.r.glm(**rpart_params)\n#\treturn model.rx('aic')[0][0],model.rx('deviance')[0][0]\n#\t\t\n######################\n##START OF MAIN SCRIPT\n######################\n#numberofnodes=10\n#\n##The input dataset as 4 columns. the xy of the case and xy for the control\n#rawdata, featureNames=Forest.read_data('vanillalogisticdata.csv')\n#logisticdata=treepredictstratum.transformstratum(rawdata,1)\n#clusters= treepredictstratum.stratumForest(treepredictstratum.transformstratum(rawdata),numberofnodes)\n#\n## Perform logistic regression in each of the 10 clusters and then suming up the stats\n#finallist=[]\n#for key in clusters:\n#\tif len(clusters[key])>3: \n#\t\tfinallist.append(logisticregressionR([logisticdata[i] for i in clusters[key]]))\n#\telse: print 'Less than 3 obs in cluster'\n#\n##print out the final results for the number of nodes. \n#print \"Results for {0} nodes\".format(numberofnodes)\n#print reduce(lambda x, y: (x[0]+y[0],x[1]+y[1]),finallist)\n#\n##Calculating the vanilla logistic regression \n#print treepredictstratum.logisticregressionR(logisticdata)\t\n\t\n\n\n\n\n\n# Unused code\n# dummydata1=[[int(random.random()*100) for i in xrange(20)] for j in xrange(len(dummydata))]\n# for i in xrange(len(dummydata)):\n# \tdummydata1[i][0]=dummydata[i][0]\n# \tdummydata1[i][1]=dummydata[i][1]\n# \tdummydata1[i][len(dummydata1[0])/2]=dummydata[i][2]\n# \tdummydata1[i][len(dummydata1[0])/2+1]=dummydata[i][3]\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39974,"cells":{"__id__":{"kind":"number","value":10574209511141,"string":"10,574,209,511,141"},"blob_id":{"kind":"string","value":"bf7c5af851c0228b370b25108a0e4a98883dbf47"},"directory_id":{"kind":"string","value":"cb5c94d899a2ee9b8c576904227500911ea71977"},"path":{"kind":"string","value":"/trychooser_test"},"content_id":{"kind":"string","value":"3792cf984c906e39ad8899c8482faf47af9bd55f"},"detected_licenses":{"kind":"list 
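The (largely commented-out) logisticregressionR helper above shows the rpy2 pattern this script relies on: build FloatVector columns, wrap them in an R data.frame, then call glm with a binomial family. A condensed sketch of those same calls, matching the rpy2 2.x API used in the record; the colN column names follow the record's convention:

import rpy2.robjects as robjects

def r_logistic_aic(rows):
    # rows: equal-length numeric lists; last column is the response.
    cols = zip(*rows)
    names = ['col%d' % i for i in range(len(rows[0]))]
    frame = robjects.r['data.frame'](
        **dict((n, robjects.FloatVector(c)) for n, c in zip(names, cols)))
    formula = robjects.r['as.formula'](
        '%s ~ %s - 1' % (names[-1], '+'.join(names[:-1])))
    model = robjects.r.glm(formula=formula, data=frame, family='binomial')
    return model.rx('aic')[0][0]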
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pbiggar/trychooser"},"repo_url":{"kind":"string","value":"https://github.com/pbiggar/trychooser"},"snapshot_id":{"kind":"string","value":"ce13e5b6301940fef2d0520b1134ad727c261ab0"},"revision_id":{"kind":"string","value":"5dd9b72289be346295c3e6a426757480806802b4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-11T03:00:26.836467","string":"2021-01-11T03:00:26.836467"},"revision_date":{"kind":"timestamp","value":"2014-08-08T04:04:39","string":"2014-08-08T04:04:39"},"committer_date":{"kind":"timestamp","value":"2014-08-08T04:04:39","string":"2014-08-08T04:04:39"},"github_id":{"kind":"number","value":1907283,"string":"1,907,283"},"star_events_count":{"kind":"number","value":11,"string":"11"},"fork_events_count":{"kind":"number","value":12,"string":"12"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2014-08-08T04:04:39","string":"2014-08-08T04:04:39"},"gha_created_at":{"kind":"timestamp","value":"2011-06-16T18:52:20","string":"2011-06-16T18:52:20"},"gha_updated_at":{"kind":"timestamp","value":"2013-11-08T03:44:23","string":"2013-11-08T03:44:23"},"gha_pushed_at":{"kind":"timestamp","value":"2014-08-08T04:04:39","string":"2014-08-08T04:04:39"},"gha_size":{"kind":"number","value":194,"string":"194"},"gha_stargazers_count":{"kind":"number","value":17,"string":"17"},"gha_forks_count":{"kind":"number","value":16,"string":"16"},"gha_open_issues_count":{"kind":"number","value":4,"string":"4"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport subprocess\nimport sys\nimport re\n\ndef run (input):\n proc = subprocess.Popen(\"./trychooser\", stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (stdout, _) = proc.communicate(input)\n return (stdout.split(\"\\n\")[-2], stdout)\n\n\ntests = [\n ('Y', '-b do -p all -u all -t all'),\n ('NYYYYY', '-b do -p all -u all -t all'),\n ('NNYYYY', '-b o -p all -u all -t all'),\n ('NNNYYYY', '-b d -p all -u all -t all'),\n ('NYNYYYYYYYYYYYYY', '-b do -p linux,linux64,macosx64,win32,win64,android,android-armv6,android-noion,ics_armv7a_gecko,panda,unagi -u all -t all'),\n ('NYNNNYYNNNNNNNYY', '-b do -p macosx64,win32 -u all -t all'),\n ('NYNNNNNNYNNNNNYYY', '-b do -p android -u all -t all'),\n ('NYYNNY', '-b do -p all -u none -t all'),\n ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYYYYYYYYYY', '-b do -p all -u reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-3,mochitest-4,mochitest-5,mochitest-6,mochitest-7,mochitest-8,mochitest-bc,mochitest-o -t all'),\n ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYYYNNNNNNY', '-b do -p all -u reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-3,mochitest-4 -t all'),\n ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYNNYNNNYYY', '-b do -p all -u 
reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-5,mochitest-bc,mochitest-o -t all'),\n ('NYYNYNNNNNNNNNNNNNNYYNNNNYYNNNYYYYYYNNNYYY', '-b do -p all -u xpcshell,jsreftest,marionette,marionette-webapi,mochitest-1,mochitest-2,mochitest-3,mochitest-4,mochitest-5,mochitest-bc,mochitest-o -t all'),\n ('NYYNYYNNNNNNNNYNNNNYYNNNNYYNNYYY', '-b do -p all -u reftest,crashtest,xpcshell,jsreftest,marionette,marionette-webapi,mochitests -t all'),\n ('NYYYNNNN', '-b do -p all -u all -t none'),\n ('NYYYNYYYYYYYYYYYYY', '-b do -p all -u all -t tpn,nochromer,other,dirtypaint,svgr,dromaeojs,xperf,remote-ts,remote-tdhtml,remote-tsvg,remote-tpan,remote-trobopan,remote-trobocheck,remote-troboprovider,remote-trobocheck2,remote-trobocheck3,remote-tp4m_nochrome'),\n ('NYYYNYNNYYYYYNYYYYYYYYYYY', '-b do -p all -u all -t other,dirtypaint,svgr,dromaeojs,xperf,remote-ts,remote-tdhtml,remote-tsvg,remote-tpan,remote-trobopan,remote-trobocheck,remote-trobocheck2,remote-trobocheck3,remote-troboprovider,remote-tp4m_nochrome'),\n ('NYYYNYYYYYYNYNYNNNNNNNNYN', '-b do -p all -u all -t tpn,nochromer,other,dirtypaint,svgr,xperf,remote-troboprovider'),\n ('NYYNYNNNNNNNNNNNNNNNNNNNNNNNNYNN', '-b do -p all -u mochitests -t none'),\n ]\n\ninvalid_tests = [\n ('NYNNNNNNNNNNNNYY', \": try: -b do -p -u all -t all\"), # No platforms\n ('NNNNYYY', \"\"), # No builds\n ('NYYNYNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN', \": try: -b do -p all -u -t none\"), # Wanted unittests, none picked\n ('NYYYNYNNNNNNNNYNNNNNNNNNN', \": try: -b do -p all -u all -t\"), # Wanted talos, none picked\n ]\n\ntests = [(a, 'try: ' + b) for (a,b) in tests]\ntests += [(a, 'Invalid selection' + b) for (a,b) in invalid_tests]\n\ndef combine(output, input):\n result = \"\"\n matches = re.findall('.*?\\[Ynh\\?\\]', output, re.M | re.S)\n assert matches != None\n# assert len(matches) == len(input)\n i = 0\n for match in matches:\n result += match\n if len(input) > i:\n result += \" \" + input[i]\n i += 1\n\n return result\n\nfor (input, expected) in tests:\n (output, full_output) = run(\"\\n\".join(input))\n\n if output != expected:\n print \"Fail:\\n\\texpected:\\n\\t\\t\" + expected + \"\\n\\tgot:\\n\\t\\t\" + output\n print combine(full_output, input)\n sys.exit(-1)\n else:\n print \"Pass [\" + input + \"]: '\" + expected + \"'\"\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39975,"cells":{"__id__":{"kind":"number","value":7782480787599,"string":"7,782,480,787,599"},"blob_id":{"kind":"string","value":"371ae2b24bbe02880faa49a663721b79feb8d7e4"},"directory_id":{"kind":"string","value":"153ecce57c94724d2fb16712c216fb15adef0bc4"},"path":{"kind":"string","value":"/Zope3/tags/before-blow-services-merge/src/zope/app/wfmc/metadirectives.py"},"content_id":{"kind":"string","value":"455e7e8b2b9afc6bb5722459f0357c1131fca9a0"},"detected_licenses":{"kind":"list like","value":["ZPL-2.1","LicenseRef-scancode-unknown-license-reference","ZPL-2.0","ICU","Python-2.0","LicenseRef-scancode-public-domain"],"string":"[\n \"ZPL-2.1\",\n \"LicenseRef-scancode-unknown-license-reference\",\n \"ZPL-2.0\",\n \"ICU\",\n 
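The trychooser harness above drives an interactive prompt by writing the whole answer string up front and collecting all output with communicate(), which also closes stdin so the child sees EOF after the last answer. A minimal standalone version of the pattern (the command is hypothetical):

import subprocess

def drive(cmd, answers):
    # One answer per prompt, newline-separated.
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate("\n".join(answers).encode())
    return out.decode().splitlines()

# e.g. drive(["./trychooser"], list("NYYYNN"))  # hypothetical binary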
\"Python-2.0\",\n \"LicenseRef-scancode-public-domain\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"pombredanne/zope"},"repo_url":{"kind":"string","value":"https://github.com/pombredanne/zope"},"snapshot_id":{"kind":"string","value":"10572830ba01cbfbad08b4e31451acc9c0653b39"},"revision_id":{"kind":"string","value":"c53f5dc4321d5a392ede428ed8d4ecf090aab8d2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-03-12T10:53:50.618672","string":"2018-03-12T10:53:50.618672"},"revision_date":{"kind":"timestamp","value":"2012-11-20T21:47:22","string":"2012-11-20T21:47:22"},"committer_date":{"kind":"timestamp","value":"2012-11-20T21:47:22","string":"2012-11-20T21:47:22"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"ZCML directives for defining privileges.\n\n$Id: $\n\"\"\"\n\nimport zope.interface\nimport zope.schema\nimport zope.configuration.fields\n\nclass IdefineXpdl(zope.interface.Interface):\n\n file = zope.configuration.fields.Path(\n title=u\"File Name\",\n description=u\"The name of the xpdl file to read.\",\n )\n\n process = zope.schema.TextLine(\n title=u\"Process Name\",\n description=u\"The name of the process to read.\",\n )\n\n id = zope.schema.Id(\n title=u\"ID\",\n description=(u\"The identifier to use for the process. 
\"\n u\"Defaults to the process name.\"),\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39976,"cells":{"__id__":{"kind":"number","value":12670153532697,"string":"12,670,153,532,697"},"blob_id":{"kind":"string","value":"407e1a50d0432257bf34c7f65be10972d0db2091"},"directory_id":{"kind":"string","value":"7a187e3127c2e2c0e652c91578d13afcdc509411"},"path":{"kind":"string","value":"/Numbers/Numbers.py"},"content_id":{"kind":"string","value":"fc8ee9eb178a837c0f7a5d5bab9a1a6e06bcf60c"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"2gisprojectT/kaygorodov-twitter"},"repo_url":{"kind":"string","value":"https://github.com/2gisprojectT/kaygorodov-twitter"},"snapshot_id":{"kind":"string","value":"716abc82fcedadb119ab7e5f9bf2b1820144899f"},"revision_id":{"kind":"string","value":"2abe2b0e2f3c53ab47e0d03cb011c342563b7943"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-02T23:12:47.971694","string":"2021-01-02T23:12:47.971694"},"revision_date":{"kind":"timestamp","value":"2014-11-12T17:55:55","string":"2014-11-12T17:55:55"},"committer_date":{"kind":"timestamp","value":"2014-11-12T17:55:55","string":"2014-11-12T17:55:55"},"github_id":{"kind":"number","value":24887262,"string":"24,887,262"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import math\n\n\nclass Numbers:\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n if c > 0:\n self.c = c\n else:\n self.c = 0\n\n def sum(self):\n return self.a + self.b + self.c\n\n def multiplication(self):\n return self.a * self.b * self.c\n\n def abs_multiplication(self):\n return math.fabs(self.a * self.b * self.c)\n\n\nnum = Numbers(3, 10, -2)\n\nprint(num.sum())\nprint(num.multiplication())\nprint(num.abs_multiplication())"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39977,"cells":{"__id__":{"kind":"number","value":14121852488759,"string":"14,121,852,488,759"},"blob_id":{"kind":"string","value":"88c7f403c84fdf0a079133be57944c48ee58ee86"},"directory_id":{"kind":"string","value":"a704892d86252dde1bc0ff885ea5e7d23b45ce84"},"path":{"kind":"string","value":"/addons-extra/olap/cube/levels/level_date.py"},"content_id":{"kind":"string","value":"fc6ddb891a3c06eed54e30e1f8095dea6e7836e4"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"oneyoung/openerp"},"repo_url":{"kind":"string","value":"https://github.com/oneyoung/openerp"},"snapshot_id":{"kind":"string","value":"5685bf8cce09131afe9b9b270f6cfadf2e66015e"},"revision_id":{"kind":"string","value":"7ee9ec9f8236fe7c52243b5550fc87e74a1ca9d5"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-03-31T18:22:41.917881","string":"2016-03-31T18:22:41.917881"},"revision_date":{"kind":"timestamp","value":"2013-05-24T06:10:53","string":"2013-05-24T06:10:53"},"committer_date":{"kind":"timestamp","value":"2013-05-24T06:10:53","string":"2013-05-24T06:10:53"},"github_id":{"kind":"number","value":9902716,"string":"9,902,716"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\nimport sqlalchemy\nfrom level_interface import level_interface\n\nfrom olap.cube import common\nfrom olap.cube import axis_map\n\n#\n# To Be Fixed:\n# Mapping of QX and Month\n#\n\nclass level_date_month(level_interface):\n def run(self, level, metadata, table):\n col = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n col_id = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n result = {\n 'column': [sqlalchemy.func.date_part('month',col_id)],\n 'column_name': [sqlalchemy.func.date_part('month', col)],\n 'axis_mapping': [axis_map.column_mapping],\n 'where_clause': [sqlalchemy.func.date_part('month',col_id) == level.name]\n }\n return result\n\n def children(self, level, metadata, table):\n col = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n col_id = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n qexpr = sqlalchemy.literal('Q')+ sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER',col_id), sqlalchemy.types.String) + sqlalchemy.sql.cast(sqlalchemy.func.date_part('month',col_id),sqlalchemy.types.String)\n return {\n 'column': [sqlalchemy.func.date_part('month',col)],\n 'column_name':[sqlalchemy.func.date_part('month',col)],\n 'axis_mapping': [axis_map.column_mapping]\n }\n\n def validate(self, level, name):\n \n return 1\n\nclass level_date_year(level_interface):\n def run(self, level, metadata, table):\n col = common.col_get(sqlalchemy.Table(level.object.table_name,metadata),level.object.column_name)\n col_id = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n result = {\n 'column': [sqlalchemy.func.date_part('year',col_id)],\n 'column_name': [sqlalchemy.func.date_part('year', col)],\n 'axis_mapping': [axis_map.column_mapping],\n 'where_clause': [sqlalchemy.func.date_part('year',col_id) == level.name]\n }\n return result\n\n def children(self, level, metadata, table):\n col = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n col_id = 
common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n return {\n 'column': [sqlalchemy.func.date_part('year',col_id)],\n 'column_name':[sqlalchemy.func.date_part('year',col)],\n 'axis_mapping': [axis_map.column_mapping],\n 'where_clause':[]\n }\n\n def validate(self, level, name):\n return 1\n\n#\n# To Do: Create your own axis mapping\n#\nclass level_date_quarter(level_interface):\n\n def run(self, level, metadata, table):\n quarters = {\n 'Q1': [1,2,3],\n 'Q2': [4,5,6],\n 'Q3': [7,8,9],\n 'Q4': [10,11,12]\n }\n col = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n col_id = common.col_get(sqlalchemy.Table(level.object.table_name,metadata), level.object.column_name)\n qexpr = sqlalchemy.literal('Q')+ sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER',col_id), sqlalchemy.types.String)\n if not level.name in quarters:\n raise 'Quarter should be in Q1,Q2,Q3,Q4 !'\n\n result = {\n 'column': [qexpr],\n 'column_name': [qexpr],\n 'axis_mapping': [axis_map.column_static],\n 'where_clause': [\n (sqlalchemy.func.date_part('month',col_id)==quarters[level.name][0]) |\n (sqlalchemy.func.date_part('month',col_id)==quarters[level.name][1]) |\n (sqlalchemy.func.date_part('month',col_id)==quarters[level.name][2])\n ]\n }\n return result\n\n\n\n def children(self, level, metadata, table):\n table = sqlalchemy.Table(level.object.table_name, metadata)\n col =common.col_get(table, level.object.column_name)\n col_id = common.col_get(table,level.object.column_name)\n qexpr = sqlalchemy.literal('Q')+ sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER',col_id), sqlalchemy.types.String)\n return {\n 'column': [qexpr],\n 'column_name': [qexpr],\n 'axis_mapping': [axis_map.column_mapping_value]\n }\n\n def validate(self, level, name):\n return 1\n# vim: ts=4 sts=4 sw=4 si et\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39978,"cells":{"__id__":{"kind":"number","value":9105330674921,"string":"9,105,330,674,921"},"blob_id":{"kind":"string","value":"77fed391a9f40ba0e14c152912f4ce7b404398d8"},"directory_id":{"kind":"string","value":"2beb72aa4d15541bb351460a155fea091b51b165"},"path":{"kind":"string","value":"/src/utils.py"},"content_id":{"kind":"string","value":"5d4369083b913d6a9f04a71064260eb7d37ff75d"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"menshen/GetAPKDetails"},"repo_url":{"kind":"string","value":"https://github.com/menshen/GetAPKDetails"},"snapshot_id":{"kind":"string","value":"9dd7576c84aa28e96d62f936a82cb175ee51e213"},"revision_id":{"kind":"string","value":"170c7f73e77f67cb151b5e5ba3e39e37ca8da4e3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-25T04:30:20.089932","string":"2020-12-25T04:30:20.089932"},"revision_date":{"kind":"timestamp","value":"2014-07-21T03:39:34","string":"2014-07-21T03:39:34"},"committer_date":{"kind":"timestamp","value":"2014-07-21T03:39:34","string":"2014-07-21T03:39:34"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\r\n#coding:utf-8 \r\n# --*-- encoding:utf-8 --*--\r\n'''\r\nCreated on 2011-7-7\r\n\r\n@author: Evil\r\n\r\nexport LANG=\"zh_CN.GB18030\"\r\n'''\r\n\r\nimport os,sys\r\n\r\nreload(sys) \r\nsys.setdefaultencoding('utf-8') \r\n\r\n\r\ndef convertFilePath(path):\r\n path = path.replace('(', '\\(')\r\n path = path.replace(' ', '\\ ')\r\n path = path.replace(')', '\\)')\r\n\r\n return path\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39979,"cells":{"__id__":{"kind":"number","value":635655207577,"string":"635,655,207,577"},"blob_id":{"kind":"string","value":"db3475abc03d0c706695a5354a6ce04a4f6acbf7"},"directory_id":{"kind":"string","value":"c2ff7389a75d2765ba8e8cf62ad5f1b141c10179"},"path":{"kind":"string","value":"/pyglossary.pyw"},"content_id":{"kind":"string","value":"bd226c7e01a00f5058bf11a8f5399ce5317c775a"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-or-later","GPL-1.0-or-later","GPL-3.0-only"],"string":"[\n \"GPL-3.0-or-later\",\n \"GPL-1.0-or-later\",\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"vladimirgegsistem/pyglossary-1"},"repo_url":{"kind":"string","value":"https://github.com/vladimirgegsistem/pyglossary-1"},"snapshot_id":{"kind":"string","value":"9bea2ff8109aec26a18a4b00c2ab1704c5cfd04d"},"revision_id":{"kind":"string","value":"917c3a23f10595b07c6004a2f1ccd2f515c3a6fd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-11T09:04:09.714784","string":"2020-12-11T09:04:09.714784"},"revision_date":{"kind":"timestamp","value":"2014-11-06T08:03:05","string":"2014-11-06T08:03:05"},"committer_date":{"kind":"timestamp","value":"2014-11-06T08:03:05","string":"2014-11-06T08:03:05"},"github_id":{"kind":"number","value":27014356,"string":"27,014,356"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n## ui_main.py\n##\n## Copyright © 2008-2010 Saeed Rasooli (ilius)\n## This file is part of PyGlossary project, http://sourceforge.net/projects/pyglossary/\n##\n## This program is a free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 3, or (at your option)\n## any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program. Or on Debian systems, from /usr/share/common-licenses/GPL\n## If not, see .\n\nimport os, sys, getopt, __builtin__\nfrom pyglossary.glossary import confPath, VERSION\n#from pyglossary.text_utils import printAsError ## No red color, plain\nfrom os.path import dirname, join, realpath\n\nfrom ui.ui_cmd import COMMAND, printAsError, help, parseFormatOptionsStr\n\ndef myRaise(File=None):\n i = sys.exc_info()\n if File==None:\n sys.stderr.write('line %s: %s: %s'%(i[2].tb_lineno, i[0].__name__, i[1]))\n else:\n sys.stderr.write('File \"%s\", line %s: %s: %s'%(File, i[2].tb_lineno, i[0].__name__, i[1]))\n\ndef dashToCamelCase(text):## converts \"hello-PYTHON-user\" to \"helloPythonUser\"\n parts = text.split('-')\n parts[0] = parts[0].lower()\n for i in range(1, len(parts)):\n parts[i] = parts[i].capitalize()\n return ''.join(parts)\n\nuse_psyco_file = '%s_use_psyco'%confPath\npsyco_found = None\n\n\nui_list = ('gtk', 'tk', 'qt')\n\n#print('PyGlossary %s'%VERSION)\n\nif os.path.isfile(use_psyco_file):\n try:\n import psyco\n except ImportError:\n print('Warning: module \"psyco\" not found. 
It could speed up execution.')\n psyco_found = False\n else:\n psyco.full()\n print('Using module \"psyco\" to speed up execution.')\n psyco_found = True\n\navailable_options = [\n 'version',\n 'help',\n 'ui=',\n 'read-options=',\n 'write-options=',\n 'read-format=',\n 'write-format=',\n 'reverse',\n 'no-progress-bar',\n]\n\n## no-progress-bar only for command line UI\n## FIXME: load ui-dependent available options from ui modules (for example ui_cmd.available_options)\n## the only problem is that it has to \"import gtk\" before it get the \"ui_gtk.available_options\"\n\ntry:\n (options, arguments) = getopt.gnu_getopt(\n sys.argv[1:],\n 'vhu:r:w:',\n available_options,\n )\nexcept getopt.GetoptError:\n printAsError(sys.exc_info()[1])\n print 'try: %s --help'%COMMAND\n sys.exit(1)\n\n\"\"\"\nui_type: User interface type\nPossible values:\n cmd - Command line interface, this ui will automatically selected if you give both input and output file\n gtk - GTK interface\n tk - Tkinter interface\n qt - Qt interface\n auto - Use the first available UI\n\"\"\"\n\nui_type = 'auto'\n\nif len(arguments)<1:## open GUI\n ipath = opath = ''\nelif len(arguments)==1:## open GUI, in edit mode (if gui support, like DB Editor in ui_gtk)\n ipath = arguments[0]\n opath = ''\nelse:## run the commnad line interface\n ui_type = 'cmd'\n ipath = arguments[0]\n opath = arguments[1]\n\n\nread_format = '' ## only used in ui_cmd for now\nwrite_format = '' ## only used in ui_cmd for now\nread_options = {} ## only used in ui_cmd for now\nwrite_options = {} ## only used in ui_cmd for now\nreverse = False ## only used in ui_cmd for now\nui_options = {}\n\n\n'''\n examples for read and write options:\n --read-options testOption=stringValue\n --read-options enableFoo=True\n --read-options fooList=[1,2,3]\n --read-options 'fooList=[1, 2, 3]'\n --read-options 'testOption=stringValue; enableFoo=True; fooList=[1, 2, 3]'\n --read-options 'testOption=stringValue;enableFoo=True;fooList=[1,2,3]'\n'''\n\n\nfor (opt, opt_arg) in options:\n if opt in ('-v', '--version'):\n print('PyGlossary %s'%VERSION)\n sys.exit(0)\n elif opt in ('-h', '--help'):\n help()\n sys.exit(0)\n elif opt in ('-u', '--ui'):\n if opt_arg in ui_list:\n ui_type = opt_arg\n else:\n printAsError('invalid ui type %s'%opt_arg)\n elif opt in ('-r', '--read-options'):\n read_options = parseFormatOptionsStr(opt_arg)\n elif opt in ('-w', '--write-options'):\n write_options = parseFormatOptionsStr(opt_arg)\n elif opt == '--read-format':\n read_format = opt_arg\n elif opt == '--write-format':\n write_format = opt_arg\n elif opt == '--reverse':\n reverse = True\n elif opt.startswith('--'):\n ui_options[dashToCamelCase(opt[2:])] = opt_arg ## opt_arg is not None, UI just ignores None value\n\n\n## FIXME\n## -v (verbose or version?)\n## -r (reverse or read-options)\n\nif ui_type == 'cmd':\n from ui import ui_cmd\n sys.exit(ui_cmd.UI(**ui_options).run(\n ipath,\n opath=opath,\n read_format=read_format,\n write_format=write_format,\n read_options=read_options,\n write_options=write_options,\n reverse=reverse,\n ))\nelse:\n if ui_type=='auto':\n ui_module = None\n for ui_type2 in ui_list:\n try:\n ui_module = getattr(__import__('ui.ui_%s'%ui_type2), 'ui_%s'%ui_type2)\n except ImportError:\n myRaise()## FIXME\n else:\n break\n if ui_module==None:\n printAsError('no user interface module found!')\n sys.exit(1)\n else:\n ui_module = getattr(__import__('ui.ui_%s'%ui_type), 'ui_%s'%ui_type)\n sys.exit(ui_module.UI(**ui_options).run(\n editPath=ipath,\n read_options=read_options,\n ))\n 
## don't forget to append \"**options\" at every UI.__init__ arguments\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39980,"cells":{"__id__":{"kind":"number","value":8443905713923,"string":"8,443,905,713,923"},"blob_id":{"kind":"string","value":"10d00c67c45f8ef01a77059161bd4e04961cef47"},"directory_id":{"kind":"string","value":"7096dac027e6c7e0594dbde79eeefdce3883cff6"},"path":{"kind":"string","value":"/para_grading/ML/Tests/test_05_term_extraction.py"},"content_id":{"kind":"string","value":"74a1860636ba988d97d484b55cd47898c1031ecf"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"YuanhaoSun/PPLearn"},"repo_url":{"kind":"string","value":"https://github.com/YuanhaoSun/PPLearn"},"snapshot_id":{"kind":"string","value":"f45b72cf87895ee388948d4202e26bde3526fb8c"},"revision_id":{"kind":"string","value":"8ffb4c5f8540a37dad3e4741fbf5fef0094eeb9d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-17T11:06:58.903744","string":"2020-05-17T11:06:58.903744"},"revision_date":{"kind":"timestamp","value":"2011-12-30T20:51:50","string":"2011-12-30T20:51:50"},"committer_date":{"kind":"timestamp","value":"2011-12-30T20:51:50","string":"2011-12-30T20:51:50"},"github_id":{"kind":"number","value":2602364,"string":"2,602,364"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from time import time\nimport numpy as np\nfrom operator import itemgetter\nfrom StringIO import StringIO \n\nfrom sklearn.datasets import load_files\nfrom sklearn.utils import shuffle\nfrom sklearn.feature_extraction.text import Vectorizer\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.feature_selection import SelectKBest, chi2\n\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.cross_validation import KFold, StratifiedKFold, ShuffleSplit\n\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.linear_model import RidgeClassifier, LogisticRegression\nfrom sklearn.linear_model.sparse import SGDClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB\nfrom sklearn.lda import LDA\nfrom sklearn.svm.sparse import LinearSVC, SVC\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\n\n\n###############################################################################\n# Preprocessing\n\n\n# # Load from raw data\n# # Load categories\ncategories = ['nolimitshare','notsell', 'notsellnotshare', 'notsharemarketing', 'sellshare', \n 'shareforexception', 
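Worth noting how the long options are mapped onto UI keyword arguments: any unrecognized `--xyz-abc` flag is camel-cased by `dashToCamelCase` before landing in `ui_options`. A quick illustration of that function's behavior (outputs follow directly from the code above):

```python
# Illustration of dashToCamelCase; both results are derivable from the function body.
print dashToCamelCase('no-progress-bar')    # -> noProgressBar
print dashToCamelCase('hello-PYTHON-user')  # -> helloPythonUser (the docstring's own example)
```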
File: /para_grading/ML/Tests/test_05_term_extraction.py (repo YuanhaoSun/PPLearn, https://github.com/YuanhaoSun/PPLearn)
Python, UTF-8, year 2011, license: none detected
```python
from time import time
import numpy as np
from operator import itemgetter
from StringIO import StringIO

from sklearn.datasets import load_files
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import Vectorizer
from sklearn.preprocessing import Normalizer
from sklearn.feature_selection import SelectKBest, chi2

from sklearn import metrics
from sklearn.externals import joblib
from sklearn.cross_validation import KFold, StratifiedKFold, ShuffleSplit

from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.linear_model.sparse import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.lda import LDA
from sklearn.svm.sparse import LinearSVC, SVC
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier


###############################################################################
# Preprocessing


# # Load from raw data
# # Load categories
categories = ['nolimitshare','notsell', 'notsellnotshare', 'notsharemarketing', 'sellshare',
            'shareforexception', 'shareforexceptionandconsent','shareonlyconsent']
categories3 = ['good','neutral', 'bad']
# # Load data
# print "Loading privacy policy dataset for categories:"
# print categories if categories else "all"
# data_set = load_files('../../Dataset/ShareStatement/raw', categories = categories,
                    # shuffle = True, random_state = 42)
# data_set = load_files('../../Dataset/ShareStatement3/raw', categories = categories3,
                    # shuffle = True, random_state = 42)
# print 'data loaded'
# print

# data_set = joblib.load('../../Dataset/test_datasets/data_set_pos_tagged.pkl')

# load from pickle
# load data and initialize classification variables
# data_set = joblib.load('../../Dataset/train_datasets/data_set_origin.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_stemmed.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_lemmatized.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_lemmatized_pos.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_negation_bigram.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_selected.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_term_extracted.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_tagged.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_bagged.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_firstsense.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_internal_sentence_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_corpus_sentence_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_corpus_word_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_internal_word_wsd.pkl')

data_set = joblib.load('../../Dataset/train_datasets_3/data_set_origin.pkl')
# data_set = joblib.load('../../Dataset/train_datasets_3/data_set_pos_selected.pkl')
# data_set = joblib.load('../../Dataset/train_datasets_3/data_set_term_extracted.pkl')
categories = data_set.target_names


y = data_set.target


# Extract features
vectorizer = Vectorizer(max_features=10000)

# Engineering nGram
# vectorizer.analyzer.max_n = 2

# Engineering stopword
vectorizer.analyzer.stop_words = set([])
# vectorizer.analyzer.stop_words = set(["amazonnn", "comnn", "incnn", "emcnn", "alexann", "realnetworks", "googlenn", "googlevbp", "linkedinnn",
#                                     "foxnn", "zyngann", "eann", "yahoorb", "travelzoo", "kalturann", "2cocd", "ign", "blizzardnn",
#                                     "jobstreetcom", "surveymonkeynn", "microsoftnn", "wraljj", "spenn", "tnn", "mobile", "opendnsnns",
#                                     "bentleynn", "allvoicesnns", "watsonnn", "dynnn", "aenn", "downn", "jonesnns", "webmnn", "toysrus", "bonnierjjr",
#                                     "skypenn", "wndnn", "landrovernn", "icuenn", "seinn", "entersectnn", "padealsnns", "acsnns", "enn",
#                                     "gettynn", "imagesnns", "winampvbp", "lionsgatenn", "opendnnn", "allvoicenn", "padealnn", "imagenn",
#                                     "jonenn", "acnn", ])
# vectorizer.analyzer.stop_words = set(["amazon", "com", "inc", "emc", "alexa", "realnetworks", "google", "linkedin",
#                                     "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign", "blizzard",
#                                     "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile", "opendns",
#                                     "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus", "bonnier",
#                                     "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#                                     "getty", "images", "winamp", "lionsgate", "opendn", "allvoice", "padeal", "image",
#                                     "getti", "gett", "jone", "ac"])
# vectorizer.analyzer.stop_words = set(["amazon", "com", "inc", "emc", "alexa", "realnetworks", "google", "linkedin",
#                                     "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign", "blizzard",
#                                     "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile", "opendns",
#                                     "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus", "bonnier",
#                                     "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#                                     "getty", "images", "winamp", "lionsgate", "opendn", "allvoice", "padeal", "image",
#                                     "getti", "gett", "jone", "ac", "not"])
# vectorizer.analyzer.stop_words = set(['as', 'of', 'in', 'you', 'rent', 'we', 'the', 'sell', 'parties', 'we', 'with', 'not', 'personal',
                            # 'third', 'to', 'share', 'your', 'information', 'or', ]) #threshold 20 on training set
# vectorizer.analyzer.stop_words = set(["we", "do", "you", "your", "the", "that", "this",
#                                     "is", "was", "are", "were", "being", "be", "been",
#                                     "for", "of", "as", "in", "to", "at", "by",
#                                     # "or", "and",
#                                     "ve",
#                                     "amazon", "com", "inc", "emc", "alexa", "realnetworks", "google", "linkedin",
#                                     "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign", "blizzard",
#                                     "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile", "opendns",
#                                     "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus", "bonnier",
#                                     "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#                                     "getty", "images", "winamp", "lionsgate", ])
X = vectorizer.fit_transform(data_set.data)
# X = Normalizer(norm="l2", copy=False).transform(X)

# # Engineering feature selection
# ch2 = SelectKBest(chi2, k = 276)
# X = ch2.fit_transform(X, y)

X = X.toarray()
# X = X.todense()

n_samples, n_features = X.shape
print "n_samples: %d, n_features: %d" % (n_samples, n_features)
print


###############################################################################
# Test classifier using n runs of K-fold Cross Validation

X_orig = X
y_orig = y

clf = BernoulliNB(alpha=.1) # used for grading classification
# clf = RidgeClassifier(tol=1e-1)
print clf

num_run = 50

# lists to hold all n*k data
f1_total = []
f5_total = []
acc_total = []
pre_total = []
rec_total = []

# n runs of KFold
for i in range(num_run):

    X, y = shuffle(X_orig, y_orig, random_state=(i+50))
    # Setup 10 fold cross validation
    num_fold = 10
    kf = KFold(n_samples, k=num_fold, indices=True)

    # Initialize variables for counting the average
    f1_all = []
    f5_all = []
    acc_all = []
    pre_all = []
    rec_all = []

    # Test for 10 rounds using the results from 10 fold cross validations
    for train_index, test_index in kf:

        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # fit and predict
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)

        # metrics
        acc_score = metrics.zero_one_score(y_test, pred)
        pre_score = metrics.precision_score(y_test, pred)
        rec_score = metrics.recall_score(y_test, pred)

        acc_all.append(acc_score)
        pre_all.append(pre_score)
        rec_all.append(rec_score)

    # put the lists into numpy arrays for calculating the results
    acc_all_array = np.asarray(acc_all)
    pre_all_array = np.asarray(pre_all)
    rec_all_array = np.asarray(rec_all)

    # add to the total k*n data set
    acc_total += acc_all
    pre_total += pre_all
    rec_total += rec_all

    # Result for each run
    print ( (2*pre_all_array.mean()*rec_all_array.mean()) / (rec_all_array.mean()+pre_all_array.mean()) )

# put the total k*n lists into numpy arrays for calculating the overall results
acc_total_array = np.asarray(acc_total)
pre_total_array = np.asarray(pre_total)
rec_total_array = np.asarray(rec_total)

# Report f1 and f0.5 using the final precision and recall for consistency
print "Overall precision: %0.5f (+/- %0.2f)" % ( pre_total_array.mean(), pre_total_array.std() / 2 )
print "Overall recall: %0.5f (+/- %0.2f)" % ( rec_total_array.mean(), rec_total_array.std() / 2 )
# print (2*pre_total_array.mean()*rec_total_array.mean())/(rec_total_array.mean()+pre_total_array.mean())
print "Overall f1-score: %0.5f" % ( (2*pre_total_array.mean()*rec_total_array.mean()) / (rec_total_array.mean()+pre_total_array.mean()) )
print "Overall f0.5-score: %0.5f" % ( (1.25*pre_total_array.mean()*rec_total_array.mean()) / (rec_total_array.mean()+0.25*pre_total_array.mean()) )
```
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pv/pydocweb"},"repo_url":{"kind":"string","value":"https://github.com/pv/pydocweb"},"snapshot_id":{"kind":"string","value":"c5e372488e73959ed4dbde7ca9ddc12ce9dbcf94"},"revision_id":{"kind":"string","value":"05c7b69c3903e2bb90cca511f18f9c10d7926cc6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-03-12T19:56:20.227038","string":"2021-03-12T19:56:20.227038"},"revision_date":{"kind":"timestamp","value":"2013-01-24T19:41:16","string":"2013-01-24T19:41:16"},"committer_date":{"kind":"timestamp","value":"2013-01-24T19:41:16","string":"2013-01-24T19:41:16"},"github_id":{"kind":"number","value":717383,"string":"717,383"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import pydocweb.docweb.models as models\n\ndef media_url(request):\n site = models.Site.objects.get_current()\n sites = models.Site.objects.all()\n from django.conf import settings\n return {'MEDIA_URL': settings.MEDIA_URL,\n 'SITE_PREFIX': settings.SITE_PREFIX,\n 'OTHER_SITES': [s for s in sites if s != site],\n 'CURRENT_SITE': site}\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39982,"cells":{"__id__":{"kind":"number","value":2783138853953,"string":"2,783,138,853,953"},"blob_id":{"kind":"string","value":"2dbc83a18b299415587e00483c0fed2162061da4"},"directory_id":{"kind":"string","value":"5924bfeecab771a4fd4e87fdaa20705da1a98546"},"path":{"kind":"string","value":"/scripts/divzero.py"},"content_id":{"kind":"string","value":"1979413448c5f85ee8d65a68e79661671bef173d"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"odewahn/kids_code_ipython_test"},"repo_url":{"kind":"string","value":"https://github.com/odewahn/kids_code_ipython_test"},"snapshot_id":{"kind":"string","value":"2e799830e801f8be10059ca24b82adbfe673fba3"},"revision_id":{"kind":"string","value":"adcde6cab6ba8d652bd56eb48bd922889af19ffc"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-03-12T20:40:33.172558","string":"2021-03-12T20:40:33.172558"},"revision_date":{"kind":"timestamp","value":"2014-06-30T16:57:21","string":"2014-06-30T16:57:21"},"committer_date":{"kind":"timestamp","value":"2014-06-30T16:57:21","string":"2014-06-30T16:57:21"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"num1 = input(\"First number: \")\nnum2 = input(\"Second number: \")\nprint \"Result:\", num1 / num2\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39983,"cells":{"__id__":{"kind":"number","value":6047313998521,"string":"6,047,313,998,521"},"blob_id":{"kind":"string","value":"2c3045ee39ed9538d7aa790d39dd8d45b8c20a5f"},"directory_id":{"kind":"string","value":"057fb361fa10aec79376e7b0000b402590834015"},"path":{"kind":"string","value":"/src/util/filetools.py"},"content_id":{"kind":"string","value":"62ec2ae0f566016f63bb717ad792c4fe2c6216f4"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"pingansdaddy/newtempo"},"repo_url":{"kind":"string","value":"https://github.com/pingansdaddy/newtempo"},"snapshot_id":{"kind":"string","value":"e8733094ed3ebf3a2bca98b25d7addefa22d5d3c"},"revision_id":{"kind":"string","value":"cf5c150b1741e31bfc85b62f760abfbf1e63d9dd"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T14:30:31.453235","string":"2021-01-19T14:30:31.453235"},"revision_date":{"kind":"timestamp","value":"2014-10-15T06:47:18","string":"2014-10-15T06:47:18"},"committer_date":{"kind":"timestamp","value":"2014-10-15T06:47:18","string":"2014-10-15T06:47:18"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding:utf-8\nimport os\nimport time\n\n\n\nclass GrowingFile(object):\n \n def __init__(self, fn):\n self._fn = fn\n self._fd = os.open(self._fn, os.O_RDONLY)\n self._max_size = 1024*4\n\n def run(self):\n \n while True:\n buf = os.read(self._fd, self._max_size)\n if not buf:\n continue\n \n time.sleep(0.01)\n\n\nclass Tailer(object):\n \"\"\"\n 与tail命令类似,输出跟踪文件的新行\n \"\"\"\n def __init__(self, filename):\n self._fn = filename\n self._fd = os.open(self._fn, os.O_RDONLY)\n _stat = os.stat(self._fn)\n self.st_ino = _stat.st_ino\n os.lseek(self._fd, _stat.st_size, os.SEEK_SET)\n \n def handle_line(self, content):\n \"\"\"\n override in child class\n \"\"\"\n pass\n\n def do_rotate(self):\n try:\n os.close(self._fd)\n self._fd = os.open(self._fn, os.O_RDONLY)\n except Exception, e:\n pass\n\n def handle_fd_changed(self):\n c_ino = self.st_ino\n count = 0\n while c_ino == self.st_ino:\n try:\n c_ino = os.stat(self._fn).st_ino\n count += 1\n except OSError:\n time.sleep(1)\n if count > 5:\n break\n else:\n self.st_ino = c_ino\n self.do_rotate()\n\n def readline(self):\n buf = []\n while True:\n c = os.read(self._fd, 1)\n if c and c != '\\n':\n buf.append(c)\n else:\n return ''.join(buf) if buf else None\n\n def advance(self):\n line = self.readline()\n if not line:\n self.handle_fd_changed()\n line = self.readline()\n \n if line:\n self.handle_line(line)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39984,"cells":{"__id__":{"kind":"number","value":7722351234936,"string":"7,722,351,234,936"},"blob_id":{"kind":"string","value":"92b8449d352aef4603d031d47ad5bbdcde352357"},"directory_id":{"kind":"string","value":"fff1836a8a75148d6ca306f688d93c36d6a5f8eb"},"path":{"kind":"string","value":"/pyFlashCopy.py"},"content_id":{"kind":"string","value":"59abdb99882f5e8713b69ec2f88db53332256bcd"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"rmamba/pyFlashOnPI"},"repo_url":{"kind":"string","value":"https://github.com/rmamba/pyFlashOnPI"},"snapshot_id":{"kind":"string","value":"c37598acf3098b398910fec213983fcf231336da"},"revision_id":{"kind":"string","value":"eb4315c2248b8d9d15e2f6c8d67da9cb20f0c23f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-18T05:27:32.115855","string":"2020-05-18T05:27:32.115855"},"revision_date":{"kind":"timestamp","value":"2014-03-15T18:20:46","string":"2014-03-15T18:20:46"},"committer_date":{"kind":"timestamp","value":"2014-03-15T18:20:46","string":"2014-03-15T18:20:46"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin python\n# Filename: pyFlashCopy.py\n\n'''\nCreated on 13 Mar 2014\n\n@author: rmamba@gmail.com\n'''\n\nimport sys, os.path, json\n\nif __name__ == '__main__':\n\tsize = None\n\ttargetOffset = 0\n\tsourceOffset = 0\n\tbAppend = False\n\tfileIn = None\n\tfileOut = None\n\trouterSection = None\n\trouterParams = None\n\tbOW = False\n\t\n\ttry:\n\t\tif len(sys.argv) < 3:\n\t\t\tprint \"Not enough parameters!\"\n\t\t\traise SystemExit\t\t\n\n\t\tfileIn = sys.argv[1]\n\t\tif not os.path.isfile(fileIn):\n\t\t\tprint \"File does not exist!\"\n\t\t\traise SystemExit\n\t\t\n\t\tfor arg in sys.argv:\n\t\t\tif (arg == '--append') or (arg == '-a'):\n\t\t\t\tbAppend = True\n\t\t\tif arg.startswith('--size=') or arg.startswith('-si='):\n\t\t\t\ttmp = arg.split('=')\n\t\t\t\tsize = int(tmp[1])\n\t\t\tif arg.startswith('--sourceOffset=') or arg.startswith('-so='):\n\t\t\t\ttmp = arg.split('=')\n\t\t\t\tsourceOffset = tmp\n\t\t\tif arg.startswith('--targetOffset=') or arg.startswith('-to='):\n\t\t\t\ttmp = arg.split('=')\n\t\t\t\ttargetOffset = tmp\n\t\t\tif arg.startswith('--router=') or arg.startswith('-r=') or arg.startswith('-ow='):\n\t\t\t\tif arg.startswith('-ow='):\n\t\t\t\t\t#overwrite data in fIn!!!\n\t\t\t\t\t#-ow=WR741ND:v43:MAC 0xFFFFFFFFFFFF,PIN 12345678\n\t\t\t\t\tbOW = True\n\t\t\t\t#Copy section from fIn to fOut\n\t\t\t\t#-r=WR741ND:v43:uboot,rootfs\n\t\t\t\ttmp = arg.split('=')\n\t\t\t\ttmp2 = tmp[1].split(\":\")\n\t\t\t\tif len(tmp2) != 3:\n\t\t\t\t\tprint \"Invalid router parameter!\"\n\t\t\t\t\traise SystemExit\n\t\t\t\t\n\t\t\t\tj = open('romlayouts.json', 'r')\n\t\t\t\tjsn = j.read()\n\t\t\t\tj.close()\t\t\t\t\n\t\t\t\tjsn = json.loads(jsn)\n\t\t\t\tif not tmp2[0] in jsn:\n\t\t\t\t\tprint \"Unknown router!\"\n\t\t\t\t\traise SystemExit\n\t\t\t\tjsn = jsn[tmp2[0]]\n\t\t\t\tif not tmp2[1] in jsn:\n\t\t\t\t\tprint \"Unknown router version!\"\n\t\t\t\t\traise SystemExit\n\t\t\t\tjsn = jsn[tmp2[1]]\t\n\t\t\t\tif arg.startswith('-ow='):\n\t\t\t\t\trouterParams = jsn[\"DATA\"]\n\t\t\t\telse:\n\t\t\t\t\trouterParams = jsn\n\t\t\t\trouterSection = tmp2[2].split(\",\")\n\t\t\t\t#if not tmp2[2] in jsn:\n\t\t\t\t#\tprint \"Unknown router 
section!\"\n\t\t\t\t#\traise SystemExit\n\t\t\t\n\t\tfIn = open(fileIn, \"r+b\")\n\t\tif size == None:\n\t\t\tfIn.seek(0, 2)\n\t\t\tsize = fIn.tell() - sourceOffset\n\t\tfIn.seek(sourceOffset)\n\t\t\n\t\tif bAppend:\n\t\t\tif not os.path.isfile(fileOut):\n\t\t\t\tprint \"File does not exist!\"\n\t\t\t\traise SystemExit\n\t\t\t\n\t\t\tfOut = open(fileOut, \"ab\")\n\t\t\tfOut.seek(0, 2)\n\t\t\treadData = 0\n\t\t\t\n\t\t\twhile readData < size:\n\t\t\t\tdata = fIn.read(1024)\n\t\t\t\tfOut.write(data)\n\t\t\t\treadData = readData + 1024\n\t\t\t\n\t\t\tfout.close()\n\t\telif bOW:\n\t\t\tfor section in routerSection:\n\t\t\t\ttmp = section.split('#')\n\t\t\t\tprint tmp\n\t\t\t\tfIn.seek(int(routerParams[tmp[0]][\"offset\"], 0))\n\t\t\t\tif tmp[1].startswith('0x'):\n\t\t\t\t\t#hex data\n\t\t\t\t\tdat = tmp[1][2:].decode(\"hex\")\n\t\t\t\t\tfIn.write(bytearray(dat))\n\t\t\t\telse:\n\t\t\t\t\t#string\n\t\t\t\t\tfIn.write(bytearray(tmp[1]))\n\t\telse:\n\t\t\tif not os.path.isfile(fileOut):\n\t\t\t\tprint \"File does not exist!\"\n\t\t\t\traise SystemExit\n\t\t\tif routerSection == None:\n\t\t\t\tif targetOffset > 0:\n\t\t\t\t\t#inject data\n\t\t\t\t\tfOut = open(fileOut, \"r+b\")\n\t\t\t\t\tfOut.seek(targetOffset)\n\t\t\t\telse:\n\t\t\t\t\tfOut = open(fileOut, \"wb\")\n\t\t\t\t\n\t\t\t\treadData = 0\n\t\t\t\tchunk = 4096\n\t\t\t\twhile readData < size:\n\t\t\t\t\tdata = fIn.read(chunk)\n\t\t\t\t\tfOut.write(data)\n\t\t\t\t\treadData = readData + chunk\n\t\t\t\t\tif size-readData= MAX_FINES_QUO - 3:\n hero.alert_text = ('Еще ' + str(MAX_FINES_QUO - hero.fines) +\n ' штраф и вас уволят')\n if hero.is_near_barman() and barman.is_at_workplace():\n if not hero.alert_times:\n hero.alert_text = 'Нажмите E, что бы поесть'\n hero.alert_times = int(ALERT_TIME/5)\n\n # Движение persons(синие и оранжевые кружки)\n for person in citizens:\n if person.is_at_point(person.target_x, person.target_y):\n if not is_working_day():\n while True:\n person.target_vert = random.choice(person.Links[person.target_vert])\n if person.target_vert in NIGHT_TABOO_PLACES: continue\n else: break\n else:\n person.target_vert = random.choice(person.Links[person.target_vert]) \n \n person.target_x = person.Verts[person.target_vert][0]\n person.target_y = person.Verts[person.target_vert][1]\n\n person.make_step()\n\n # Горожане у рабочего места кружка в салоне\n for person in citizens:\n if (person.is_at_point(person.Verts[26][0], person.Verts[26][1]) or\n person.is_at_point(person.Verts[27][0], person.Verts[27][1])):\n if hero.is_at_workplace():\n if not person.is_paid_for:\n key = random.choice(Price_list)\n hero.money += Price_dict[key]\n person.is_paid_for = True\n person.alert_times = ALERT_TIME\n if hero.money > MAX_MONEY:\n hero.money = MAX_MONEY\n else:\n hero.alert_text = 'Не было на месте. 
Штраф!'\n hero.alert_times = ALERT_TIME\n hero.money -= FINE\n hero.fines += 1\n if hero.money < -MAX_MONEY:\n hero.money = -MAX_MONEY \n # Отображение уведомлений от горожан\n if person.is_paid_for:\n if not hero.alert_text: # затычка, что бы выводы текста не накладывались\n alert(key) # нужно бы переписать всю систему вывода алертов\n person.alert_times -= 1\n if person.alert_times == 0: person.is_paid_for = False\n\n # Условия окончания игры\n if hero.money == MAX_MONEY:\n win = True\n break\n if hero.fines == MAX_FINES_QUO:\n win = False\n break\n\n # Жизненный цикл соседа(neighbour)\n if (hours == 12 or hours == 18) and minutes == 0:\n neighbour.direction = 'toward'\n if neighbour.is_at_point(neighbour.Verts[5][0], neighbour.Verts[5][1]):\n neighbour.direction = 'back'\n if (neighbour.is_at_point(neighbour.Verts[0][0], neighbour.Verts[0][1]) and\n neighbour.direction == 'back'):\n neighbour.direction = 'stay'\n neighbour.target_vert = 0\n\n if neighbour.direction != 'stay':\n if neighbour.is_at_point(neighbour.target_x, neighbour.target_y):\n if neighbour.direction == 'toward':\n neighbour.target_vert += 1\n else: \n neighbour.target_vert -= 1\n neighbour.target_x = neighbour.Verts[neighbour.target_vert][0]\n neighbour.target_y = neighbour.Verts[neighbour.target_vert][1]\n \n neighbour.make_step()\n\n # Жизненный цикл бармена(barman)\n if hours == 8 and minutes == 0:\n barman.direction = 'toward'\n barman.target_vert = 0\n if hours == 22 and minutes == 0:\n barman.direction = 'back'\n barman.target_vert = 6\n if (barman.is_at_point(barman.Verts[6][0], barman.Verts[6][1]) and \n barman.direction == 'toward'):\n barman.direction = 'stay'\n if (barman.is_at_point(barman.Verts[0][0], barman.Verts[0][1]) and \n barman.direction == 'back'):\n barman.direction = 'stay'\n\n if barman.direction != 'stay':\n if barman.is_at_point(barman.target_x, barman.target_y):\n if barman.direction == 'toward':\n barman.target_vert += 1\n else: \n barman.target_vert -= 1\n barman.target_x = barman.Verts[barman.target_vert][0]\n barman.target_y = barman.Verts[barman.target_vert][1]\n \n barman.make_step()\n\n # Подсчет внутреигрового времени\n minutes = (START_MINUTES + (TIME_RAPID*pygame.time.get_ticks()//1000)%60 - \n minute_inc_counter*START_MINUTES)\n if minutes == 59:\n if minute_step_able:\n hours += 1\n minute_step_able = False\n minute_inc_counter += 1\n if hours == 24:\n hours = 0 \n if minutes == 1:\n minute_step_able = True\n \n # Вывод денег и времени на экран\n # собираем строку для красивого вывода денег\n money_string = ('$' + '0'*(len(str(MAX_MONEY))-len(str(abs(hero.money))))\n + str(abs(hero.money)))\n if hero.money < 0: money_string = '-' + money_string\n set_text(screen, money_string, 50, MONEY_TEXT_POS, True, green)\n \n # собираем строку для красивого вывода времени\n if not hours//10: time_string = '0' + str(hours)\n else: time_string = str(hours)\n time_string += ':'\n if not minutes//10:\n time_string += '0' + str(minutes)\n else: time_string += str(minutes)\n set_text(screen, time_string, 50, TIME_TEXT_POS, True, teal)\n\n # Отрисовываем спрайты в буфер \n hero.rander(screen)\n for person in citizens:\n person.rander(screen)\n neighbour.rander(screen)\n barman.rander(screen)\n # Отображаем буфер на экран\n pygame.display.flip()\n\n new_game = False\n screen.blit(end_background, (0, 0))\n\n if win:\n final_text = WIN_TEXT\n size = 23\n else:\n final_text = LOSE_TEXT\n size = 25\n \n set_text(screen, final_text, size, FINAL_TEXT_POS, True, white) \n btn_finish_game = Button(W_WIDTH/2 
- 30,W_HEIGH/2 - 60, \"Жду...\")\n buttons = [btn_finish_game, btn_exit]\n pygame.mouse.set_visible(True)\n unless_player_press_smth()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39989,"cells":{"__id__":{"kind":"number","value":3341484567910,"string":"3,341,484,567,910"},"blob_id":{"kind":"string","value":"e5169a6084764982aeff521db9ecff79b5d61708"},"directory_id":{"kind":"string","value":"d84922ed03e9d01c84e92b384f20b87edb3ece6d"},"path":{"kind":"string","value":"/src/cog_abm/ML/core.py"},"content_id":{"kind":"string","value":"d2efc70efd70e2b9c96aa0a04ce151c8c6316974"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"plewczynski/cog-abm"},"repo_url":{"kind":"string","value":"https://github.com/plewczynski/cog-abm"},"snapshot_id":{"kind":"string","value":"b1d37cf70f4a1d005e424a085159818dd776bd5e"},"revision_id":{"kind":"string","value":"6f6450141a996b067d3a396d47f4386215a4042c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-27T11:33:24.433639","string":"2021-05-27T11:33:24.433639"},"revision_date":{"kind":"timestamp","value":"2012-08-25T21:01:37","string":"2012-08-25T21:01:37"},"committer_date":{"kind":"timestamp","value":"2012-08-25T21:01:37","string":"2012-08-25T21:01:37"},"github_id":{"kind":"number","value":765520,"string":"765,520"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nMost useful things connected with ML\n\"\"\"\nimport math\n\nfrom itertools import izip\nfrom random import shuffle\n\nfrom scipy.io.arff import loadarff\n\nfrom cog_abm.extras.tools import flatten\n\n\nclass Classifier(object):\n\n def classify(self, sample):\n pass\n\n def classify_pval(self, sample):\n \"\"\"\n Returns tuple with class and probability of sample belonging to it\n \"\"\"\n pass\n\n def class_probabilities(self, sample):\n \"\"\"\n Returns dict with mapping class->probability that sample belongs to it\n \"\"\"\n pass\n\n def train(self, samples):\n pass\n\n def clone(self):\n \"\"\"\n Returns copy of classifier. 
File: /src/cog_abm/ML/core.py (repo plewczynski/cog-abm, https://github.com/plewczynski/cog-abm)
Python, UTF-8, year 2012, license: BSD-3-Clause
```python
"""
Most useful things connected with ML
"""
import math

from itertools import izip
from random import shuffle

from scipy.io.arff import loadarff

from cog_abm.extras.tools import flatten


class Classifier(object):

    def classify(self, sample):
        pass

    def classify_pval(self, sample):
        """
        Returns tuple with class and probability of sample belonging to it
        """
        pass

    def class_probabilities(self, sample):
        """
        Returns dict with mapping class->probability that sample belongs to it
        """
        pass

    def train(self, samples):
        pass

    def clone(self):
        """
        Returns copy of classifier. This is the default implementation.
        Should be overridden in subclasses.

        @rtype: Classifier
        @return: New instance of classifier.
        """
        import copy
        return copy.deepcopy(self)


class Attribute(object):

    ID = None
    """ This class field is for id when putting some conversion method in dict
    """

    def get_value(self, value):
        ''' value is inner representation
        '''
        pass

    def set_value(self, value):
        ''' value is outer representation
        '''
        return value

    def __eq__(self, other):
        return self.ID == other.ID


class NumericAttribute(Attribute):

    ID = "NumericAttribute"

    def get_value(self, value):
        return value


class NominalAttribute(Attribute):

    ID = "NominalAttribute"

    def __init__(self, symbols):
        """
        Symbols should be strings!
        For example Orange doesn't support any other format
        """
        symbols = [str(s) for s in symbols]
        self.symbols = tuple(s for s in symbols)
        self.mapping = dict(reversed(x) for x in enumerate(self.symbols))
        self.tmp_rng = set(xrange(len(self.symbols)))

    def get_symbol(self, idx):
        return self.symbols[idx]

    def get_idx(self, symbol):
        return self.mapping[str(symbol)]

    def get_value(self, value):
        return self.get_symbol(value)

    def set_value(self, value):
        return self.set_symbol(value)

    def set_symbol(self, symbol):
        return self.get_idx(symbol)

    def __eq__(self, other):
        return super(NominalAttribute, self).__eq__(other) and \
            set(self.symbols) == set(other.symbols)


class Sample(object):

    def __init__(self, values, meta=None, cls=None, cls_meta=None,
                    dist_fun=None, last_is_class=False, cls_idx=None):
        self.values = values[:]
        self.meta = meta or [NumericAttribute() for _ in values]

        if last_is_class or cls_idx is not None:
            if last_is_class:
                cls_idx = -1
            self.cls_meta = self.meta[cls_idx]
            self.cls = self.values[cls_idx]
            self.meta = self.meta[:]
            del self.values[cls_idx], self.meta[cls_idx]
        else:
            self.cls = cls
            self.cls_meta = cls_meta

        if dist_fun is None and \
            all(attr.ID == NumericAttribute.ID for attr in self.meta):
            self.dist_fun = euclidean_distance
        else:
            self.dist_fun = dist_fun

    def get_cls(self):
        if self.cls_meta is None or self.cls is None:
            return None

        return self.cls_meta.get_value(self.cls)

    def get_values(self):
        return [m.get_value(v) for v, m in izip(self.values, self.meta)]

    def distance(self, other):
        return self.dist_fun(self, other)

    def __eq__(self, other):
        return self.cls == other.cls and self.cls_meta == other.cls_meta and \
            self.meta == other.meta and self.values == other.values

    def __hash__(self):
        return 3 * hash(tuple(self.values)) + 5 * hash(self.cls)

    def __str__(self):
        return "({0}, {1})".format(str(self.get_values()), self.get_cls())

    def __repr__(self):
        return str(self)

    def copy_basic(self):
        return Sample(self.values, self.meta, dist_fun=self.dist_fun)

    def copy_full(self):
        return Sample(self.values, self.meta, self.cls, self.cls_meta,
                self.dist_fun)

    def copy_set_cls(self, cls, meta):
        s = self.copy_basic()
        s.cls_meta = meta
        s.cls = meta.set_value(cls)
        return s


#Sample distance functions
def euclidean_distance(sx, sy):
    return math.sqrt(math.fsum([
        (x - y) * (x - y) for x, y in izip(sx.get_values(), sy.get_values())
    ]))


def load_samples_arff(file_name, last_is_class=False, look_for_cls=True):
    a_data, a_meta = loadarff(file_name)
    names = a_meta.names()

    attr = {"nominal": lambda attrs: NominalAttribute(attrs),
            "numeric": lambda _: NumericAttribute()}

    gen = (a_meta[n] for n in names)
    meta = [attr[a[0]](a[1]) for a in gen]
    cls_idx = None
    if look_for_cls:
        for i, name in enumerate(names):
            if a_meta[name][0] == "nominal" and name.lower() == "class":
                cls_idx = i
                break

    def create_sample(s):
        values = [mi.set_value(vi) for mi, vi in izip(meta, s)]
        return \
            Sample(values, meta, last_is_class=last_is_class, cls_idx=cls_idx)

    return [create_sample(s) for s in a_data]


def split_data(data, train_ratio=2. / 3.):
    """ data - samples to split into two sets: train and test
        train_ratio - real number in [0,1]

        returns (train, test) - pair of data sets
    """
    tmp = [s for s in data]
    shuffle(tmp)
    train = [s for i, s in enumerate(tmp) if i < train_ratio * len(tmp)]
    test = [s for i, s in enumerate(tmp) if i >= train_ratio * len(tmp)]
    return (train, test)


def split_data_cv(data, folds=8):
    """ data - samples to split into two sets *folds* times

        returns [(train, test), ...] - list of pairs of data sets
    """
    tmp = [s for s in data]
    shuffle(tmp)
    N = len(tmp)
    M = N / folds
    overflow = N % folds
    splits = []
    i = 0
    while i < N:
        n = M
        if overflow > 0:
            overflow -= 1
            n += 1
        split = tmp[i:i + n]
        splits.append(split)
        i += n

    return [(flatten(splits[:i] + splits[i + 1:]), splits[i])
            for i in xrange(folds)]
```
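A short driver may make the intended flow of the helpers above clearer; the .arff file name is a placeholder, and the snippet assumes the file contains a nominal column named "class" so that look_for_cls picks it up:

```python
# Illustrative only; 'iris.arff' stands in for a real ARFF file on disk.
samples = load_samples_arff('iris.arff', look_for_cls=True)

# One shuffled 2/3 train, 1/3 test split
train, test = split_data(samples, train_ratio=2. / 3.)

# 8-fold cross-validation: each element pairs a training set with its held-out fold
for train_fold, test_fold in split_data_cv(samples, folds=8):
    # the folds partition the data, so each pair covers every sample exactly once
    assert len(train_fold) + len(test_fold) == len(samples)
```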
\"WxWindows-exception-3.1\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"bi0shacker001/Criminality"},"repo_url":{"kind":"string","value":"https://github.com/bi0shacker001/Criminality"},"snapshot_id":{"kind":"string","value":"8778ad1e48d655e0e43a8d05ec93a89298b8514a"},"revision_id":{"kind":"string","value":"85b8162989200a67e7157f66a6c22eac64f9cc7c"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-11T06:24:25.832669","string":"2016-09-11T06:24:25.832669"},"revision_date":{"kind":"timestamp","value":"2014-06-21T01:01:36","string":"2014-06-21T01:01:36"},"committer_date":{"kind":"timestamp","value":"2014-06-21T01:01:36","string":"2014-06-21T01:01:36"},"github_id":{"kind":"number","value":19465343,"string":"19,465,343"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# Written by Niels Zeilemaker\r\n# see LICENSE.txt for license information\r\n\r\nimport wx\r\nimport os\r\nimport sys\r\n\r\nfrom Tribler.Main.vwxGUI.widgets import _set_font, BetterText as StaticText,\\\r\n EditText\r\nfrom Tribler.Main.vwxGUI.GuiUtility import GUIUtility\r\nfrom Tribler.Core.TorrentDef import TorrentDef\r\nfrom Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT\r\nfrom threading import Event\r\nfrom Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue\r\nfrom Tribler.Main.vwxGUI import forceWxThread\r\nfrom traceback import print_exc\r\nfrom Tribler.community.channel.community import ChannelCommunity\r\n\r\nclass RemoveTorrent(wx.Dialog):\r\n def __init__(self, parent, torrent):\r\n canEdit = canComment = False\r\n if torrent.hasChannel():\r\n state, iamModerator = torrent.channel.getState()\r\n canEdit = state >= ChannelCommunity.CHANNEL_OPEN\r\n \r\n height = 125\r\n if canEdit:\r\n height = 200\r\n \r\n wx.Dialog.__init__(self, parent, -1, 'Are you sure you want to remove this torrent?', size=(600, height))\r\n hSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n hSizer.Add(wx.StaticBitmap(self, -1, wx.ArtProvider.GetBitmap(wx.ART_QUESTION, wx.ART_MESSAGE_BOX)), 0, wx.RIGHT, 10)\r\n \r\n vSizer = wx.BoxSizer(wx.VERTICAL)\r\n firstLine = StaticText(self, -1, \"Delete '%s' from disk, or just remove them from your downloads?\"%torrent.name)\r\n _set_font(firstLine, fontweight = wx.FONTWEIGHT_BOLD)\r\n firstLine.SetMinSize((1, -1))\r\n \r\n vSizer.Add(firstLine, 0, wx.EXPAND|wx.BOTTOM, 3)\r\n vSizer.Add(StaticText(self, -1, \"Removing from disk will move the selected item to your trash.\"), 0, wx.EXPAND)\r\n \r\n vSizer.AddStretchSpacer()\r\n \r\n self.newName = None\r\n if canEdit:\r\n vSizer.Add(StaticText(self, -1, \"While we're at it, can you improve the name of this torrent?\"), 0, wx.EXPAND|wx.BOTTOM, 3)\r\n self.newName = EditText(self, torrent.name)\r\n vSizer.Add(self.newName, 0, wx.EXPAND)\r\n vSizer.AddStretchSpacer()\r\n \r\n bSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n bSizer.AddStretchSpacer()\r\n \r\n bSizer.Add(wx.Button(self, wx.ID_CANCEL), 0, wx.RIGHT, 3)\r\n 
bSizer.Add(wx.Button(self, wx.ID_DEFAULT, 'Only delete from downloads'), 0, wx.RIGHT, 3)\r\n bSizer.Add(wx.Button(self, wx.ID_DELETE, 'Also delete from disk'))\r\n \r\n vSizer.Add(bSizer, 0, wx.ALIGN_RIGHT|wx.TOP, 7)\r\n hSizer.Add(vSizer, 1, wx.EXPAND)\r\n \r\n border = wx.BoxSizer()\r\n border.Add(hSizer, 1, wx.ALL|wx.EXPAND, 10)\r\n \r\n self.Bind(wx.EVT_BUTTON, lambda event: self.EndModal(event.GetId()))\r\n self.SetSizer(border)\r\n self.Layout()\r\n self.CenterOnParent()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":39991,"cells":{"__id__":{"kind":"number","value":9998683873262,"string":"9,998,683,873,262"},"blob_id":{"kind":"string","value":"f92b63b8700d532a4724ae21fa1afe6f2b66a1e1"},"directory_id":{"kind":"string","value":"3e8f7ba07f34e2e440f796e2c85e5b072f201aeb"},"path":{"kind":"string","value":"/recommender.py"},"content_id":{"kind":"string","value":"a68dad881b1d59dbd2686e987cf28f3d2adb59d1"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"kavinyao/SKBPR"},"repo_url":{"kind":"string","value":"https://github.com/kavinyao/SKBPR"},"snapshot_id":{"kind":"string","value":"57cc977c1abb2fccf47edef15cf999a6ecb5beb3"},"revision_id":{"kind":"string","value":"305aeb846ee89234d8eae3b73452c2fdad2496b4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T18:37:11.779651","string":"2021-01-01T18:37:11.779651"},"revision_date":{"kind":"timestamp","value":"2013-05-30T15:51:57","string":"2013-05-30T15:51:57"},"committer_date":{"kind":"timestamp","value":"2013-05-30T15:51:57","string":"2013-05-30T15:51:57"},"github_id":{"kind":"number","value":9961167,"string":"9,961,167"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nKeyword Recommenders.\n\"\"\"\n\nimport math\nimport config\nimport random\nfrom utils import timeit\nfrom collections import defaultdict\n\nclass NonStatisticalMixin(object):\n def reset(self):\n pass\n\n def round_statistics(self):\n pass\n\n def experiment_statistics(self):\n pass\n\n\nclass RandomRecommender(NonStatisticalMixin):\n def __init__(self, dbm, *ignored):\n \"\"\"\n @param dbm a DatabaseManager\n @param limit the (maximal) number of recommended products at a time\n \"\"\"\n self.limit = 0\n self.dbm = dbm\n self.all_products = []\n\n def __str__(self):\n return 'Random Recommender[N=%d]' % self.limit\n\n def set_limit(self, limit):\n self.limit = limit\n\n @timeit\n def preprocess(self, query_train_table):\n # retrieve all products at once as there aren't many (< 4000)\n query = '''SELECT DISTINCT pageinfo FROM visit\n WHERE pagetype = 'product' AND pageinfo != '' AND pageinfo != 'null' AND userid IN (\n 
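Because of the EVT_BUTTON binding above, ShowModal() returns the id of whichever button was pressed (wx.ID_CANCEL, wx.ID_DEFAULT, or wx.ID_DELETE), so a caller branches on that value. A sketch of the calling convention; the parent frame and torrent object are assumed to exist, and reading newName assumes Tribler's EditText exposes the usual wx GetValue():

```python
dlg = RemoveTorrent(parent_frame, torrent)   # parent_frame/torrent assumed to exist
button_id = dlg.ShowModal()
if button_id == wx.ID_DELETE:
    pass  # remove the download and send its files to the trash
elif button_id == wx.ID_DEFAULT:
    new_name = dlg.newName.GetValue() if dlg.newName else None  # GetValue assumed
    pass  # remove from downloads only; optionally rename using new_name
dlg.Destroy()
```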
SELECT user_id FROM %s\n )''' % query_train_table\n self.all_products = [(row['pageinfo'], 1.0) for row in self.dbm.get_rows(query)]\n\n def recommend(self, query):\n return random.sample(self.all_products, self.limit)\n\n\nclass HottestRecommender(NonStatisticalMixin):\n def __init__(self, dbm, *ignored):\n \"\"\"\n @param dbm a DatabaseManager\n @param limit the (maximal) number of recommended products at a time\n \"\"\"\n self.limit = 0\n self.dbm = dbm\n self.recommend_list = []\n\n def __str__(self):\n return 'Hottest Recommender[N=%d]' % self.limit\n\n def set_limit(self, limit):\n self.limit = limit\n\n @timeit\n def preprocess(self, query_train_table):\n for row in self.dbm.get_rows('''SELECT pageinfo, COUNT(id) count FROM visit\n WHERE pagetype = 'product' AND pageinfo != '' AND userid IN (\n SELECT user_id FROM %s\n ) GROUP BY pageinfo ORDER BY count DESC LIMIT %d''' % (query_train_table, self.limit)):\n self.recommend_list.append((row['pageinfo'], row['count']))\n #print self.recommend_list\n\n def recommend(self, query):\n return self.recommend_list\n\n\nclass KeywordRecommender(object):\n def __init__(self, dbm, ws, rm):\n \"\"\"\n Make sure to source rec_tables.sql before using this class.\n @param dbm a DatabaseManager\n @param ws a WordSegmenter\n @param rm a RelevanceMeasure\n \"\"\"\n self.limit = 0\n self.dbm = dbm\n self.ws = ws\n self.rm = rm\n self.reset()\n\n def reset(self):\n self._related_product_cache = {}\n self._not_enough_recs = 0\n self._round_results = []\n\n def set_limit(self, limit):\n self.limit = limit\n\n def __str__(self):\n return 'Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)\n\n def preprocess(self, query_train_table):\n self.query_train_table = query_train_table\n # empty cache so that cache from last round does not interfere with next round\n self._related_product_cache = {}\n self._not_enough_recs = 0\n\n self.dbm.begin()\n self.dbm.query('TRUNCATE TABLE keyword');\n self.dbm.query('TRUNCATE TABLE keyword_query');\n self.dbm.query('TRUNCATE TABLE keyword_product_weight');\n\n # these methods can be overridden by sub-classes\n self._build_keyword_product_mapping()\n self._build_product_keyword_mapping()\n self._measure_relevance()\n\n self.dbm.commit()\n\n @timeit\n def _build_keyword_product_mapping(self):\n self.keyword_count = defaultdict(int)\n self.keyword_product_count = defaultdict(lambda: defaultdict(int))\n for qrow in self.dbm.get_rows('SELECT id, query FROM %s' % self.query_train_table):\n # GROUP_CONCAT returns a comma-separeted string\n products = [(qprow['product_name'], qprow['sequences']) for qprow in self.dbm.get_rows('SELECT product_name, GROUP_CONCAT(sequence) AS sequences FROM query_product WHERE query_id = %s GROUP BY product_name', (qrow['id'],))]\n\n # remove duplicate\n keywords = set(self.ws.segment(qrow['query']))\n for kw in keywords:\n self.keyword_count[kw] += 1\n # store keyword-query relations in db\n self.dbm.insert('INSERT INTO keyword_query (keyword, query_id) VALUES (%s, %s)', (kw, qrow['id']))\n\n for p, sequences in products:\n # get product sequence in this session\n count = self.get_browse_count(sequences)\n # update keyword_product_count\n for kw in keywords:\n self.keyword_product_count[kw][p] += count\n\n def get_browse_count(self, sequences):\n \"\"\"Overrideable by sub-class.\n Multiple browses in a session always count 1.\"\"\"\n return 1\n\n @timeit\n def _build_product_keyword_mapping(self):\n # construct product_keyword_count\n # it's actually equivalent to keyword_product_count, but 
can let compute\n # related_product_count faster\n self.product_keyword_count = defaultdict(dict)\n for kw, dt in self.keyword_product_count.iteritems():\n for p, c in dt.iteritems():\n self.product_keyword_count[p][kw] = c\n\n @timeit\n def _measure_relevance(self):\n # calculate keyword-product relevance\n all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product')\n for keyword, count in self.keyword_count.iteritems():\n # will be used for statistics\n self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count))\n related_product_number = len(self.keyword_product_count[keyword].keys())\n related_product_count = sum(self.keyword_product_count[keyword].values())\n\n for product, count in self.keyword_product_count[keyword].iteritems():\n related_keyword_number = len(self.product_keyword_count[product].keys())\n related_keyword_count = sum(self.product_keyword_count[product].values())\n\n # delegate to sub-classes\n relevance = self.rm.get_relevance(count, (related_product_number, related_product_count), (related_keyword_number, related_keyword_count), all_product_number)\n self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance))\n\n def round_statistics(self):\n \"\"\"Get number of query, keywords, products, keyword-product relations of current round.\"\"\"\n n_query = self.dbm.get_value(\"SELECT COUNT(*) FROM %s\" % self.query_train_table)\n n_keyword = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword\")\n n_product = self.dbm.get_value(\"SELECT COUNT(DISTINCT product) FROM keyword_product_weight\")\n n_relation = self.dbm.get_value(\"SELECT COUNT(*) FROM keyword_product_weight\")\n\n self._round_results.append((n_query, self._not_enough_recs, n_keyword, n_product, n_relation))\n\n if config.verbose:\n print 'Round statistics: query: %d (not enough %d), keyword: %d, product: %d, relation: %d, A/M: %.2f%%' % (n_query, self._not_enough_recs, n_keyword, n_product, n_relation, 100.0*n_relation / (n_keyword*n_product))\n\n def experiment_statistics(self):\n # stands for: query, not-enough, keyword, product, relation, a/m\n sums = [0, 0, 0, 0, 0, 0]\n for data in self._round_results:\n for i in range(5):\n sums[i] += data[i]\n sums[5] += 100.0*data[4]/(data[2]*data[3])\n n = float(len(self._round_results))\n n_query, not_enough_recs, n_keyword, n_product, n_relation, am = [s/n for s in sums]\n print 'Experiment statistics:\\nquery: %.2f (not enough %.2f), keyword: %.2f, product: %.2f, relation: %.2f, A/M: %.2f%%' % (n_query, not_enough_recs, n_keyword, n_product, n_relation, am)\n\n def recommend(self, query):\n keywords = self.ws.segment(query)\n product_weight = defaultdict(float)\n # gather product weights\n for kw in keywords:\n for product, weight in self.__fetch_related_products(kw):\n product_weight[product] += weight\n\n # convert dict to list for sorting\n product_weight_list = [item for item in product_weight.iteritems()]\n product_weight_list.sort(key=lambda t: t[1], reverse=True)\n\n if len(product_weight_list) < self.limit:\n self._not_enough_recs += 1\n\n return product_weight_list[:self.limit]\n\n def __fetch_related_products(self, keyword):\n if not self._related_product_cache.has_key(keyword):\n self._related_product_cache[keyword] = [(row['product'], row['weight']) for row in self.dbm.get_rows('SELECT product, weight FROM keyword_product_weight WHERE keyword = %s', (keyword,))]\n return self._related_product_cache[keyword]\n\n\nclass 
KeywordRecommenderHottestFallback(KeywordRecommender):\n \"\"\"A recommender which uses KeywordRecommender's recommendations first,\n but turns to HottestRecommender if its recommendations are not enough.\"\"\"\n\n def __init__(self, *args):\n \"\"\"Identical to that of KeywordRecommender\"\"\"\n super(KeywordRecommenderHottestFallback, self).__init__(*args)\n self.hottest_recommender = HottestRecommender(*args)\n\n def __str__(self):\n return 'Keyword Recommender with Hottest Recommender fallback with %s[N=%d]' % (self.rm, self.limit)\n\n def set_limit(self, limit):\n self.hottest_recommender.set_limit(limit)\n super(KeywordRecommenderHottestFallback, self).set_limit(limit)\n\n def preprocess(self, query_train_table):\n super(KeywordRecommenderHottestFallback, self).preprocess(query_train_table)\n self.hottest_recommender.preprocess(query_train_table)\n\n def recommend(self, query):\n recommendations = super(KeywordRecommenderHottestFallback, self).recommend(query)\n num_rec = len(recommendations)\n if num_rec == self.limit:\n return recommendations\n\n # ask HottestRecommender for more\n # note that create list in order not to break HottestRecommender.recommend_list\n hot_recommendations = self.hottest_recommender.recommend(query)[:self.limit-num_rec]\n\n # ensure hot_recommendations's weight is no greater than any from keyword recommendations\n max_hot_rec_weight = hot_recommendations[0][1]\n min_key_rec_weight = recommendations[-1][1] if num_rec > 0 else max_hot_rec_weight\n recommendations.extend((t[0], 1.0*min_key_rec_weight*t[1]/max_hot_rec_weight) for t in hot_recommendations)\n\n return recommendations\n\n\nfrom operator import mul\ndef product(numbers):\n return reduce(mul, numbers)\n\nclass LinearSequenceKeywordRecommender(KeywordRecommender):\n \"\"\"A tentative method using heuristic information of sequence distribution.\"\"\"\n\n def _heuristic_weight(self, sequence):\n #return -math.log(abs(sequence-26)+1, 2)/8.0 + 1.125\n return -math.log(abs(sequence-3)+1, 2)/8.0 + 1.125\n\n def get_browse_count(self, sequences):\n seqs = sequences.split(',')\n #return sum(self._heuristic_weight(int(seq)) for seq in seqs)\n #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * math.log(len(seqs))\n return product(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs)\n #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs)\n\n def __str__(self):\n return 'Linear Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)\n\n\nclass WeightedSequenceRelevanceMixin(object):\n @timeit\n def _measure_relevance(self):\n # calculate keyword-product relevance\n all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product')\n for keyword, count in self.keyword_count.iteritems():\n self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count))\n related_product_number = len(self.keyword_product_count[keyword].keys())\n related_product_count = sum(self.keyword_product_count[keyword].values())\n\n for product, count in self.keyword_product_count[keyword].iteritems():\n related_keyword_number = len(self.product_keyword_count[product].keys())\n related_keyword_count = sum(self.product_keyword_count[product].values())\n # get average sequence from database\n # TODO: very inefficient, get a group all average sequences for a keyword at once\n #avg_sequence = self.dbm.get_value('select avg(sequence) from query_product where product_name = %s AND query_id in (select query_id from keyword_query where 
keyword = %s)', (product, keyword))\n avg_sequence = 1\n\n relevance = self.rm.get_relevance(count, (related_product_number, related_product_count), (related_keyword_number, related_keyword_count), all_product_number, avg_sequence)\n\n # sub-class can override sequence_weight\n # relevance *= self.sequence_weight(avg_sequence)\n self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance))\n\n def sequence_weight(self, avg_sequence):\n return 1\n\n\n# ensure WSRM._measure_relevance will be called by putting it before KeywordRecommender\n# ref: http://python-history.blogspot.com/2010/06/method-resolution-order.html\nclass SequenceKeywordRecommender(WeightedSequenceRelevanceMixin, LinearSequenceKeywordRecommender):\n \"\"\"This recommender weights browse count by distribution of sequence.\"\"\"\n\n @timeit\n def preprocess(self, query_train_table):\n # first, get sequence distribution\n max_occurrence = self.dbm.get_value('SELECT MAX(c) FROM (SELECT sequence, COUNT(sequence) c FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence) T' % query_train_table)\n self.sequence_dist = {row['sequence']: float(row['ratio']) for row in self.dbm.get_rows('SELECT sequence, COUNT(sequence)/%d ratio FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence' % (max_occurrence,query_train_table))}\n self.pivot_seq = max(self.sequence_dist.iteritems(), key=lambda t:t[1])[0]\n\n # then, call KeywordRecommender's preprocess\n super(SequenceKeywordRecommender, self).preprocess(query_train_table)\n\n def _heuristic_weight(self, sequence):\n weight = self.sequence_dist[sequence]\n if self.pivot_seq-sequence < 0:\n return weight\n return 1 + weight\n\n def __str__(self):\n return 'Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit)\n\nclass RelevanceMeasure(object):\n \"\"\"Defines the RelevanceMeasure interface.\"\"\"\n\n def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):\n \"\"\"\n @param count number of times the keyword visits the product\n @param related_product_info the tuple (related_product_number, related_product_count)\n @param related_keyword_info the tuple (related_keyword_number, related_keyword_count)\n @param all_product_number number of all products\n \"\"\"\n raise NotImplementedError\n\n\nclass BCMeasure(RelevanceMeasure):\n def get_relevance(self, count, *ignored):\n # BC: raw browse count\n return count\n\n def __str__(self):\n return 'BC'\n\n\nclass BCIPFMeasure(RelevanceMeasure):\n def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):\n # BC-IPF: browse count weighted by inverse product frequency, i.e. count * log(all_products / products_related_to_this_keyword)\n ipf = math.log(1.0 * all_product_number / related_product_info[0])\n return count * ipf\n\n def __str__(self):\n return 'BC-IPF'\n\n\nclass BFIPFMeasure(RelevanceMeasure):\n def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args):\n # BF-IPF: browse frequency (count normalized by the product's total keyword browses) times the same inverse product frequency as BC-IPF\n bf = 1.0 * count / related_keyword_info[1]\n ipf = math.log(1.0 * all_product_number / related_product_info[0])\n return bf * ipf\n\n def __str__(self):\n return 'BF-IPF'\n\n\nif __name__ == '__main__':\n import config\n from database import DatabaseManager\n from word_segment import SpaceWordSegmenter\n dbm = DatabaseManager(config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME)\n try:\n word_segmenter = SpaceWordSegmenter()\n rmeasure = BCIPFMeasure()\n # KeywordRecommender takes (dbm, ws, rm); the limit is set separately via set_limit()\n recommender = KeywordRecommender(dbm, word_segmenter, rmeasure)\n recommender.set_limit(10)\n recommender.preprocess('query_train')\n finally:\n 
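# always release the database connection, even if preprocessing fails\n 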
dbm.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39992,"cells":{"__id__":{"kind":"number","value":2302102501858,"string":"2,302,102,501,858"},"blob_id":{"kind":"string","value":"e1736db4ea74d81da3a493c4a91cdb9c41759ac6"},"directory_id":{"kind":"string","value":"1e04894995fcee6e296cf45909c224f75db7676f"},"path":{"kind":"string","value":"/custom_comments/models.py"},"content_id":{"kind":"string","value":"50f90c6a8d9f8ff6badf6db37d571ca03a8385fc"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"minrivertea/chinanews"},"repo_url":{"kind":"string","value":"https://github.com/minrivertea/chinanews"},"snapshot_id":{"kind":"string","value":"bdb8b61dc1ebc61149ab22bf65e09db49421d716"},"revision_id":{"kind":"string","value":"f70d7cb769c3d4541b98eb14491c273cec77045b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-16T14:52:09.292931","string":"2016-09-16T14:52:09.292931"},"revision_date":{"kind":"timestamp","value":"2011-12-12T10:44:56","string":"2011-12-12T10:44:56"},"committer_date":{"kind":"timestamp","value":"2011-12-12T10:44:56","string":"2011-12-12T10:44:56"},"github_id":{"kind":"number","value":2842670,"string":"2,842,670"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.db import models\nfrom django.contrib.comments.models import Comment\nfrom users.models import Person\n\nclass CommentWithVote(Comment):\n votes = models.PositiveIntegerField(default=\"0\")\n voters = models.ManyToManyField(Person)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39993,"cells":{"__id__":{"kind":"number","value":9594956959187,"string":"9,594,956,959,187"},"blob_id":{"kind":"string","value":"5aa274f0688ef1b16558046be8f26bc8f547bdff"},"directory_id":{"kind":"string","value":"04830630f4042d47ec243b8c78c8a194c45cb5c8"},"path":{"kind":"string","value":"/mysqlsub/utils.py"},"content_id":{"kind":"string","value":"c51497d5b28630124d339d05b663d8eae9d6c8ba"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"yelu/mysqlsub"},"repo_url":{"kind":"string","value":"https://github.com/yelu/mysqlsub"},"snapshot_id":{"kind":"string","value":"b13f7a9c1c29d0dfc210bd1890229d09e6ef2268"},"revision_id":{"kind":"string","value":"16697b698be9bcd9e56c6069b026542333dd5b28"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T11:11:35.375739","string":"2021-01-20T11:11:35.375739"},"revision_date":{"kind":"timestamp","value":"2013-03-24T13:31:02","string":"2013-03-24T13:31:02"},"committer_date":{"kind":"timestamp","value":"2013-03-24T13:31:02","string":"2013-03-24T13:31:02"},"github_id":{"kind":"number","value":7915225,"string":"7,915,225"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2020-07-24T05:59:59","string":"2020-07-24T05:59:59"},"gha_created_at":{"kind":"timestamp","value":"2013-01-30T13:39:06","string":"2013-01-30T13:39:06"},"gha_updated_at":{"kind":"timestamp","value":"2013-12-09T13:49:12","string":"2013-12-09T13:49:12"},"gha_pushed_at":{"kind":"timestamp","value":"2013-03-24T13:31:13","string":"2013-03-24T13:31:13"},"gha_size":{"kind":"number","value":208,"string":"208"},"gha_stargazers_count":{"kind":"number","value":2,"string":"2"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"bool","value":false,"string":"false"},"gha_disabled":{"kind":"bool","value":false,"string":"false"},"content":{"kind":"string","value":"'''\r\nCreated on 2013-3-1\r\n\r\n@author: yelu01\r\n'''\r\n\r\nfrom mysql.connector.utils import *\r\nimport datetime\r\nimport decimal\r\n\r\ndef read_signed_int(buf, size, big_endian = True):\r\n \"\"\"Read a big endian integer values based on byte number\r\n \r\n Returns a tuple (truncated buffer, int)\r\n '''''\r\n \"\"\"\r\n if big_endian:\r\n endian = '>'\r\n else:\r\n endian = '<'\r\n if size == 1:\r\n res = struct.unpack(endian+'b', buf)[0]\r\n return (buf[1:], res)\r\n elif size == 2:\r\n res = struct.unpack(endian+'h', buf)[0]\r\n return (buf[2:], res)\r\n elif size == 3:\r\n a, b, c = struct.unpack(\"BBB\", buf)\r\n #TODO:may be wrong.\r\n if a & 128:\r\n res = a + (b << 8) + (c << 16)\r\n else:\r\n res = (a + (b << 8) + (c << 16)) * -1\r\n return (buf[3:], res)\r\n elif size == 4:\r\n res = struct.unpack(endian+'i', buf)[0]\r\n return (buf[4:], res)\r\n elif size == 8:\r\n res = struct.unpack(endian+'q', buf)[0]\r\n return (buf[8:], res)\r\n else:\r\n raise\r\n\r\n\r\ndef read_unsigned_int(buf, size, big_endian = False):\r\n \"\"\"Read a little endian integer values based on byte number\r\n \r\n Returns a tuple (truncated buffer, int)\r\n \"\"\"\r\n if big_endian:\r\n endian = '>'\r\n else:\r\n endian = '<'\r\n if size == 1:\r\n res = struct.unpack(endian+'B', buf)[0]\r\n return (buf[1:], res)\r\n elif size == 2:\r\n res = struct.unpack(endian+'H', buf)[0]\r\n return (buf[2:], res)\r\n elif size == 3:\r\n a, b, c = struct.unpack(\"BBB\", buf)\r\n res = a + (b << 8) + (c << 16)\r\n return (buf[3:], res)\r\n elif size == 4:\r\n res = struct.unpack(endian+'I', buf)[0]\r\n return (buf[4:], res)\r\n elif size == 8:\r\n res = 
struct.unpack(endian+'Q', buf)[0]\r\n return (buf[8:], res)\r\n else:\r\n raise ValueError('unsupported size: %d' % size)\r\n\r\ndef read_float(buf):\r\n res = struct.unpack(\"<f\", buf)[0]\r\n return (buf[4:], res)\r\n\r\ndef read_date(head):\r\n # NB: reconstructed; assumes MySQL's 3-byte DATE packing (5-bit day, 4-bit month, year in the high bits), consistent with the month/day masks below\r\n head, time = read_unsigned_int(head, 3)\r\n date = datetime.date(\r\n year = (time & ((1 << 15) - 1) << 9) >> 9,\r\n month = (time & ((1 << 4) - 1) << 5) >> 5,\r\n day = (time & ((1 << 5) - 1))\r\n )\r\n return (head, date)\r\n\r\ndef read_new_decimal(head, precision, decimals):\r\n '''Read MySQL's new decimal format introduced in MySQL 5'''\r\n \r\n # https://github.com/jeremycole/mysql_binlog/blob/master/lib/mysql_binlog/binlog_field_parser.rb\r\n\r\n digits_per_integer = 9\r\n compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]\r\n integral = (precision - decimals)\r\n uncomp_integral = int(integral / digits_per_integer)\r\n uncomp_fractional = int(decimals / digits_per_integer)\r\n comp_integral = integral - (uncomp_integral * digits_per_integer)\r\n comp_fractional = decimals - (uncomp_fractional * digits_per_integer)\r\n\r\n # Support negative\r\n # The sign is encoded in the high bit of the first byte\r\n # But this bit can also be used in the value\r\n value = struct.unpack('B', head[0:1])[0]\r\n res, mask = [\"\", 0] if (value & 0x80 != 0) else [\"-\", -1]\r\n # the buffer is immutable, so rebuild it with the sign bit flipped\r\n head = struct.pack('B', value ^ 0x80) + head[1:]\r\n\r\n size = compressed_bytes[comp_integral]\r\n\r\n if size > 0:\r\n head, value = read_unsigned_int(head, size, True)\r\n value = value ^ mask\r\n res += str(value)\r\n\r\n for i in range(0, uncomp_integral):\r\n head, value = read_signed_int(head, 4, True)\r\n value = value ^ mask\r\n res += str(value)\r\n\r\n res += \".\"\r\n\r\n for i in range(0, uncomp_fractional):\r\n head, value = read_signed_int(head, 4, True)\r\n value = value ^ mask\r\n res += str(value)\r\n\r\n size = compressed_bytes[comp_fractional]\r\n if size > 0:\r\n head, value = read_signed_int(head, size, True)\r\n value = value ^ mask\r\n res += str(value)\r\n\r\n return decimal.Decimal(res)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39994,"cells":{"__id__":{"kind":"number","value":10565619574107,"string":"10,565,619,574,107"},"blob_id":{"kind":"string","value":"60463f787f344fe460f55d89eeb31e531aef667c"},"directory_id":{"kind":"string","value":"646849715e3042a1e9b718b7ab2f2fdfe3560140"},"path":{"kind":"string","value":"/Code/pyuppaal/tests/ulp/test_basic.py"},"content_id":{"kind":"string","value":"55bb48b25de6ab2f2fdfe3560140c51497d5b286"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"jlandersen/sw10"},"repo_url":{"kind":"string","value":"https://github.com/jlandersen/sw10"},"snapshot_id":{"kind":"string","value":"45af85426b06ebdcc03a03a06e16c729eea13feb"},"revision_id":{"kind":"string","value":"8624f40dbc62304b01b190ba79a6c0e818e9d1e2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-31T01:46:01.478127","string":"2020-05-31T01:46:01.478127"},"revision_date":{"kind":"timestamp","value":"2013-06-25T19:39:26","string":"2013-06-25T19:39:26"},"committer_date":{"kind":"timestamp","value":"2013-06-25T19:39:26","string":"2013-06-25T19:39:26"},"github_id":{"kind":"number","value":8028829,"string":"8,028,829"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2013-06-12T14:10:15","string":"2013-06-12T14:10:15"},"gha_created_at":{"kind":"timestamp","value":"2013-02-05T12:44:03","string":"2013-02-05T12:44:03"},"gha_updated_at":{"kind":"timestamp","value":"2013-06-12T14:10:14","string":"2013-06-12T14:10:14"},"gha_pushed_at":{"kind":"timestamp","value":"2013-06-12T14:10:14","string":"2013-06-12T14:10:14"},"gha_size":{"kind":"number","value":5412,"string":"5,412"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"string","value":"Java"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\nimport sys\nimport os\nimport unittest\nfrom pyuppaal.ulp import lexer, parser, expressionParser, node\n\nclass TestBasicParsing(unittest.TestCase):\n\n def test_parse_declarations(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_simple_declarations.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n #pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n self.assertEqual(declvisitor.variables, [('a', 'TypeInt', [], 0), ('b', 'TypeBool', [], False), ('b1', 'TypeBool', [], False), ('b2', 'TypeBool', [], False)])\n\n self.assertEqual(len(declvisitor.clocks), 1)\n self.assertEqual(declvisitor.clocks[0][0], 'c')\n\n self.assertEqual(declvisitor.channels, [('d', [])])\n self.assertEqual(declvisitor.urgent_channels, [('e', [])])\n self.assertEqual(declvisitor.broadcast_channels, [('f', [])])\n self.assertEqual(declvisitor.urgent_broadcast_channels, [('g', [])])\n\n def test_parse_declarations2(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_simple_declarations2.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n self.assertEqual(res[7].type, 'VarDecl')\n self.assertEqual(res[7].leaf.type, 'TypeInt')\n self.assertEqual(res[7].children[0].type, 'Identifier')\n self.assertEqual(res[7].children[0].leaf, 'lalala')\n self.assertEqual(res[7].children[0].children[0].type, 'Assignment')\n self.assertEqual(res[7].children[0].children[0].leaf.type, 'Identifier')\n self.assertEqual(res[7].children[0].children[0].leaf.leaf, 'lalala')\n\n\n self.assertEqual(res[12].type, 'VarDecl')\n 
self.assertEqual(res[12].leaf.type, 'TypeBool')\n self.assertEqual(res[12].children[0].type, 'Identifier')\n self.assertEqual(res[12].children[0].leaf, 'msg')\n self.assertEqual(res[12].children[0].children[0].type, 'Index')\n self.assertEqual(res[12].children[0].children[1].type, 'Index')\n\n\n self.assertEqual(declvisitor.variables[0], ('L', 'TypeInt', [], 0))\n\n #self.assertEqual(declvisitor.variables[1], ('lalala', 'int', [], _))\n self.assertEqual(declvisitor.variables[1][0], 'lalala')\n self.assertEqual(declvisitor.variables[1][1], 'TypeInt')\n self.assertEqual(declvisitor.variables[1][2], [])\n self.assertEqual(declvisitor.variables[1][3].type, 'Expression')\n self.assertEqual(declvisitor.variables[1][3].children[0].type, 'Number')\n self.assertEqual(declvisitor.variables[1][3].children[0].leaf, 3)\n\n self.assertEqual(declvisitor.variables[3][0], 'lock')\n self.assertEqual(declvisitor.variables[3][1], 'TypeBool')\n self.assertEqual(declvisitor.variables[3][2], [])\n self.assertEqual(declvisitor.variables[3][3].type, 'Expression')\n self.assertEqual(declvisitor.variables[3][3].children[0].type, 'False')\n\n self.assertEqual(declvisitor.variables[4][0], 'lock2')\n self.assertEqual(declvisitor.variables[4][1], 'TypeBool')\n self.assertEqual(declvisitor.variables[4][2], [])\n self.assertEqual(declvisitor.variables[4][3].children[0].type, 'True')\n\n self.assertEqual(declvisitor.clocks, [('time', 10), ('y1', 10), ('y2', 10), ('y3', 10), ('y4', 10)])\n self.assertEqual(declvisitor.channels, [('take', []), ('release', [])])\n\n \n\n\n def test_parse_empty_query(self):\n lex = lexer.lexer\n pars = parser.Parser(\"\", lex)\n\n self.assertEqual(len(pars.AST.children), 0)\n\n def test_parse_array(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_array.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 7) #TODO add more asserts\n res = pars.AST.children\n #pars.AST.visit()\n self.assertEqual(res[0].children[0].children[0].type, \"Index\") \n self.assertEqual(res[1].children[0].children[0].type, \"Index\") \n self.assertEqual(res[2].children[0].children[0].type, \"Index\") \n self.assertEqual(res[3].children[0].children[0].type, \"Index\") \n self.assertEqual(res[4].children[0].children[0].type, \"Index\") \n self.assertEqual(res[6].children[0].children[0].type, \"Index\") \n self.assertEqual(res[6].children[0].children[1].type, \"Index\") \n\n #mchro 07-04-2011: don't allow empty brackets, it's not a valid expression\n #myParser = testParser(lexer.lexer)\n #res = myParser.parse(\"a[]\")\n #self.assertEqual(res.type, \"Identifier\") \n #self.assertEqual(len(res.children), 0)\n\n def test_struct(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_struct.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 1) #TODO add more asserts\n \n def test_parse_typedef_simple(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_typedef_simple.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n pars.AST.visit()\n\n\n self.assertEqual(len(pars.AST.children), 4)\n self.assertEqual(pars.AST.type, \"RootNode\")\n self.assertEqual(pars.AST.children[0].type, \"NodeTypedef\") \n self.assertEqual(pars.AST.children[0].leaf, \"id_t\") \n self.assertEqual(pars.AST.children[0].children[0].type, \"TypeInt\")\n self.assertEqual(pars.AST.children[1].type, \"NodeTypedef\") \n 
self.assertEqual(pars.AST.children[1].leaf, \"id_t\") \n self.assertEqual(pars.AST.children[1].children[0].type, \"TypeInt\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].leaf, 0)\n self.assertEqual(pars.AST.children[1].children[0].children[1].type, \"Expression\")\n self.assertEqual(pars.AST.children[1].children[0].children[1].children[0].leaf, 4)\n self.assertEqual(pars.AST.children[2].type, \"NodeTypedef\") \n self.assertEqual(pars.AST.children[2].leaf, \"id_t\") \n self.assertEqual(pars.AST.children[2].children[0].type, \"TypeInt\")\n self.assertEqual(pars.AST.children[2].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[0].children[1].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[0].children[1].children[0].leaf, 4)\n self.assertEqual(pars.AST.children[2].type, \"NodeTypedef\") \n self.assertEqual(pars.AST.children[2].leaf, \"id_t\") \n self.assertEqual(pars.AST.children[2].children[0].type, \"TypeInt\")\n self.assertEqual(pars.AST.children[2].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[0].children[1].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[0].children[1].children[0].leaf, 4)\n\n self.assertEqual(len(pars.typedefDict), 1)\n self.assertTrue('id_t' in pars.typedefDict)\n\n\n def test_parse_typedef(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_typedef.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n pars.AST.visit()\n self.assertEqual(len(pars.AST.children), 8)\n\n self.assertEqual(len(pars.typedefDict), 4)\n self.assertTrue('myStructType' in pars.typedefDict)\n self.assertTrue('adr' in pars.typedefDict)\n self.assertTrue('DBMClock' in pars.typedefDict)\n self.assertTrue('clock' in pars.typedefDict)\n\n ctype = pars.typedefDict['clock']\n self.assertEqual(ctype.type, 'NodeTypedef')\n self.assertEqual(ctype.leaf, 'clock')\n self.assertEqual(len(ctype.children), 1)\n self.assertEqual(ctype.children[0], pars.typedefDict['DBMClock'])\n\n declvisitor = parser.DeclVisitor(pars)\n #XXX parses to deeply into structs!\n self.assertEqual(len(declvisitor.variables), 5)\n \n pars.AST.visit()\n print declvisitor.variables\n varnames = [x for (x, _, _, _) in declvisitor.variables]\n self.assertTrue('m' in varnames)\n self.assertTrue(('m', 'myStructType', [], None) in declvisitor.variables)\n self.assertTrue('n' in varnames)\n self.assertTrue(('n', 'adr', [], None) in declvisitor.variables)\n self.assertTrue('n2' in varnames)\n\n for (x, _, _, initval) in declvisitor.variables:\n if x == \"n2\":\n self.assertEqual(initval.type, \"Expression\")\n self.assertEqual(initval.children[0].type, \"Number\")\n self.assertEqual(initval.children[0].leaf, 3)\n\n self.assertTrue('c' in varnames)\n self.assertTrue(('c', 'DBMClock', [], None) in declvisitor.variables)\n #XXX parses to deeply into structs!\n #self.assertFalse('a' in varnames)\n\n def test_parse_brackets(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_brackets.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n\n def test_comments(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_comments.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(pars.AST.type, \"RootNode\")\n 
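# comments in the source should be discarded by the lexer, so only the declaration and the function reach the AST\n 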
self.assertEqual(pars.AST.children[0].type, \"VarDecl\") \n self.assertEqual(pars.AST.children[1].type, \"Function\")\n self.assertEqual(pars.AST.children[1].children[0].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].type, \"Divide\")\n self.assertEqual(len(pars.AST.children), 2) \n\n def test_operators(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_operators.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(pars.AST.type, \"RootNode\")\n self.assertEqual(pars.AST.children[0].type, \"VarDecl\") \n self.assertEqual(pars.AST.children[1].type, \"Function\")\n self.assertEqual(pars.AST.children[1].children[0].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[1].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[1].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[1].children[1].children[0].children[0].type, \"Minus\")\n self.assertEqual(pars.AST.children[1].children[2].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[2].children[0].children[0].type, \"Times\")\n self.assertEqual(pars.AST.children[1].children[3].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[3].children[0].children[0].type, \"Divide\")\n self.assertEqual(pars.AST.children[1].children[4].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[4].children[0].children[0].type, \"UnaryMinus\")\n self.assertEqual(pars.AST.children[1].children[5].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[5].children[0].children[0].type, \"Minus\")\n self.assertEqual(pars.AST.children[1].children[5].children[0].children[0].children[0].type, \"UnaryMinus\")\n self.assertEqual(pars.AST.children[1].children[6].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[6].children[0].children[0].type, \"Minus\")\n self.assertEqual(pars.AST.children[1].children[6].children[0].children[0].children[0].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[1].children[7].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[7].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[7].children[0].children[0].children[0].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[1].children[8].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[8].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[8].children[0].children[0].children[0].type, \"PlusPlusPre\")\n self.assertEqual(pars.AST.children[1].children[9].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].children[0].type, \"PlusPlusPre\")\n self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].children[1].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[1].children[10].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].children[0].type, 
\"PlusPlusPost\")\n self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].children[1].type, \"PlusPlusPre\")\n self.assertEqual(pars.AST.children[1].children[11].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[11].children[0].children[0].type, \"Minus\")\n self.assertEqual(pars.AST.children[1].children[11].children[0].children[0].children[0].type, \"MinusMinusPost\")\n self.assertEqual(pars.AST.children[1].children[12].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].type, \"Minus\")\n self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].children[0].type, \"MinusMinusPost\")\n self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].children[1].type, \"MinusMinusPre\")\n self.assertEqual(pars.AST.children[1].children[13].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[13].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[13].children[0].children[0].children[0].type, \"MinusMinusPost\")\n self.assertEqual(pars.AST.children[1].children[14].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[14].children[0].children[0].type, \"Plus\")\n self.assertEqual(pars.AST.children[1].children[14].children[0].children[0].children[0].type, \"MinusMinusPre\")\n self.assertEqual(pars.AST.children[1].children[15].type, \"Assignment\")\n self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].type, \"Modulo\")\n self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[0].type, \"Identifier\")\n self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[0].leaf, \"a\")\n self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[1].type, \"Identifier\")\n self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[1].leaf, \"a\")\n\n #TODO add more operators pars.AST.visit() \n self.assertEqual(len(pars.AST.children), 2) \n\n def test_parse_assignments(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_assignments.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(pars.AST.type, \"RootNode\")\n self.assertEqual(pars.AST.children[0].type, \"VarDecl\") \n self.assertEqual(pars.AST.children[1].type, \"VarDecl\") \n self.assertEqual(pars.AST.children[2].type, \"Function\")\n self.assertEqual(pars.AST.children[2].children[0].type, \"Assignment\")\n self.assertEqual(pars.AST.children[2].children[0].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[0].children[0].children[0].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[2].children[1].type, \"Assignment\")\n self.assertEqual(pars.AST.children[2].children[1].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[1].children[0].children[0].type, \"PlusPlusPre\")\n self.assertEqual(pars.AST.children[2].children[2].type, \"Assignment\")\n self.assertEqual(pars.AST.children[2].children[2].children[0].type, \"Expression\")\n self.assertEqual(pars.AST.children[2].children[2].children[0].children[0].type, \"MinusMinusPre\")\n self.assertEqual(pars.AST.children[2].children[3].type, \"Assignment\")\n self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].type, \"Times\")\n self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].children[0].type, \"PlusPlusPre\")\n 
self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].children[1].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[2].children[4].type, \"Assignment\")\n self.assertEqual(pars.AST.children[2].children[4].children[0].children[0].type, \"Times\")\n self.assertEqual(pars.AST.children[2].children[4].children[0].children[0].children[0].type, \"Times\")\n self.assertEqual(pars.AST.children[2].children[4].children[0]. \\\n children[0].children[0].children[0].type, \"PlusPlusPre\")\n self.assertEqual(pars.AST.children[2].children[4].children[0]. \\\n children[0].children[0].children[1].type, \"PlusPlusPost\")\n self.assertEqual(pars.AST.children[2].children[4].children[0]. \\\n children[0].children[0].children[1].type, \"PlusPlusPost\")\n self.assertEqual(len(pars.AST.children), 3) \n\n def test_parse_for_loop(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_for_loop.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 1) #TODO add more asserts\n\n def test_parse_while_loop(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_while_loop.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 1) #TODO add more asserts\n\n def test_parse_do_while_loop(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_do_while_loop.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 1) #TODO add more asserts\n\n def test_parse_simple_function(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_simple_function.txt'), \"r\")\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n self.assertEqual(len(pars.AST.children), 3) #TODO add more asserts\n\n def test_parse_expression(self):\n parser = testParser(lexer.lexer)\n\n res = parser.parse(\"\")\n #should not fail\n self.assertFalse(res)\n\n res = parser.parse(\" \")\n #should not fail\n self.assertFalse(res)\n\n res = parser.parse(\"5\")\n self.assertEqual(res.type, \"Number\")\n self.assertEqual(res.leaf, 5)\n \n res = parser.parse(\"5 > 5\")\n self.assertEqual(res.type, \"Greater\") \n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 5)\n\n res = parser.parse(\"5 != 5\")\n #res.visit()\n self.assertEqual(res.type, \"NotEqual\") \n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 5)\n\n res = parser.parse(\"!True\")\n self.assertEqual(res.type, \"UnaryNot\")\n self.assertEqual(res.children[0].type, 'True')\n \n res = parser.parse(\"5 && 4\")\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 4)\n\n res = parser.parse(\"5 and 4\")\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 4)\n\n res = parser.parse(\"!(5 && 4)\")\n self.assertEqual(res.type, \"UnaryNot\")\n self.assertEqual(res.children[0].type, \"And\")\n 
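# the parenthesised conjunction must stay a single And subtree under the UnaryNot node\n 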
self.assertEqual(res.children[0].children[0].type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf, 5)\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 4)\n\n res = parser.parse(\"not (5 && 4)\")\n self.assertEqual(res.type, \"UnaryNot\")\n self.assertEqual(res.children[0].type, \"And\")\n self.assertEqual(res.children[0].children[0].type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf, 5)\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 4)\n\n res = parser.parse(\"5 || 4\")\n self.assertEqual(res.type, \"Or\")\n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 4)\n\n res = parser.parse(\"5 or 4\")\n self.assertEqual(res.type, \"Or\")\n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 5)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 4)\n \n res = parser.parse(\"5 < 5 and 4 > 3\")\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"Less\")\n self.assertEqual(res.children[0].children[0].type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf, 5)\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 5)\n \n res = parser.parse(\"3 * 2 + 4\")\n self.assertEqual(res.type, \"Plus\")\n self.assertEqual(res.children[0].type, \"Times\")\n self.assertEqual(res.children[0].children[0].type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf, 3)\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 2)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 4)\n\n res = parser.parse(\"Viking1.safe and Viking2.safe\") #TODO add struct support\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"Viking1\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, \"safe\")\n self.assertEqual(res.children[1].type, \"Identifier\")\n self.assertEqual(res.children[1].leaf, \"Viking2\")\n self.assertEqual(res.children[1].children[0].type, \"Identifier\")\n self.assertEqual(res.children[1].children[0].leaf, \"safe\")\n\n res = parser.parse(\n \"Viking1.safe and Viking2.safe and Viking3.safe and Viking4.safe\")\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"And\")\n self.assertEqual(res.children[1].type, \"Identifier\")\n self.assertEqual(res.children[1].leaf, \"Viking4\")\n self.assertEqual(res.children[1].children[0].type, \"Identifier\")\n self.assertEqual(res.children[1].children[0].leaf, \"safe\")\n\n self.assertEqual(res.children[0].children[0].type, \"And\")\n self.assertEqual(res.children[0].children[1].type, \"Identifier\")\n self.assertEqual(res.children[0].children[1].leaf, \"Viking3\")\n self.assertEqual(res.children[0].children[1].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[1].children[0].leaf, \"safe\")\n\n self.assertEqual(res.children[0].children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].children[0].leaf, \"Viking1\")\n 
self.assertEqual(res.children[0].children[0].children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].children[0].children[0].leaf, \"safe\")\n self.assertEqual(res.children[0].children[0].children[1].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].children[1].leaf, \"Viking2\")\n self.assertEqual(res.children[0].children[0].children[1].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].children[1].children[0].leaf, \"safe\")\n\n res = parser.parse(\"N - 1\")\n self.assertEqual(res.type, \"Minus\") \n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, 'N')\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 1)\n\n res = parser.parse(\"f() == 2\")\n self.assertEqual(res.type, \"Equal\") \n self.assertEqual(res.children[0].type, \"FunctionCall\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, \"f\")\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 2)\n\n res = parser.parse(\"dbm.isEmpty()\")\n self.assertEqual(res.type, \"FunctionCall\") \n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"dbm\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, \"isEmpty\")\n\n def test_parse_expression2(self):\n parser = testParser(lexer.lexer)\n res = parser.parse(\"(N - 0 - 1)\")\n self.assertEqual(res.type, \"Minus\")\n self.assertEqual(res.children[0].type, \"Minus\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, 'N')\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 1)\n\n res = parser.parse(\"-42\")\n self.assertEqual(res.type, \"UnaryMinus\")\n self.assertEqual(res.children[0].type, \"Number\")\n self.assertEqual(res.children[0].leaf, 42)\n\n res = parser.parse(\"-(42+1)\")\n self.assertEqual(res.type, \"UnaryMinus\")\n self.assertEqual(res.children[0].type, \"Plus\")\n self.assertEqual(res.children[0].children[0].type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf, 42)\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 1)\n\n res = parser.parse(\"N- 0- 1\")\n self.assertEqual(res.type, \"Minus\")\n self.assertEqual(res.children[0].type, \"Minus\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, 'N')\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 1)\n\n\n res = parser.parse(\"N-0-1\")\n self.assertEqual(res.type, \"Minus\")\n self.assertEqual(res.children[0].type, \"Minus\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, 'N')\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 1)\n\n res = 
parser.parse(\"(x == 5 && y == 4)\")\n self.assertEqual(res.type, \"And\")\n self.assertEqual(res.children[0].type, \"Equal\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, 'x')\n self.assertEqual(res.children[0].children[1].type, \"Number\")\n self.assertEqual(res.children[0].children[1].leaf, 5)\n self.assertEqual(res.children[1].children[0].type, \"Identifier\")\n self.assertEqual(res.children[1].children[0].leaf, 'y')\n self.assertEqual(res.children[1].children[1].type, \"Number\")\n self.assertEqual(res.children[1].children[1].leaf, 4)\n\n res = parser.parse(\"True\")\n self.assertEqual(res.type, \"True\")\n\n res = parser.parse(\"true\")\n res.visit()\n self.assertEqual(res.type, \"True\")\n\n res = parser.parse(\"x[0][1] == True\")\n self.assertEqual(res.type, \"Equal\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, 'x')\n self.assertEqual(res.children[0].children[0].type, \"Index\")\n self.assertEqual(res.children[0].children[0].leaf.type, 'Number')\n self.assertEqual(res.children[0].children[0].leaf.leaf, 0)\n self.assertEqual(res.children[0].children[1].type, \"Index\")\n self.assertEqual(res.children[0].children[1].leaf.type, 'Number')\n self.assertEqual(res.children[0].children[1].leaf.leaf, 1)\n self.assertEqual(res.children[1].type, \"True\")\n\n res = parser.parse(\"msg[ 0 ][ N - 0 - 1 ] == True\")\n self.assertEqual(res.type, \"Equal\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, 'msg')\n self.assertEqual(res.children[0].children[0].type, \"Index\")\n self.assertEqual(res.children[0].children[0].leaf.type, 'Number')\n self.assertEqual(res.children[0].children[0].leaf.leaf, 0)\n self.assertEqual(res.children[0].children[1].type, \"Index\")\n index2 = res.children[0].children[1].leaf\n self.assertEqual(index2.type, 'Minus')\n self.assertEqual(index2.children[0].type, 'Minus')\n self.assertEqual(index2.children[0].children[0].type, 'Identifier')\n self.assertEqual(index2.children[0].children[0].leaf, 'N')\n self.assertEqual(index2.children[0].children[1].type, 'Number')\n self.assertEqual(index2.children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].type, \"True\")\n\n\n def test_parse_expression3(self):\n parser = testParser(lexer.lexer)\n\n res = parser.parse(\"(x == true) && (0 > N-0-1)\")\n self.assertEqual(res.type, 'And')\n self.assertEqual(len(res.children), 2)\n self.assertEqual(res.children[0].type, 'Equal')\n self.assertEqual(res.children[0].children[0].type, 'Identifier')\n self.assertEqual(res.children[0].children[0].leaf, 'x')\n self.assertEqual(res.children[0].children[1].type, 'True')\n self.assertEqual(res.children[1].type, 'Greater')\n self.assertEqual(res.children[1].children[0].type, 'Number')\n self.assertEqual(res.children[1].children[0].leaf, 0)\n self.assertEqual(res.children[1].children[1].type, 'Minus')\n self.assertEqual(res.children[1].children[1].children[0].type, 'Minus')\n self.assertEqual(res.children[1].children[1].children[0].children[0].type, 'Identifier')\n self.assertEqual(res.children[1].children[1].children[0].children[0].leaf, 'N')\n self.assertEqual(res.children[1].children[1].children[0].children[1].type, 'Number')\n self.assertEqual(res.children[1].children[1].children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].children[1].children[1].type, 'Number')\n self.assertEqual(res.children[1].children[1].children[1].leaf, 1)\n\n res = 
parser.parse(\"x == true && (0 > N-0-1)\")\n self.assertEqual(res.type, 'And')\n self.assertEqual(len(res.children), 2)\n self.assertEqual(res.children[0].type, 'Equal')\n self.assertEqual(res.children[0].children[0].type, 'Identifier')\n self.assertEqual(res.children[0].children[0].leaf, 'x')\n self.assertEqual(res.children[0].children[1].type, 'True')\n self.assertEqual(res.children[1].type, 'Greater')\n self.assertEqual(res.children[1].children[0].type, 'Number')\n self.assertEqual(res.children[1].children[0].leaf, 0)\n self.assertEqual(res.children[1].children[1].type, 'Minus')\n self.assertEqual(res.children[1].children[1].children[0].type, 'Minus')\n self.assertEqual(res.children[1].children[1].children[0].children[0].type, 'Identifier')\n self.assertEqual(res.children[1].children[1].children[0].children[0].leaf, 'N')\n self.assertEqual(res.children[1].children[1].children[0].children[1].type, 'Number')\n self.assertEqual(res.children[1].children[1].children[0].children[1].leaf, 0)\n self.assertEqual(res.children[1].children[1].children[1].type, 'Number')\n self.assertEqual(res.children[1].children[1].children[1].leaf, 1)\n\n def test_parse_expression4(self):\n parser = testParser(lexer.lexer)\n\n res = parser.parse(\"x' == 0\")\n res.visit()\n self.assertEqual(res.type, 'Equal')\n self.assertEqual(res.children[0].type, 'ClockRate')\n self.assertEqual(res.children[0].leaf, 'x')\n self.assertEqual(res.children[1].type, 'Number')\n self.assertEqual(res.children[1].leaf, 0)\n\n res = parser.parse(\"y >= 5 && x' == 0\")\n res.visit()\n self.assertEqual(res.type, 'And')\n self.assertEqual(len(res.children), 2)\n self.assertEqual(res.children[0].type, 'GreaterEqual')\n self.assertEqual(res.children[0].children[0].type, 'Identifier')\n self.assertEqual(res.children[0].children[0].leaf, 'y')\n self.assertEqual(res.children[0].children[1].type, 'Number')\n self.assertEqual(res.children[0].children[1].leaf, 5)\n\n self.assertEqual(res.children[1].type, 'Equal')\n self.assertEqual(res.children[1].children[0].type, 'ClockRate')\n self.assertEqual(res.children[1].children[0].leaf, 'x')\n self.assertEqual(res.children[1].children[1].type, 'Number')\n self.assertEqual(res.children[1].children[1].leaf, 0)\n\n def test_parse_func_with_params(self):\n parser = testParser(lexer.lexer)\n\n res = parser.parse(\"ishit(4)\")\n self.assertEqual(res.type, \"FunctionCall\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"ishit\")\n #parameters\n self.assertEqual(len(res.leaf), 1)\n self.assertEqual(res.leaf[0].type, \"Number\")\n self.assertEqual(res.leaf[0].leaf, 4)\n\n res = parser.parse(\"cache.ishit(4)\")\n self.assertEqual(res.type, \"FunctionCall\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"cache\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, \"ishit\")\n #parameters\n self.assertEqual(len(res.leaf), 1)\n self.assertEqual(res.leaf[0].type, \"Number\")\n self.assertEqual(res.leaf[0].leaf, 4)\n\n\n res = parser.parse(\"cache.ishit(acc)\")\n self.assertEqual(res.type, \"FunctionCall\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"cache\")\n self.assertEqual(res.children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].leaf, \"ishit\")\n #parameters\n self.assertEqual(len(res.leaf), 1)\n self.assertEqual(res.leaf[0].type, 
\"Identifier\")\n self.assertEqual(res.leaf[0].leaf, \"acc\")\n\n res = parser.parse(\"ishit(4, 5, x, True, a.b.c)\")\n res.visit()\n self.assertEqual(res.type, \"FunctionCall\")\n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, \"ishit\")\n #parameters\n self.assertEqual(len(res.leaf), 5)\n self.assertEqual(res.leaf[0].type, \"Number\")\n self.assertEqual(res.leaf[0].leaf, 4)\n self.assertEqual(res.leaf[1].type, \"Number\")\n self.assertEqual(res.leaf[1].leaf, 5)\n self.assertEqual(res.leaf[2].type, \"Identifier\")\n self.assertEqual(res.leaf[2].leaf, \"x\")\n self.assertEqual(res.leaf[3].type, \"True\")\n self.assertEqual(res.leaf[4].type, \"Identifier\")\n self.assertEqual(res.leaf[4].leaf, \"a\")\n self.assertEqual(res.leaf[4].children[0].type, \"Identifier\")\n self.assertEqual(res.leaf[4].children[0].leaf, \"b\")\n self.assertEqual(res.leaf[4].children[0].children[0].type, \"Identifier\")\n self.assertEqual(res.leaf[4].children[0].children[0].leaf, \"c\")\n\n def test_parse_array_index_expression(self):\n parser = testParser(lexer.lexer)\n res = parser.parse(\"a[1] == 2\")\n #parser = testParser(lexer.lexer)\n #res = pars.parse()\n #res.visit()\n self.assertEqual(res.type, \"Equal\") \n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].children[0].type, \"Index\")\n self.assertEqual(res.children[0].children[0].leaf.type, \"Number\")\n self.assertEqual(res.children[0].children[0].leaf.leaf, 1)\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 2)\n\n res = parser.parse(\"N-1\")\n self.assertEqual(res.type, \"Minus\") \n self.assertEqual(res.children[0].type, \"Identifier\")\n self.assertEqual(res.children[0].leaf, 'N')\n self.assertEqual(res.children[1].type, \"Number\")\n self.assertEqual(res.children[1].leaf, 1)\n\n def test_parse_extern(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_extern.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n #pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n def test_parse_extern2(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_extern2.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n self.assertTrue('TestExternalLattice' in pars.externList)\n\n self.assertEqual(declvisitor.get_type('mylat'), 'TestExternalLattice')\n\n def test_parse_extern3(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_extern3.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n self.assertTrue('WideningIntRange' in pars.externList)\n\n self.assertEqual(declvisitor.get_type('x'), 'WideningIntRange')\n\n wideningIntRangeTypeNode = pars.typedefDict['WideningIntRange']\n\n print \"typedefdict:\"\n wideningIntRangeTypeNode.visit()\n\n self.assertEqual(wideningIntRangeTypeNode.leaf.type, \"Identifier\")\n self.assertEqual(wideningIntRangeTypeNode.leaf.leaf, \"WideningIntRange\")\n \n self.assertEqual(len(wideningIntRangeTypeNode.children), 1)\n self.assertEqual(wideningIntRangeTypeNode.children[0].type, 'FunctionCall')\n parameters = wideningIntRangeTypeNode.children[0].leaf\n self.assertEqual(len(parameters), 4)\n self.assertEqual(parameters[0].leaf, 1)\n 
self.assertEqual(parameters[1].leaf, 2)\n self.assertEqual(parameters[2].leaf, 3)\n self.assertEqual(parameters[3].leaf, 9)\n\n #self.assertTrue(False)\n\n\n\n def test_parse_extern_dbm(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_extern_dbm.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n #pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n #print declvisitor.variables\n\n self.assertEqual(len(declvisitor.variables), 5)\n\n self.assertEqual(declvisitor.variables[0], ('dbm', 'DBMFederation', [], None))\n self.assertEqual(declvisitor.variables[1], ('dbm.x', 'DBMClock', [], None))\n self.assertEqual(declvisitor.variables[2], ('dbm.c', 'DBMClock', [], None))\n self.assertEqual(declvisitor.variables[3][0], 'dbm.y') #('dbm.y', 'DBMClock', [10])\n self.assertEqual(declvisitor.variables[3][1], 'DBMClock')\n self.assertEqual(len(declvisitor.variables[3][2]), 1)\n self.assertEqual(declvisitor.variables[3][2][0].children[0].leaf, 10)\n self.assertEqual(declvisitor.variables[4][0], 'dbm.z') #('dbm.z', 'DBMClock', [10, 20])\n self.assertEqual(declvisitor.variables[4][1], 'DBMClock')\n self.assertEqual(len(declvisitor.variables[4][2]), 2)\n self.assertEqual(declvisitor.variables[4][2][0].children[0].leaf, 10)\n self.assertEqual(declvisitor.variables[4][2][1].children[0].leaf, 20)\n\n def test_parse_constants(self):\n test_file = open(os.path.join(os.path.dirname(__file__), 'test_parse_constants.txt'), \"r\")\n\n lex = lexer.lexer\n pars = parser.Parser(test_file.read(), lex)\n res = pars.AST.children\n\n pars.AST.visit()\n\n declvisitor = parser.DeclVisitor(pars)\n\n inorder = [\"a\", \"b\", \"c\", \"d\"]\n #should return the constants in file order\n self.assertEqual(declvisitor.constants.keys(), inorder)\n\n\n\n#TODO clean this up a bit\nclass myToken:\n type = None\n\n def __init__(self, type):\n self.type = type\n \nclass testParser:\n currentToken = None\n lex = None\n\n def __init__(self, lexer):\n self.lex = lexer\n\n def parse(self, str):\n self.lex.input(str)\n self.currentToken = self.lex.token()\n exParser = expressionParser.ExpressionParser(self.lex, self)\n return exParser.parse()\n\n def parseNumber(self):\n n = node.Node('Number', [], self.currentToken.value)\n self.accept('NUMBER')\n return n\n\n def parseIdentifierComplex(self):\n n = node.Node('Identifier', [], self.currentToken.value)\n self.accept('IDENTIFIER')\n \n p = n\n while self.currentToken.type == 'DOT':\n self.accept('DOT')\n element = node.Node('Identifier', [], self.currentToken.value)\n self.accept('IDENTIFIER')\n p.children = [element]\n p = element\n\n return n\n\n def accept(self, expectedTokenType):\n if self.currentToken.type == expectedTokenType:\n self.currentToken = self.lex.token()\n if self.currentToken == None:\n t = myToken('UNKNOWN')\n self.currentToken = t\n else:\n self.error('at token %s on line %d: Expected %s but was %s' % (self.currentToken.value, self.currentToken.lineno, expectedTokenType, self.currentToken.type))\n\n def error(self, msg):\n raise Exception(\"Parser error\" + msg)\n \nif __name__ == '__main__':\n 
unittest.main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39995,"cells":{"__id__":{"kind":"number","value":3959959863001,"string":"3,959,959,863,001"},"blob_id":{"kind":"string","value":"647044170106f0a9c3f6d9261fb50edb471e0d64"},"directory_id":{"kind":"string","value":"7b9e0f3d6be02ff0f2be23e9944441b4ca68751a"},"path":{"kind":"string","value":"/ass4/project/utils.py"},"content_id":{"kind":"string","value":"03be7a7f6bed01d284c0cf2369e4678430544ed6"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"darora/cs3245"},"repo_url":{"kind":"string","value":"https://github.com/darora/cs3245"},"snapshot_id":{"kind":"string","value":"a8a0bc4feba4b820ae114c853b4f0f4ad848e8ab"},"revision_id":{"kind":"string","value":"cd8428de243baf0a987821c73f5d304be59e2697"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-15T03:04:23.286813","string":"2020-05-15T03:04:23.286813"},"revision_date":{"kind":"timestamp","value":"2013-03-29T15:43:55","string":"2013-03-29T15:43:55"},"committer_date":{"kind":"timestamp","value":"2013-03-29T15:43:55","string":"2013-03-29T15:43:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from contextlib import contextmanager\n\n__author__ = 'darora'\n\n@contextmanager\ndef ignored(*exceptions):\n try:\n yield\n except exceptions:\n pass"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39996,"cells":{"__id__":{"kind":"number","value":11390253279109,"string":"11,390,253,279,109"},"blob_id":{"kind":"string","value":"fe65cd8f3b93f98ea1b0e5274dc20803bd8bef14"},"directory_id":{"kind":"string","value":"12bcd942e8523212132cbae470f0d6cdf192664b"},"path":{"kind":"string","value":"/dbhelper.py"},"content_id":{"kind":"string","value":"2eac9e82e805a8520c65cf71e418b68ca40d39e1"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"icbmike/magicTracker"},"repo_url":{"kind":"string","value":"https://github.com/icbmike/magicTracker"},"snapshot_id":{"kind":"string","value":"db69bde44e263926e3b20ffc5f47b74d53c1bb29"},"revision_id":{"kind":"string","value":"53ec5013f5f8984f19e89f4f3e63d8763ddfd185"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-03T17:11:48.652845","string":"2020-05-03T17:11:48.652845"},"revision_date":{"kind":"timestamp","value":"2013-06-30T05:11:25","string":"2013-06-30T05:11:25"},"committer_date":{"kind":"timestamp","value":"2013-06-30T05:11:25","string":"2013-06-30T05:11:25"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sqlite3\n\ninstance = None\n\ndef get_DBHelper_instance():\n\n\tclass DBHelper(object):\n\t\t\n\t\tdef __init__(self):\n\t\t\t#open the connections\n\t\t\tconn = sqlite3.connect('magic.db')\n\n\t\t\t#create the tables if they dont exist\n\t\t\tcursor = conn.cursor()\n\t\t\tcursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS decks\n\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\tname TEXT,\n\t\t\t\t\t\t\t\tversion INTEGER,\n\t\t\t\t\t\t\t\tcolor TEXT,\n\t\t\t\t\t\t\t\tcreator TEXT\n\t\t\t\t\t\t\t PRIMARY KEY (name, version));\"\"\")\n\n\t\t\tcursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS games\n\t\t\t\t(\n\t\t\t\t\tdateTime DATETIME PRIMARY KEY,\n\t\t\t\t\twinningPlayer TEXT,\n\t\t\t\t\twinningDeckName TEXT,\n\t\t\t\t\twinningDeckVersion INTEGER,\n\t\t\t\t\twinningMulligans INTEGER,\n\t\t\t\t\tlosingPlayer TEXT,\n\t\t\t\t\tlosingDeckName TEXT,\n\t\t\t\t\tlosingDeckVesrion INTEGER,\n\t\t\t\t\tlosingMulligans INTEGER\n\t\t\t\t\t\n\t\t\t\t);\"\"\")\n\t\t\tconn.commit()\n\t\t\tconn.close()\n\t\t\n\t\tdef addDeck(self, name, version, color, creator):\n\t\t\t#open the connections\n\t\t\twith sqlite3.connect('magic.db') as conn:\n\t\t\t\tcursor = conn.cursor()\n\t\t\t\tcursor.execute(\"\"\"INSERT INTO decks (\n\t\t\t\t\t\t\t\tname, version, color, creator, wins, losses\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\tVALUES (\n\t\t\t\t\t\t\t\t?, ?, ?, ?, 0, 0\n\t\t\t\t\t\t\t\t);\"\"\", (name, version, color, creator))\n\n\t\t\t\tconn.commit()\n\n\tglobal instance\n\tif instance is None:\n\t\tinstance = DBHelper()\n\treturn 
instance"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":39997,"cells":{"__id__":{"kind":"number","value":1726576862282,"string":"1,726,576,862,282"},"blob_id":{"kind":"string","value":"f4f4e838de265a12b14adad8ee4fd531e1c780ce"},"directory_id":{"kind":"string","value":"a744c3a78f6b625c4052162132351966d878d54e"},"path":{"kind":"string","value":"/pybo.wsgi"},"content_id":{"kind":"string","value":"2b4a8c7b3c14030fb4aec254abf2b717f9e6227e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"lksh/pybo"},"repo_url":{"kind":"string","value":"https://github.com/lksh/pybo"},"snapshot_id":{"kind":"string","value":"58eb17d6e4fb0d3fe69a29e2f700935667f87881"},"revision_id":{"kind":"string","value":"a9f9e3054bbdfe19fc68d86c26be6f33392b2fa1"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-03-30T07:06:23.522052","string":"2016-03-30T07:06:23.522052"},"revision_date":{"kind":"timestamp","value":"2012-06-08T10:44:17","string":"2012-06-08T10:44:17"},"committer_date":{"kind":"timestamp","value":"2012-06-08T10:44:17","string":"2012-06-08T10:44:17"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os\nimport sys\npath = '/var/www'\nif path not in sys.path:\n sys.path.append(path)\n\nsys.path.append('/var/www/pybo')\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pybo.settings'\n\nimport django.core.handlers.wsgi\napplication = django.core.handlers.wsgi.WSGIHandler()"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":39998,"cells":{"__id__":{"kind":"number","value":8985071621829,"string":"8,985,071,621,829"},"blob_id":{"kind":"string","value":"de486564463a83fcfc71edccc9df3c20d6f91cb1"},"directory_id":{"kind":"string","value":"a911232b8fc7592c25128877408741519bfeda74"},"path":{"kind":"string","value":"/engine/storage.py"},"content_id":{"kind":"string","value":"22e5e3295294441d2a2eea0faae0619d0a1f105d"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"lotem/rime.py"},"repo_url":{"kind":"string","value":"https://github.com/lotem/rime.py"},"snapshot_id":{"kind":"string","value":"355c09fee0f4649432f17f1cea52ca04c6498a57"},"revision_id":{"kind":"string","value":"4f6c532f3311d9494e74b905dee6872068c23449"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-22T07:39:14.089224","string":"2021-01-22T07:39:14.089224"},"revision_date":{"kind":"timestamp","value":"2011-06-18T07:34:36","string":"2011-06-18T07:34:36"},"committer_date":{"kind":"timestamp","value":"2011-06-18T07:34:36","string":"2011-06-18T07:34:36"},"github_id":{"kind":"number","value":367290,"string":"367,290"},"star_events_count":{"kind":"number","value":5,"string":"5"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# vim:set et sts=4 sw=4:\n\nimport os\nimport sqlite3\nimport sys\nimport time\n\n\ndef debug(*what):\n print >> sys.stderr, u'[DEBUG]: ', u' '.join(map(unicode, what))\n\n\n# sql for global tables\n\nINIT_ZIME_DB_SQL = \"\"\"\nCREATE TABLE IF NOT EXISTS setting_paths (\n id INTEGER PRIMARY KEY,\n path TEXT UNIQUE\n);\n\nCREATE TABLE IF NOT EXISTS setting_values (\n path_id INTEGER,\n value TEXT\n);\n\nCREATE TABLE IF NOT EXISTS phrases (\n id INTEGER PRIMARY KEY,\n phrase TEXT UNIQUE\n);\n\"\"\"\n\nQUERY_SETTING_SQL = \"\"\"\nSELECT value FROM setting_values WHERE path_id IN (SELECT id FROM setting_paths WHERE path = :path);\n\"\"\"\n\nQUERY_SETTING_ITEMS_SQL = \"\"\"\nSELECT path, value FROM setting_paths, setting_values WHERE path LIKE :pattern AND id = path_id;\n\"\"\"\n\nQUERY_SETTING_PATH_SQL = \"\"\"\nSELECT id FROM setting_paths WHERE path = :path;\n\"\"\"\n\nADD_SETTING_PATH_SQL = \"\"\"\nINSERT INTO setting_paths VALUES (NULL, :path);\n\"\"\"\n\nADD_SETTING_VALUE_SQL = \"\"\"\nINSERT INTO setting_values VALUES (:path_id, :value);\n\"\"\"\n\nUPDATE_SETTING_VALUE_SQL = \"\"\"\nUPDATE setting_values SET value = :value WHERE path_id == :path_id;\n\"\"\"\n\nCLEAR_SETTING_VALUE_SQL = \"\"\"\nDELETE FROM setting_values \nWHERE path_id IN (SELECT id FROM setting_paths WHERE path LIKE :path);\n\"\"\"\nCLEAR_SETTING_PATH_SQL = \"\"\"\nDELETE FROM setting_paths WHERE path LIKE :path;\n\"\"\"\n\nQUERY_SCHEMA_LIST_SQL = \"\"\"\nSELECT substr(path, length('SchemaList/') + 1), value FROM setting_paths p \nLEFT JOIN setting_values v ON p.id = v.path_id \nWHERE path LIKE 'SchemaList/%';\n\"\"\"\n\nQUERY_DICT_PREFIX_SQL = \"\"\"\nSELECT substr(path, 1, length(path) - length('/Dict')), value \nFROM setting_paths p LEFT JOIN setting_values v ON p.id = v.path_id \nWHERE path LIKE '%/Dict';\n\"\"\"\n\nQUERY_PHRASE_SQL = \"\"\"\nSELECT id FROM phrases WHERE phrase = :phrase;\n\"\"\"\n\nADD_PHRASE_SQL = \"\"\"\nINSERT INTO phrases VALUES (NULL, :phrase);\n\"\"\"\n\n# dict specific sql\n\nCREATE_DICT_SQL = \"\"\"\nCREATE TABLE IF NOT EXISTS %(prefix)s_keywords (\n keyword TEXT\n);\n\nCREATE TABLE IF NOT EXISTS 
%(prefix)s_keys (\n id INTEGER PRIMARY KEY,\n ikey TEXT UNIQUE\n);\n\nCREATE TABLE IF NOT EXISTS %(prefix)s_stats (\n sfreq INTEGER,\n ufreq INTEGER\n);\nINSERT INTO %(prefix)s_stats VALUES (0, 0);\n\nCREATE TABLE IF NOT EXISTS %(prefix)s_unigram (\n id INTEGER PRIMARY KEY,\n p_id INTEGER,\n okey TEXT,\n sfreq INTEGER,\n ufreq INTEGER\n);\nCREATE UNIQUE INDEX IF NOT EXISTS %(prefix)s_entry_idx\nON %(prefix)s_unigram (p_id, okey);\n\nCREATE TABLE IF NOT EXISTS %(prefix)s_ku (\n k_id INTEGER,\n u_id INTEGER,\n PRIMARY KEY (k_id, u_id)\n);\n\nCREATE TABLE IF NOT EXISTS %(prefix)s_bigram (\n e1 INTEGER,\n e2 INTEGER,\n bfreq INTEGER,\n PRIMARY KEY (e1, e2)\n);\n\nCREATE TABLE IF NOT EXISTS %(prefix)s_kb (\n k_id INTEGER,\n b_id INTEGER,\n PRIMARY KEY (k_id, b_id)\n);\n\"\"\"\n\nDROP_DICT_SQL = \"\"\"\nDROP TABLE IF EXISTS %(prefix)s_keywords;\nDROP TABLE IF EXISTS %(prefix)s_keys;\nDROP TABLE IF EXISTS %(prefix)s_stats;\nDROP INDEX IF EXISTS %(prefix)s_entry_idx;\nDROP TABLE IF EXISTS %(prefix)s_unigram;\nDROP TABLE IF EXISTS %(prefix)s_ku;\nDROP TABLE IF EXISTS %(prefix)s_bigram;\nDROP TABLE IF EXISTS %(prefix)s_kb;\n\"\"\"\n\nLIST_KEYWORDS_SQL = \"\"\"\nSELECT keyword FROM %(prefix)s_keywords;\n\"\"\"\n\nQUERY_KEY_SQL = \"\"\"\nSELECT id FROM %(prefix)s_keys WHERE ikey = :ikey;\n\"\"\"\n\nADD_KEY_SQL = \"\"\"\nINSERT INTO %(prefix)s_keys VALUES (NULL, :ikey);\n\"\"\"\n\nQUERY_STATS_SQL = \"\"\"\nSELECT sfreq + ufreq AS freq, ufreq FROM %(prefix)s_stats;\n\"\"\"\n\nUPDATE_SFREQ_TOTAL_SQL = \"\"\"\nUPDATE %(prefix)s_stats SET ufreq = ufreq + :n;\n\"\"\"\n\nUPDATE_UFREQ_TOTAL_SQL = \"\"\"\nUPDATE %(prefix)s_stats SET sfreq = sfreq + :n;\n\"\"\"\n\nQUERY_UNIGRAM_SQL = \"\"\"\nSELECT phrase, okey, u.id, sfreq + ufreq AS freq, ufreq \nFROM %(prefix)s_unigram u, %(prefix)s_ku ku, %(prefix)s_keys k, phrases p \nWHERE ikey = :ikey AND k.id = k_id AND u_id = u.id AND p_id = p.id\nORDER BY freq DESC;\n\"\"\"\n\nUNIGRAM_EXIST_SQL = \"\"\"\nSELECT id FROM %(prefix)s_unigram WHERE p_id = :p_id AND okey = :okey;\n\"\"\"\n\nADD_UNIGRAM_SQL = \"\"\"\nINSERT INTO %(prefix)s_unigram VALUES (NULL, :p_id, :okey, :freq, 0);\n\"\"\"\n\nINC_SFREQ_SQL = \"\"\"\nUPDATE %(prefix)s_unigram SET sfreq = sfreq + :freq WHERE id = :id;\n\"\"\"\n\nINC_UFREQ_SQL = \"\"\"\nUPDATE %(prefix)s_unigram SET ufreq = ufreq + :freq WHERE id = :id;\n\"\"\"\n\nQUERY_BIGRAM_SQL = \"\"\"\nSELECT e1, e2, bfreq AS freq FROM %(prefix)s_bigram b , %(prefix)s_kb kb, %(prefix)s_keys k\nWHERE ikey = :ikey AND k.id = k_id AND b_id = b.rowid\nORDER BY freq;\n\"\"\"\n\nQUERY_BIGRAM_BY_ENTRY_SQL = \"\"\"\nSELECT e2, bfreq FROM %(prefix)s_bigram WHERE e1 = :e1;\n\"\"\"\n\nBIGRAM_EXIST_SQL = \"\"\"\nSELECT rowid FROM %(prefix)s_bigram WHERE e1 = :e1 AND e2 = :e2;\n\"\"\"\n\nADD_BIGRAM_SQL = \"\"\"\nINSERT INTO %(prefix)s_bigram VALUES (:e1, :e2, 1);\n\"\"\"\n\nINC_BFREQ_SQL = \"\"\"\nUPDATE %(prefix)s_bigram SET bfreq = bfreq + :freq WHERE e1 = :e1 AND e2 = :e2;\n\"\"\"\n\nQUERY_KB_SQL = \"\"\"\nSELECT rowid FROM %(prefix)s_kb WHERE k_id = :k_id AND b_id = :b_id;\n\"\"\"\n\nADD_KB_SQL = \"\"\"\nINSERT INTO %(prefix)s_kb VALUES (:k_id, :b_id);\n\"\"\"\n\nADD_KEYWORD_SQL = \"\"\"\nINSERT INTO %(prefix)s_keywords VALUES (:keyword);\n\"\"\"\n\nADD_KU_SQL = \"\"\"\nINSERT INTO %(prefix)s_ku VALUES (:k_id, :u_id);\n\"\"\"\n\nQUERY_USER_FREQ_SQL = \"\"\"\nSELECT phrase, ufreq, okey\nFROM %(prefix)s_unigram u LEFT JOIN phrases p ON p_id = p.id\nWHERE ufreq > 0\n\"\"\"\n\nQUERY_USER_GRAM_SQL = \"\"\"\nSELECT p1.phrase, p2.phrase, bfreq, u1.okey, u2.okey\nFROM 
%(prefix)s_bigram b, \n %(prefix)s_unigram u1 LEFT JOIN phrases p1 ON u1.p_id = p1.id,\n %(prefix)s_unigram u2 LEFT JOIN phrases p2 ON u2.p_id = p2.id\nWHERE e1 = u1.id AND e2 = u2.id AND bfreq > 0\n\"\"\"\n\nUPDATE_USER_FREQ_SQL = \"\"\"\nUPDATE OR IGNORE %(prefix)s_unigram SET ufreq = ufreq + :freq\nWHERE p_id IN (SELECT id FROM phrases WHERE phrase = :phrase) AND okey = :okey;\n\"\"\"\n\ndef _generate_dict_specific_sql(db, prefix_args):\n db._create_dict_sql = CREATE_DICT_SQL % prefix_args\n db._drop_dict_sql = DROP_DICT_SQL % prefix_args\n db._list_keywords_sql = LIST_KEYWORDS_SQL % prefix_args\n db._add_keyword_sql = ADD_KEYWORD_SQL % prefix_args\n db._query_key_sql = QUERY_KEY_SQL % prefix_args \n db._add_key_sql = ADD_KEY_SQL % prefix_args\n db._query_stats_sql = QUERY_STATS_SQL % prefix_args\n db._update_ufreq_total_sql = UPDATE_UFREQ_TOTAL_SQL % prefix_args\n db._update_sfreq_total_sql = UPDATE_SFREQ_TOTAL_SQL % prefix_args\n db._query_unigram_sql = QUERY_UNIGRAM_SQL % prefix_args\n db._unigram_exist_sql = UNIGRAM_EXIST_SQL % prefix_args\n db._add_unigram_sql = ADD_UNIGRAM_SQL % prefix_args\n db._inc_sfreq_sql = INC_SFREQ_SQL % prefix_args\n db._inc_ufreq_sql = INC_UFREQ_SQL % prefix_args\n db._add_ku_sql = ADD_KU_SQL % prefix_args\n db._query_bigram_sql = QUERY_BIGRAM_SQL % prefix_args\n db._query_bigram_by_entry_sql = QUERY_BIGRAM_BY_ENTRY_SQL % prefix_args\n db._bigram_exist_sql = BIGRAM_EXIST_SQL % prefix_args\n db._add_bigram_sql = ADD_BIGRAM_SQL % prefix_args\n db._inc_bfreq_sql = INC_BFREQ_SQL % prefix_args\n db._query_kb_sql = QUERY_KB_SQL % prefix_args\n db._add_kb_sql = ADD_KB_SQL % prefix_args\n db._query_user_freq_sql = QUERY_USER_FREQ_SQL % prefix_args\n db._query_user_gram_sql = QUERY_USER_GRAM_SQL % prefix_args\n db._update_user_freq_sql = UPDATE_USER_FREQ_SQL % prefix_args\n\n\nclass DB:\n\n UNIG_LIMIT = 1000\n BIG_LIMIT = 50\n\n FLUSH_INTERVAL = 2 * 60 # 2 minutes\n __last_flush_time = 0\n __conn = None\n\n @classmethod\n def open(cls, db_file, read_only=False):\n #debug('opening db file:', db_file)\n if cls.__conn:\n return\n cls.__conn = sqlite3.connect(db_file)\n cls.read_only = read_only\n if not read_only:\n cls.__conn.executescript(INIT_ZIME_DB_SQL)\n cls.flush(True)\n\n @classmethod\n def read_setting(cls, key):\n r = cls.__conn.execute(QUERY_SETTING_SQL, {'path': key}).fetchone()\n return r[0] if r else None\n\n @classmethod\n def read_setting_list(cls, key):\n r = cls.__conn.execute(QUERY_SETTING_SQL, {'path': key}).fetchall()\n return [x[0] for x in r]\n\n @classmethod\n def read_setting_items(cls, key):\n r = cls.__conn.execute(QUERY_SETTING_ITEMS_SQL, {'pattern': key + '%'}).fetchall()\n return [(x[0][len(key):], x[1]) for x in r]\n\n @classmethod\n def add_setting(cls, key, value):\n if cls.read_only:\n return False\n path_id = cls.__get_or_insert_setting_path(key)\n args = {'path_id': path_id, 'value': value}\n cls.__conn.execute(ADD_SETTING_VALUE_SQL, args)\n return True\n\n @classmethod\n def update_setting(cls, key, value):\n if cls.read_only:\n return False\n path_id = cls.__get_or_insert_setting_path(key)\n args = {'path_id': path_id, 'value': value}\n if cls.read_setting(key) is None:\n cls.__conn.execute(ADD_SETTING_VALUE_SQL, args)\n else:\n cls.__conn.execute(UPDATE_SETTING_VALUE_SQL, args)\n return True\n\n @classmethod\n def __get_or_insert_setting_path(cls, path):\n cur = cls.__conn.cursor()\n args = {'path' : path}\n r = cur.execute(QUERY_SETTING_PATH_SQL, args).fetchone()\n if r:\n return r[0]\n else:\n cur.execute(ADD_SETTING_PATH_SQL, 
args)\n return cur.lastrowid\n\n @classmethod\n def clear_setting(cls, path):\n cur = cls.__conn.cursor()\n cur.execute(CLEAR_SETTING_VALUE_SQL, {'path' : path})\n cur.execute(CLEAR_SETTING_PATH_SQL, {'path' : path})\n\n @classmethod\n def flush(cls, immediate=False):\n now = time.time()\n if immediate or now - cls.__last_flush_time > cls.FLUSH_INTERVAL:\n cls.__conn.commit()\n cls.__last_flush_time = now\n\n def __init__(self, name):\n self.__name = name\n self.__section = '%s/' % name\n prefix_args = {'prefix' : self.read_config_value('Dict')}\n _generate_dict_specific_sql(self, prefix_args)\n # for recovery from learning accidental user input\n self.__pending_updates = []\n\n def recreate_tables(self):\n cur = DB.__conn.cursor()\n cur.executescript(self._drop_dict_sql)\n cur.executescript(self._create_dict_sql)\n\n def read_config_value(self, key):\n return DB.read_setting(self.__section + key)\n\n def read_config_list(self, key):\n return DB.read_setting_list(self.__section + key)\n \n def list_keywords(self):\n return [x[0] for x in DB.__conn.execute(self._list_keywords_sql, ()).fetchall()]\n\n def lookup_freq_total(self):\n self.proceed_pending_updates()\n r = DB.__conn.execute(self._query_stats_sql).fetchone()\n return r\n\n def lookup_unigram(self, key):\n #print 'lookup_unigram:', key\n args = {'ikey' : key}\n r = DB.__conn.execute(self._query_unigram_sql, args).fetchmany(DB.UNIG_LIMIT)\n return r\n\n def lookup_bigram(self, key):\n #print 'lookup_bigram:', key\n args = {'ikey' : key}\n r = DB.__conn.execute(self._query_bigram_sql, args).fetchmany(DB.BIG_LIMIT)\n return r\n\n def lookup_bigram_by_entry(self, e):\n #print 'lookup_bigram_by_entry:', unicode(e)\n args = {'e1' : e.get_eid()}\n r = DB.__conn.execute(self._query_bigram_by_entry_sql, args).fetchmany(DB.BIG_LIMIT)\n return r\n\n def update_freq_total(self, n):\n #print 'update_freq_total:', n\n self.__pending_updates.append(lambda: self.__update_ufreq_total(n))\n\n def update_unigram(self, e):\n #print 'update_unigram:', unicode(e)\n self.__pending_updates.append(lambda: self.__update_unigram(e))\n\n def update_bigram(self, a, b, indexer):\n #print 'update_bigram:', unicode(a), unicode(b)\n self.__pending_updates.append(lambda: self.__update_bigram(a, b, indexer))\n\n def proceed_pending_updates(self):\n if self.__pending_updates:\n for f in self.__pending_updates:\n f()\n self.__pending_updates = []\n\n def cancel_pending_updates(self):\n if self.__pending_updates:\n self.__pending_updates = []\n\n def __update_ufreq_total(self, n):\n if DB.read_only:\n return\n args = {'n' : n}\n DB.__conn.execute(self._update_ufreq_total_sql, args)\n DB.flush()\n \n def __update_unigram(self, e):\n if DB.read_only:\n return\n args = {'id' : e.get_eid(), 'freq': 1}\n DB.__conn.execute(self._inc_ufreq_sql, args)\n\n def __update_bigram(self, a, b, indexer):\n if DB.read_only:\n return\n cur = DB.__conn.cursor()\n args = {'e1' : a.get_eid(), 'e2' : b.get_eid(), 'freq': 1}\n if cur.execute(self._bigram_exist_sql, args).fetchone():\n cur.execute(self._inc_bfreq_sql, args)\n else:\n cur.execute(self._add_bigram_sql, args)\n # generate ikey-bigram index\n b_id = cur.execute(self._bigram_exist_sql, args).fetchone()[0]\n okey = u' '.join([a.get_okey(), b.get_okey()])\n k_ids = [self.__get_or_insert_key(k) for k in indexer(okey)]\n for k_id in k_ids:\n self.__add_kb(k_id, b_id)\n\n def __get_or_insert_key(self, key):\n cur = DB.__conn.cursor()\n args = {'ikey' : key}\n r = None\n while not r:\n r = cur.execute(self._query_key_sql, 
args).fetchone()\n if not r:\n cur.execute(self._add_key_sql, args)\n return r[0]\n\n def __add_kb(self, k_id, b_id):\n args = {'k_id' : k_id, 'b_id' : b_id}\n if not DB.__conn.execute(self._query_kb_sql, args).fetchone():\n DB.__conn.execute(self._add_kb_sql, args)\n\n # used by zimedb-admin.py\n\n @classmethod\n def get_schema_list(self):\n schema_list = DB.__conn.execute(QUERY_SCHEMA_LIST_SQL).fetchall()\n return schema_list\n\n @classmethod\n def get_installed_dicts(self):\n prefixes = DB.__conn.execute(QUERY_DICT_PREFIX_SQL).fetchall()\n return prefixes\n\n def drop_tables(self, compact=False):\n DB.__conn.executescript(self._drop_dict_sql)\n\n @classmethod\n def compact(cls):\n DB.__conn.execute(\"\"\"VACUUM;\"\"\")\n \n def add_keywords(self, keywords):\n args = [{'keyword': k} for k in keywords]\n DB.__conn.executemany(self._add_keyword_sql, args)\n\n def add_phrases(self, phrase_table, indexer, reporter=None):\n '''批量添加詞條並以indexer建立編碼索引'''\n # 第一趟,讀取phrase id,寫入新增詞條\n phrase_id = dict()\n missing_phrases = set()\n for (k, freq) in phrase_table:\n phrase = k[0]\n p_id = self.__get_phrase_id(phrase)\n if p_id:\n phrase_id[phrase] = p_id\n else:\n missing_phrases.add(phrase)\n if missing_phrases:\n table = [{'phrase': p} for p in missing_phrases]\n DB.__conn.executemany(ADD_PHRASE_SQL, table)\n for phrase in missing_phrases:\n p_id = self.__get_phrase_id(phrase)\n if p_id:\n phrase_id[phrase] = p_id\n # 第二趟,累計詞頻、生成unigram\n unigram_freq = dict()\n total = 0\n for (k, freq) in phrase_table:\n if k in unigram_freq:\n unigram_freq[k] += freq\n else:\n unigram_freq[k] = freq\n total += freq\n increment = list()\n missing_unigrams = set()\n for (phrase, okey), freq in unigram_freq.iteritems():\n p_id = phrase_id[phrase]\n u_id = self.__get_unigram_id(p_id, okey)\n if u_id:\n # 已有unigram,累計詞頻\n if freq > 0:\n increment.append({'id': u_id, 'freq': freq})\n else:\n missing_unigrams.add((phrase, okey))\n if reporter:\n reporter(phrase, okey)\n if missing_unigrams:\n table = [{'p_id': phrase_id[k[0]], 'okey': k[1], 'freq': unigram_freq[k]}\n for k in missing_unigrams]\n DB.__conn.executemany(self._add_unigram_sql, table)\n if increment:\n DB.__conn.executemany(self._inc_sfreq_sql, increment)\n if total > 0:\n self.__inc_freq_total(total)\n \n # 建立索引\n key_id = dict()\n missing_keys = set()\n missing_ku_links = set()\n for (phrase, okey) in missing_unigrams:\n u_id = self.__get_unigram_id(phrase_id[phrase], okey)\n if not u_id:\n # shouldn't happen!\n continue\n for key in indexer(okey):\n missing_ku_links.add((key, u_id))\n k_id = self.__get_key_id(key)\n if k_id:\n key_id[key] = k_id\n else:\n missing_keys.add(key)\n if missing_keys:\n table = [{'ikey': k} for k in missing_keys]\n DB.__conn.executemany(self._add_key_sql, table)\n for key in missing_keys:\n k_id = self.__get_key_id(key)\n if k_id:\n key_id[key] = k_id\n if missing_ku_links:\n table = [{'k_id': key_id[k], 'u_id': u} for (k, u) in missing_ku_links]\n DB.__conn.executemany(self._add_ku_sql, table)\n\n def __get_phrase_id(self, phrase):\n args = {'phrase': phrase}\n r = DB.__conn.execute(QUERY_PHRASE_SQL, args).fetchone()\n return r[0] if r else None\n\n def __get_key_id(self, key):\n args = {'ikey': key}\n r = DB.__conn.execute(self._query_key_sql, args).fetchone()\n return r[0] if r else None\n\n def __get_unigram_id(self, p_id, okey):\n args = {'p_id': p_id, 'okey': okey}\n r = DB.__conn.execute(self._unigram_exist_sql, args).fetchone()\n return r[0] if r else None\n\n def __inc_freq_total(self, n):\n args = {'n' : n}\n 
DB.__conn.execute(self._update_sfreq_total_sql, args)\n\n def dump_user_freq(self):\n return DB.__conn.execute(self._query_user_freq_sql).fetchall()\n \n def dump_user_gram(self):\n return DB.__conn.execute(self._query_user_gram_sql).fetchall()\n\n def restore_user_freq(self, freq_table):\n cur = DB.__conn.cursor()\n unigram_freq = dict()\n for (u, n) in freq_table:\n if u in unigram_freq:\n unigram_freq[u] += n\n else:\n unigram_freq[u] = n\n table = list()\n total_increment = 0\n for (phrase, okey), n in unigram_freq.iteritems():\n table.append({'phrase': phrase, 'okey': okey, 'freq': n})\n total_increment += n\n cur.executemany(self._update_user_freq_sql, table)\n if total_increment > 0:\n cur.execute(self._update_ufreq_total_sql, {'n': total_increment})\n\n def restore_user_gram(self, freq_table, indexer):\n cur = DB.__conn.cursor()\n bigram_freq = dict()\n for (a, b, n) in freq_table:\n k = (a, b)\n if k in bigram_freq:\n bigram_freq[k] += n\n else:\n bigram_freq[k] = n\n missing = list()\n increment = list()\n for ((phrase1, okey1), (phrase2, okey2)), n in bigram_freq.iteritems():\n p1 = self.__get_phrase_id(phrase1)\n if not p1:\n continue\n e1 = self.__get_unigram_id(p1, okey1)\n if not e1:\n continue\n p2 = self.__get_phrase_id(phrase2)\n if not p2:\n continue\n e2 = self.__get_unigram_id(p2, okey2)\n if not e2:\n continue\n args = {'e1': e1, 'e2': e2, 'freq': n, 'okey': u' '.join([okey1, okey2])}\n if cur.execute(self._bigram_exist_sql, args).fetchone():\n increment.append(args)\n else:\n missing.append(args)\n cur.executemany(self._inc_bfreq_sql, increment)\n cur.executemany(self._add_bigram_sql, missing)\n # generate ikey-bigram index\n for args in missing:\n b_id = cur.execute(self._bigram_exist_sql, args).fetchone()[0]\n k_ids = [self.__get_or_insert_key(k) for k in indexer(args['okey'])]\n for k_id in k_ids:\n self.__add_kb(k_id, b_id)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":39999,"cells":{"__id__":{"kind":"number","value":18279380837036,"string":"18,279,380,837,036"},"blob_id":{"kind":"string","value":"91b639d79ce4dbdb97b612eb3a75f83e74cea2f6"},"directory_id":{"kind":"string","value":"bd9dcfb3fecce1786470570866cd527bd8016f66"},"path":{"kind":"string","value":"/export_layers/pylibgimpplugin/pylibgimp.py"},"content_id":{"kind":"string","value":"0ec90fb256710c3e680fd051104124c093694eed"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"ncornette/gimp-plugin-export-layers"},"repo_url":{"kind":"string","value":"https://github.com/ncornette/gimp-plugin-export-layers"},"snapshot_id":{"kind":"string","value":"c96812cfb0736695002623cd435be76262896dcc"},"revision_id":{"kind":"string","value":"5cb46039c164c9796b4bb9f6ad5e3ea59a7a1f80"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T15:42:20.363509","string":"2021-01-23T15:42:20.363509"},"revision_date":{"kind":"timestamp","value":"2014-11-10T22:55:31","string":"2014-11-10T22:55:31"},"committer_date":{"kind":"timestamp","value":"2014-11-10T22:55:31","string":"2014-11-10T22:55:31"},"github_id":{"kind":"number","value":26461215,"string":"26,461,215"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#-------------------------------------------------------------------------------\n#\n# This file is part of pylibgimpplugin.\n#\n# Copyright (C) 2014 khalim19 \n#\n# pylibgimpplugin is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pylibgimpplugin is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pylibgimpplugin. 
If not, see .\n#\n#-------------------------------------------------------------------------------\n\n\"\"\"\nThis module defines functions dealing with GIMP objects (images, layers, etc.)\nnot defined in the Python API for GIMP plug-ins.\n\"\"\"\n\n#===============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nstr = unicode\n\n#===============================================================================\n\nfrom contextlib import contextmanager\n\nimport gimp\nimport gimpenums\n\n#===============================================================================\n\npdb = gimp.pdb\n\n#===============================================================================\n# Functions\n#===============================================================================\n\n@contextmanager\ndef undo_group(image):\n \"\"\"\n Wrap the enclosing block of code into one GIMP undo group for the specified\n image.\n \n Use this function as a context manager:\n \n with undo_group(image):\n # do stuff\n \"\"\"\n pdb.gimp_image_undo_group_start(image)\n try:\n yield\n finally:\n pdb.gimp_image_undo_group_end(image)\n\n\ndef merge_layer_group(image, layer_group):\n \"\"\"\n Merge layers in the specified layer group belonging to the specified image\n into one layer.\n \n This function can handle both top-level and nested layer groups.\n \"\"\"\n \n if not pdb.gimp_item_is_group(layer_group):\n raise TypeError(\"not a layer group\")\n \n with undo_group(image):\n orig_parent_and_pos = ()\n if layer_group.parent is not None:\n # Nested layer group\n orig_parent_and_pos = (layer_group.parent, pdb.gimp_image_get_item_position(image, layer_group))\n pdb.gimp_image_reorder_item(image, layer_group, None, 0)\n \n orig_layer_visibility = [layer.visible for layer in image.layers]\n \n for layer in image.layers:\n layer.visible = False\n layer_group.visible = True\n \n merged_layer_group = pdb.gimp_image_merge_visible_layers(image, gimpenums.EXPAND_AS_NECESSARY)\n \n for layer, orig_visible in zip(image.layers, orig_layer_visibility):\n layer.visible = orig_visible\n \n if orig_parent_and_pos:\n pdb.gimp_image_reorder_item(image, merged_layer_group, orig_parent_and_pos[0], orig_parent_and_pos[1])\n \n return merged_layer_group\n\n\ndef is_layer_inside_image(image, layer):\n \"\"\"\n Return True if the layer is inside the image canvas (partially or completely).\n Return False if the layer is completely outside the image canvas.\n \"\"\"\n \n return ((-image.width < layer.offsets[0] < image.width) and\n (-image.height < layer.offsets[1] < image.height))\n\n\ndef remove_all_layers(image):\n \"\"\"\n Remove all layers from the specified image.\n \"\"\"\n \n for layer in image.layers:\n pdb.gimp_image_remove_layer(image, layer)\n\n\ndef remove_all_channels(image):\n \"\"\"\n Remove all layers from the specified image.\n \"\"\"\n \n for channel in image.channels:\n pdb.gimp_image_remove_channel(image, channel)\n\n\ndef remove_all_paths(image):\n \"\"\"\n Remove all paths (vectors) from the specified image.\n \"\"\"\n \n for path in image.vectors:\n pdb.gimp_image_remove_vectors(image, path)\n\n\ndef remove_all_items(image):\n \"\"\"\n Remove all items (layers, channels, paths) from the specified image.\n \"\"\"\n \n remove_all_layers(image)\n remove_all_channels(image)\n remove_all_paths(image)\n\n\ndef duplicate(image, remove_items=False):\n \"\"\"\n Duplicate 
the specified image.\n \n If `remove_items` is true, remove all items (layers, channels, paths)\n from the duplicated image.\n \"\"\"\n \n image_new = pdb.gimp_image_duplicate(image)\n if remove_items:\n remove_all_items(image_new)\n \n return image_new\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":399,"numItemsPerPage":100,"numTotalItems":42509,"offset":39900,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODA3MjQ0MSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU4MDc2MDQxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.4h6tGAf3AQ80no68qXaQeAeKNdM9WzzY4Hr0tmZD-h2qCgd1FdqIIgPZRU9wLTMerV6-OGaHKsGtw7L8ImyLAw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
__id__
int64
3.09k
19,722B
blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
256
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
3 values
repo_name
stringlengths
5
109
repo_url
stringlengths
24
128
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
42
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
6.65k
581M
star_events_count
int64
0
1.17k
fork_events_count
int64
0
154
gha_license_id
stringclasses
16 values
gha_fork
bool
2 classes
gha_event_created_at
timestamp[ns]
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_size
int64
0
5.76M
gha_stargazers_count
int32
0
407
gha_forks_count
int32
0
119
gha_open_issues_count
int32
0
640
gha_language
stringlengths
1
16
gha_archived
bool
2 classes
gha_disabled
bool
1 class
content
stringlengths
9
4.53M
src_encoding
stringclasses
18 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
year
int64
1.97k
2.01k
16,209,206,613,751
05c8e8bc5217db4eba45cca50850e999597257dc
e6c6a7a59fa1e4d534949a9c17a1c4c844629ed1
/jsonlines.py
70ddb78481c13c84d19e033c56d61b0900351ba4
[]
no_license
tigerlight/temp
https://github.com/tigerlight/temp
ca930eb446e7352bc8cfa2ad5d668ca56f406906
e8f759397b2b5b9cddfa4e743ba82900aebcd167
refs/heads/master
2016-09-06T01:04:54.034845
2010-04-06T22:05:19
2010-04-06T22:05:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#Copyright Jim Pitman March 29 2010 import string, simplejson def dict2line(d): try: line = simplejson.dumps(d) + ',\n' except: e = {} e['error'] = 'Error with simplejson.dumps: unable to encode the following python object in json: ' + str(d) line = simplejson.dumps(e) + ',\n' return line def line2dict(line): try: d = simplejson.loads(line[0:-2]) except: d = {} d['error'] = 'Unable to load dictionary from this line: ' + line return d def put_jsonlines(fname,meta,dls): topd = {} topd['begin_json'] = '' p = '[' + dict2line(topd) + dict2line(meta) for d in dls: p += dict2line(d) ###meta['facet_counts'] = make_facet_counts(lines) ## !! Note facet_counts are overwritten each time !! botd = {} botd['end_json'] = '' p += dict2line(botd)[0:-2] + ']\n' outfile = open(fname,'w') outfile.write(p.encode('utf-8')) outfile.close def get_jsonlines(fname): infile = open(fname,'r') inlines = infile.readlines() meta = line2dict(inlines[1]) dls = [] for line in inlines[2:-1]: dls += [line2dict(line)] return (fname, meta, dls) def append_records(fname, addls): ### append records fname,meta,dls = get_jsonlines(fname) dls += addls put_jsonlines(fname,meta,dls) def update_record(fname, id, e ): fname,meta,dls = get_jsonlines(fname) dls1 = [] found = 0 for d in dls: if d.get('id','') == id: d.update(e) found = 1 dls1 += [d] if not found: e['id'] = id dls1 += [e] put_jsonlines(fname,meta,dls1) def assertSameAs(fname, id, addls): ## should create a new id if id = '' or not allowed fname,meta,dls = get_jsonlines(fname) dls1 = [] found = 0 dkeys = [] for d in dls: dkeys += [ d.get('id','')] if d.get('id','') == id: ids = d.get('sameAs','').split() for e in addls: eid = e.get('id', '') if not eid in ids: ids += [eid] d['sameAs'] = ' '.join(ids) found = 1 for e in addls: if d.get('id','') == e['id']: d.update(e) ### update records from the addls dls1 += [d] if not found: e = {} e['id'] = id ids = [] for e in addls: eid = e.get('id', '') if not eid in ids: ids += [eid] e['sameAs'] = ' '.join(ids) dls1 += [e] for d in addls: if not d.get('id','') in dkeys: ### only append the record if its not there: could overwrite instead dls1 += [d] put_jsonlines(fname,meta,dls1) if __name__ == "__main__": fname = 'test.json' meta = {} dls = [] for x in range(10): d = {} d['id'] = str(x) dls += [d] put_jsonlines(fname,meta,dls) append_records(fname, [{},{}] ) fname,meta,dls = get_jsonlines(fname) print dls d = {} d['var'] = 'val' update_record(fname, '3', d ) fname,meta,dls = get_jsonlines(fname) update_record(fname, '3', d ) update_record(fname, '4', d ) fname,meta,dls = get_jsonlines(fname) r = {} r['name'] = 'Billy' r['id'] = 'other_id' s = {} s['name'] = 'Sammy' s['id'] = 'guid' t = {} t['name'] = 'Sammy' t['id'] = 'xxxxguid' assertSameAs(fname, '5', [r] ) fname,meta,dls = get_jsonlines(fname) print dls assertSameAs(fname, '3', [r,s,t] ) fname,meta,dls = get_jsonlines(fname) print dls
UTF-8
Python
false
false
2,010
8,512,625,181,843
af0e7d108058f95e4112966cf9c888a501ad020c
6afe152da2f65844637e960d290e556d4b6b8b99
/coinpy-lib/src/coinpy/lib/blockchain/bsddb/serialization/s11n_disktxpos.py
c71cbf9d3c2951b218cd425a9bdd60b129c6d377
[ "LGPL-3.0-only" ]
non_permissive
wizardofozzie/coinpy
https://github.com/wizardofozzie/coinpy
1c1a286f54bb4bea68373c9f02655d639817ef52
085d9409fb4e86256bafa54c0bd953882aff7b3d
refs/heads/master
2020-04-07T14:22:24.431532
2014-01-24T17:47:16
2014-01-24T17:47:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from coinpy.lib.serialization.common.field import Field from coinpy.lib.serialization.common.structure import Structure from coinpy.lib.blockchain.bsddb.objects.disktxpos import DiskTxPos class DiskTxPosSerializer(): DISKTXPOS = Structure([Field("<I", "file"), Field("<I", "blockpos"), Field("<I", "txpos")], "disktxpos") def serialize(self, disktxpos_obj): return (self.DISKTXPOS.serialize([disktxpos_obj.file, disktxpos_obj.blockpos, disktxpos_obj.txpos])) def deserialize(self, data, cursor=0): (file, nblockpos, ntxpos), cursor = self.DISKTXPOS.deserialize(data, cursor) return (DiskTxPos(file, nblockpos, ntxpos), cursor)
UTF-8
Python
false
false
2,014
18,554,258,762,504
8645383452ec263d3c79b667ef9a42abf9cf7322
a9315bae6b16dc37a52633dc10a80713d4d8a1c2
/week4/test_manage_company.py
7498dec5cd521924ade7167af958a11a84a64c9e
[]
no_license
leohaskell/Programming-101
https://github.com/leohaskell/Programming-101
00bdfba02b9d915faba5efc896866e2f6135689b
4e12fdcbb6192950da4027e01343ec7c39d872bd
refs/heads/master
2016-09-05T18:27:47.310182
2014-04-22T18:49:05
2014-04-22T18:49:05
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import unittest import manage_company import sqlite3 from subprocess import call class TestManageCompany(unittest.TestCase): def setUp(self): self.conn = sqlite3.connect("test_employees.db") self.cursor = self.conn.cursor() self.cursor.execute('''CREATE TABLE employees (id int, name text, monthly_salary int, yearly_bonus int, position text)''') self.employee = { 'id': 0, 'name': "daniel taskoff", 'monthly_salary': -100, 'yearly_bonus': 0, 'position': "tester"} def test_add_employee(self): manage_company.add_employee(self.cursor, self.employee) result = self.cursor.execute("SELECT * FROM employees").fetchall() self.assertEqual([(0, "daniel taskoff", -100, 0, "tester")], result) def test_list_employees_while_empty(self): result = manage_company.list_employees(self.cursor) self.assertEqual("", result) def test_list_employees_while_not_empty(self): manage_company.add_employee(self.cursor, self.employee) result = manage_company.list_employees(self.cursor) self.assertEqual("0 - daniel taskoff - tester", result) def test_delete_employee(self): manage_company.add_employee(self.cursor, self.employee) first_count = self.cursor.execute("SELECT COUNT(*) FROM employees") self.assertEqual(1, first_count.fetchone()[0]) manage_company.delete_employee(self.cursor, 0) second_count = self.cursor.execute("SELECT COUNT(*) FROM employees") self.assertEqual(0, second_count.fetchone()[0]) def test_monthly_spending(self): manage_company.add_employee(self.cursor, self.employee) result = manage_company.monthly_spending(self.cursor) self.assertEqual(-100, result) def test_yearly_spending(self): manage_company.add_employee(self.cursor, self.employee) manage_company.add_employee(self.cursor, { 'id': 1, 'name': "bar", 'monthly_salary': 100, 'yearly_bonus': 1000, 'position': "foo"}) result = manage_company.yearly_spending(self.cursor) self.assertEqual(1000, result) def tearDown(self): self.conn.close() call("rm -r test_employees.db", shell=True) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
2,014
19,481,971,656,215
ebfef063d538f973073ccaf60480211e45e051c3
d1a22892e61d17ef0ad2440c661d16dd2dc922b9
/indexMaker.py
d530e35cd0fd47df443c56a34153d72073c5ecd4
[ "MIT" ]
permissive
young/pdf-index-maker
https://github.com/young/pdf-index-maker
8e279bf1283006abf5528a1c208780698618289b
0be368027dcefafc75a586ee8ae6112d6a66ce98
refs/heads/master
2021-01-01T20:00:07.990775
2014-01-27T06:11:41
2014-01-27T06:11:41
16,084,921
2
2
null
null
null
null
null
null
null
null
null
null
null
null
null
import string import time import argparse from sys import argv from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import TextConverter from pdfminer.layout import LAParams from pdfminer.pdfpage import PDFPage from cStringIO import StringIO class IndexMaker(): def __init__(self, args): self.run(args) def run(self, args): try: # get the word list and create a dict of words words_list = [line.strip() for line in open(args.w)] except IOError as e: print e exit() # start the timer start = time.clock() # buil the index index = create_index(args.f, words_list) f = open(args.o,'w') for word in index: # write the index to a file f.write("%s: %s \n" % (word, index[word])) if args.p: print "%s: %s \n" % (word, index[word]) f.close() end = time.clock() print "Finished in %f seconds" % (end - start) def get_pdf_text(path): """ Reads a pdf file and returns a dict of the text where the index represents the page number. http://stackoverflow.com/a/20905381 """ rsrcmgr = PDFResourceManager() retstr = StringIO() # change to to utf-8 if the text comes out garbled codec = 'ascii' #codec = 'utf-8' laparams = LAParams() pages = {} device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams, showpageno=True, pages=pages) fp = file(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos=set() for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True): interpreter.process_page(page) fp.close() device.close() retstr.close() return pages def find_whole_word(needle, haystack, case_sensitive = False): """ Searches text for a whole word match. Ignores whitespace and punctuation. Example: find_whole_word('test', 'This is a test; This is also a tester') matches 'test;' but not 'tester' http://stackoverflow.com/a/4155029 """ if case_sensitive: index = haystack.find(needle) else: index = haystack.lower().find(needle.lower()) if index == -1: return False if index != 0 and haystack[index-1].isalnum(): return False L = index + len(needle) if L < len(haystack) and haystack[L].isalnum(): return False return True def create_index(pdf_path, words_list): """ Create a word index from pdf file """ text_data = get_pdf_text(pdf_path) word_index = {} for page in text_data: for word in words_list: if find_whole_word(word, text_data[page]): if word in word_index: word_index[word].append(page) else: word_index[word] = [page] return word_index if __name__ == '__main__': parser = argparse.ArgumentParser(description='Creates an index of words from a PDF file.') parser.add_argument('--version', action='version', version='0.02') parser.add_argument('-o', default='index.txt', help='The file to output the index to') parser.add_argument('-p', default=False, help='Print output to console') parser.add_argument('-w', required=True, help='A text file of new line delimited words') parser.add_argument('-f', required=True, help='The pdf file to create the index from') args = parser.parse_args() IndexMaker(args)
UTF-8
Python
false
false
2,014
18,571,438,597,087
125c3df41e1bf3ee56f2adbb169f84ffb36e27b6
b928a3ebadcd704e7c454f746ddd092bede00720
/Labs/Lab5/lab5.py
c2b7c0a452952722abc583e8b0c89b365a82a613
[]
no_license
Tony-Gagliardi/Network-Management
https://github.com/Tony-Gagliardi/Network-Management
f2093159c1827aff365f4ce8f4606707f164243d
65386422eefd89fefd90868216acb67cf84f5316
refs/heads/master
2020-06-05T22:43:29.660780
2014-04-29T04:15:10
2014-04-29T04:15:10
17,575,260
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Network Management - TLEN 5410
Created by Anthony Gagliardi and Sanket Nasre
23 March 2014
Lab 5 - The purpose of this lab is to gain experience with NetFlow data and
determine network information from flow data.
'''
# allows for true division using '/', as opposed to integer division.
from __future__ import division
import math
import flowd
import sys
import operator
from pylab import *
import matplotlib.pyplot as plt
import int_time

class Top_Hosts(object):
    def counter(self, format, dst, src, octets, dict_count, ip_dst, ip_src):
        '''
        The counter method for the Top_Hosts class is used for counting the
        total number of octets that traveled over a particular port. It
        returns a dictionary that has port numbers as the keys and octet
        counts as the corresponding values. In order to gain more useful
        information, internal ports have been excluded from the counts.
        '''
        if dst in dict_count:
            dict_count[dst] += octets
        elif not ip_dst.startswith('192.168.1'):
            dict_count[dst] = octets
        if src in dict_count:
            dict_count[src] += octets
        elif not ip_src.startswith('192.168.1'):
            dict_count[src] = octets
        return dict_count

    def create(self, top_hosts):
        '''
        The create method for the Top_Hosts class is used to generate a pie
        chart using pylab. It iterates over the dictionary from the counter
        method several times to create lists that serve as the labels and
        quantities.
        '''
        total_octets = 0
        percents = list()
        port_list = list()
        for host in top_hosts:
            total_octets += host[1]
        for host_total in top_hosts:
            frac = (host_total[1] / total_octets) * 100
            percents.append(frac)
        for host_port in top_hosts:
            port_list.append(host_port[0])
        figure(1, figsize=(8, 8))
        ax = axes()
        pie(percents, labels=port_list, autopct='%1.0f%%')
        savefig('topports.png')

class Top_Remote(object):
    def counter(self, format, ip_dst, ip_src, octets, dict_count):
        '''
        The counter method for the Top_Remote class is used for counting the
        total number of octets that a particular site consumed. It returns a
        dictionary that has IP addresses as the keys and octet counts as the
        corresponding values. In order to gain more useful information,
        internal hosts have been excluded from the counts.
        '''
        if ip_dst in dict_count:
            dict_count[ip_dst] += octets
        elif not ip_dst.startswith('192.168.1'):
            dict_count[ip_dst] = octets
        if ip_src in dict_count:
            dict_count[ip_src] += octets
        elif not ip_src.startswith('192.168.1'):
            dict_count[ip_src] = octets
        return dict_count

    def create(self, top_remotes):
        '''
        The create method for the Top_Remote class is used to generate a pie
        chart using pylab. It iterates over the dictionary from the counter
        method several times to create lists that serve as the labels and
        quantities.
        '''
        total_octets = 0
        percents = list()
        site_list = list()
        for host in top_remotes:
            total_octets += host[1]
        for host_total in top_remotes:
            frac = (host_total[1] / total_octets) * 100
            percents.append(frac)
        for host_port in top_remotes:
            site_list.append(host_port[0])
        figure(2, figsize=(9, 9))
        ax = axes()
        pie(percents, labels=site_list, autopct='%1.0f%%')
        savefig('topsites.png')

class Line_Graph(object):
    def counter(self, format, ip_dst, ip_src, octets, dict_count):
        '''
        The counter method for the Line_Graph class is used for counting the
        total number of octets that an internal host consumed. It returns a
        dictionary that has IP addresses as the keys and octet counts as the
        corresponding values. In order to gain more useful information,
        external sites have been excluded from the counts.
        '''
        if ip_dst in dict_count:
            dict_count[ip_dst] += octets
        elif ip_dst.startswith('192.168.1') or ip_dst.startswith('10.'):
            dict_count[ip_dst] = octets
        if ip_src in dict_count:
            dict_count[ip_src] += octets
        elif ip_src.startswith('192.168.1') or ip_src.startswith('10.'):
            dict_count[ip_src] = octets
        return dict_count

    def create(self, top_internal):
        '''
        The create method for the Line_Graph class is used to generate a pie
        chart using pylab. It iterates over the dictionary from the counter
        method several times to create lists that serve as the labels and
        quantities.
        '''
        total_octets = 0
        octet_amount = list()
        internal_hosts = list()
        for host in top_internal:
            total_octets += host[1]
        for host_total in top_internal:
            frac = (host_total[1] / total_octets) * 100
            octet_amount.append(frac)
        for host_port in top_internal:
            internal_hosts.append(host_port[0])
        figure(3, figsize=(8, 8))
        ax = axes()
        pie(octet_amount, labels=internal_hosts, autopct='%1.0f%%')
        savefig('internal.png')

class Time_Graph(object):
    def counter(self, format, octets, time, dict_count):
        '''
        The counter method for the Time_Graph class is used for counting the
        total number of octets that were consumed at a particular time. It
        returns a dictionary that has times as the keys and octet counts as
        the corresponding values.
        '''
        if time in dict_count:
            dict_count[time] += octets
        else:
            dict_count[time] = octets
        return dict_count

    def create(self, top_time):
        '''
        The create method for the Time_Graph class is used to generate a pie
        chart using pylab. It iterates over the dictionary from the counter
        method several times to create lists that serve as the labels and
        quantities.
        '''
        total_octets = 0
        octets_time = list()
        time_slots = list()
        for host in top_time:
            total_octets += host[1]
        for host_total in top_time:
            frac = (host_total[1] / total_octets) * 100
            octets_time.append(frac)
        for host_port in top_time:
            time_slots.append(host_port[0])
        figure(4, figsize=(10, 10))
        ax = axes()
        pie(octets_time, labels=time_slots, autopct='%1.0f%%')
        savefig('time.png')

class Log(object):
    def read_file(self, file_name, graph_type):
        '''
        The read file method extracts useful information from each individual
        flow and sends it to the proper counter method so that the graph can
        be created. Looking back on it now, this is an extremely inefficient
        implementation because every iteration of the for loop invokes the
        counter method. Our code is incredibly slow because of the way we
        implemented this.
        '''
        dict_count = {}
        call_list = [dict_count]
        log_name = flowd.FlowLog('/usr/local/' + file_name)
        for flow in log_name:
            format = flow.format()
            if isinstance(graph_type, Top_Hosts):
                dst = flow.dst_port
                src = flow.src_port
                ip_dst = flow.dst_addr
                ip_src = flow.src_addr
                octets = flow.octets
                call_list[0] = graph_type.counter(format, dst, src, octets,
                                                  call_list[0], ip_dst, ip_src)
            if isinstance(graph_type, Top_Remote):
                ip_dst = flow.dst_addr
                ip_src = flow.src_addr
                octets = flow.octets
                call_list[0] = graph_type.counter(format, ip_dst, ip_src,
                                                  octets, call_list[0])
            if isinstance(graph_type, Line_Graph):
                ip_dst = flow.dst_addr
                ip_src = flow.src_addr
                octets = flow.octets
                call_list[0] = graph_type.counter(format, ip_dst, ip_src,
                                                  octets, call_list[0])
            if isinstance(graph_type, Time_Graph):
                octets = flow.octets
                time = flow.recv_sec
                time = int_time.int_to_time(time)
                call_list[0] = graph_type.counter(format, octets, time,
                                                  dict_count)
        return call_list[0]

class Octets(object):
    def toptalkers(self, dictionary, graph_type):
        '''
        NOTE: We used the help of stack overflow to solve the problem of
        sorting a dictionary by values, particularly the following
        http://stackoverflow.com/questions/613183/python-sort-a-dictionary-by-value
        The toptalkers method sorts the dictionary by value into a list of
        tuples, which we then use to extract top consumer information for
        each of the graphs.
        '''
        sorted_dict = sorted(dictionary.iteritems(), key=operator.itemgetter(1))
        top_ten = list()
        # The list is sorted in ascending order, so the heaviest consumers
        # sit at the end; ending the ranges at 0 (rather than -1, as
        # originally written) keeps the single largest entry from being
        # skipped.
        if isinstance(graph_type, Line_Graph):
            for i in range(-8, 0):
                top_ten.append(sorted_dict[i])
        else:
            for i in range(-10, 0):
                top_ten.append(sorted_dict[i])
        graph_type.create(top_ten)

def main():
    '''
    The main function initializes each of our graph objects and then invokes
    the toptalkers method on the corresponding dictionary to create each of
    the graphs.
    '''
    file_select = raw_input("Enter the NetFlow file to analyze: ")
    log_file = Log()
    th_graph = Top_Hosts()
    tr_graph = Top_Remote()
    ln_graph = Line_Graph()
    tm_graph = Time_Graph()
    oct = Octets()
    dictionary = log_file.read_file(file_select, th_graph)
    oct.toptalkers(dictionary, th_graph)
    dictionary2 = log_file.read_file(file_select, tr_graph)
    oct.toptalkers(dictionary2, tr_graph)
    dictionary3 = log_file.read_file(file_select, ln_graph)
    oct.toptalkers(dictionary3, ln_graph)
    dictionary4 = log_file.read_file(file_select, tm_graph)
    oct.toptalkers(dictionary4, tm_graph)

if __name__ == '__main__':
    main()
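# A minimal standalone sketch (not part of the lab code) of the "top
# talkers" idea used by Octets.toptalkers above: rank a {key: octet_count}
# dict by value and keep the largest entries. Names are illustrative only.
import operator

def top_n(dict_count, n=10):
    # sorted() is ascending, so the heaviest talkers sit at the end.
    ranked = sorted(dict_count.items(), key=operator.itemgetter(1))
    return ranked[-n:]

# top_n({'22': 90, '80': 500, '443': 1200}, n=2) -> [('80', 500), ('443', 1200)]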
UTF-8
Python
false
false
2014
11141145191152
fa98c0612cd6bb98fc0515a35c03c2d723785593
4d62f8cbefce29099da603938c1e10aa99877a01
/Server/DBManager.py
50103bf5b4728a665acc470d48e01224161ebc96
[]
no_license
greentuzi/Whister
https://github.com/greentuzi/Whister
601c6ee5646928cde6401c4b24697c5e387505e8
76782ac405dd6163ae83bb56e60cffdcf0abefdb
refs/heads/master
2021-01-20T20:56:58.071657
2014-06-07T14:36:55
2014-06-07T14:36:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import MySQLdb
import MySQLdb.cursors

class DBManager:
    def __init__(self, host="", user="root", password="1", db="db"):
        self.host = host
        self.user = user
        self.password = password
        self.db = db
        # Initialise the attribute first so __del__ cannot hit an
        # AttributeError when the connection attempt fails.
        self.conn = None
        try:
            self.conn = MySQLdb.connect(host=self.host, user=self.user,
                                        passwd=self.password, db=self.db)
        except MySQLdb.Error:  # was a bare except
            print "Failed to connect to MySQL"

    def transaction(self):
        # DictCursor returns each row as a column-name -> value dict.
        return self.conn.cursor(MySQLdb.cursors.DictCursor)

    def commit(self):
        self.conn.commit()

    def __del__(self):
        if self.conn:
            self.conn.close()
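# Hypothetical usage sketch for the DBManager above; the connection
# parameters and table are placeholders, not taken from this repo.
#
#   db = DBManager(host="localhost", user="root", password="secret", db="whist")
#   cur = db.transaction()          # DictCursor: rows come back as dicts
#   cur.execute("SELECT id, name FROM players WHERE id = %s", (1,))
#   row = cur.fetchone()            # e.g. {'id': 1, 'name': 'alice'}
#   db.commit()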
UTF-8
Python
false
false
2014
4475355924721
53807fbd7e74a6a1127956ba25e0a5bff17ca0d1
552b8850eb6142708cec8161e245bbc02cfcde48
/telnetlib3/__init__.py
03c705b7ec90be8e6a7b54f4b346bf6a1b1d20e7
[ "ISC", "BSD-3-Clause" ]
permissive
tehmaze/telnetlib3
https://github.com/tehmaze/telnetlib3
f3993d8ac63e951ea9fe31c9821f66a9c0b2846f
427f660ff8b7ccb1af5783e333bfa15ef0b500e0
refs/heads/master
2021-01-18T03:12:45.191827
2013-05-29T09:49:38
2013-05-29T09:49:38
10356095
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Telnet Protocol using the 'tulip' project of PEP 3156. Requires Python 3.3. For convenience, the 'tulip' module is included. See the ``README`` file for details and license. """ __author__ = "Jeffrey Quast" __url__ = u'https://github.com/jquast/telnetlib3/' __copyright__ = "Copyright 2013" __credits__ = ["Jim Storch",] __license__ = 'ISC' __all__ = ['TelnetServer', 'TelnetStreamReader'] from server import TelnetServer from telopt import TelnetStreamReader
UTF-8
Python
false
false
2013
4415226391058
60245406144eb1b5f3607efafe875daa06bebcb5
6967abe282637a3366fb0f5c1ef830939c0d2f75
/data/buses.py
194cff83938c08529b703d0a5770efa4cd1e2a2a
[]
no_license
achiang/openmotion
https://github.com/achiang/openmotion
1f4f8f531a5fe30411da9b36e85612aae6d72ac5
57560429bc37e1ccf737d0d3f58f3d3aaf15b00f
refs/heads/master
2020-05-18T15:57:03.211115
2014-08-04T16:57:17
2014-08-04T16:57:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3

import bng
import csv
import simplejson as json
import pymongo

from lxml import etree
from pykml import parser


def parse_madrid_bus(basepath):
    files = ['EMT.kml', 'Interurbanos.kml']
    places = []
    for f in files:
        with open(basepath + f, 'rb') as x:
            xml = etree.parse(x)
            k = parser.fromstring(etree.tostring(xml))
            places.extend(k.findall('.//{http://www.opengis.net/kml/2.2}Placemark'))

    stations = []
    count = 0
    for p in places:
        station = {'mode': 'bus'}
        station['city'] = 'Madrid'
        station['name'] = p.name.text
        coords = [float(c.strip()) for c in p.Point.coordinates.text.split(',')]
        loc = {'type': 'Point'}
        loc['coordinates'] = coords
        station['loc'] = loc
        stations.append(station)
    return stations


def parse_bcn_bus(basepath):
    with open(basepath + 'BUS_EST.kml', 'rb') as f:
        xml = etree.parse(f)
        k = parser.fromstring(etree.tostring(xml))
        places = k.findall('.//{http://www.opengis.net/kml/2.2}Placemark')

    stations = []
    count = 0
    for p in places:
        station = {'mode': 'bus'}
        station['city'] = 'Barcelona'
        station['name'] = p.name.text
        coords = [float(c.strip()) for c in p.Point.coordinates.text.split(',')]
        # BCN inserts a trailing 0 coordinate? Why!?
        coords.pop()
        loc = {'type': 'Point'}
        loc['coordinates'] = coords
        station['loc'] = loc
        stations.append(station)
    return stations


def parse_valencia_bus(basepath):
    with open(basepath + 'Emt_paradas.KML', 'rb') as f:
        xml = etree.parse(f)
        k = parser.fromstring(etree.tostring(xml))
        places = k.findall('.//{http://www.opengis.net/kml/2.2}Placemark')

    stations = []
    count = 0
    for p in places:
        station = {'mode': 'bus'}
        station['city'] = 'Valencia'
        data = p.findall('.//{http://www.opengis.net/kml/2.2}Data')
        for d in data:
            if d.attrib['name'] == 'numportal':
                station['name'] = str(d.value)
        coords = [float(c.strip()) for c in p.Point.coordinates.text.split(',')]
        loc = {'type': 'Point'}
        loc['coordinates'] = coords
        station['loc'] = loc
        stations.append(station)
    return stations


def parse_bilbao_bus(basepath):
    with open(basepath + 'stops.txt') as f:
        reader = csv.reader(f, delimiter=',')
        stations = []
        for row in reader:
            if row[0] == "stop_id":
                continue
            station = {'mode': 'bus'}
            station['city'] = 'Bilbao'
            station['name'] = row[2]  # stop_name
            loc = {'type': 'Point'}
            loc['coordinates'] = [float(row[5]), float(row[4])]
            station['loc'] = loc
            stations.append(station)
    return stations


def parse_malaga_bus(basepath):
    # If Malaga gets more than 1 bus stop, we'll do some parsing then ;)
    station = {'mode': 'bus'}
    station['city'] = 'Malaga'
    station['name'] = 'Paseo de los Tilos'
    loc = {'type': 'Point'}
    loc['coordinates'] = [-4.4342172937, 36.7130306843]
    station['loc'] = loc
    return [station]


def parse_london_bus(basepath):
    with open(basepath + 'bus-stops.csv') as f:
        reader = csv.reader(f, delimiter=',')
        stations = []
        for row in reader:
            if row[0] == "Stop_Code_LBSL":
                continue
            station = {'mode': 'bus'}
            station['city'] = 'London'
            station['name'] = row[3]  # Stop_Name
            loc = {'type': 'Point'}
            lng, lat = bng.tolnglat(int(row[4]), int(row[5]))
            loc['coordinates'] = [lng, lat]
            station['loc'] = loc
            stations.append(station)
    return stations


def parse_uk_bus(basepath):
    json_data = open(basepath + 'UK.json').read()
    data = json.loads(json_data)
    stations = []
    for d in data:
        stations.append(d)
    return stations


def do_import(mongo_uri, basepath):
    station_parsers = [
        ['Madrid', parse_madrid_bus],
        ['Barcelona', parse_bcn_bus],
        ['Valencia', parse_valencia_bus],
        ['Malaga', parse_malaga_bus],
        ['Bilbao', parse_bilbao_bus],
        ['London', parse_london_bus],
        ['UK', parse_uk_bus],
    ]

    client = pymongo.MongoClient(mongo_uri)
    db = client.openmotion
    buses = db.buses
    for parser in station_parsers:
        buses.insert(parser[1](basepath))
    client.disconnect()


if __name__ == "__main__":
    from lib import get_mongo_config, get_basepath, mongo_drop, mongo_index
    mode = 'buses'
    mongo_uri = get_mongo_config()
    basepath = get_basepath() + mode + '/'
    mongo_drop(mongo_uri, mode)
    do_import(mongo_uri, basepath)
    mongo_index(mongo_uri, mode)
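# The stations built above are stored as GeoJSON Points, which is exactly
# what MongoDB's 2dsphere index expects (mongo_index presumably creates it;
# the index call below is shown explicitly and is an assumption). A minimal
# pymongo radius-query sketch against the same collection:
import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')
buses = client.openmotion.buses
buses.create_index([('loc', pymongo.GEOSPHERE)])
# Bus stops within ~500 m of a point; GeoJSON order is [lng, lat].
nearby = buses.find({'loc': {'$near': {
    '$geometry': {'type': 'Point', 'coordinates': [-3.7038, 40.4168]},
    '$maxDistance': 500}}})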
UTF-8
Python
false
false
2014
14577119022663
8f0bcb12ea930527d96bb0c793a50e947abadbe6
20a872331e80f6ad11752fa2d9d63864c2812b10
/pybble/blueprint/_root/part/wanttracking.py
44ed908f0e1a65e31c8818a45f8cccad68bb27ea
[ "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference", "AGPL-3.0-or-later" ]
non_permissive
smurfix/pybble
https://github.com/smurfix/pybble
1ee535c74ae73605bc725a1ae1a41ef83190d000
305ba81d4600abb4d575b39926abc76992696c17
refs/heads/master
2021-01-22T23:53:05.310683
2014-07-23T16:41:10
2014-07-23T16:41:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals

##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statement by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP

from flask import request, url_for, flash
from werkzeug import redirect
from werkzeug.exceptions import NotFound
from wtforms import Form, BooleanField, TextField, SelectField, validators

from pybble.render import valid_obj, valid_read, valid_admin_self
from pybble.core.models.object import Object
from pybble.core.models._const import PERM, PERM_READ
from pybble.core.models.tracking import WantTracking
from pybble.core.db import db
from pybble.render import render_template
from .._base import expose
expose = expose.sub("part.wanttracking")

from datetime import datetime

@expose("/admin/wanttracking")
def list_wanttracking():
    """Complete list"""
    return render_template('wanttrackinglist.html',
                           data=db.filter_by(WantTracking, user=request.user),
                           title_trace=["Beobachtungsliste"])

@expose("/admin/wanttracking/<oid>")
def edit_wanttracking(oid=None):
    """Sub-list below the current object"""
    obj = Object.by_oid(oid)
    if isinstance(obj, WantTracking):
        return editor(obj)
    else:
        # show list of tracks for that object
        return render_template('wanttrackinglist.html', obj=obj,
                               title_trace=["Beobachtungsliste"])

plc = PERM.items()
plc.sort()

class WantTrackingForm(Form):
    user = TextField('User', [valid_obj, valid_admin_self])
    object = TextField('Object', [valid_obj, valid_read])
    objtyp = SelectField('Object type')  ###TODO , choices=(("-","any object"),)+tuple((str(q.id),q.name) for q in D))
    email = BooleanField(u'Mail schicken')
    track_new = BooleanField(u'Meldung bei neuen Einträgen')
    track_mod = BooleanField(u'Meldung bei Änderungen')
    track_del = BooleanField(u'Meldung bei Löschung')

def newer(parent, name=None):
    return editor(parent=parent)

def editor(obj=None, parent=None):
    form = WantTrackingForm(request.form, prefix="perm")
    if request.method == 'POST' and form.validate():
        user = Object.by_oid(form.user.data)
        dest = Object.by_oid(form.object.data)
        objtyp = None if form.objtyp.data == "-" else int(form.objtyp.data)
        email = bool(form.email.data)
        track_new = bool(form.track_new.data)
        track_mod = bool(form.track_mod.data)
        track_del = bool(form.track_del.data)
        if parent:
            obj = WantTracking.new(user, dest, objtyp)
        else:
            obj.record_change()
            obj.owner = user
            obj.parent = dest
        obj.objtyp = objtyp
        obj.track_new = track_new
        obj.track_mod = track_mod
        obj.track_del = track_del
        obj.email = email
        flash(u"Gespeichert.", True)
        return redirect(url_for("pybble.views.view_oid", oid=(parent or dest).oid))
    elif request.method == 'GET':
        if obj:  # bearbeiten / kopieren
            form.object.data = parent.oid if parent else obj.parent.oid
            form.user.data = obj.owner.oid
            form.objtyp.data = str(obj.objtyp)
            form.track_new.data = obj.track_new
            form.track_mod.data = obj.track_mod
            form.track_del.data = obj.track_del
            form.email.data = obj.email
        else:
            form.object.data = parent.oid
            form.user.data = request.user.oid
            form.objtyp.data = "-"
            form.track_new.data = True
            form.track_mod.data = False
            form.track_del.data = False
            form.email.data = False
    return render_template('edit/wanttracking.html', obj=obj,
                           parent=parent or obj.parent, form=form,
                           title_trace=["Beobachten"])
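# A minimal WTForms sketch of the validate-then-persist pattern editor()
# uses above; the form and field names here are illustrative, not pybble's:
from wtforms import Form, BooleanField, TextField, validators

class DemoForm(Form):
    name = TextField('Name', [validators.Required()])
    email_me = BooleanField(u'Send mail')

form = DemoForm(name=u'Alice')  # also accepts request.form-style multidicts
if form.validate():
    print(form.data)  # {'name': u'Alice', 'email_me': False}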
UTF-8
Python
false
false
2014
3332894661262
f937574359536b682ac650ab692471ff9c88e0db
cd957535983d2e996d3db3c4e3d0f20d2fcb7e9b
/bump
c0bf9af8a73ee29befa19b5a6ff888620936c793
[ "WTFPL" ]
permissive
silky/git-bump
https://github.com/silky/git-bump
092e1e7add37c95b4924699272ece8a8cd258bb6
0fb04048ab72c881b566903fcf63607d689d419d
refs/heads/master
2021-01-18T10:38:46.083556
2014-01-03T22:09:05
2014-01-03T22:09:05
20876479
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
import re, sys

__USAGE__ = \
"""BUMP is a semantic versioning bump script which accepts the following
mutually exclusive arguments:
  -m - a "major" version bump equal to +1.0.0
  -n - a "minor" version bump equal to +0.1.0
  -p - a "patch" version bump equal to +0.0.1

All of these options allow for the -r flag, which indicates that the state
is a RELEASE not a SNAPSHOT. If -r is not specified, then -SNAPSHOT is
appended to the updated version string."""

__INITIAL__ = ['0', '0', '1']

if __name__ == "__main__":
    v = []
    try:
        v = re.split(re.compile(r"\.|-"), open("VERSION").read()) or __INITIAL__
        v = v[0:3]
        list(map(int, v))  # validate; list() forces evaluation on Python 3
    except ValueError:
        print("failed to parse the existing VERSION file, assuming v 0.0.1")
        v = ['0', '0', '1']
    except IOError:  # FileNotFoundError is Python 3 only; IOError works on both
        print("failed to find a VERSION file, assuming v 0.0.0")
        v = ['0', '0', '0']

    op = ''
    try:
        op = sys.argv[1]
    except IndexError:  # was a bare except
        print(__USAGE__)
        sys.exit(-1)

    if op == '-m':
        v = [str(int(v[0]) + 1), '0', '0']
    elif op == '-n':
        v = [v[0], str(int(v[1]) + 1), '0']
    elif op == '-p':
        v = [v[0], v[1], str(int(v[2]) + 1)]
    else:
        print(__USAGE__)
        sys.exit(-1)

    v = '.'.join(v)
    if "-r" not in sys.argv:
        v += "-SNAPSHOT"
    v += "\n"
    print(v)
    open("VERSION", 'w').write(v)
    sys.exit(0)
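# Worked example of the bump arithmetic implemented above, as a pure
# function (a sketch following the same rules, not part of the script):
def bump(version, op, release=False):
    major, minor, patch = [int(x) for x in version.split('-')[0].split('.')]
    if op == '-m':
        major, minor, patch = major + 1, 0, 0
    elif op == '-n':
        minor, patch = minor + 1, 0
    elif op == '-p':
        patch += 1
    v = '%d.%d.%d' % (major, minor, patch)
    return v if release else v + '-SNAPSHOT'

assert bump('1.2.3', '-m') == '2.0.0-SNAPSHOT'
assert bump('1.2.3', '-n') == '1.3.0-SNAPSHOT'
assert bump('1.2.3', '-p', release=True) == '1.2.4'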
UTF-8
Python
false
false
2014
51539624138
af8d2404105b8db6fb65565a1f6d033ae4252ace
e5e69d5530ea4dd3fede3bec93344bd21f93c6f0
/settings.py
671790eb9646ec5222d7887ce9c3778ec472cef1
[]
no_license
HaySayCheese/IsWork
https://github.com/HaySayCheese/IsWork
2ff79b7ca4f4653cbae1e4011eae0b697b736977
c809b645f8cb0641c6e21379e09c455c12066ea3
refs/heads/master
2016-03-26T02:52:22.619403
2012-07-25T22:27:46
2012-07-25T22:27:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
PID_FILE_ADDRESS = "/tmp/iswork.pid"  # main pid file address.

WAIT_FOR_APP_STARTS_SECS = 2  # time interval in seconds to run exec_command.
                              # must be set to max run-time of all apps that will be watched.

INTERVAL_BETWEEN_CHECKS_SECS = 60 * 10  # 10min
UTF-8
Python
false
false
2012
14766097592509
5c7e9e5c3d0beeac5b2368dcc1a8ea8266028e79
5532f31150deeb18b9eae807c36328a55918d4d1
/src/Forecasting/util.py
1f69d7cf1d26ac49ceb714c73ebcc6ece75cc2f9
[ "GPL-3.0-only" ]
non_permissive
martin1/thesis
https://github.com/martin1/thesis
021f81cb2c05cca0fad68757b53599e1a0b79801
ee1f790484629d7f1b79ef7d373edc04f7ae4eef
refs/heads/master
2016-09-10T20:07:48.056641
2014-01-13T07:42:00
2014-01-13T07:42:00
15042291
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from Data.data import get_system_price_volume
from sklearn.metrics.metrics import mean_absolute_error, mean_squared_error
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.neuralnets import NNregression
from pybrain.supervised.trainers.rprop import RPropMinusTrainer
from pybrain.structure.modules.sigmoidlayer import SigmoidLayer
import logging
from pybrain.tools.shortcuts import buildNetwork
from datetime import timedelta, datetime
from pandas.core.series import Series
from pandas.tseries.index import date_range
import numpy
import os
import cPickle as pickle
import matplotlib.pyplot as plt
from matplotlib.pyplot import legend
from operator import itemgetter


def get_daily_sys_prices_times(hourly_sys_prices, hourly_times):
    d = {}
    daily_sys_prices = list()
    daily_sys_price = 0
    # convert times to dates
    times = [time.date() for time in hourly_times]
    #print times
    for i in range(0, len(times) - 1):
        if times[i] == times[i + 1]:
            daily_sys_price += hourly_sys_prices[i]
        elif times[i] != times[i + 1]:
            daily_sys_price += hourly_sys_prices[i]
            daily_sys_prices.append(round(daily_sys_price / times.count(times[i]), 2))
            d[times[i]] = round(daily_sys_price / times.count(times[i]), 2)
            daily_sys_price = 0
        if i + 1 == len(times) - 1:  # last price in list
            daily_sys_price += hourly_sys_prices[i + 1]
            daily_sys_prices.append(round(daily_sys_price / times.count(times[i + 1]), 2))
            d[times[i + 1]] = round(daily_sys_price / times.count(times[i + 1]), 2)
    return [d[key] for key in sorted(d.keys())], sorted(d.keys())


def get_sysprice_list(start_time, end_time, frequency='hourly'):
    '''Wrapper function for creating pandas Series object from data received
    from the database.

    Returns: pandas Series object with Elspot daily system prices and
    corresponding dates for predefined time period.

    Parameters:
    start_time - string representing the start of the time period, format
                 must be 'yyyy-mm-dd'.
    end_time - string representing the end of the time period, format must
               be 'yyyy-mm-dd'.
    frequency - string representing the frequency of the output pandas
                Series object. Currently must be one of ['hourly', 'daily']
    '''
    # Retrieve hourly system prices and timestamps from database as lists
    _, sys_prices, times = get_system_price_volume(start_time, end_time)
    ts = Series(sys_prices, index=times)
    if frequency == 'daily':
        resampling_frequency = 'D'
    '''Weekly functionality not needed for now'''
    '''elif frequency == 'weekly':
        start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
        end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
        if start_time.date().weekday() != 0:
            raise ValueError(str(start_time.date()) + " is a " +
                             start_time.date().strftime('%A') +
                             ". start_date must be a Monday.")
        if end_time.date().weekday() != 6:
            raise ValueError(str(end_time.date()) + " is a " +
                             end_time.date().strftime('%A') +
                             ". end_date must be a Sunday.")
        resampling_frequency = 'W'
    '''
    if frequency == 'monthly':
        resampling_frequency = 'M'
    if frequency == 'hourly':
        # Resampling is not necessary
        return ts
    else:
        return ts.resample(resampling_frequency, how='mean', kind='timestamp')


def get_errors(forecast_actual_data, forecast_data):
    return round(mean_absolute_error(forecast_actual_data, forecast_data), 2), \
        round(mean_squared_error(forecast_actual_data, forecast_data), 2)


def NN_data(ts, max_lag):
    '''Function for creating a normalized dataset suitable for training
    PyBrain's neural networks from pandas Series object.

    Returns: dataset suitable for neural net training,
             max value of dataset for denormalization purposes'''
    ds = SupervisedDataSet(max_lag, 1)
    times = ts.index
    prices = [item for item in normalize(ts.values)[0]]
    target = list()
    for item in prices:
        target.append(item)
    input_cols = list()
    for i in range(1, max_lag + 1):
        col = prices[:-i]
        while len(col) < len(prices):
            col = ['nan'] + list(col)
        input_cols.append(col)
    # convert input columns to input rows
    input_rows = zip(*input_cols)
    # Remove rows containing 'nan'
    input_rows = input_rows[max_lag:]
    target = target[max_lag:]
    for i in range(0, len(target)):
        ds.appendLinked(input_rows[i], target[i])
    return ds, normalize(ts.values)[1]


def normalize(list_of_prices):
    '''Normalize list of prices to range of [0..1]

    Returns: normalized list of prices, max price for denormalization'''
    return [item / max(list_of_prices) for item in list_of_prices], \
        max(list_of_prices)


def NN_forecast(f_start, forecast_period_in_days, trained_nnet, max_lag):
    '''Perform forecast on provided trained instance of neural net.

    Returns: forecasted values as pandas Series object.

    Parameters:
    f_start(String) - string representing date of the start of the forecast
                      period, format "yyyy-mm-dd"
    forecast_period_in_days(Integer) - length of forecast period in days
    trained_nnet(NeuralNetwork) - reference to a trained PyBrain neural
                                  network instance
    max_lag(Integer) - max lag of the dataset. must be a positive integer
    '''
    # 1st date needed for forecasting
    start = datetime.strptime(f_start, "%Y-%m-%d")
    end = start + timedelta(days=forecast_period_in_days - 1)
    start = start - timedelta(days=max_lag)
    data = get_sysprice_list(start, end, frequency='daily')
    nn_data, normalization_constant = NN_data(data, max_lag)
    forecast_data = list()
    #print nn_data['input']
    for item in nn_data['input']:
        forecast_data.append(trained_nnet.activate(item)[0] *
                             normalization_constant)
    #print forecast_data
    return Series(forecast_data,
                  index=date_range(f_start, periods=forecast_period_in_days))


def get_best_forecast(list_of_forecasts, type='mean', get_top10=False):
    '''Function for determining best forecast in provided list of forecasts.

    Returns: label of best forecast from list_of_forecasts
             (optionally) model of best forecast

    Parameters:
    list_of_forecasts - dict("forecast_model_label":[MAE, MSE])
    type(String) - type of ranking function used. Choices: 'mean', 'sum'.
                   Used for ranking models according to error metrics.
    get_top10(boolean) - 'True' returns top 10 best models from
                         list_of_forecasts and writes to file
    '''
    mae_dict = dict()
    mse_dict = dict()
    top10 = list()
    best_mae = [None, 9999]
    best_mse = [None, 9999]  # was `best_mae`, i.e. an alias of the same list
    for key in list_of_forecasts.keys():
        # for each ev_period
        mae, mse = list(), list()
        for i in range(0, len(ev_periods)):
            try:
                mae.append(list_of_forecasts[key][i][0])
                mse.append(list_of_forecasts[key][i][1])
            except TypeError:
                # found forecast where one of the errors is None. Ensure
                # that this forecast will not be considered the best
                mae.append(9999)
                mse.append(9999)
        if ranking_function(type, mae) < best_mae[1]:
            best_mae = [key, round(ranking_function(type, mae), 2)]
        if ranking_function(type, mse) < best_mse[1]:
            best_mse = [key, round(ranking_function(type, mse), 2)]
        mae_dict[key] = round(ranking_function(type, mae), 2)
    if get_top10 is True:
        # compose final list for top 10
        top10 = get_top10_forecasts(mae_dict)
        # write top 10 to file
        write_to_file("/home/martin/dev/git/masters_thesis/Files/"
                      "best_ev_forecasts.txt", top10)
    return best_mae[0]


def ranking_function(type, x):
    '''Function for calculating mean and sum of values in a list.

    Returns: mean or sum of values of x, depending on value of 'type'.

    Parameters:
    type(String) - 'mean' or 'sum'.
    x(list()) - list of numeric (Integer, Float) values'''
    if type == 'mean':
        return numpy.mean(x)
    if type == 'sum':
        return sum(x)
    # Maybe later:
    '''if type == 'mae+mse':
        return [a+b for a, b in zip(x, y)]'''


def write_to_file(fname, output_list):
    '''Helper function for writing a list to a file.

    Parameters:
    fname(String) - output file name with absolute path
    output_list(list()) - list that is written to file'''
    with open(fname, "w") as f:
        for item in output_list:
            f.write(item + "\n")
        f.close()
    print "Writing file: " + fname + "..."


def read_nets(path_to_nets_folder):
    nets_list = list()
    for filename in os.listdir(path_to_nets_folder):
        nets_list.append(pickle.load(open(path_to_nets_folder + filename, "rb")))
    return nets_list


def get_key(dict, value, get_multiple_keys=False):
    '''return key corresponding to value in provided dictionary dict'''
    if get_multiple_keys is True:
        keys = list()
        for key in dict.keys():
            if dict[key] == value:
                keys.append(key)
        return keys
    for key in dict.keys():
        if dict[key] == value:
            return key


def plot_forecasts(data, legend_data, save=False, filename=None):
    '''Plot data using predefined styles suitable for master's thesis

    data - list of pandas Series representing the forecasts and/or actual data
    legend_data - list of strings representing the display names for the
                  elements of data.
    save(boolean) - if True then plot is saved as filename
    filename(string) - absolute path to name of file where plot is saved

    Note: data and legend_data must be ordered such that legend_data[i]
    represents the display name of data[i]'''
    if len(data) == 2:
        plot_styles = ['ks--', 'ko-']
    elif len(data) == 3:
        plot_styles = ['ko-.', 'ks--', 'ko-']
    elif len(data) == 4:
        plot_styles = ['ko-', 'ks--', 'ko-.', 'ko:']
    for ds, sty, name in zip(data, plot_styles, legend_data):
        ds.plot(style=sty, markersize=3.0, label=name)
    legend(loc='best')
    plt.autoscale()
    plt.grid(False)
    plt.ylabel("System Price (EUR/MWh)")
    if save is True:
        plt.savefig(filename)
    else:
        plt.show()


def get_top10_forecasts(dict_label_mae):
    '''Function that returns list of top 10 from a dict of
    ('fc_model_label': mean(mae's for all ev_periods)). This function is
    used exclusively in method get_best_forecast.

    Note: might return more than 10 forecasts in rare cases where multiple
    forecasts have the same MAE and that MAE is in top 10 MAEs

    Returns: list of labels of top 10 forecast models'''
    # Choose top 10 mae values
    values = sorted(dict_label_mae.values())
    top_10 = values[:10]
    top_10 = [get_key(dict_label_mae, value, get_multiple_keys=True)
              for value in top_10]
    # flatten top_mae list
    top_10 = [item for sublist in top_10 for item in sublist]
    # remove duplicates
    top_10 = list(set(top_10))
    return top_10


def get_top_ev_forecasts(ev_forecasts_dict):
    '''Sort ev forecasts according to min(sum(MAE)) and return top 10'''
    sum_mae_dict = dict()
    output = list()
    for key in ev_forecasts_dict.keys():
        mae_sum = 0
        for i in [0, 1, 2]:
            mae_sum += ev_forecasts_dict[key][i][0]
        sum_mae_dict[key] = mae_sum
    for value in sorted(sum_mae_dict.values()):
        key = get_key(sum_mae_dict, value)
        output.append(key + " & " + str(ev_forecasts_dict[key][0][0]) +
                      " & " + str(ev_forecasts_dict[key][0][1]) +
                      " & " + str(ev_forecasts_dict[key][1][0]) +
                      " & " + str(ev_forecasts_dict[key][1][1]) +
                      " & " + str(ev_forecasts_dict[key][2][0]) +
                      " & " + str(ev_forecasts_dict[key][2][1]) +
                      "\\\ \hline")
    return output


def get_top_actual_forecasts(forecasts_dict):
    '''Sort forecasts according to 1)MAE, 2)MSE'''
    output = list()
    for value in sorted(forecasts_dict.values(), key=itemgetter(0, 1)):
        key = get_key(forecasts_dict, value)
        output.append(key + " & " + str(forecasts_dict[key][0]) +
                      " & " + str(forecasts_dict[key][1]) + "\\\ \hline")
    return output


#################################
# Extend PyBrain's NNregression class for different types of neural
# networks, not just SigmoidLayer FNNs
class NeuralNetRegression(NNregression):
    def setupNN(self, trainer=RPropMinusTrainer, hidden=None,
                hiddenType=SigmoidLayer, recurrentNN=False, bias=True,
                **trnargs):
        if hidden is not None:
            self.hidden = hidden
        logging.info("Constructing NN with following config:")
        NN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim,
                          recurrent=recurrentNN, hiddenclass=hiddenType)
        self.net = NN
        logging.info(str(NN) + "\n Hidden layer:" + str(hiddenType) +
                     "\n Hidden units:\n " + str(self.hidden))
        logging.info("Training NN with following special arguments:")
        logging.info(str(trnargs))
        self.Trainer = trainer(NN, dataset=self.DS, **trnargs)

    def getNeuralNet(self):
        return self.net


#########################################
# classes
#########################################

class Forecaster(object):
    '''Abstract class that provides a unified interface for all forecasters'''

    def __init__(self, training_ts):
        self.training_ts = training_ts

    def forecast(self, forecast_start_str, forecast_period_in_days):
        '''Child classes compute forecast and return as pandas Series object'''
        pass


##############################
# Datasets and related objects
##############################

# Error validation periods (start and end dates)
ev_periods = [
    ["2013-05-20", "2013-05-26"],
    ["2013-05-27", "2013-06-02"],
    ["2013-06-03", "2013-06-09"]]

ev_ds = list()
# get actual data of error validation datasets
for i in range(0, len(ev_periods)):
    ev_ds.append(get_sysprice_list(ev_periods[i][0], ev_periods[i][1],
                                   frequency='daily'))

# Actual data for final forecast
actual_data = get_sysprice_list("2013-06-10", "2013-06-16", frequency="daily")

# Training dataset for forecasters
ds = get_sysprice_list('2011-01-01 00:00:00', '2013-05-19 23:00:00',
                       frequency='daily')

forecast_dataset = ["2013-06-10", "2013-06-16"]
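# The resample calls above use the old pandas API (how='mean'); under a
# modern pandas the same daily-mean downsampling of a timestamp-indexed
# Series `ts` would read:
#
#   daily = ts.resample('D').mean()
#
# and the normalize()/denormalize round trip used for the neural-net data is
#
#   scaled, c = normalize(prices); original = [p * c for p in scaled]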
UTF-8
Python
false
false
2014
5257039974615
946f3358b6cbd2b5ad4fdbdd71023666c770c5f2
840ef5b2f8882e3c40a90731791a738afc369abb
/panel_a.py
939bbf36df2d892399da270613b016c0fa675f7b
[]
no_license
nukulele/dymaxion
https://github.com/nukulele/dymaxion
878db8f659130b6b94fd66fd4a62e08a621e5e61
8f879d85df5963652818fe7738d88987e589062f
refs/heads/master
2021-01-15T14:29:30.625998
2013-06-12T20:14:59
2013-06-12T20:14:59
40198514
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sympy as s

from util.pdf import get_letter_canvas, draw_hexes
from util.circle import three_point_circle
from util.polyhedron import Icosahedron
from util.mapping import sx, sy, face_mappings
from util.space import rot_x, rot_y, rot_z, vector_3d, zero_3d, to_rad
from util.space import two_point_line
from util.space import line_plane_intersect

_rt3 = s.sqrt(3).evalf()
_pi = s.pi.evalf()


def make_map(filename):

    def _sphere_to_cart(theta, phi):
        return vector_3d(
            s.sin(theta) * s.cos(phi),
            s.cos(theta),
            s.sin(theta) * s.sin(phi))

    def _draw_this_circle(c, Circle):
        pass

    def _draw_latitude_circle(c, latitude):
        pass

    def _draw_longitude_circle(c, longitude):
        pass

    # set up our triangle
    c = get_letter_canvas(filename)

    # hex grid
    c.setStrokeColor((0.75, 0.75, 1))
    # c.setLineWidth( 0.001 )
    # draw_hexes( c, 2 )
    c.setLineWidth(0.0005)
    draw_hexes(c, 10)

    c.setStrokeColor((0.75, 0.75, 0.75))

    d20 = Icosahedron(numeric=True)
    d20.rotate_vertices(rot_x(-s.atan2(s.GoldenRatio - 1, s.GoldenRatio).evalf()))
    d20.make_faces()
    mappings = face_mappings(d20)
    face_plane = d20.faces[0].plane
    rot_mat = mappings.face_dict['a'][0]

    for lat in range(0, 40, 5):
        # with a step of 5, lat % 5 is always 0, so the thicker width is
        # always chosen here
        if lat % 5:
            c.setLineWidth(0.0005)
        else:
            c.setLineWidth(0.001)
        lat_rad = to_rad(lat).evalf()
        try:
            point = line_plane_intersect(
                two_point_line(zero_3d, _sphere_to_cart(lat_rad, 0)),
                face_plane)
            mp1 = point.multiply(rot_mat)
            point = line_plane_intersect(
                two_point_line(zero_3d, _sphere_to_cart(lat_rad, _pi / 2)),
                face_plane)
            mp2 = point.multiply(rot_mat)
            point = line_plane_intersect(
                two_point_line(zero_3d, _sphere_to_cart(lat_rad, -_pi / 2)),
                face_plane)
            mp3 = point.multiply(rot_mat)
            the_circle = three_point_circle(mp1[0], mp1[1], mp2[0], mp2[1],
                                            mp3[0], mp3[1])
            c.circle(the_circle.x0, the_circle.y0, the_circle.radius,
                     fill=0, stroke=1)
        except:
            # latitude circle does not intersect this face; skip it
            pass

    for lon in range(0, 360, 5):
        if lon % 5:
            c.setLineWidth(0.0005)
        else:
            c.setLineWidth(0.001)
        try:
            point = line_plane_intersect(
                two_point_line(_sphere_to_cart(_pi / 4, to_rad(lon).evalf()),
                               zero_3d), face_plane)
            mp1 = point.multiply(rot_mat)
            end_pt = _pi / 9
            if (lon % 5) == 0:
                end_pt = _pi / 18
            if (lon % 10) == 0:
                end_pt = _pi / 36
            if (lon % 30) == 0:
                end_pt = 0
            point = line_plane_intersect(
                two_point_line(_sphere_to_cart(end_pt, to_rad(lon).evalf()),
                               zero_3d), face_plane)
            mp2 = point.multiply(rot_mat)
            c.line(mp1[0], mp1[1], mp2[0], mp2[1])
        except:
            # longitude line does not intersect this face; skip it
            pass

    c.showPage()
    c.save()


if __name__ == '__main__':
    make_map("panel_a.pdf")
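# three_point_circle() above is the classic circumcircle construction; a
# self-contained sketch of that computation (standard formula, not the
# project's util.circle implementation):
def circumcircle(x1, y1, x2, y2, x3, y3):
    d = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    # d == 0 means the three points are collinear: no finite circle exists.
    ux = ((x1**2 + y1**2) * (y2 - y3) + (x2**2 + y2**2) * (y3 - y1)
          + (x3**2 + y3**2) * (y1 - y2)) / d
    uy = ((x1**2 + y1**2) * (x3 - x2) + (x2**2 + y2**2) * (x1 - x3)
          + (x3**2 + y3**2) * (x2 - x1)) / d
    r = ((x1 - ux)**2 + (y1 - uy)**2) ** 0.5
    return ux, uy, r

# circumcircle(0, 0, 1, 0, 0, 1) -> (0.5, 0.5, 0.7071...)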
UTF-8
Python
false
false
2013
9603546881214
56b66446c3bb7d7e40101353ed4d658b9a935b25
7933c03e9445cf71d5d8c65b8033dc162c8eee12
/data/import-tiles.py
ab58561fa94e586444e6a5e42a5f3f22471a8409
[]
no_license
codeforamerica/ohhs_Falcon
https://github.com/codeforamerica/ohhs_Falcon
21c6ee694a26a82cc09a884b9df0a7d6a848a725
71616e6b6d95b70b9bcf9d3bc6bc0f98ec0be963
refs/heads/master
2021-01-13T07:29:00.647883
2013-06-23T17:25:40
2013-06-23T17:25:40
10748128
0
2
null
false
2014-08-27T07:01:24
2013-06-17T21:47:44
2014-01-06T13:39:06
2013-06-23T17:25:45
450
1
4
7
JavaScript
null
null
#!/usr/bin/env python
from itertools import product
from os import makedirs
from json import dump

from ModestMaps.Geo import Location
from ModestMaps.Core import Coordinate
from ModestMaps.OpenStreetMap import Provider

from lib import load_violations, load_inspections, load_buildings
from lib import match_inspection_violations, match_building_inspections

min_zoom = 14
max_zoom = 17

def starting_tiles(buildings):
    ''' Get tile coordinates at min_zoom for a list of buildings.
    '''
    minlat = min([b['latitude'] for b in buildings])
    minlon = min([b['longitude'] for b in buildings])
    maxlat = max([b['latitude'] for b in buildings])
    maxlon = max([b['longitude'] for b in buildings])

    osm = Provider()
    ul = osm.locationCoordinate(Location(maxlat, minlon)).zoomTo(min_zoom).container()
    lr = osm.locationCoordinate(Location(minlat, maxlon)).zoomTo(min_zoom).container()

    rows = range(int(ul.row), int(lr.row + 1))
    cols = range(int(ul.column), int(lr.column + 1))
    coords = [Coordinate(row, col, min_zoom) for (row, col) in product(rows, cols)]

    return coords

def search_tile(coord, buildings):
    ''' Search list of buildings for those within a tile coordinate.
    '''
    osm = Provider()
    sw = osm.coordinateLocation(coord.down())
    ne = osm.coordinateLocation(coord.right())

    found_buildings = [b for b in buildings
                       if sw.lon <= b['longitude'] < ne.lon
                       and sw.lat <= b['latitude'] < ne.lat]

    return found_buildings

if __name__ == '__main__':

    print 'Getting violations...'
    violations_url = 'http://s3.amazonaws.com/data.codeforamerica.org/OHHS/SF/1.2/Violations.csv'
    violations = load_violations(violations_url)

    print 'Getting inspections...'
    inspections_url = 'http://s3.amazonaws.com/data.codeforamerica.org/OHHS/SF/1.2/Inspections.csv'
    inspections = load_inspections(inspections_url)

    print 'Getting buildings...'
    buildings_url = 'http://s3.amazonaws.com/data.codeforamerica.org/OHHS/SF/1.2/Buildings.csv'
    buildings = load_buildings(buildings_url)

    print 'Matching inspection violations...'
    match_inspection_violations(violations, inspections)

    print 'Matching building inspections...'
    match_building_inspections(inspections, buildings)

    building_list = buildings.values()
    search_coords = [(coord, building_list) for coord in starting_tiles(building_list)]

    while search_coords:
        coord, building_list = search_coords.pop(0)
        found_buildings = search_tile(coord, building_list)

        print ('%(zoom)d/%(column)d/%(row)d' % coord.__dict__),
        print len(found_buildings), 'of', len(building_list)

        try:
            makedirs('tiles/%(zoom)d/%(column)d' % coord.__dict__)
        except OSError:
            pass  # directory probably exists

        with open('tiles/%(zoom)d/%(column)d/%(row)d.json' % coord.__dict__, 'w') as out:
            features = [dict(id=p['id'], type='Feature', properties=p,
                             geometry=dict(type='Point',
                                           coordinates=(p['longitude'], p['latitude'])))
                        for p in found_buildings]
            geojson = dict(type='FeatureCollection', features=features)
            dump(geojson, out, indent=2)

        if coord.zoom < max_zoom:
            search_coords.append((coord.zoomBy(1), found_buildings))
            search_coords.append((coord.zoomBy(1).down(), found_buildings))
            search_coords.append((coord.zoomBy(1).right(), found_buildings))
            search_coords.append((coord.zoomBy(1).right().down(), found_buildings))
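# The tile walk above relies on the standard "slippy map" scheme: at zoom z
# the world is a 2^z x 2^z grid, and zooming by one splits a tile into four
# children. A minimal lon/lat -> tile sketch (standard OSM formula, shown
# here independently of ModestMaps):
import math

def lonlat_to_tile(lon, lat, zoom):
    n = 2 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    lat_r = math.radians(lat)
    y = int((1.0 - math.log(math.tan(lat_r) + 1.0 / math.cos(lat_r)) / math.pi) / 2.0 * n)
    return x, y

# e.g. lonlat_to_tile(-122.4194, 37.7749, 14) -> (2620, 6332)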
UTF-8
Python
false
false
2013
15238543966840
62211ff78eb4b47196f363af158bd7e414cdfd99
4514825367c602b6b7c43267ad7820102b933100
/protloc.py
0e06bae47fe3c5f697ecbe175d2615f776f1c7b9
[]
no_license
janosbinder/redmamba
https://github.com/janosbinder/redmamba
fed97ac1f41f6a80978b6c17b5305e0b56616520
a9b55b1724dcc493e129cb712bac0ee4062a36d7
refs/heads/master
2021-01-22T01:55:00.108969
2012-03-14T09:46:13
2012-03-14T09:46:13
35410908
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pg
import math
import datetime

from html import *
import obo
import textmining
import mamba.setup
import mamba.task


class MyConfig(mamba.setup.Configuration):
    def __init__(self, ini_filename):
        mamba.setup.Configuration.__init__(self, ini_filename)
        self.doid = obo.Ontology("/home/purple1/dictionary/doid.obo")
        for id in self.doid.terms:
            term = self.doid.terms[id]
            term.wiki = None
            term.gocc = {}
            term.gomf = {}
            term.gobp = {}
            if term.definition == None:
                term.definition = ""
            term.wikitext = ""
        for line in open("/home/green1/frankild/ModernDiseaseDB/wikipedia/wikitext_doid_log.tsv"):
            id, sentence, name, rname, wiki_url = line[:-1].split("\t")
            if id in self.doid.terms:
                term = self.doid.terms[id]
                if wiki_url.startswith("http://en.wikipedia.org") and term.wiki == None:
                    term.wiki = wiki_url
        for line in open("/home/green1/frankild/ModernDiseaseDB/wikipedia/wikitext_doid_par.tsv"):
            serial, id, text = line[:-1].split("\t")
            if id in self.doid.terms:
                self.doid.terms[id].wikitext = text
        for line in open("/home/green1/frankild/ModernDiseaseDB/GO/gocc_pairs.tsv"):
            type1, doid, type2, go, score = line[:-1].split("\t")
            score = float(score)
            if score > 2.4 and doid in self.doid.terms:
                term = self.doid.terms[doid]
                term.gocc[go] = score
        for id in self.doid.terms:
            term = self.doid.terms[id]
            term.gocc = sorted(map(lambda a: (a[1], a[0]), term.gocc.items()), reverse=True)
        for line in open("/home/green1/frankild/ModernDiseaseDB/GO/gomf_pairs.tsv"):
            type1, doid, type2, go, score = line[:-1].split("\t")
            score = float(score)
            if score > 2.4 and doid in self.doid.terms:
                term = self.doid.terms[doid]
                term.gomf[go] = score
        for line in open("/home/green1/frankild/ModernDiseaseDB/GO/gobp_pairs.tsv"):
            type1, doid, type2, go, score = line[:-1].split("\t")
            score = float(score)
            if score > 2.4 and doid in self.doid.terms:
                term = self.doid.terms[doid]
                term.gobp[go] = score


class TalkDB:
    @staticmethod
    def get_best_name(typex, idx, dictionary):
        bestname = idx
        names = dictionary.query("SELECT name FROM preferred WHERE type=%i AND id='%s';" % (typex, idx)).getresult()
        if len(names):
            bestname = names[0][0]
        return bestname

    @staticmethod
    def get_stars(score):
        stars = int(min(5, float(score)))
        return '<span class="stars">%s</span>' % "".join(["&#9733;"] * stars + ["&#9734;"] * (5 - stars))


class HTMLPageCollide(XPage):
    def __init__(self):
        XPage.__init__(self, "<table><tr><td>Compendium of Literature Listed</td></tr><tr><td>Disease Gene Associations (Collide&#0153;)</td></tr></table>")
        XP(self.frame.sidebar, datetime.datetime.now().strftime('%a-%d-%b-%Y %H:%M:%S %Z'))
        tbl = XTable(self.frame.sidebar, {"width": "100%"})
        tbl.addrow("Disease", "8,553")
        tbl.addrow("Proteins", "2,714")


class HTMLPageBrowseDisease(HTMLPageCollide):
    def __init__(self, rest):
        HTMLPageCollide.__init__(self)
        if "doid" not in rest:
            id = "DOID:4"
        else:
            id = rest["doid"].encode("utf8")
        term = mamba.setup.config().doid.terms[id]
        title = '<a href="/Browse" style="link:inherit; text-decoration:none; visited:inherit;">Browse the disease ontology</a>'
        group = XGroup(self.frame.content, title)
        section = XSection(group.body, '%s &nbsp &nbsp; (%s)' % (term.name.capitalize(), term.id))
        p1 = XP(section.body)
        if len(term.definition):
            XH4(p1, "Definition (DOID)")
            XFree(p1, term.definition)
        if len(term.wikitext):
            XH4(p1, "Description (Wikipedia)")
            XFree(p1, term.wikitext)
        if term.wiki:
            XFree(p1, '<br></br><a href="%s" target="wikipedia_tab">Wikipedia</a>' % term.wiki)
        p2 = XP(section.body)
        XH4(p2, "Synonyms")
        ul = XTag(p2, "ul")
        for synonym in term.synonyms:
            XFree(XTag(ul, "li"), synonym.capitalize())
        if len(term.parents):
            p3 = XP(section.body)
            XH4(p3, "Derives from")
            ul = XTag(p3, "ul")
            for parent in term.parents:
                XFree(ul, '<li><a href="/Browse?doid=%s">%s</a></li>' % (parent.id, parent.name.capitalize()))
        if len(term.children):
            p5 = XP(section.body)
            XH4(p5, "Relates to")
            ul = XTag(p5, "ul")
            for child in term.children:
                XFree(ul, '<li><a href="/Browse?doid=%s">%s</a></li>' % (child.id, child.name.capitalize()))
        conn_text = pg.connect(host='localhost', user='ljj', dbname='textmining')
        conn_dict = pg.connect(host='localhost', user='ljj', dbname='dictionary')
        XH4(section.body, "Literature")
        textmining.DocumentsHTML(section.body, conn_text, -26, term.id)
        XHr(section.body)
        p6 = XP(section.body)
        XH4(p6, "Sub-cellular localization")
        tbl = XDataTable(p6)
        tbl["width"] = "100%"
        tbl.addhead("#", "Term", "GO", "Z-score", "Evidence")
        i = 1
        for score, go in term.gocc:
            if i > 10:
                break
            best = TalkDB.get_best_name(-22, go, conn_dict).capitalize()
            golink = '<a href="http://www.ebi.ac.uk/QuickGO/GTerm?id=%s" target="gene_ontology_tab">%s</a>' % (go, go)
            zscore = "%.2f" % score
            stars = TalkDB.get_stars(score)
            row = tbl.addrow(i, "<strong>%s</strong>" % best, golink, zscore, stars)
            i += 1
        p7 = XP(section.body)
        XH4(p7, "Genes")
        textmining.PairsHTML(p7, -26, term.id, 9606, conn_text, conn_dict)
        XH4(p7, "Drugs and Compounds")
        tbl = XDataTable(p7)
        textmining.PairsHTML(p7, -26, term.id, -1, conn_text, conn_dict)


class HTMLPageSearch(HTMLPageCollide):
    def __init__(self, rest):
        HTMLPageCollide.__init__(self)
        if "query" in rest:
            self.head.title = "Search result"
            dictionary = pg.connect(host='localhost', user='ljj', dbname='dictionary')
            search = pg.escape_string(rest["query"])
            names = dictionary.query("SELECT type, id, name FROM preferred WHERE name ILIKE '%s%%' AND type<>-11;" % search).getresult()
            if len(names):
                XH1(self.frame.content, "Result for '%s'" % (rest["query"]))
                table = XTable(self.frame.content)
                for type, id, name in names:
                    table.addrow(type, id, name)
            else:
                XH1(self.frame.content, "Nothing found for '%s' (%s)" % (rest["query"], rest["filter"]))
        else:
            self.head.title = "Search diseases and genes"
            form = XTag(self.frame.content, "form")
            form["action"] = "Search"
            form["method"] = "post"
            center = XTag(form, "center")
            p1 = XP(center)
            XH3(p1, "Search for diseases, genes and identifiers")
            XTag(p1, "input", {"type": "text", "name": "query", "size": "100%", "value": "ABCA17P"})
            submit = XTag(XP(p1), "input", {"type": "submit", "value": "submit"})


class HTMLPageProtein(HTMLPageCollide):
    def __init__(self, type, id):
        HTMLPageCollide.__init__(self)
        self.head.title = "Protein %s" % id
        box = XBox(self.frame.content)
        conn_text = pg.connect(host="localhost", user="ljj", dbname="textmining")
        # The original passed `group2.body` here, but no `group2` exists in
        # this class; render into the box created above instead.
        textmining.DocumentsHTML(box, conn_text, 9606, "ENSP00000332369")


class HTMLPageDiseaseInfo(HTMLPageCollide):
    def __init__(self, disease):
        HTMLPageCollide.__init__(self)
        self.head.title = "Diseases"
        group1 = XGroup(self.frame.content, "Diseases")
        XSection(group1.body, "Disease gene associations",
                 '<img src="figure2.png" width="250px"></img>Using an advanced text-mining pipeline against the full body of indexed medical literature and an ontology-derived, ontology-self-curated dictionary consisting of proteins, diseases, chemicals etc. we have created the world\'s first resource linking genes to diseases on a scale never seen before.')
        XSection(group1.body, "Alzheimer's disease",
                 "A dementia that results in progressive memory loss, impaired thinking, disorientation, and changes in personality and mood starting in late middle age and leads in advanced cases to a profound decline in cognitive and physical functioning and is marked histologically by the degeneration of brain neurons especially in the cerebral cortex and by the presence of neurofibrillary tangles and plaques containing beta-amyloid.")
        group2 = XGroup(self.frame.content, "Text-mining")
        conn_text = pg.connect(host="localhost", user="ljj", dbname="textmining")
        textmining.DocumentsHTML(group2.body, conn_text, 9606, "ENSP00000332369")


class HTMLTestPage(HTMLPageCollide):
    def __init__(self):
        HTMLPageCollide.__init__(self)
        XFree(self.frame.content, open("test.html").read())


# ==============================================================================

class Search(mamba.task.Request):
    def main(self):
        rest = mamba.task.RestDecoder(self)
        page = HTMLPageSearch(rest)
        reply = mamba.http.HTMLResponse(self, page.tohtml())
        reply.send()


class Protein(mamba.task.Request):
    def main(self):
        rest = mamba.task.RestDecoder(self)
        page = HTMLPageProtein(rest["type"], rest["identifier"])
        reply = mamba.http.HTMLResponse(self, page.tohtml())
        reply.send()


class Disease(mamba.task.Request):
    def main(self):
        rest = mamba.task.RestDecoder(self)
        page = HTMLPageDiseaseInfo(rest["disease"])
        reply = mamba.http.HTMLResponse(self, page.tohtml())
        reply.send()


class Browse(mamba.task.Request):
    def main(self):
        rest = mamba.task.RestDecoder(self)
        page = HTMLPageBrowseDisease(rest)
        reply = mamba.http.HTMLResponse(self, page.tohtml())
        reply.send()


class Test(mamba.task.Request):
    def main(self):
        page = HTMLTestPage()
        reply = mamba.http.HTMLResponse(self, page.tohtml())
        reply.send()
UTF-8
Python
false
false
2012
7490422988143
f983ff071b020829d6da608a8ed5c8361d57261b
e5027276367a5c051160221cf5a23c3d812fe83c
/src/freeseer/plugins/videoinput/desktop/desktop.py
065f6a5b57b6b2dea408560d2eea3645e28f425c
[ "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later" ]
non_permissive
tobeportable/freeseer
https://github.com/tobeportable/freeseer
195a5eda8380337281da1c97978df773ab88a4eb
4535eb70085f7d00b8891d06b4b2c2fd508e856e
refs/heads/master
2021-01-15T16:01:10.107606
2013-03-03T15:36:44
2013-03-03T15:36:44
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
freeseer - vga/presentation capture software

Copyright (C) 2011-2013  Free and Open Source Software Learning Centre
http://fosslc.org

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.

For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer

@author: Thanh Ha
'''

import ConfigParser
import sys

import pygst
pygst.require("0.10")
import gst

if sys.platform.startswith("linux"):
    import Xlib.display

from PyQt4 import QtGui, QtCore

from freeseer.framework.plugin import IVideoInput

class DesktopLinuxSrc(IVideoInput):
    name = "Desktop Source"
    os = ["linux", "linux2", "win32", "cygwin"]

    # ximagesrc
    screen = 0

    def get_videoinput_bin(self):
        """
        Return the video input object in gstreamer bin format.
        """
        # Do not pass a name so that we can load this input more than once.
        bin = gst.Bin()

        videosrc = None
        if sys.platform.startswith("linux"):
            videosrc = gst.element_factory_make("ximagesrc", "videosrc")
        elif sys.platform in ["win32", "cygwin"]:
            videosrc = gst.element_factory_make("dx9screencapsrc", "videosrc")
        bin.add(videosrc)

        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        bin.add(colorspace)
        videosrc.link(colorspace)

        # Setup ghost pad
        pad = colorspace.get_pad("src")
        ghostpad = gst.GhostPad("videosrc", pad)
        bin.add_pad(ghostpad)

        return bin

    def load_config(self, plugman):
        self.plugman = plugman
        try:
            self.screen = self.plugman.get_plugin_option(self.CATEGORY, self.get_config_name(), "Screen")
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            self.plugman.set_plugin_option(self.CATEGORY, self.get_config_name(), "Screen", self.screen)
        except TypeError:
            # Temp fix for issue where reading audio_quality the 2nd time causes TypeError.
            pass

    def get_widget(self):
        if self.widget is None:
            self.widget = QtGui.QWidget()
            layout = QtGui.QFormLayout()
            self.widget.setLayout(layout)

            self.screenLabel = QtGui.QLabel("Screen")
            self.screenSpinBox = QtGui.QSpinBox()
            layout.addRow(self.screenLabel, self.screenSpinBox)

            # Connections
            self.widget.connect(self.screenSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.set_screen)

        return self.widget

    def widget_load_config(self, plugman):
        self.load_config(plugman)

        # Xlib is only available on linux
        if sys.platform.startswith("linux"):
            display = Xlib.display.Display()
            # minus 1 since we like to start count at 0
            self.screenSpinBox.setMaximum(display.screen_count() - 1)

    def set_screen(self, screen):
        self.plugman.set_plugin_option(self.CATEGORY, self.get_config_name(), "Screen", screen)
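# Ghost pads (used in get_videoinput_bin above) are how a gst.Bin exposes an
# internal element's pad as its own. A minimal gst-0.10 sketch of the same
# pattern, with a test source standing in for the screen-capture element:
#
#   bin = gst.Bin()
#   src = gst.element_factory_make("videotestsrc", "src")
#   bin.add(src)
#   bin.add_pad(gst.GhostPad("src", src.get_pad("src")))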
UTF-8
Python
false
false
2013
13185549639860
8743240470646abcd7a5fedf7e36897432ffc916
e298cba5cdf30abaf0ba7323af19bd7e414215a7
/src/ui/windows/MainWindow.py
764b9ed812cc4a500516cb6845f64a775fd859d5
[]
no_license
BloodyD/DocAnalyzer
https://github.com/BloodyD/DocAnalyzer
abf150d1180aa52f2179fa1a8c76d27f34dc020c
726553b98e36f2c93f8efd1eedde2d73d0f328d1
refs/heads/master
2016-09-06T13:07:45.485208
2014-03-30T11:56:30
2014-03-30T11:56:30
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# coding: utf-8
from PyQt4.QtGui import QPushButton, QLineEdit, QFileDialog
from PyQt4.QtGui import QTabWidget, QListWidget, QMessageBox, QProgressBar
from PyQt4.QtCore import QString, QDir
import os

from fileReader import Reader
from ui.windows import myDialog, DBSelectorWindow
from ui.windows.ViewWindow import ViewWindow
from ui.windows.helper import messageBox
from ui.widgets import myWidget
from PyQt4.Qt import pyqtSignal


class MainWindow(myDialog):
    _dbViewWin = None

    def __init__(self, parent=None, *args, **kwargs):
        super(MainWindow, self).__init__(parent, *args, **kwargs)
        self.setWindowTitle("DocAnalyzer")
        self.myLayout.setContentsMargins(0, 0, 0, 0)
        self._tabWidget = QTabWidget(self)
        self._analyzerTab = AnalyzerTab(self._tabWidget)
        self._viewTab = ViewTab(self._tabWidget)
        self._viewTab.clicked.connect(self.openDBView)
        self._tabWidget.addTab(self._viewTab, "View")
        self._tabWidget.addTab(self._analyzerTab, "Analyze")
        self.myLayout.addRow(self._tabWidget)
        # self.setFixedSize(400, 300)

    def openDBView(self, dbName):
        if not dbName:
            # "Error: the name must not be empty!"
            messageBox(QMessageBox.Critical, "Fehler", "Der Name darf nicht leer sein!", self, QMessageBox.Ok)
            return None
        # if self._dbViewWin == None:
        self._dbViewWin = ViewWindow(self, "%s.sqlite" % str(dbName))
        self._dbViewWin.show()


class ViewTab(myWidget):
    clicked = pyqtSignal(str)

    def __init__(self, parent=None, *args):
        super(ViewTab, self).__init__(parent, *args)
        self._lineEdit = QLineEdit(self)
        self._acceptBtn = QPushButton(QString.fromUtf8("Öffnen"), self)  # "Open"
        self._acceptBtn.clicked.connect(self.click)
        self.myLayout.addRow("Name der Datenbank:", self._lineEdit)  # "Database name:"
        self.myLayout.addRow(self._acceptBtn)

    def click(self):
        self.clicked.emit(str(self._lineEdit.text()))


class AnalyzerTab(myWidget):
    def __init__(self, parent=None, *args):
        super(AnalyzerTab, self).__init__(parent, *args)
        self.currentFolder = QDir.currentPath()
        self.addFileRow()
        self.addListWidget()
        self.addReadRow()
        self.addProgressBars()

    def addFileRow(self):
        self._openBtn = QPushButton(QString.fromUtf8("Ordner auswählen"), self)  # "Choose folder"
        self._openTextField = QLineEdit(self)
        self._openBtn.clicked.connect(self.openFolder)
        self.myLayout.addRow(self._openTextField, self._openBtn)

    def addListWidget(self):
        self._listWidget = QListWidget(self)
        self.myLayout.addRow(self._listWidget)

    def addReadRow(self):
        self._readBtn = QPushButton(QString.fromUtf8("Inhalt speichern"), self)  # "Save contents"
        self.myLayout.addRow(self._readBtn)
        self._readBtn.clicked.connect(self.saveFilesToDB)

    def addProgressBars(self):
        self._progresses = myWidget(self)
        self._progressFiles = QProgressBar(self._progresses)
        self._progressParagraphs = QProgressBar(self._progresses)
        self._progresses.myLayout.addRow("Dateien", self._progressFiles)  # "Files"
        self._progresses.myLayout.addRow("Paragraphen", self._progressParagraphs)  # "Paragraphs"
        self._progresses.setEnabled(False)
        self.myLayout.addRow(self._progresses)

    def openFolder(self):
        newFolder = str(QFileDialog.getExistingDirectory(
            parent=self,
            caption=QString.fromUtf8("Ordner auswählen"),
            directory=self.currentFolder))
        if not newFolder:
            print "HIER: ", newFolder
            return
        self._openTextField.setText(newFolder)
        self.listFolderFiles(newFolder)
        self.currentFolder = newFolder

    def listFolderFiles(self, folderPath):
        if not os.path.isdir(folderPath):
            print "%s is not a folder!" % folderPath
            return
        self._listWidget.clear()
        for fName in os.listdir(folderPath):
            if fName.endswith(".doc") and not fName.startswith("~"):
                self._listWidget.addItem(fName)

    def saveFilesToDB(self):
        selWindow = DBSelectorWindow(self)
        if not selWindow.exec_():
            return
        dbName = selWindow.getDBName()
        if not dbName:
            messageBox(QMessageBox.Critical, "Fehler", "Der Name darf nicht leer sein!", self, QMessageBox.Ok)
            return
        self.currentReader = Reader()
        self._progresses.setEnabled(True)
        self.currentReader.ready.connect(self.readerReady)
        self.currentReader.folderStatusUpdated.connect(self.updateFolderStatus)
        self.currentReader.fileStatusUpdated.connect(self.updateFileStatus)
        self.currentReader.save_folder(str(self.currentFolder), "%s.sqlite" % str(dbName))

    def readerReady(self):
        self._progresses.setEnabled(False)

    def updateFolderStatus(self, ready, fileCount):
        self._progressFiles.setMaximum(100)
        self._progressFiles.setValue(self._calc_status(ready, fileCount))

    def updateFileStatus(self, ready, paragraphCount):
        self._progressParagraphs.setMaximum(100)
        self._progressParagraphs.setValue(self._calc_status(ready, paragraphCount))

    def _calc_status(self, done, toDo):
        r = done * 100 / toDo
        if r == 99:
            return 100
        else:
            return r
UTF-8
Python
false
false
2014
6863357765697
ebf3ce470f16056bf37b13d785e55b69090f4ff9
f68ea6cd9aca40936eab8795cc17ce1d1b9043b7
/requirements.py
589576056a86ff43ed2fdcf3c24ed62a275f4478
[]
no_license
elbertludica/ptlakumas
https://github.com/elbertludica/ptlakumas
020ee87138fc1b567aa1af9d4a2ce25150256671
4c6a14dd0fa10a1709efb4d0b82df7edbad0cb11
refs/heads/master
2021-04-09T17:24:35.327221
2013-12-07T03:50:51
2013-12-07T03:50:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
-curses==2.2
Django==1.5.5
Fabric==1.8.0
MySQL-python==1.2.4
Pygments==1.6
South==0.8.2
bpython==0.12
distribute==0.7.3
django-request-provider==1.0.2
django-widget-tweaks==1.3
ecdsa==0.10
lpthw.web==1.1
nose==1.3.0
paramiko==1.12.0
pycrypto==2.6
virtualenv==1.10.1
# === /cuckoo/processing/analysis.py | AlbertoF/cuckoo | Python, UTF-8, no_license | pushed 2011-11-28 | https://github.com/AlbertoF/cuckoo ===
#!/usr/bin/python
# Cuckoo Sandbox - Automated Malware Analysis
# Copyright (C) 2010-2011 Claudio "nex" Guarnieri ([email protected])
# http://www.cuckoobox.org
#
# This file is part of Cuckoo.
#
# Cuckoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cuckoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.

import os
import sys

from cuckoo.processing.analysislog import AnalysisLog

class Analysis:
    def __init__(self, logs_path):
        self._logs_path = logs_path

    def process(self):
        results = []

        # Check if the specified directory exists.
        if not os.path.exists(self._logs_path):
            return False

        # Check if the specified directory contains any file.
        if len(os.listdir(self._logs_path)) == 0:
            return False

        # Walk through all the files.
        for file_name in os.listdir(self._logs_path):
            file_path = os.path.join(self._logs_path, file_name)

            # Skip if it is a directory.
            if os.path.isdir(file_path):
                continue

            # Invoke parsing of current log file.
            current_log = AnalysisLog(file_path)
            current_log.extract()

            # If the current log actually contains any data, add its data to
            # the global results list.
            if len(current_log.calls) > 0:
                process = {}
                process["process_id"] = current_log.process_id
                process["process_name"] = current_log.process_name
                process["first_seen"] = current_log.process_first_seen
                process["calls"] = current_log.calls

                results.append(process)

        # Sort the items in the results list chronologically. In this way we
        # can have a sequential order of spawned processes.
        results.sort(key=lambda process: process["first_seen"])

        return results
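# A minimal driver sketch (an assumption, not part of Cuckoo): point the
# processor at a directory of analysis logs; "/tmp/cuckoo-logs" is a
# hypothetical path.
if __name__ == "__main__":
    analysis = Analysis("/tmp/cuckoo-logs")
    processes = analysis.process()
    if processes is False:
        print "no logs found"
    else:
        for proc in processes:
            print proc["process_id"], proc["process_name"], len(proc["calls"])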
# === /test.environment.rsync/test.py | science-automation/scicloud-test | Python, UTF-8, no_license | pushed 2014-01-16 | https://github.com/science-automation/scicloud-test ===
#
# Test cases for Science VM scicloud API
#
# Copyright 2014 Science Automation
#
import cloud
from cloud import CloudException, CloudTimeoutError
from nose import with_setup
from nose.tools import *
import random

def setup_function():
    pass

def teardown_function():
    pass

@with_setup(setup_function, teardown_function)
def test_environment_rsync():
    '''Create environment with random name and then call rsync'''
    name = "testenv" + str(random.randint(1, 1000000))
    clonename = "testenv" + str(random.randint(1, 1000000))
    cloud.environment.create(name, 'precise')
    sync_path = name + ":/tmp"
    result = cloud.environment.rsync('sync', sync_path)
    cloud.environment.save_shutdown(name)
    assert result is None

@raises(TypeError)
def test_exception1():
    '''Raise TypeError since cloud.environment.rsync called with 0 arguments'''
    cloud.environment.rsync()

@raises(TypeError)
def test_exception2():
    '''Raise TypeError since cloud.environment.rsync called with one argument'''
    cloud.environment.rsync('asdf')

@raises(TypeError)
def test_exception3():
    '''Raise TypeError since cloud.environment.clone called with env that does not exist'''
    cloud.environment.save_shutdown('asdfd', 'asdg')
# === /obslight/ObsLightGui/Wizard/ConfigWizard.py | ronan22/obs-light | Python, UTF-8, no_license | pushed 2013-07-25 | https://github.com/ronan22/obs-light ===
# -*- coding: utf8 -*-
#
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 19 déc. 2011

@author: Florent Vennetier
'''

import os

from PySide.QtGui import QPlainTextEdit, QWizard

from ObsLightGui.ObsLightGuiObject import ObsLightGuiObject

from ChooseLocalGbsOrOBSProjectPage import ChooseLocalGbsOrOBSProjectPage
from ChooseProjectTemplatePage import ChooseProjectTemplatePage
from ChooseProjectConfPage import ChooseProjectConfPage
from ChooseRepositoryTreePage import ChooseRepositoryTreePage
from ChooseRepositoryPage import ChooseRepositoryPage
from ChooseGbsArchPage import ChooseGbsArchPage
from ChooseManifestPage import ChooseManifestPage
from ConfigureGitPackagePage import ConfigureGitPackagePage
from ConfigProjectGitAliasPage import ConfigProjectGitAliasPage
from ChooseServerPage import ChooseServerPage
from ConfigureServerUrlPage import ConfigureServerUrlPage
from ConfigureServerAliasPage import ConfigureServerAliasPage
from ChooseProjectPage import ChooseProjectPage
from ChooseLocalProjectPage import ChooseLocalProjectPage
from ChooseProjectTargetPage import ChooseProjectTargetPage
from ChooseProjectArchPage import ChooseProjectArchPage
from ConfigureProjectAliasPage import ConfigureProjectAliasPage
from ChooseNewOrExistingPackagePage import ChooseNewOrExistingPackagePage
from ConfigureNewPackagePage import ConfigureNewPackagePage
from ChoosePackagePage import ChoosePackagePage
from ChooseLocalPackagePage import ChooseLocalPackagePage

class ConfigWizard(QWizard, ObsLightGuiObject):

    Pages = {}

    def __init__(self, gui, parent=None):
        ObsLightGuiObject.__init__(self, gui)
        if parent is None:
            QWizard.__init__(self, self.mainWindow)
        else:
            QWizard.__init__(self, parent)
        self.setButtonText(QWizard.CommitButton, u"Validate >")
        # QPlainTextEdit is not a known field type so we have to register it
        self.setDefaultProperty(QPlainTextEdit.__name__, "plainText", "textChanged")
        self.loadPages()
        self.isModifyingServer = False
        self.__pageCounter = 0

    def pageIndex(self, pageName):
        return self.Pages[pageName].index

    def loadPages(self):
        self.__pageCounter = 0

        def addPage(name, aClass):
            self.Pages[name] = aClass(self.gui, self.__pageCounter)
            self.__pageCounter += 1

        addPage(u'ChooseLocalGbsOrOBSProject', ChooseLocalGbsOrOBSProjectPage)
        addPage(u'ChooseProjectTemplate', ChooseProjectTemplatePage)
        addPage(u'ChooseProjectConf', ChooseProjectConfPage)
        addPage(u'ChooseRepositoryTree', ChooseRepositoryTreePage)
        addPage(u'ChooseRepository', ChooseRepositoryPage)
        addPage(u'ChooseGbsArch', ChooseGbsArchPage)
        addPage(u'ChooseManifestPage', ChooseManifestPage)
        addPage(u'ConfigProjectGitAliasPage', ConfigProjectGitAliasPage)
        addPage(u'ChooseServer', ChooseServerPage)
        addPage(u'ConfigureServerUrl', ConfigureServerUrlPage)
        addPage(u'ConfigureServerAlias', ConfigureServerAliasPage)
        addPage(u'ChooseProject', ChooseProjectPage)
        addPage(u'ChooseProjectTarget', ChooseProjectTargetPage)
        addPage(u'ChooseProjectArch', ChooseProjectArchPage)
        addPage(u'ConfigureProjectAlias', ConfigureProjectAliasPage)
        addPage(u'ChooseNewOrExistingPackage', ChooseNewOrExistingPackagePage)
        addPage(u'ConfigureNewPackage', ConfigureNewPackagePage)
        addPage(u'ChoosePackage', ChoosePackagePage)
        addPage(u'ChooseLocalProject', ChooseLocalProjectPage)
        addPage(u'ChooseLocalPackagePage', ChooseLocalPackagePage)
        addPage(u'ConfigureGitPackagePage', ConfigureGitPackagePage)

        for page in self.Pages.values():
            self.setPage(page.index, page)

    def isLocalProject(self):
        return self.Pages[u'ChooseLocalGbsOrOBSProject'].isLocalProject()

    def getSelectedProject(self):
        return self.Pages[u'ChooseProject'].getSelectedProject()

    def getSelectedLocalProject(self):
        return self.Pages[u'ChooseLocalProject'].getSelectedLocalProject()

    def getSelectedProjectAlias(self):
        return self.Pages[u'ConfigureProjectAlias'].getSelectedProjectAlias()

    def getSelectedServerAlias(self):
        return self.Pages[u'ConfigureServerAlias'].getSelectedServerAlias()

    def getSelectedTarget(self):
        return self.Pages[u'ChooseProjectTarget'].getSelectedTarget()

    def getSelectedArch(self):
        return self.Pages[u'ChooseProjectArch'].getSelectedArch()

    def getProjectTemplatePath(self, fullPath=True):
        res = self.Pages[u'ChooseProjectTemplate'].getSelectedProjectConf()
        if res is not None and fullPath:
            return res
        else:
            # NOTE: when res is None this still calls os.path.basename(None),
            # which raises a TypeError.
            return os.path.basename(res)

#    def getCreateChrootOption(self):
#        return self.field(u'CreateChroot')

    def skipToPackageSelection(self, projectAlias):
        self.setField(u"projectAlias", projectAlias)
        self.setStartId(self.Pages[u'ChooseNewOrExistingPackage'].index)

    def skipToPackageCreation(self, projectAlias):
        self.setField(u"projectAlias", projectAlias)
        self.setStartId(self.Pages[u'ConfigureNewPackage'].index)

    def skipToServerCreation(self, **prefilledValues):
        """
        Skip to server creation page.
        `prefilledValues` allow to specify already known server configuration
        values. Possible keys for `prefilledValues`:
          "webUrl", "apiUrl", "repoUrl", "username", "password", "serverAlias".
        """
        self.setStartId(self.Pages[u'ConfigureServerUrl'].index)
        for key, value in prefilledValues.iteritems():
            self.setField(key, value)
        self.isModifyingServer = prefilledValues.has_key('serverAlias')

    def getProjectConfPath(self):
        return self.Pages[u'ChooseProjectConf'].getSelectedProjectConf()

    def setSelectedBuildConf(self, selectedBuildConf):
        return self.Pages[u'ChooseProjectConf'].setSelectedBuildConf(selectedBuildConf)

    def getGbsAddedRepo(self):
        return self.Pages[u'ChooseRepository'].getAddedRepo()

    def getSelectedGbsArch(self):
        return self.Pages[u'ChooseGbsArch'].getArch()

    def getSelectedGbsProject(self):
        return self.getProjectTemplatePath(False)

    def getInitProjectRepo(self):
        return self.Pages[u'ChooseRepositoryTree'].getInitProjectRepo()

    def autoAddProjectRepo(self):
        return self.Pages[u'ChooseRepository'].autoAddProjectRepo()

    def getManifestFilePath(self):
        return self.Pages[u'ChooseManifestPage'].getManifestFilePath()
# === /etc/ienv.py | sean-dougherty/modalkb | Python, UTF-8, no_license | pushed 2014-12-19 | https://github.com/sean-dougherty/modalkb ===
from math import *

def log2(x):
    return log(x) / log(2)
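# Quick sanity check for the helper above (a sketch, not part of the original
# file): log2 of a power of two should come out integral.
assert abs(log2(8) - 3.0) < 1e-12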
# === /IFCPythonSDK/ifclib/ifcconstraintclassificationrelationship.py | quntian/BimCenter | Python, UTF-8, no_license | pushed 2014-05-22 | https://github.com/quntian/BimCenter ===
#!/usr/bin/python
#coding=utf-8
#Filename:IfcConstraintClassificationRelationship.py
import log
import common
from baseobject import BaseObject
from utils import *

class IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP(BaseObject):
    """"""
    def __init__(self, id, arg):
        super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).__init__(id, arg)
        self.type = 'IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP'
        self.inverse = {}
        self.ClassifiedConstraint = None  # IfcConstraint
        self.RelatedClassifications = None  # SET

    def load(self):
        """register inverses"""
        if not super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).load():
            return False
        idx = super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).getAttrCount()
        if self.args.argc() <= idx + 0:
            log.error("Inverse links : Error during reading parameter 0 of IfcConstraintClassificationRelationship, line %d" % common.counter)
            return False
        currentRefList = getIdParam(self.args.argv[idx + 0])
        if currentRefList:
            if currentRefList[0] == ID_UNDEF:
                log.error("Inverse links : Error during reading parameter 0 of IfcConstraintClassificationRelationship, line %d" % common.counter)
                return False
            if currentRefList[0] != ID_UNSET:
                for i in currentRefList:
                    self.expDataSet.getArgs(i).addInverse('IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP', 'ClassifiedConstraint', self.lid)
        return True

    def init(self):
        """get every argument's value and parse inverses"""
        if not super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).init():
            return False
        arg = self.args.getNext()
        if not isUnset(arg):
            self.ClassifiedConstraint = spfToId(arg)
        arg = self.args.getNext()
        if not isUnset(arg):
            self.RelatedClassifications = getIdListParam(arg, spfToTypeRef)
        return True

    def getAttrCount(self):
        """get the index of the last attribute"""
        return super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).getAttrCount() + 2

    def toString(self):
        """"""
        line = super(IFCCONSTRAINTCLASSIFICATIONRELATIONSHIP, self).toString()
        line += idToSPF(self.ClassifiedConstraint) + ','
        line += listParamToSPF(self.RelatedClassifications, typerefToSPF) + ','
        return line
# === /lib/tmdb/setup.py | cytec/pyEncoder | Python, UTF-8, no_license | pushed 2012-09-20 | https://github.com/cytec/pyEncoder ===
import distutils.core

distutils.core.setup(name="tmdb", packages=["."])
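# This minimal distutils script would typically be driven from the package
# directory, e.g. `python setup.py install` or `python setup.py sdist`; where
# it sits relative to the tmdb sources is an assumption about the repository
# layout.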
# === /qiime/make_otu_heatmap_html.py | rob-knight/qiime | Python, UTF-8, no_license | pushed 2013-12-12 | https://github.com/rob-knight/qiime ===
#!/usr/bin/env python

from __future__ import division

__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jesse Stombaugh", "Jose Carlos Clemente Litran",
               "Jai Ram Rideout"]  # remember to add yourself
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"

from numpy import (array, concatenate, asarray, transpose, log, invert,
                   float32, float64, minimum, inf)
from cogent.parse.table import SeparatorFormatParser
from optparse import OptionParser
from qiime.util import MissingFileError
import os
from qiime.filter import filter_otus_from_otu_table
from biom.parse import parse_biom_table


def make_html_doc(js_filename):
    """Create the basic framework for the OTU table heatmap"""
    html_script = r'''
<html>
<head>
    <script type="text/javascript" src="js/overlib.js"></script>
    <script type="text/javascript" src="%s"></script>
    <script type="text/javascript" src="js/otu_count_display.js"></script>
    <script type="text/javascript" src="./js/jquery.js"></script>
    <script type="text/javascript" src="./js/jquery.tablednd_0_5.js"></script>
    <script type="text/javascript">
    $(document).ready(function() {
        $('#otu_table_body').tableDnD({
            onDragStart: function(table, new_row) {
                if (row==new_row.parentNode.rowIndex && is_selected==1){
                    change_sel_row=1;
                }else{
                    old_row=new_row.parentNode.rowIndex;
                    change_sel_row=0;
                }
            },
            onDrop: function(table, new_row) {
                if (change_sel_row==1){
                    row=new_row.rowIndex;
                }else if(old_row<row && new_row.rowIndex>row){
                    row=row-1;
                }else if(old_row>row && new_row.rowIndex<row){
                    row=row+1;
                }
            },
            dragHandle: "dragHandle"
        });
        var otu_cutoff=document.getElementById("otu_count_cutoff");
        otu_cutoff.value=otu_num_cutoff;
    });
    </script>
    <style type="text/css">
    th.rotate{ white-space : nowrap; -webkit-transform: rotate(-90deg) translate(20px, 0px); -moz-transform: rotate(-90deg) translate(20px, 0px); font-family:arial; font-size:9px; }
    th.lineage{ white-space : nowrap; text-align:left; font-family:arial; font-size:10px; font-weight: bolder; }
    td.dragHandle{ white-space : nowrap; text-align:left; font-family:arial; font-size:10px; font-weight: bolder; }
    td{ white-space : nowrap; font-family:arial; font-size:10px; text-align:center; font-weight: bolder; }
    table{ border-spacing: 0; text-align:center; }
    p{ text-align:left; font-weight: normal; }
    </style>
</head>
<body>
    <p>
    Filter by Counts per OTU: <input type="text" id="otu_count_cutoff" value="">
    <input type="button" onclick="javascript:create_OTU_intervals();" value="Sample ID">
    <input type="button" onclick="javascript:write_taxon_heatmap();" value="Taxonomy">
    </p>
    <br><br><br><br><br><br>
    <table id='otu_table_html'>
        <thead id='otu_table_head'>
        </thead>
        <tbody id='otu_table_body'>
            <tr><td class="dragHandle"></td></tr>
            <tr><td class="dragHandle"></td></tr>
        </tbody>
    </table>
</body>
</html>''' % (js_filename)
    return html_script


def create_javascript_array(otu_table, use_floats=False):
    """Convert the OTU table counts into a javascript array"""
    # Build up list of strings and concatenate at end, as this string can be
    # very large and have many concatenations.
    # NOTE: the "i==0" in the generated JS for-initializer is a no-op; the
    # preceding "var i=0" already initializes the counter.
    js_array = ['var OTU_table=new Array();\n'
                'var i=0;\n'
                'for (i==0;i<%i;i++) {\n'
                'OTU_table[i]=new Array();}\n' % (len(otu_table.SampleIds) + 2)]

    # 0 ['#OTU ID', 'OTU2', 'OTU3']
    # 1 ['Sample1', 1, 2]
    # 2 ['Sample2', 5, 4]
    # 3 ['Consensus Lineage', 'Archaea', 'Bacteria']

    # OTU ids first
    js_array.append("OTU_table[0][0]='#OTU ID';\n")
    for (idx, otu_id) in enumerate(otu_table.ObservationIds):
        js_array.append("OTU_table[0][%i]='%s';\n" % (idx + 1, otu_id))

    # Sample ids and values in the table
    i = 1
    for (sam_val, sam_id, meta) in otu_table.iterSamples():
        js_array.append("OTU_table[%i][0]='%s';\n" % (i, sam_id))
        for (idx, v) in enumerate(sam_val):
            if use_floats:
                js_array.append("OTU_table[%i][%i]=%.4f;\n"
                                % (i, idx + 1, float(v)))
            else:
                # don't quite understand why int(float(v)), rather than int(v)
                js_array.append("OTU_table[%i][%i]=%d;\n"
                                % (i, idx + 1, int(float(v))))
        i += 1

    # Consensus lineages for each OTU
    last_idx = len(otu_table.SampleIds) + 1
    js_array.append("OTU_table[%i][0]='Consensus Lineage';\n" % last_idx)
    i = 1
    for (otu_val, otu_id, meta) in otu_table.iterObservations():
        js_array.append("OTU_table[%i][%i]='%s';\n"
                        % (last_idx, i, ";".join(meta['taxonomy']).strip('"')))
        i += 1

    return ''.join(js_array)


def filter_by_otu_hits(num_otu_hits, otu_table):
    """Filter the OTU table by the number of otus per sample"""
    # Filter out rows with sum > num_otu_hits
    new_otu_table = filter_otus_from_otu_table(otu_table,
                                               otu_table.ObservationIds,
                                               num_otu_hits, inf, 0, inf)
    return new_otu_table


def get_log_transform(otu_table, eps=None):
    """
    This function and the one in make_otu_heatmap.py are essentially the same
    except the non-negative transform at the end of this function. Dan Knights
    suggests this might be due to this script not being able to handle
    negative values, hence the transform.
    """
    # explicit conversion to float: transform
    def f(s_v, s_id, s_md):
        return float64(s_v)

    float_otu_table = otu_table.transformSamples(f)

    if eps is None:
        # get the minimum among nonzero entries and divide by two
        eps = inf
        for (obs, sam) in float_otu_table.nonzero():
            eps = minimum(eps, float_otu_table.getValueByIds(obs, sam))
        if eps == inf:
            raise ValueError('All values in the OTU table are zero!')

    # set zero entries to eps/2 using a transform
    def g2(x):
        return [i if i != 0 else eps / 2 for i in x]

    # do we have map in OTU object?
    g = lambda x: x if (x != 0) else eps / 2

    def g_m(s_v, s_id, s_md):
        return asarray(map(g, s_v))

    eps_otu_table = float_otu_table.transformSamples(g_m)

    # take log of all values with transform
    def h(s_v, s_id, s_md):
        return log(s_v)

    log_otu_table = eps_otu_table.transformSamples(h)

    # one more transform
    min_val = inf
    for val in log_otu_table.iterSampleData():
        min_val = minimum(min_val, val.min())

    def i(s_v, s_id, s_md):
        return s_v - min_val

    res_otu_table = log_otu_table.transformSamples(i)

    return res_otu_table


def get_otu_counts(fpath):
    """Reads the OTU table file into memory"""
    try:
        otu_table = parse_biom_table(open(fpath, 'U'))
    except (TypeError, IOError):
        raise MissingFileError, 'OTU table file required for this analysis'

    if (otu_table.ObservationMetadata is None or
            otu_table.ObservationMetadata[0]['taxonomy'] is None):
        raise ValueError, ('\n\nThe lineages are missing from the OTU table. '
                           'Make sure you included the lineages for the OTUs '
                           'in your OTU table. \n')

    return otu_table


def generate_heatmap_plots(num_otu_hits, otu_table, otu_sort, sample_sort,
                           dir_path, js_dir_path, filename,
                           fractional_values=False):
    """Generate HTML heatmap and javascript array for OTU counts"""

    # Filter by number of OTU hits
    # rows come transposed in the original code
    filtered_otu_table = filter_by_otu_hits(num_otu_hits, otu_table)

    if otu_sort:
        # Since the BIOM object comes back with fewer Observation_ids, we
        # need to remove those from the original sort_order
        actual_observations = filtered_otu_table.ObservationIds
        new_otu_sort_order = []
        for i in otu_sort:
            if i in actual_observations:
                new_otu_sort_order.append(i)
        filtered_otu_table = filtered_otu_table.sortObservationOrder(new_otu_sort_order)

    # This sorts the samples by the order supplied
    if sample_sort:
        # Since the BIOM object may come back with fewer Sampleids, we need
        # to remove those from the original sample_sort
        actual_samples = filtered_otu_table.SampleIds
        new_sample_sort_order = []
        for i in sample_sort:
            if i in actual_samples:
                new_sample_sort_order.append(i)
        filtered_otu_table = filtered_otu_table.sortSampleOrder(new_sample_sort_order)

    # Convert OTU counts into a javascript array
    js_array = create_javascript_array(filtered_otu_table, fractional_values)

    # Write otu filter number
    js_otu_cutoff = 'var otu_num_cutoff=%d;\n' % num_otu_hits

    # Write js array to file
    js_filename = os.path.join(js_dir_path, filename) + '.js'
    jsfile = open(js_filename, 'w')
    jsfile.write(js_otu_cutoff)
    jsfile.write(js_array)
    jsfile.close()

    # Write html file
    html_filename = os.path.join(dir_path, filename) + '.html'
    js_file_location = 'js/' + filename + '.js'
    table_html = make_html_doc(js_file_location)
    ofile = open(html_filename, 'w')
    ofile.write(table_html)
    ofile.close()
# === /file_sharing_server.py | caovanmanhqx2/centralised-monitoring-on-a-network | Python, UTF-8, no_license | pushed 2013-10-10 | https://github.com/caovanmanhqx2/centralised-monitoring-on-a-network ===
#!/usr/bin/env python

from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
import socket
import pickle

afile = []
bfile = []
c = len(bfile)

for subdir, dirs, files in os.walk('c:/xampp/htdocs/file_transfer/'):
    for file in files:
        if (len(afile) < 25):
            file_size = os.path.getsize('c:/xampp/htdocs/file_transfer/' + file)
            afile.append(str(file))
            bfile.append(str(file_size))

#s=socket.socket()          # Create a socket object
#host=socket.gethostname()  # Get local machine name
#port=12345                 # Reserve a port for your service.
#s.bind((host, port))
#s.listen(5)
#while True:
#    c, addr = s.accept()
#    print "\n".join(afile)
#    c.send(pickle.dumps(afile))
#    c.close()

#Create custom HTTPRequestHandler class
class KodeFunHTTPRequestHandler(BaseHTTPRequestHandler):

    #handle GET command
    def do_GET(self):
        rootdir = 'c:/xampp/htdocs/file_transfer/' #file location
        try:
            print self.path
            if self.path.endswith('.html') or self.path.endswith('.txt'):
                f = open(rootdir + self.path) #open requested file
                #file_size=os.path.getsize('rootdir + self.path')
                #print file_size
                self.send_response(200)
                # 'text/html' (the original had 'text-html', an invalid MIME type)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            elif self.path.endswith('.jpg') or self.path.endswith('.jpeg') or self.path.endswith('.jpe'):
                f = open(rootdir + self.path, 'rb') #open requested file
                self.send_response(200)
                self.send_header('Content-type', 'image/jpeg')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            elif self.path.endswith('.avi'):
                f = open(rootdir + self.path, 'rb') #open requested file
                self.send_response(200)
                self.send_header('Content-type', 'video/x-msvideo')
                self.end_headers()
                self.wfile.write(f.read())
                f.close()
                return
            # NOTE: HTTP request paths arrive with a leading slash, so the
            # original comparison (self.path == "list") could never match a
            # real request; accept both spellings here.
            elif self.path in ("list", "/list"):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write("\n".join(afile))
                #self.wfile.write("\n".join(bfile))
                return
            elif self.path in ("update", "/update"):
                uafile = ""
                for subdir, dirs, files in os.walk('c:/xampp/htdocs/file_transfer/'):
                    for file in files:
                        file_size = os.path.getsize('c:/xampp/htdocs/file_transfer/' + file)
                        uafile = uafile + str(file) + ";" + str(file_size) + "\n"
                self.send_response(200)
                uafile = uafile.rstrip("\n")
                #print uafile
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(uafile)
                return
        except IOError:
            self.send_error(404, 'file not found')

def run():
    print('http server is starting...')
    server_address = ('0.0.0.0', 82)
    httpd = HTTPServer(server_address, KodeFunHTTPRequestHandler)
    print('http server is running...')
    httpd.serve_forever()

if __name__ == '__main__':
    run()
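# A minimal client-side sketch for the listing endpoint (an assumption, not
# part of the repository); host and port mirror server_address in run().
#
# import urllib2
# print urllib2.urlopen('http://localhost:82/list').read()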
# === /plotting/multilineplot.py | MrOwen/CS106 | Python, UTF-8, no_license | pushed 2013-02-18 | https://github.com/MrOwen/CS106 ===
# multilineplot.py
import numpy as np
import pylab as pl

# Make an array of x values
x = [1, 2, 3, 4, 5]
# Define another set of x values
x2 = [1, 2, 4, 6, 8]
# Make an array of y values
y = [1, 4, 9, 16, 25]
# Define another set of y values
y2 = [2, 4, 8, 12, 16]

# Use pylab to plot x and y points
p1 = pl.plot(x, y, 'ro')
# Secondary plot
p2 = pl.plot(x2, y2, '*')

# Add a plot legend
plots = (p1, p2)
# Labels for legend
labels = ('Results of y=x^2', 'Results of y=2x')
pl.legend(plots, labels, 'best', numpoints=1)

# Set the labels and title for the graph
pl.xlabel("X-axis")
pl.ylabel("Y-axis")
pl.title("An interesting graph")

# Set axis limits for the graph
pl.xlim(0, 9)
pl.ylim(0, 30)

# Show the plot on the screen
pl.show()
# === /tests/api/__init__.py | shanzi/code-vilya | Python, UTF-8, BSD-3-Clause (permissive) | pushed 2014-06-28 | https://github.com/shanzi/code-vilya ===
from vilya.api import create_app

from tests import VilyaAppTestCase

class VilyaApiTestCase(VilyaAppTestCase):

    def _create_app(self, settings):
        return create_app(settings)
# === /example_paramiko_user.py | grhawk/MyQueue | Python, UTF-8, no_license | pushed 2014-10-29 | https://github.com/grhawk/MyQueue ===
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 15 17:03:54 2014

@author: petragli
"""
""" analisys:ignore """

# adapted from http://stackoverflow.com/questions/3485428/creating-multiple-ssh-connections-at-a-time-using-paramiko

import signal, sys, threading
import paramiko

CMD = 'uname -n'

def signal_cleanup(_signum, _frame):
    print '\nCLEANUP\n'
    sys.exit(0)

def workon(host):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host[0], username=host[1], password=host[2])
    _stdin, stdout, _stderr = ssh.exec_command(CMD)
#    for line in stdout:
#        print threading.current_thread().name, line,
    return stdout.readline()

def main():
    hosts = [['localhost', 'petragli', '06111983'],
             ['128.178.134.191', 'riccardo', '06111983']]

    # exit after a few seconds (see WARNINGs)
    signal.signal(signal.SIGALRM, signal_cleanup)
    signal.alarm(10)

    threads = [
        threading.Thread(
            target=workon, args=(host,),
            name='host #{}'.format(num + 1))
        for num, host in enumerate(hosts)
    ]

    print 'starting'
    for t in threads:
        # WARNING: daemon=True allows program to exit when main proc
        # does; otherwise we'll wait until all threads complete.
        t.daemon = True
        t.start()

    print 'joining'
    for t in threads:
        # WARNING: t.join() is uninterruptible; this while loop allows
        # signals
        # see: http://snakesthatbite.blogspot.com/2010/09/cpython-threading-interrupting.html
        while t.is_alive():
            t.join(timeout=0.1)

    print 'done!'

if __name__ == '__main__':
    main()
# === /app/py/blbr/wsgis.py | omo/blbr | Python, UTF-8, no_license | pushed 2011-11-27 | https://github.com/omo/blbr ===
import functools

import webapp2

from google.appengine.api import users

def require_login(**kwargs):
    if users.get_current_user():
        return None
    redirect = kwargs.get('redirect')
    if redirect:
        return webapp2.redirect(users.create_login_url(redirect))
    resp = webapp2.Response()
    resp.status = '400 Bad Request'
    return resp

def login_required(func, **deco_kwargs):
    @functools.wraps(func)
    def decorated_view(*args, **kwargs):
        return require_login(**deco_kwargs) or func(*args, **kwargs)
    return decorated_view

def to_application(handler_classes):
    return webapp2.WSGIApplication(
        [webapp2.Route(p.url, handler=p) for p in handler_classes])
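# A minimal usage sketch (an assumption, not from the repository): each
# handler class carries the `url` attribute that to_application() reads, and
# login_required wraps a handler method so unauthenticated requests get the
# require_login() response instead of the view body. The handler name below
# is illustrative.
class ProfileHandler(webapp2.RequestHandler):
    url = '/profile'

    @login_required
    def get(self):
        self.response.write('hello, logged-in user')

app = to_application([ProfileHandler])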
# === /lib/flows/general/collectors_test.py | defaultnamehere/grr | Python, UTF-8, Apache-2.0 (permissive) | pushed 2014-12-07 | https://github.com/defaultnamehere/grr ===
#!/usr/bin/env python
"""Test the collector flows."""

import os

from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import artifact_test
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.general import collectors
from grr.lib.flows.general import transfer
from grr.test_data import client_fixture

# pylint: mode=test


class CollectorTest(artifact_test.ArtifactTest):
  pass


class TestArtifactCollectors(CollectorTest):
  """Test the artifact collection mechanism with fake artifacts."""

  def setUp(self):
    """Make sure things are initialized."""
    super(TestArtifactCollectors, self).setUp()
    self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    artifact_lib.ArtifactRegistry.ClearRegistry()
    self.LoadTestArtifacts()
    artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    self.fakeartifact = artifact_reg["FakeArtifact"]
    self.fakeartifact2 = artifact_reg["FakeArtifact2"]
    self.output_count = 0
    with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
      fd.Set(fd.Schema.SYSTEM("Linux"))
      kb = fd.Schema.KNOWLEDGE_BASE()
      artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
      fd.Set(kb)

  def tearDown(self):
    super(TestArtifactCollectors, self).tearDown()
    artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg
    self.fakeartifact.collectors = []  # Reset any Collectors
    self.fakeartifact.conditions = []  # Reset any Conditions
    self.fakeartifact2.collectors = []  # Reset any Collectors
    self.fakeartifact2.conditions = []  # Reset any Conditions

  def testInterpolateArgs(self):
    collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
    collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
    collect_flow.current_artifact_name = "blah"
    collect_flow.state.knowledge_base.MergeOrAddUser(
        rdfvalue.KnowledgeBaseUser(username="test1"))
    collect_flow.state.knowledge_base.MergeOrAddUser(
        rdfvalue.KnowledgeBaseUser(username="test2"))
    test_rdf = rdfvalue.KnowledgeBase()
    action_args = {"usernames": ["%%users.username%%", "%%users.username%%"],
                   "nointerp": "asdfsdf", "notastring": test_rdf}
    kwargs = collect_flow.InterpolateDict(action_args)
    self.assertItemsEqual(kwargs["usernames"],
                          ["test1", "test2", "test1", "test2"])
    self.assertEqual(kwargs["nointerp"], "asdfsdf")
    self.assertEqual(kwargs["notastring"], test_rdf)

    # We should be using an array since users.username will expand to
    # multiple values.
    self.assertRaises(ValueError, collect_flow.InterpolateDict,
                      {"bad": "%%users.username%%"})

    list_args = collect_flow.InterpolateList(["%%users.username%%",
                                              "%%users.username%%aa"])
    self.assertItemsEqual(list_args, ["test1", "test2", "test1aa", "test2aa"])

    list_args = collect_flow.InterpolateList(["one"])
    self.assertEqual(list_args, ["one"])

  def testGrepRegexCombination(self):
    collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
    self.assertEqual(collect_flow._CombineRegex([r"simple"]), "simple")
    self.assertEqual(collect_flow._CombineRegex(["a", "b"]), "(a)|(b)")
    self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]),
                     "(a)|(b)|(c)")
    self.assertEqual(collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
                     "(a|b)|([^_]b)|(c|d)")

  def testGrep(self):
    class MockCallFlow(object):
      def CallFlow(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    mock_call_flow = MockCallFlow()
    with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
                       mock_call_flow.CallFlow):
      collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
      collect_flow.state.Register("knowledge_base", rdfvalue.KnowledgeBase())
      collect_flow.current_artifact_name = "blah"
      collect_flow.state.knowledge_base.MergeOrAddUser(
          rdfvalue.KnowledgeBaseUser(username="test1"))
      collect_flow.state.knowledge_base.MergeOrAddUser(
          rdfvalue.KnowledgeBaseUser(username="test2"))

      collector = rdfvalue.Collector(
          collector_type=rdfvalue.Collector.CollectorType.GREP,
          args={"path_list": ["/etc/passwd"],
                "content_regex_list": [r"^a%%users.username%%b$"]})
      collect_flow.Grep(collector, rdfvalue.PathSpec.PathType.TSK)

    conditions = mock_call_flow.kwargs["conditions"]
    self.assertEqual(len(conditions), 1)
    regexes = conditions[0].contents_regex_match.regex.SerializeToString()
    self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
    self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])

  def testGetArtifact1(self):
    """Test we can get a basic artifact."""
    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
                                          "Find", "FingerprintFile",
                                          "HashBuffer")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    # Dynamically add a Collector specifying the base path.
    file_path = os.path.join(self.base_path, "test_img.dd")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.FILE,
        args={"path_list": [file_path]})
    self.fakeartifact.collectors.append(coll1)

    artifact_list = ["FakeArtifact"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     use_tsk=False, token=self.token,
                                     client_id=self.client_id):
      pass

    # Test the AFF4 file that was created.
    fd1 = aff4.FACTORY.Open("%s/fs/os/%s" % (self.client_id, file_path),
                            token=self.token)
    fd2 = open(file_path)
    fd2.seek(0, 2)
    self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))

  def testRunGrrClientActionArtifact(self):
    """Test we can get a GRR client artifact."""
    client_mock = action_mocks.ActionMock("ListProcesses")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": r"ListProcesses"})
    self.fakeartifact.collectors.append(coll1)
    artifact_list = ["FakeArtifact"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token,
                                     client_id=self.client_id,
                                     output="test_artifact"):
      pass

    # Test the AFF4 file that was created.
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add("test_artifact"),
                           token=self.token)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
    self.assertTrue(len(fd) > 5)

  def testRunGrrClientActionArtifactSplit(self):
    """Test that artifacts get split into separate collections."""
    client_mock = action_mocks.ActionMock("ListProcesses", "StatFile")
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()

    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": r"ListProcesses"})
    self.fakeartifact.collectors.append(coll1)
    self.fakeartifact2.collectors.append(coll1)
    artifact_list = ["FakeArtifact", "FakeArtifact2"]
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token,
                                     client_id=self.client_id,
                                     output="test_artifact",
                                     split_output_by_artifact=True):
      pass

    # Check that we got two separate collections based on artifact name
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("test_artifact_FakeArtifact"), token=self.token)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))
    self.assertTrue(len(fd) > 5)

    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("test_artifact_FakeArtifact2"), token=self.token)
    self.assertTrue(len(fd) > 5)
    self.assertTrue(isinstance(list(fd)[0], rdfvalue.Process))

  def testConditions(self):
    """Test we can get a GRR client artifact with conditions."""
    # Run with false condition.
    client_mock = action_mocks.ActionMock("ListProcesses")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": "ListProcesses"},
        conditions=["os == 'Windows'"])
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

    # Now run with matching or condition.
    coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "RDFValueCollection")

    # Now run with impossible or condition.
    coll1.conditions.append("os == 'NotTrue'")
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

  def testSupportedOS(self):
    """Test supported_os inside the collector object."""
    # Run with false condition.
    client_mock = action_mocks.ActionMock("ListProcesses")
    coll1 = rdfvalue.Collector(
        collector_type=rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION,
        args={"client_action": "ListProcesses"}, supported_os=["Windows"])
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

    # Now run with matching or condition.
    coll1.conditions = []
    coll1.supported_os = ["Linux", "Windows"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "RDFValueCollection")

    # Now run with impossible or condition.
    coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
    coll1.supported_os = ["NotTrue"]
    self.fakeartifact.collectors = []
    self.fakeartifact.collectors.append(coll1)
    fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
    self.assertEqual(fd.__class__.__name__, "AFF4Volume")

  def _RunClientActionArtifact(self, client_mock, artifact_list):
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Linux"))
    client.Flush()
    self.output_count += 1
    output = "test_artifact_%d" % self.output_count
    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=artifact_list,
                                     token=self.token,
                                     client_id=self.client_id,
                                     output=output):
      pass

    # Test the AFF4 file was not created, as flow should not have run due to
    # conditions.
    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id).Add(output),
                           token=self.token)
    return fd


class TestArtifactCollectorsInteractions(CollectorTest):
  """Test the collection of artifacts.

  This class loads both real and test artifacts to test the interaction of
  badly defined artifacts with real artifacts.
  """

  def setUp(self):
    """Add test artifacts to existing registry."""
    super(TestArtifactCollectorsInteractions, self).setUp()
    self.original_artifact_reg = artifact_lib.ArtifactRegistry.artifacts
    self.LoadTestArtifacts()

  def tearDown(self):
    super(TestArtifactCollectorsInteractions, self).tearDown()
    artifact_lib.ArtifactRegistry.artifacts = self.original_artifact_reg

  def testProcessCollectedArtifacts(self):
    """Test downloading files from artifacts."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler

    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
                                          "Find", "HashBuffer",
                                          "FingerprintFile", "ListDirectory")

    # Get KB initialized
    for _ in test_lib.TestFlowHelper(
        "KnowledgeBaseInitializationFlow", client_mock,
        client_id=self.client_id, token=self.token):
      pass

    artifact_list = ["WindowsPersistenceMechanismFiles"]
    with test_lib.Instrument(
        transfer.MultiGetFile, "Start") as getfile_instrument:
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                       artifact_list=artifact_list,
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="analysis/{p}/{u}-{t}",
                                       split_output_by_artifact=True):
        pass

      # Check MultiGetFile got called for our runkey files
      # TODO(user): RunKeys for S-1-5-20 are not found because users.sid only
      # expands to users with profiles.
      pathspecs = getfile_instrument.args[0][0].args.pathspecs
      self.assertItemsEqual([x.path for x in pathspecs],
                            [u"C:\\Windows\\TEMP\\A.exe"])

    artifact_list = ["BadPathspecArtifact"]
    with test_lib.Instrument(
        transfer.MultiGetFile, "Start") as getfile_instrument:
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                       artifact_list=artifact_list,
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="analysis/{p}/{u}-{t}",
                                       split_output_by_artifact=True):
        pass

      self.assertFalse(getfile_instrument.args)


class TestArtifactCollectorsRealArtifacts(CollectorTest):
  """Test the collection of real artifacts."""

  def _CheckDriveAndRoot(self):
    client_mock = action_mocks.ActionMock("StatFile", "ListDirectory")

    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=[
                                         "SystemDriveEnvironmentVariable"],
                                     token=self.token,
                                     client_id=self.client_id,
                                     output="testsystemdrive"):
      pass

    fd = aff4.FACTORY.Open(rdfvalue.RDFURN(
        self.client_id).Add("testsystemdrive"), token=self.token)
    self.assertEqual(len(fd), 1)
    self.assertEqual(str(fd[0]), "C:")

    for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow", client_mock,
                                     artifact_list=["SystemRoot"],
                                     token=self.token,
                                     client_id=self.client_id,
                                     output="testsystemroot"):
      pass

    fd = aff4.FACTORY.Open(
        rdfvalue.RDFURN(self.client_id).Add("testsystemroot"),
        token=self.token)
    self.assertEqual(len(fd), 1)
    # Filesystem gives WINDOWS, registry gives Windows
    self.assertTrue(str(fd[0]) in [r"C:\Windows", r"C:\WINDOWS"])

  def testSystemDriveArtifact(self):
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    class BrokenClientMock(action_mocks.ActionMock):

      def StatFile(self, _):
        raise IOError

      def ListDirectory(self, _):
        raise IOError

    # No registry, broken filesystem, this should just raise.
    with self.assertRaises(RuntimeError):
      for _ in test_lib.TestFlowHelper("ArtifactCollectorFlow",
                                       BrokenClientMock(),
                                       artifact_list=[
                                           "SystemDriveEnvironmentVariable"],
                                       token=self.token,
                                       client_id=self.client_id,
                                       output="testsystemdrive"):
        pass

    # No registry, so this should use the fallback flow
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
    self._CheckDriveAndRoot()

    # Registry is present, so this should use the regular artifact collection
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    self._CheckDriveAndRoot()

  def testRunWMIComputerSystemProductArtifact(self):

    class WMIActionMock(action_mocks.ActionMock):

      def WmiQuery(self, _):
        return client_fixture.WMI_CMP_SYS_PRD

    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    client_mock = WMIActionMock()
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock,
        artifact_list=["WMIComputerSystemProduct"], token=self.token,
        client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
        store_results_in_aff4=True):
      pass

    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    hardware = client.Get(client.Schema.HARDWARE_INFO)
    self.assertTrue(isinstance(hardware, rdfvalue.HardwareInfo))
    self.assertEqual(str(hardware.serial_number), "2RXYYZ1")

  def testRunWMIArtifact(self):

    class WMIActionMock(action_mocks.ActionMock):

      def WmiQuery(self, _):
        return client_fixture.WMI_SAMPLE

    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    client_mock = WMIActionMock()
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock,
        artifact_list=["WMILogicalDisks"], token=self.token,
        client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.IGNORE_DEPS,
        store_results_in_aff4=True):
      pass

    # Test that we set the client VOLUMES attribute
    client = aff4.FACTORY.Open(self.client_id, token=self.token)
    volumes = client.Get(client.Schema.VOLUMES)
    self.assertEqual(len(volumes), 2)
    for result in volumes:
      self.assertTrue(isinstance(result, rdfvalue.Volume))
      self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
      if result.windows.drive_letter == "C:":
        self.assertAlmostEqual(result.FreeSpacePercent(), 76.142, delta=0.001)
        self.assertEqual(result.Name(), "C:")
      elif result.windows.drive_letter == "Z:":
        self.assertEqual(result.Name(), "homefileshare$")
        self.assertAlmostEqual(result.FreeSpacePercent(), 58.823, delta=0.001)

  def testRetrieveDependencies(self):
    """Test getting an artifact without a KB using retrieve_depdendencies."""
    client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
    client.Set(client.Schema.SYSTEM("Windows"))
    client.Set(client.Schema.OS_VERSION("6.2"))
    client.Flush()

    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
    vfs.VFS_HANDLERS[
        rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler

    client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
                                          "Find", "HashBuffer",
                                          "FingerprintFile", "ListDirectory")

    artifact_list = ["WinDirEnvironmentVariable"]
    for _ in test_lib.TestFlowHelper(
        "ArtifactCollectorFlow", client_mock, artifact_list=artifact_list,
        token=self.token, client_id=self.client_id,
        dependencies=rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW,
        output="testRetrieveDependencies"):
      pass

    output = aff4.FACTORY.Open(self.client_id.Add("testRetrieveDependencies"),
                               token=self.token)
    self.assertEqual(len(output), 1)
    self.assertEqual(output[0], r"C:\Windows")


def main(argv):
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)

if __name__ == "__main__":
  flags.StartMain(main)
# === /pybloomfire/bloomfire.py | Sendside/pybloomfire | Python, UTF-8, no_license | pushed 2013-01-22 | https://github.com/Sendside/pybloomfire ===
import json
from urllib import urlencode
import urllib2

class API(object):
    """
    API object that provides a Python binding to the Bloomfire REST API:
    http://bloomfire.com
    """

    def __init__(self, subdomain, api_key, auth_email, auth_password):
        self._api_key = api_key
        self._endpoint_prefix = 'https://%s.bloomfire.com/api/' % subdomain
        self._authenticate(auth_email, auth_password)

    def _authenticate(self, auth_email, auth_password):
        passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, self._endpoint_prefix,
                             auth_email, auth_password)
        proxyhandler = urllib2.ProxyHandler()
        authhandler = urllib2.HTTPBasicAuthHandler(passman)
        opener = urllib2.build_opener(authhandler, proxyhandler)
        urllib2.install_opener(opener)

    def get(self, api_name, kwargs=None):
        if kwargs:
            endpoint_query = '?%s' % urlencode(kwargs)
        else:
            endpoint_query = ''
        return self._call('%s%s.json%s' % (self._endpoint_prefix, api_name,
                                           endpoint_query))

    def post(self, api_name, kwargs=None):
        req = urllib2.Request(url='%s%s.json' % (self._endpoint_prefix,
                                                 api_name),
                              data=urlencode(kwargs))
        return self._call(req)

    def _call(self, request):
        try:
            result = urllib2.urlopen(request)
            response_dict = json.loads(result.read())
        except urllib2.HTTPError as http_error:
            response_dict = json.loads(http_error.read())
        return response_dict
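# A minimal usage sketch (an assumption, not from the repository): the
# subdomain, key, credentials, and the 'posts' endpoint name are all
# placeholders, not confirmed Bloomfire values.
if __name__ == '__main__':
    api = API('example', 'not-a-real-key', 'user@example.com', 'secret')
    print api.get('posts', {'page': 1})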
# === /catch_web.py | qq40660/weixinGetPostDemo | Python, UTF-8, no_license | pushed 2013-03-10 | https://github.com/qq40660/weixinGetPostDemo ===
import urllib2

from bs4 import BeautifulSoup

def catch_url(url):
    content = urllib2.urlopen(url)
    soup = BeautifulSoup(content)
    return soup
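# Example call (a sketch, not part of the original file; the URL is a
# placeholder):
if __name__ == '__main__':
    soup = catch_url('http://example.com')
    print soup.title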
# === /nxtdemo.py | pepijndevos/nxtwrite | Python, UTF-8, no_license | pushed 2011-12-12 | https://github.com/pepijndevos/nxtwrite ===
from nxt import locator, motor

import nxtprinter

b = locator.find_one_brick()
p = nxtprinter.NxtPrinter(b, motor.PORT_B, motor.PORT_A, motor.PORT_C)
p.write(raw_input("print: "), 15)
# === /python/testdata/launchpad/lib/lp/services/librarianserver/tests/test_db_outage.py | abramhindle/UnnaturalCodeFork | Python, UTF-8, AGPL-3.0 (non_permissive) | pushed 2014-03-13 | https://github.com/abramhindle/UnnaturalCodeFork ===
# Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Test behavior of the Librarian during a database outage.

Database outages happen by accident and during fastdowntime deployments.
"""

__metaclass__ = type

from cStringIO import StringIO
import urllib2

from fixtures import Fixture

from lp.services.librarian.client import LibrarianClient
from lp.services.librarianserver.testing.server import LibrarianServerFixture
from lp.testing import TestCase
from lp.testing.fixture import PGBouncerFixture
from lp.testing.layers import (
    BaseLayer,
    DatabaseFunctionalLayer,
    )


class PGBouncerLibrarianLayer(DatabaseFunctionalLayer):
    """Custom layer for TestLibrarianDBOutage.

    We are using a custom layer instead of standard setUp/tearDown to
    avoid the lengthy Librarian startup time, and to cope with undoing
    changes made to BaseLayer.config_fixture to allow access to the
    Librarian we just started up.
    """
    pgbouncer_fixture = None
    librarian_fixture = None

    @classmethod
    def setUp(cls):
        # Fixture to hold other fixtures.
        cls._fixture = Fixture()
        cls._fixture.setUp()

        cls.pgbouncer_fixture = PGBouncerFixture()
        # Install the PGBouncer fixture so we shut it down to
        # create database outages.
        cls._fixture.useFixture(cls.pgbouncer_fixture)

        # Bring up the Librarian, which will be connecting via
        # pgbouncer.
        cls.librarian_fixture = LibrarianServerFixture(
            BaseLayer.config_fixture)
        cls._fixture.useFixture(cls.librarian_fixture)

    @classmethod
    def tearDown(cls):
        cls.pgbouncer_fixture = None
        cls.librarian_fixture = None
        cls._fixture.cleanUp()

    @classmethod
    def testSetUp(cls):
        cls.pgbouncer_fixture.start()


class TestLibrarianDBOutage(TestCase):
    layer = PGBouncerLibrarianLayer

    def setUp(self):
        super(TestLibrarianDBOutage, self).setUp()
        self.pgbouncer = PGBouncerLibrarianLayer.pgbouncer_fixture
        self.client = LibrarianClient()

        # Add a file to the Librarian so we can download it.
        self.url = self._makeLibraryFileUrl()

    def _makeLibraryFileUrl(self):
        data = 'whatever'
        return self.client.remoteAddFile(
            'foo.txt', len(data), StringIO(data), 'text/plain')

    def getErrorCode(self):
        # We need to talk to every Librarian thread to ensure all the
        # Librarian database connections are in a known state.
        # XXX StuartBishop 2011-09-01 bug=840046: 20 might be overkill
        # for the test run, but we have no real way of knowing how many
        # connections are in use.
        num_librarian_threads = 20
        codes = set()
        for count in range(num_librarian_threads):
            try:
                urllib2.urlopen(self.url).read()
                codes.add(200)
            except urllib2.HTTPError as error:
                codes.add(error.code)
        self.assertTrue(len(codes) == 1, 'Mixed responses: %s' % str(codes))
        return codes.pop()

    def test_outage(self):
        # Everything should be working fine to start with.
        self.assertEqual(self.getErrorCode(), 200)

        # When the outage kicks in, we start getting 503 responses
        # instead of 200 and 404s.
        self.pgbouncer.stop()
        self.assertEqual(self.getErrorCode(), 503)

        # When the outage is over, things are back to normal.
        self.pgbouncer.start()
        self.assertEqual(self.getErrorCode(), 200)
UTF-8
Python
false
false
2,014
12,902,081,805,088
716ca55f82e717938798a239ef7c79ec72c9feff
acf8fe77e599f8372adf4fc971012394715795d6
/flask/lib/python2.7/site-packages/egginst/utils.py
cc97bb0514962a5dffd4961cc61c7e5602623998
[]
no_license
shaheershantk/Blog-Engine-Using-Flask
https://github.com/shaheershantk/Blog-Engine-Using-Flask
3e2f1457a59f282c336bbb63ff48171f938f5108
450e76a8bde0bd702d995fa7bb746ed920917f98
refs/heads/master
2021-01-01T19:42:03.401554
2014-11-10T15:01:08
2014-11-10T15:01:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import errno import sys import os import shutil import stat import tempfile import zipfile from os.path import basename, isdir, isfile, islink, join if sys.version_info[:2] < (2, 7): class ZipFile(zipfile.ZipFile): def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() else: ZipFile = zipfile.ZipFile on_win = bool(sys.platform == 'win32') if on_win: bin_dir_name = 'Scripts' rel_site_packages = r'Lib\site-packages' else: bin_dir_name = 'bin' rel_site_packages = 'lib/python%i.%i/site-packages' % sys.version_info[:2] ZIP_SOFTLINK_ATTRIBUTE_MAGIC = 0xA1ED0000L def rm_empty_dir(path): """ Remove the directory `path` if it is a directory and empty. If the directory does not exist or is not empty, do nothing. """ try: os.rmdir(path) except OSError: # directory might not exist or not be empty pass def rm_rf(path, verbose=False): if not on_win and islink(path): # Note that we have to check if the destination is a link because # exists('/path/to/dead-link') will return False, although # islink('/path/to/dead-link') is True. if verbose: print "Removing: %r (link)" % path os.unlink(path) elif isfile(path): if verbose: print "Removing: %r (file)" % path if on_win: try: os.unlink(path) except (WindowsError, IOError): os.rename(path, join(tempfile.mkdtemp(), basename(path))) else: os.unlink(path) elif isdir(path): if verbose: print "Removing: %r (directory)" % path if on_win: try: shutil.rmtree(path) except (WindowsError, IOError): os.rename(path, join(tempfile.mkdtemp(), basename(path))) else: shutil.rmtree(path) def get_executable(prefix): if on_win: paths = [prefix, join(prefix, bin_dir_name)] for path in paths: executable = join(path, 'python.exe') if isfile(executable): return executable else: path = join(prefix, bin_dir_name, 'python') if isfile(path): from subprocess import Popen, PIPE cmd = [path, '-c', 'import sys;print sys.executable'] p = Popen(cmd, stdout=PIPE) return p.communicate()[0].strip() return sys.executable def human_bytes(n): """ Return the number of bytes n in more human readable form. """ if n < 1024: return '%i B' % n k = (n - 1) / 1024 + 1 if k < 1024: return '%i KB' % k return '%.2f MB' % (float(n) / (2**20)) def makedirs(path): """Recursive directory creation function that does not fail if the directory already exists.""" try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def ensure_dir(path): """ Create the parent directory of the give path, recursively is necessary. """ makedirs(os.path.dirname(path)) def is_zipinfo_symlink(zip_info): """Return True if the given zip_info instance refers to a symbolic link.""" return zip_info.external_attr == ZIP_SOFTLINK_ATTRIBUTE_MAGIC def is_zipinfo_dir(zip_info): """Returns True if the given zip_info refers to a directory.""" return stat.S_ISDIR(zip_info.external_attr >> 16) def zip_write_symlink(fp, link_name, source): """Add to the zipfile the given link_name as a softlink to source Parameters ---------- fp: file object ZipFile instance link_name: str Path of the symlink source: str Path the symlink points to (the output of os.readlink) """ zip_info = zipfile.ZipInfo(link_name) zip_info.create_system = 3 zip_info.external_attr = ZIP_SOFTLINK_ATTRIBUTE_MAGIC fp.writestr(zip_info, source)
UTF-8
Python
false
false
2,014
3,427,383,908,791
1c9110db73e8870ac65a6faa18e3c87d906b8b6e
16f95a43b0c0bf86b9a0c50f5bf2639bdb8b07d0
/KWebLoc/atualizadores/anbima/anbima_MAPEAMENTO.py
c234a86d17bc39c2190cdccc3371cd220e05a747
[]
no_license
tigum27/KondorPythonProjects
https://github.com/tigum27/KondorPythonProjects
33432b8d263f20d96d0ef678aaa12dfacbb300c4
132dd7e50d987c0aa8f5aa60ca0ccefb7c7fefe0
refs/heads/master
2015-07-09T12:10:35
2014-09-03T20:11:48
2014-09-03T20:11:48
23,467,645
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: cp1252 -*- ipcaBase = 1614.62 igpmBase = 1000 from QuantLib import Schedule,Date,Brazil,Period,Months,Following,DateGeneration,Business252 import MySQLdb from sympy import Symbol from sympy.solvers import solve import inspect as IS class SqlQuery: def __init__(self): self.db = MySQLdb.connect(host="192.168.180.249",user="root",passwd="marisco",db="posicoes") def execute(self,query): cur = self.db.cursor() cur.execute(query) return cur class mySchedule: def __init__(self,settlementDate,maturityDate,per,calendar = Brazil()): self.schedule = Schedule(settlementDate,maturityDate,Period(per,Months),calendar,Following,Following,DateGeneration.Backward,False) self.cupons = [self.schedule.__getitem__(x) for x in range(self.schedule.__len__())] self.cupons.pop(0) class QLDate: @staticmethod def toString(date = Date(),sep = '-'): day = date.dayOfMonth() year = date.year() month = int(date.month()) return str(year)+sep+str(month) +sep+str(day) @staticmethod def fromString(date,sep = '-',ordem = 'ymd'): list1 = date.split(sep) day = int(list1[ordem.find('d')]) month = int(list1[ordem.find('m')]) year = int(list1[ordem.find('y')]) return Date(day,month,year) def GoalSeek(fction,pfction,vari,target,lextreme=0.00001,hextreme=100000,erro=0.00000001,maxit=100): argos=IS.getargspec(fction) nargos=[pfction[x] for x in argos[0]] vl=lextreme vh=hextreme vm=(vl+vh)/2 pfction[vari]=vl nargos=[pfction[x] for x in argos[0]] plow=fction(*nargos)-target pfction[vari]=vh nargos=[pfction[x] for x in argos[0]] phigh=fction(*nargos)-target pfction[vari]=vm nargos=[pfction[x] for x in argos[0]] pmid=fction(*nargos)-target i=0 while abs(pmid)>=erro and i<=maxit: if pmid*phigh>0: vh=vm vm=(vl+vh)/2 else: vl=vm vm=(vl+vh)/2 pfction[vari]=vl nargos=[pfction[x] for x in argos[0]] plow=fction(*nargos)-target pfction[vari]=vh nargos=[pfction[x] for x in argos[0]] phigh=fction(*nargos)-target pfction[vari]=vm nargos=[pfction[x] for x in argos[0]] pmid=fction(*nargos)-target i=+1 if i>=maxit and abs(pmid)>=erro: return 0 else: return vm def interpolar(x,y,i): n = 0 xx = x[n] while(i>xx): n = n+1 xx =x[n] A = ((1+y[n-1]/100)**(x[n-1]/252.0)) B = ((1+y[n]/100)**(x[n]/252.0)) Z = A*((B/A)**(float(i-x[n-1])/float(x[n]-x[n-1]))) return Z**(252.0/i)-1 def f(xx,str1): X=xx return eval(str1) def cupom(tabela,titulo): mtm = [float(u.replace('.','').replace(',','.')) for u in [tabela['col6'][str(x)] for x in sorted([int(key) for key in tabela['col6'].keys()])][5:]] VCTOs = [tabela['col2'][str(x)] for x in sorted([int(key) for key in tabela['col2'].keys()])][5:] yields = [float(u.replace(',','.')) for u in [tabela['col5'][str(x)] for x in sorted([int(key) for key in tabela['col5'].keys()])][5:]] schedules=[mySchedule(QLDate.fromString(data),QLDate.fromString(x,'/','dmy'),6) for x in VCTOs] sql = SqlQuery() if titulo[-1].lower() != 'f': ipca = float(sql.execute('select posicoes.vna("%s","2014-02-28")'%('IPCA' if titulo[-1].lower() == 'b' else 'IPCA')).fetchall()[0][0]) fatorAcum = ipca/(ipcaBase if titulo[-1].lower() == 'b' else igpmBase) notional = 1000*fatorAcum else: notional = 1000 dc = Business252() today = QLDate.fromString(data) curva = {} def bootstrapping(n = 0): if n == 0: for vt in schedules[n].cupons: curva.update({dc.dayCount(today,vt):yields[n]}) else: ultimoVtConhecido = sorted(curva.keys())[len(curva)-1] vtDesejados =[] vtDesejados2={} vtx = schedules[n].cupons[len(schedules[n].cupons)-1] vtx = dc.dayCount(today,vtx) eq = '- %2.9f'%mtm[n] cupom = 1.06**0.5-1 for vt in schedules[n].cupons: vt = 
dc.dayCount(today,vt) if vt in curva: Cupom = notional*cupom/(1+curva[vt]/100)**(vt/252.0) eq = eq + ' + %2.9f'%(Cupom) else: if vt < ultimoVtConhecido: sort = sorted(curva.keys()) if vt <sort[0]: curva.update({vt:curva[sort[0]]}) Cupom = notional*cupom/(1+curva[sort[0]]/100)**(vt/252.0) eq = eq + ' + %2.9f'%(Cupom) else: curva.update({vt:interpolar(sort,[curva[x] for x in sort],vt)*100}) Cupom = notional*cupom/(1+curva[vt]/100)**(vt/252.0) eq = eq + ' + %2.9f'%(Cupom) else: vtDesejados.append(vt) Cupom = notional*cupom A = (1+curva[ultimoVtConhecido]/100)**(ultimoVtConhecido/252.0) EXP = float(vt-ultimoVtConhecido)/float(vtx-ultimoVtConhecido) eq = eq + ' + %2.9f/(%2.9f*((((1+X)**(%2.9f/252))/%2.9f)**(%2.9f)))'%(Cupom,A,vtx,A,EXP) vtDesejados2.update({vt:'(%2.9f*((((1+X)**(%2.9f/252))/%2.9f)**(%2.9f)))'%(A,vtx,A,EXP)}) eq = eq + '+ %2.9f/(1+X)**(%2.9f/252)'%(notional,vtx) x = GoalSeek(f,{'str1':eq,'xx':0.06},'xx',0) curva.update({vtx:x*100}) vtDesejados.pop(len(vtDesejados)-1) for vt in vtDesejados2: X = x i = eval(vtDesejados2[vt])**(252.0/vt) - 1 curva.update({vt:i*100}) if n < len(schedules)-1: n = n+1 bootstrapping(n) bootstrapping() return curva def bullet(tabela): #vts = [QLDate.fromString(tabela['col2'][str(x)],'/','dmy') for x in sorted([int(key) for key in tabela['col2'].keys()])][5:] vts = [tabela['col2'][str(x)] for x in sorted([int(key) for key in tabela['col2'].keys()])][5:] dc = Business252() vts = [dc.dayCount(QLDate.fromString(data),QLDate.fromString(vt,'/','dmy') )for vt in vts] taxas = [tabela['col5'][str(x)] for x in sorted([int(key) for key in tabela['col5'].keys()])][5:] curva = {} for vt,tx in zip(vts,taxas): curva.update({vt:float(tx.replace(',','.'))}) return curva def spreadLtnDi(curva): sql = SqlQuery() query = 'select ' for vt in curva: query = query + 'indexadores_novo.getcurva_pre("%s","%s") as vt%s,'%(vt,data,vt) query = query[:-1] pre = sql.execute(query).fetchall() print pre if len(pre) > 0: i = 0 for vt in curva: curva[vt] = ((1 + curva[vt]/100)/(1 + pre[0][i]) - 1)*100 i = i + 1 return curva arvoreSaida= [] depara = {'ntn-f':'curva_ntnf','ntn-b':'curva_ntnb','ntn-c':'curva_ntnc','lft':'curva_lft','ltn':'curva_ltn','lspread':'spread_ltndi'} entrada['lspread'] = entrada['ltn'] for titulo in entrada.keys(): print 'iniciando bootstrapping ' + titulo curva = {} if titulo[0].lower() == 'l': curva = bullet(entrada[titulo]) if titulo == 'lspread': curva = spreadLtnDi(curva) else: curva = cupom(entrada[titulo],titulo) for vt in curva.keys(): valor= curva[vt]/100.0 arvoreSaida.append(data+"|"+depara[titulo]+"|"+str(vt)+"|"+'0'+"|"+str(valor)) print 'finalizando ' + titulo
UTF-8
Python
false
false
2,014
11,785,390,280,879
2ade252f6fffc6374133cebd46c37c1c834c2247
21306ff756b6bb49fc51f5a8d89f4924c3f5caf5
/obsolete/demo/vor/__init__.py
a5c56657a754ea5cac6755f275d783f7e5129a8e
[ "GPL-3.0-only" ]
non_permissive
MaxTyutyunnikov/lino
https://github.com/MaxTyutyunnikov/lino
d5d0392eb76c7fc3858c9589243e3f3743daa9d4
9a6322272d36b8c747e06b6b9eb0889a2e0d27a1
refs/heads/master
2021-01-19T16:50:17.870568
2013-11-23T03:47:26
2013-11-23T03:47:26
39,275,252
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding: latin1 label = 'Prototyp einer Homepage für den V.O.R.' def populate(db): import vor1 vor1.populate(db)
ISO-8859-1
Python
false
false
2,013
12,103,217,860,902
392d648d6017431db3bd7ea741a6622b1c4d8313
1065414f55761f1ce0fc80c85eebc078f8a18f91
/apps/payouts/admin_utils.py
ffc9dd1d559cf8793146942cb295bc8c73375b00
[ "BSD-2-Clause", "BSD-3-Clause" ]
permissive
gannetson/onepercentclub-site
https://github.com/gannetson/onepercentclub-site
69d7e4fa93b28b71ff5b5d0e02e0795621d233f3
fb0d14beac1bbb1e477c8521fc9e77a1509fcf1d
refs/heads/master
2021-01-15T18:59:16.973559
2014-07-01T06:45:14
2014-07-01T06:45:14
8,796,961
1
1
null
true
2014-07-14T12:18:28
2013-03-15T10:40:04
2014-07-07T07:56:39
2014-07-10T13:41:13
67,782
0
0
2
Shell
null
null
import urllib from django.core.urlresolvers import reverse def link_to(value, url_name, view_args=(), view_kwargs={}, query={}, short_description=None): """ Return admin field with link to named view with view_args/view_kwargs or view_[kw]args(obj) methods and HTTP GET parameters. Parameters: * value: function(object) or string for object proeprty name * url_name: name used to reverse() view * view_args: () or function(object) -> () returing view params * view_kwargs: {} or function(object) -> {} returing view params * query: {} or function(object) -> {} returning HTTP GET params * short_description: string with description, defaults to 'value'/property name """ def prop(self, obj): # Replace view_args methods by result of function callss if callable(view_args): args = view_args(obj) else: args = view_args if callable(view_kwargs): kwargs = view_kwargs(obj) else: kwargs = view_kwargs # Construct URL url = reverse(url_name, args=args, kwargs=kwargs) if callable(query): params = query(obj) else: params = query # Append query parameters if params: url += '?' + urllib.urlencode(params) # Get value if callable(value): # Call value getter new_value = value(obj) else: # String, assume object property assert isinstance(value, basestring) new_value = getattr(obj, value) return u'<a href="{0}">{1}</a>'.format(url, new_value) # Decorate function prop.allow_tags = True if not short_description: # No short_description set, use property name assert isinstance(value, basestring) short_description = value prop.short_description = short_description return prop
UTF-8
Python
false
false
2,014
15,513,421,885,001
e0dceed2587b5c11e305770e88f815b78a0a4366
79f5290060ead677fc35a238dc6f9b9621b3f35c
/lyrics_searcher/tools/fetchers/musixmatch_fetcher.py
18c795bf090c7fd6bdb011ece58e1b1aaccfe07f
[]
no_license
rrader/nowLyrics
https://github.com/rrader/nowLyrics
8c67c97bc809187c71de237f44c50c56ed55d2f3
2ce91707dbc649b01e3b1880f6e469c196acab8e
refs/heads/master
2020-07-04T04:42:49.088625
2011-12-28T12:35:02
2011-12-28T12:35:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding:utf8 -*- MM_API_KEY = 'a453f8a71d8e47edf061cc7d53b375d6' import os os.environ['MUSIXMATCH_API_KEY'] = MM_API_KEY from musixmatch.track import search as mm_search from BaseFetcher import BaseFetcher class Fetcher(BaseFetcher): def _do_fetch(self, title, artist): sr = mm_search(q_track=title, q_artist=artist) sr = filter(lambda x: x.lyrics_id != 0, sr) sr = map(lambda x: x.lyrics(), sr[:3]) sr = map(lambda x: u"%s \n%s (c)\nSource: MusiXmatch.com" % (x['lyrics_body'],x['lyrics_copyright']), sr) return sr
UTF-8
Python
false
false
2,011
5,583,457,493,049
026bf32126d2d2dcd855f9f227378735296baa25
abb71ed66f53a40d28be79825417e69c926bd1a4
/src/python/example3-1.py
005919b028290ade40c8844945a82a7fb3dfd2af
[]
no_license
yarox/libxml-examples
https://github.com/yarox/libxml-examples
957a221642f5ce3de6579e9822d1fcf434f700ae
40189d6f06db8ef474d0e29d332e03067de89c39
refs/heads/master
2020-05-21T13:21:58.824973
2012-07-25T09:50:52
2012-07-25T09:50:52
4,985,667
10
2
null
null
null
null
null
null
null
null
null
null
null
null
null
''' example3-1.py Find elements in a simple XML document. ''' from lxml import etree doc = etree.parse('../../data/simple.xml') # Retrieve all room elements and print tag, 'id' property, and number of # children rooms = doc.xpath('/house/room') for room in rooms: print '{0}: a {1} with {2} objects'.format(room.tag, room.attrib['id'], len(room.getchildren())) # Retrieve all red chairs ('chair' nodes with property 'color' = 'red') and # print color, name, and room red_chairs = doc.xpath('//chair[@color="red"]') for chair in red_chairs: print 'a {1} {2} from the {0}'.format(chair.getparent().attrib['id'], chair.attrib['color'], chair.tag) # List nodes whith text, if any for element in doc.iter(): if element.text and element.text.strip(): print '{0}: {1}'.format(element.tag, element.text)
UTF-8
Python
false
false
2,012
14,121,852,480,552
bf02272c8ff5d65ad8db17a7f5b3c211e04e587d
44f0da4da4bde82c6df6c806680f524b200e0e5b
/nonmainprograms/generate_light_curve.py
a88470cbe48862a41091625a1edd82298c031266
[]
no_license
davidwhogg/exoplanet-research
https://github.com/davidwhogg/exoplanet-research
51b747a3c817fe779fadeb922a507e6fcfbf9185
5b040ab523f5f0e49d806088684b7b32dc2ea9a0
refs/heads/master
2021-01-22T11:37:36.204830
2014-10-21T02:27:43
2014-10-21T02:27:43
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#This code generates the light curve of a given kepler object. #Many parts of the code were written with the help from http://yourlocaltautologist.blogspot.com/2012/08/viewing-kepler-light-curves-via.html import os import pyfits import matplotlib.pyplot as plt import numpy as np #This short segment of code allows the user to input the kepler ID #and change the working directory to the proper one. (Given that the proper local data exists) kplr_id = raw_input('Input kepler id:') path = '/Users/SHattori/.kplr/data/lightcurves/%s' %kplr_id os.chdir(path) # print path # print os.getcwd() #Code to allow the user to decide which FITS format file to generate light curve. filename = raw_input('Input FITS file to use: ') FITSfile = pyfits.open(filename) #FITSfile.info() dataheader = FITSfile[1].header topheader = FITSfile[0].header jdadj = str(dataheader["BJDREFI"]) obsobject = str(dataheader["OBJECT"]) lightdata = FITSfile[1].data #Convert the data to fractional relative flux #Currently this code is written to generate graphs for PDCSAP_FLUX. flux = lightdata.field("PDCSAP_FLUX") time = lightdata.field('TIME') print time assert 0 median = np.median(flux) # for i, e in enumerate(flux): # #fractional relative flux # flux[i] = ((e - median) / median) time = lightdata.field("TIME") #Barycenter corrected Julian date fig1 = plt.figure() sub1 = fig1.add_subplot(111) sub1.plot(time,flux, color="black", marker=",", linestyle = 'None') #The following code is to set the labels and title xlab = "Time (days, Kepler Barycentric Julian date - %s)"%jdadj sub1.set_xlabel(xlab) sub1.set_ylabel("Relative Brightness (electron flux)") plottitle="Light Curve for %s"%obsobject sub1.set_title(plottitle) plt.show() FITSfile.close()
UTF-8
Python
false
false
2,014
3,547,642,990,851
2f936e7a7da6c99259e924f78846142921d05f03
86e27a49d54a807902c5dec48da3e597c1bcf7da
/importers/delicious-xml.py
3d27635536555788931d0539b7f9d04c768d2a89
[]
no_license
pombredanne/selficious
https://github.com/pombredanne/selficious
9b8d497b055f13be34ea39ac6f5c4724ce5e1c7e
4f57f00a36b40c71ffab031bfc41b2a17a7b939f
refs/heads/master
2017-12-02T07:54:35.585125
2010-12-24T14:07:49
2010-12-24T14:07:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This is SELFICIOUS by Yuuta # UPDATED: 2010-12-23 20:53:41 import urllib2 import base64 import hashlib from xml.dom import minidom import dateutil.parser from importers import BaseImporter class DeliciousLocalXMLImporter(BaseImporter): """ Imports bookmarks from an XML file saved from delicious. To get this kind of files, visit <a href="https://api.del.icio.us/v1/posts/all">http://api.del.icio.us/v1/posts/all </a> (if you're an old delicious user) """ service_name = 'delicious-xml' service_verbose_name = "Local XML file saved from delicious" form = """ <p> <label for="xmlfile" class="gauche">Upload your XML file: </label> <input id="xmlfile" type="file" name="xmlfile" /> </p> """ def __init__(self, tornado_handler): try: uploaded_file = tornado_handler.request.files['xmlfile'][0] self.data = uploaded_file['body'] self.success = True except: self.success = False self.error = 'fetch' super(DeliciousLocalXMLImporter, self).__init__(tornado_handler) def posts(self): if self.success: posts = [] dom = minidom.parseString(self.data) h = hashlib.sha1() for node in dom.getElementsByTagName('post'): h.update(node.getAttribute('href')) posts.append({ 'hash':h.hexdigest(), 'url':node.getAttribute('href'), 'title':node.getAttribute('description'), 'description':node.getAttribute('extended'), 'tags':node.getAttribute('tag').split(' '), 'time':dateutil.parser.parse(node.getAttribute('time')) }) return posts else: return []
UTF-8
Python
false
false
2,010
5,428,838,676,443
893e00b67bc3b9c29c5f7b76b1d9bc805fa93f8c
5c9530bab0210bbd3a47992ca11e895a8f9836c5
/scikits/learn/naive_bayes/__init__.py
0f632219df385b18fbe29257be8ed9af720c47f9
[ "BSD-3-Clause" ]
permissive
amitibo/scikit-learn
https://github.com/amitibo/scikit-learn
4b94e8d307102290f1329608a88c9e3dfa184434
3dd1d00532c70cc3f5ebf2db916e072138afa0f1
refs/heads/master
2020-12-30T19:22:28.287688
2011-05-11T16:20:43
2011-05-11T16:20:43
1,502,609
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Naive Bayes models ================== Naive Bayes algorithms are a set of supervised learning methods based on applying Baye`s theorem with strong (naive) independence assumptions. See http://scikit-learn.sourceforge.net/modules/naive_bayes.html for complete documentation. """ from .naive_bayes import GNB, MultinomialNB from . import sparse
UTF-8
Python
false
false
2,011
8,572,754,758,203
ff7dea97b2df357a903bf2052097ca471e936164
fcd2148474f1e8cd7c426e1d5c48559f320fdbbc
/playlists.py
ff3d2e9d477af133673907157f19be2587db30e5
[]
no_license
keroserene/mens-amplio
https://github.com/keroserene/mens-amplio
f0125c749a5217bd0bab00c463cbb7c62ac42fb0
e8c05c85742c3624cf8aed8149857e931a566333
refs/heads/master
2021-01-20T22:40:27.262447
2013-08-16T09:46:51
2013-08-16T09:46:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Defines full set of lighting effect playlists to use when actually running piece from led.effects.base import ( EffectParameters, SnowstormLayer, TechnicolorSnowstormLayer, WhiteOutLayer, NoDataLayer) from led.effects.digital_rain import DigitalRainLayer from led.effects.drifters import * from led.effects.firefly_swarm import FireflySwarmLayer from led.effects.impulses import * from led.effects.lightning_storm import LightningStormLayer from led.effects.plasma import PlasmaLayer from led.effects.waves import WavesLayer from led.renderer import Playlist headsetOn = Playlist([ [ ImpulseLayer2(), WavesLayer(color=(1,0,.2)), PlasmaLayer(color=(.1,.1,.1)), ], [ WavesLayer(), LightningStormLayer(), ] ]) headsetOff = Playlist([ [ TreeColorDrifterLayer([ (1,0,1), (.5,.5,1), (0,0,1) ], 5), PlasmaLayer(), ], [ OutwardColorDrifterLayer([ (1,0,0), (.7,.3,0), (.7,0,.3) ], 10), PlasmaLayer(), ] ]) transition = Playlist([ [WhiteOutLayer()], [SnowstormLayer()] ])
UTF-8
Python
false
false
2,013
10,471,130,281,730
7171d40572b1d6b72403a98559d7c092ae2df004
fe89fbe21a9e7b3b43e27e39bdf1e81e2f6c839f
/test/test_all.py
76da3836c7ce2f17998c17ac1688fd69354d23f1
[ "MIT" ]
permissive
pbulsink/turbocontrol
https://github.com/pbulsink/turbocontrol
dc2d88a2578baa413ee59b5ba7929476f8ebc218
67f036e520bc03e76f71e5ca41d2455b2712b2e7
refs/heads/master
2020-03-25T07:39:24.761624
2014-12-24T02:08:58
2014-12-24T02:08:58
15,980,284
0
0
null
false
2014-08-08T01:48:23
2014-01-16T20:39:57
2014-08-08T01:48:07
2014-08-08T01:48:23
356
1
1
0
Python
null
null
#!/usr/bin/env python """ Turbogo Main Test Suite Runs all test files """ from unittest import TestLoader, TextTestRunner, TestSuite from test_turbogo import TestControlEdit, TestJob, TestSetup from test_turbogo import TestWriteCoord, TestSubmitScriptPrep from test_turbogo_helpers import TestArgs, TestChSpin from test_turbogo_helpers import TestControlMods, TestGeom from test_turbogo_helpers import TestRoute, TestSimpleFuncs from test_turbocontrol import TestJobset, TestFindInputs from test_turbocontrol import TestJobChecker, TestWriteStats, TestWriteFreeh from test_def_op import TestDefine from test_screwer_op import TestScrewer from test_freeh_op import TestFreeh from test_cosmo_op import TestCosmo if __name__ == "__main__": loader = TestLoader() suite = TestSuite(( loader.loadTestsFromTestCase(TestControlEdit), loader.loadTestsFromTestCase(TestJob), loader.loadTestsFromTestCase(TestSetup), loader.loadTestsFromTestCase(TestWriteCoord), loader.loadTestsFromTestCase(TestSubmitScriptPrep), loader.loadTestsFromTestCase(TestArgs), loader.loadTestsFromTestCase(TestChSpin), loader.loadTestsFromTestCase(TestControlMods), loader.loadTestsFromTestCase(TestGeom), loader.loadTestsFromTestCase(TestRoute), loader.loadTestsFromTestCase(TestSimpleFuncs), loader.loadTestsFromTestCase(TestJobset), loader.loadTestsFromTestCase(TestFindInputs), loader.loadTestsFromTestCase(TestJobChecker), loader.loadTestsFromTestCase(TestWriteStats), loader.loadTestsFromTestCase(TestDefine), loader.loadTestsFromTestCase(TestScrewer), loader.loadTestsFromTestCase(TestFreeh), loader.loadTestsFromTestCase(TestCosmo), loader.loadTestsFromTestCase(TestWriteFreeh), )) runner = TextTestRunner(verbosity = 2) runner.run(suite)
UTF-8
Python
false
false
2,014
19,335,942,780,279
57bf1e3fe5b66559a0aed584b46bf764ad4b2f63
f42f06849f549f1781c2d5a5455bb1593a56b8c4
/sitemap.py
de62ebf3a440f54377d05c949eef19addb158cbe
[ "MIT" ]
permissive
freshy969/Lonava
https://github.com/freshy969/Lonava
6e700de0198db83653f6f4c2f78fb603db91e31d
30906235597b0617789f4d7e1a6c3b24c5415137
refs/heads/master
2020-03-23T04:21:02.463563
2013-03-08T07:28:01
2013-03-08T07:28:01
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python import time import datetime import psycopg2 import psycopg2.extras import os db = psycopg2.connect("dbname='lonava' user='lonuser' host='localhost' password='YOURPASS'") cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor) print "Generating Count" cur.execute ("select count(*) as count from (select distinct on (commentgroup) * from storygroup ) as bar") totalrows = cur.fetchone()['count'] print "There are " + str(totalrows) + " stories in Lonava." looppasses = 1 + (totalrows / 40000) print "A total of " + str(looppasses) + " story sitemaps" date1 = datetime.datetime.now().isoformat() datenow = date1[0:date1.find(".")] + "+00:00" print datenow startat = 0 sitemapindex = open('/usr/local/lonava/static/sitemap_index.xml', 'w') sitemapindex.write("""<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/siteindex.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap> <loc>http://lonava.com/static/sitemap-0.xml</loc> <lastmod>""" + datenow + """</lastmod> </sitemap> """) for i in range(looppasses): sitemap_path = "/usr/local/lonava/static/sitemap-" + str(i + 1) + ".xml" sitemap = open(sitemap_path, 'w') sitemapindex.write("<sitemap>") sitemapindex.write("<loc>http://lonava.com/static/sitemap-" + str(i + 1) + ".xml</loc>\n") sitemapindex.write("<lastmod>" + datenow + "</lastmod>\n") sitemapindex.write("</sitemap>\n") cur.execute("create temporary table mystories (like stories);") cur.execute("alter table mystories add column ord bigserial;") cur.execute("alter table mystories add column cachedreplycount bigint;") cur.execute("insert into mystories(lastedit,pimgurl,imgurl,usr,storytime,title,url,text,name,score,commentgroup,storyid,location,cachedreplycount) select lastedit,pimgurl,imgurl,usr,storytime,title,url,text,name,score,commentgroup,storyid,location,cachedreplycount from storygroup where location in (select subbedchan from usrsubs) order by storyid asc;",) cur.execute("select * from (select distinct on (commentgroup) * from (select *,(1.0 + score + (cachedreplycount / 10)) / (1.0 + (select count(*) from mystories) - ord) as rank from mystories) as foo) as bar order by storyid asc offset %s limit %s;",[startat,39999]) sitemap.write("""<?xml version="1.0" encoding="UTF-8"?>\n""") sitemap.write("""<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""") print "Starting file " + str(i) + "; startat = " + str(startat) + " ; endat " + str(startat + 39999) startat = startat + 40000 rows = cur.fetchall() cur.execute("drop table mystories") print "Writing " + str(len(rows)) + " rows to " + sitemap_path for row in rows: url = 'http://lonava.com/stories/' + str(row['storyid']) date = row['storytime'] + datetime.timedelta(days=2) datestr = date.isoformat() datecur = datestr[0:datestr.find(".")] + "+00:00" sitemap.write("<url>\n") sitemap.write("<loc>" + url + "</loc>\n") sitemap.write("<lastmod>" + datecur + "</lastmod>\n") sitemap.write("<changefreq> monthly </changefreq>\n") sitemap.write("<priority> .5 </priority>\n") sitemap.write("</url>\n") sitemap.write("</urlset>\n") sitemap.close() #### DONE WITH STORIES, now do USERS cur.execute ("select count(*) from usrs as count") totalrows = cur.fetchone()['count'] print "There 
are " + str(totalrows) + " usrs of Lonava." looppasses = 1 + (totalrows / 40000) print "A total of " + str(looppasses) + " usr sitemaps" date1 = datetime.datetime.now().isoformat() datenow = date1[0:date1.find(".")] + "+00:00" i = 0 startat = 0 for i in range(looppasses): sitemap_path = "/usr/local/lonava/static/sitemap-usr-" + str(i + 1) + ".xml" sitemap = open(sitemap_path, 'w') sitemapindex.write("<sitemap>") sitemapindex.write("<loc>http://lonava.com/static/sitemap-usr-" + str(i + 1) + ".xml</loc>\n") sitemapindex.write("<lastmod>" + datenow + "</lastmod>\n") sitemapindex.write("</sitemap>\n") cur.execute ("select * from usrs limit %s offset %s",[startat + 39999,startat]) sitemap.write("""<?xml version="1.0" encoding="UTF-8"?>\n""") sitemap.write("""<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""") startat = startat + 40000 rows = cur.fetchall() for row in rows: url = 'http://lonava.com/user/' + str(row['usrid']) date = row['regtime'] + datetime.timedelta(days=2) datestr = date.isoformat() datecur = datestr[0:datestr.find(".")] + "+00:00" sitemap.write("<url>\n") sitemap.write("<loc>" + url + "</loc>\n") sitemap.write("<lastmod>" + datecur + "</lastmod>\n") sitemap.write("<changefreq> monthly </changefreq>\n") sitemap.write("<priority> .6 </priority>\n") sitemap.write("</url>\n") sitemap.write("</urlset>\n") sitemap.close() sitemapindex.write("</sitemapindex>\n") sitemapindex.close() print "Notifying Bing" cmd = 'curl http://www.bing.com/webmaster/ping.aspx?siteMap=http://lonava.com/static/sitemap_index.xml > /dev/null' os.system(cmd) print "Notifying Google" cmd = 'curl http://www.google.com/webmasters/sitemaps/ping?sitemap=http://lonava.com/static/sitemap_index.xml > /dev/null' os.system(cmd)
UTF-8
Python
false
false
2,013
15,006,615,768,946
0582d0cb2df257b0e697286d10f4e807f046eae2
270487b95e2309dc6a4e392b241374beb24f2633
/clean_sentences.py
d4079e0998c8505c9ade3cc2944758b41fc7add3
[]
no_license
cdg720/parsing
https://github.com/cdg720/parsing
58b85b0ea6013e3e1034fd5a8667e66ebcd7e1b5
9c79a364887395d91a45aa4f8090e26606fa00f7
refs/heads/master
2021-01-01T18:07:16.278184
2014-11-13T00:20:18
2014-11-13T00:20:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import gzip import string import sys # NYT only. check for other coropra. def clean_sentences(): if len(sys.argv) == 1: print 'usage: python clean_sentences.py file1 file2 ... out' print 'e.g.: python clean_sentences.py ~/data/gigaword/nyt/*.gz /pro/dpg/dc65/data/gigaword/chunks/nyt' sys.exit(0) doc_size = 500 out = sys.argv[-1] i = 0 g = None x = [0,] * 2 for doc in sys.argv[1:-1]: f = gzip.open(doc, 'rb') y = [0,] * 2 for line in f.read().splitlines(): count = 0 count2 = 0 if line[22:24] == '> ': processed = line[24:-6] elif line[23:25] == '> ': processed = line[25:-6] elif line[24:26] == '> ': processed = line[26:-6] else: print line for ch in processed: if ch.islower(): count += 1 if ch != ' ': count2 += 1 x[1] += 1 y[1] += 1 if count * 100 / count2 >= 90: # go back to 90 x[0] += 1 y[0] += 1 if i % doc_size == 0: # new file if g: g.flush() g.close() g = gzip.open(out + '/' + str(i / doc_size + 1) + '.gz', 'wb') g.write(line + '\n') i += 1 print doc, y, x def clean_sentences2(): if len(sys.argv) == 1: print 'usage: python clean_sentences.py file1 file2 ... out' print 'e.g.: python clean_sentences.py ~/data/gigaword/nyt/*.gz /pro/dpg/dc65/data/gigaword/chunks/nyt' sys.exit(0) doc_size = 500 out = sys.argv[-1] i = 0 g = None x = [0,] * 2 for doc in sys.argv[1:-1]: f = gzip.open(doc, 'rb') y = [0,] * 2 for line in f.read().splitlines(): x[1] += 1 y[1] += 1 x[0] += 1 y[0] += 1 if i % doc_size == 0: # new file if g: g.flush() g.close() g = gzip.open(out + '/' + str(i / doc_size + 1) + '.gz', 'wb') g.write(line + '\n') i += 1 print doc, y, x clean_sentences2()
UTF-8
Python
false
false
2,014
15,040,975,485,713
28d814caf31ab9754206adfd6da704bb184b56a2
c0b744ca35ef8791c0f7cc22a66a3754a0d33493
/interface.py
9ff898f25ff6630b299b49b988925198a0bd61f7
[ "MIT" ]
permissive
Cjreynol/Python-Monster-Zoo
https://github.com/Cjreynol/Python-Monster-Zoo
fc91207f04fbb52a8adccb2f1a0025f1df7fb5ac
811055206ef1ce994677e208606b16964baf1e42
refs/heads/master
2021-05-16T02:25:35.809134
2013-12-27T19:15:21
2013-12-27T19:15:21
15,458,914
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Chad Reynolds # 12/24/13 # Creates an interface to hold multiple managers from tkinter import * from manager import Name_Window, Manager class Interface(): """Main Window to add and hold multiple monsters.""" def __init__(self): self.root = Tk() self.root.title("Monster Zoo") self.interface_frame = Frame(self.root) self.interface_frame.pack() self.button = Button(self.interface_frame, text = "Add Monster", command = self.add_monster) self.button.pack(side = LEFT) self.monster_total = IntVar() self.monster_total.set(0) self.total = Label(self.interface_frame, textvariable = self.monster_total) self.total.pack(side = RIGHT) Label(self.interface_frame, text = "Monster Total: ").pack(side = RIGHT) def add_monster(self): """Creates a name menu to add a new monster.""" new = Name_Window(self.root, self.monster_total) def mainloop(self): """Starts the root's mainloop.""" self.root.mainloop()
UTF-8
Python
false
false
2,013
13,125,420,098,678
69b866147be5708fa38154c0e012fdded7312f68
430293e7a0a2dd89e43103cb76eded423454834a
/models/models.py
bcebf7808210e761b2692e8a20468dd8a9b11c95
[ "GPL-3.0-only" ]
non_permissive
dezkareid/bicitacora
https://github.com/dezkareid/bicitacora
b2db8e24a6a513408a384554a6feff35e6a24441
b10e943382596d8a60d9ccbf7e68e12d573e5089
refs/heads/master
2021-01-10T21:16:40.370738
2013-12-27T21:46:45
2013-12-27T21:46:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from google.appengine.ext import db from google.appengine.api import users class Ciclista(db.Model): """Entidad para administrar a los Ciclistas que usen la aplicacion""" nombre = db.StringProperty(required=True) facebook = db.LinkProperty(required=True) twitter = db.StringProperty(required=True) es_hombre = db.BooleanProperty(default=True) fecha_nacimiento = db.DateProperty() fecha_registro = db.DateProperty(auto_now_add=True) usuario = db.UserProperty() class Ruta(db.Model): """Entidad para guardar una Ruta""" nombre = db.StringProperty(required=True) puntos = db.ListProperty(db.GeoPt,default=[]) inicio = db.DateTimeProperty(required=True,auto_now_add=True) termino = db.DateTimeProperty(required=True,auto_now_add=True) ciclista = db.ReferenceProperty(Ciclista) class Tipo_Lugar(db.Model): """Entidad para guardar los tipos de lugares""" tipo = db.StringProperty(required=True) class Lugar(db.Model): """Entidad para guardar lugares de interes para los ciclistas: talleres, biciestacionamientos,etc.""" nombre = db.StringProperty(required=True) tipo = db.ReferenceProperty(Tipo_Lugar) descripcion = db.TextProperty(required=True) direccion = db.StringProperty(required=True) alta = db.DateProperty(auto_now_add=True) ubicacion = db.GeoPtProperty() class Tipo_Suceso(db.Model): """Entidad que identifica a los tipos de sucesos: Manifestaciones, bloqueos, composturas de calle""" tipo = db.StringProperty(required=True) duracion = db.IntegerProperty(required=True) class Suceso(db.Model): """Entidad para reportar los tipos de sucesos que pueden haber en la ruta del ciclista""" tipo = db.ReferenceProperty(Tipo_Suceso) descripcion = db.TextProperty(required=True) alta = db.DateTimeProperty(auto_now_add=True) ubicacion = db.GeoPtProperty()
UTF-8
Python
false
false
2,013
11,355,893,570,767
7c0b39c8798eecf916c539238eff6381aaad4597
2e908072b624c46240ee9e7fcb993b2f21aee0da
/plugins/presence_handler.py
c2925081028c10ab791ed3a189ee2168c10a7546
[]
no_license
bogobog/steerage
https://github.com/bogobog/steerage
d7e562c5d7cf8a730b0de5e8176c78cb7ecd1147
ce3d0e39efb26615ad114195d9675401e9cac99f
refs/heads/master
2016-09-06T13:50:15.830892
2014-01-08T05:15:56
2014-01-08T05:15:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from twisted.python import log from wokkel import xmppim import logging from common import CommonClientManager class CommonPresenceHandler( xmppim.PresenceClientProtocol ): received_statuses = {} def __init__( self, client ): super( CommonPresenceHandler, self ).__init__() self.my_client = client def connectionInitialized(self): self.available() def subscribedReceived(self, entity): log.msg( 'subscribedReceived', level = logging.DEBUG ) def subscribeReceived(self, subscriber): rtr_client = CommonClientManager.getHandler( 'roster', self.my_client ) def rosterAddResponse( response ): log.msg( 'rosterAddResponse', level = logging.DEBUG ) self.subscribed( subscriber ) # subscribe to subscriber user_status = rtr_client.getUserStatus( subscriber ) if not user_status or ( not user_status == 'both' and not user_status == 'to' ): rtr_client.addItem( subscriber.userhostJID() ).addCallback( addSubscriberToRosterResponse ).addErrback( log.err ) def addSubscriberToRosterResponse( response ): log.msg( 'addSubscriberToRosterResponse', level = logging.DEBUG ) if response.attributes['type'] == 'result': self.subscribe( subscriber.userhostJID() ) rtr_client.addItem( subscriber ).addCallback( rosterAddResponse ) def availableReceived(self, entity, show=None, statuses=None, priority=0): log.msg( 'availableReceived', level = logging.DEBUG ) # ignore if self if entity.full() == self.my_client.jid.full(): return self.received_statuses[ entity.full() ] = 'available' CommonClientManager.addHandler( 'presence', CommonPresenceHandler )
UTF-8
Python
false
false
2,014
14,886,356,680,695
59dcd9d9438d0446307ea9c88e48d6f3b2b8d8b8
e9cfb8cec8d62b02136e6f2efe793c9f62a1c96f
/src/main.py
2368a096fc2e97fa71aac9ec0997ffca2dfadb45
[]
no_license
jleake/pytetris
https://github.com/jleake/pytetris
d6558340fd9ff074c80e013d2003649ae1a98da4
c481c8d6fac6db054fdf78245b2cd5b1062ff738
refs/heads/master
2020-05-18T10:39:10.850861
2013-10-03T01:06:28
2013-10-03T01:06:28
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from wrapper import CursesGraphics, curses_wrapper from game import Block def main(cg): for n in range(1, 30): cg.draw('#', (CursesGraphics.RED, CursesGraphics.BLACK), [(n, 1), (n, 2), (n, 3), (n+1, 3)]) cg.refresh() cg.wait_for_key(1000) cg.erase([(n, 1), (n, 2), (n, 3), (n+1, 3)]) # Call the main curses wrapper function to start things off. The parameter refers to the main function to be run after curses initialization. (In this case, this is the above "main" function.) curses_wrapper(main)
UTF-8
Python
false
false
2,013
6,476,810,712,219
90a9a0dcd8b6dabb1a051002af273bc9edf51685
c3da8ec01fbdf0dc2040afa12c9a3b3d13940f7a
/regression/test_cross_validation.py
81b87782a2bbdb273e62a5f2d5c96e8779263680
[]
no_license
jjakeman/pyheat
https://github.com/jjakeman/pyheat
18ae60174dcd09028ae274e7b0401ce09ce6f677
2050e4fbe70503f04e54c3436276ba5903145dcd
refs/heads/master
2016-09-06T11:19:56.409206
2013-08-09T04:19:28
2013-08-09T04:19:28
7,924,075
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import unittest import numpy from regression.compressed_sensing import * from regression.gaussian_process import * from utilities.quadrature_rules import clenshaw_curtis from utilities.math_utils import cartesian_product, hypercube_map from utilities import visualisation as plot from matplotlib import cm from scipy import stats from cross_validation import * from regression.compressed_sensing import * from utilities.tensor_product_domain import * from interpolation.least_interpolation import least_factorization from polynomial_chaos.orthogonal_polynomial import LegendrePolynomial1D, \ JacobiPolynomial1D from polynomial_chaos.polynomial_chaos_expansion import \ PolynomialChaosExpansion as PCE from utilities.tensor_product_basis import * from examples.test_functions import matlab_peaks from utilities.math_utils import ridge_regression class TestCrossValidation(unittest.TestCase): def setUp( self ): self.eps = 20*numpy.finfo( numpy.float ).eps self.verbosity = 0 def xtest_grid_search_cross_validation( self ): f_1d = lambda x: x**10 build_pts = numpy.linspace(-.85,.9,14) build_pts = numpy.atleast_2d( build_pts ) build_vals = f_1d( build_pts ).T # Test grid search cross validation when applied to Gaussian Process num_dims = 1 func_domain = TensorProductDomain( num_dims, [[-1,1]] ) GP = GaussianProcess() GP.set_verbosity( 0 ) GP.function_domain( func_domain ) loo_cv_iterator = LeaveOneOutCrossValidationIterator() CV = GridSearchCrossValidation( loo_cv_iterator, GP ) CV.run( build_pts, build_vals ) I = numpy.arange( build_pts.shape[1] ) for i in xrange( build_pts.shape[1] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) train_pts = build_pts[:,J] train_vals = build_vals[J,:] GP.build( train_pts, train_vals ) pred_vals = GP.evaluate_set( build_pts ) assert numpy.allclose( build_vals[i,0]-pred_vals[i], CV.residuals[0][i] ) # Test grid search cross validation when applied to polynomial chaos # expansions that are built using ridge regression # The vandermonde matrix is built from scratch every time by the pce num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) loo_cv_iterator = LeaveOneOutCrossValidationIterator() CV = GridSearchCrossValidation( loo_cv_iterator, pce ) CV.run( build_pts, build_vals ) I = numpy.arange( build_pts.shape[1] ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b ) assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i], CV.residuals[0][i] ) # Test grid search cross validation when applied to polynomial chaos # expansions that are built using ridge regression # Specifying parse_cross_validation_data = True will ensure that # the vandermonde matrix is not built from scratch every time by # the pce num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) loo_cv_iterator = LeaveOneOutCrossValidationIterator() CV = GridSearchCrossValidation( loo_cv_iterator, pce, use_predictor_cross_validation = True) CV.run( build_pts, build_vals ) I = numpy.arange( build_pts.shape[1] ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif 
i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b ) assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i], CV.residuals[0][i] ) # Test grid search cross validation when applied to polynomial chaos # expansions that are built using ridge regression # A closed form for the cross validation residual is used num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) loo_cv_iterator = LeaveOneOutCrossValidationIterator() CV = GridSearchCrossValidation( loo_cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = True ) CV.run( build_pts, build_vals ) I = numpy.arange( build_pts.shape[1] ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b ) assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i], CV.residuals[0][i] ) # Test grid search cross validation when applied to polynomial chaos # expansions that are built using ridge regression num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) max_order = build_pts.shape[1] orders = numpy.arange( 1, max_order ) lamda = numpy.array( [0.,1e-3,1e-2,1e-1] ) # note cartesian product takes type from first array in 1d sets # so if I use orders first lamda will be rounded to 0 cv_params_grid_array = cartesian_product( [lamda,orders] ) cv_params_grid = [] for i in xrange( cv_params_grid_array.shape[0] ): cv_params = {} cv_params['lambda'] = cv_params_grid_array[i,0] cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] ) cv_params_grid.append( cv_params ) loo_cv_iterator = LeaveOneOutCrossValidationIterator() CV = GridSearchCrossValidation( loo_cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = False ) CV.run( build_pts, build_vals, cv_params_grid ) k = 0 I = numpy.arange( build_pts.shape[1] ) for cv_params in cv_params_grid: order = cv_params['order'] lamda = cv_params['lambda'] pce.set_order( order ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b, lamda = lamda ) assert numpy.allclose( ( build_vals[i,0]- numpy.dot( V, x ) )[i], CV.residuals[k][i] ) k += 1 print 'best',CV.best_cv_params # Test grid search cross validation when applied to # expansions that are built using a step based method # ( LARS ) num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) max_order = build_pts.shape[1] orders = numpy.arange( 1, max_order ) lamda = numpy.array( [0.,1e-3,1e-2,1e-1] ) # note cartesian product takes type from first array in 1d sets # so if I use orders first lamda will be rounded to 0 cv_params_grid_array = cartesian_product( [lamda,orders] ) cv_params_grid = [] for i in xrange( cv_params_grid_array.shape[0] ): cv_params = {} cv_params['solver'] = 4 # LARS cv_params['order'] = numpy.int32( 
cv_params_grid_array[i,1] ) cv_params_grid.append( cv_params ) print cv_params_grid loo_cv_iterator = LeaveOneOutCrossValidationIterator() #loo_cv_iterator = KFoldCrossValidationIterator( 3 ) CV = GridSearchCrossValidation( loo_cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = False ) CV.run( build_pts, build_vals, cv_params_grid ) k = 0 I = numpy.arange( build_pts.shape[1] ) for cv_params in cv_params_grid: order = cv_params['order'] pce.set_order( order ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] b = b.reshape( b.shape[0] ) x, metrics = least_angle_regression( A, b, 0., 4, 0., 1000, 0 ) assert numpy.allclose( ( build_vals[i,0]- numpy.dot( V, x ) )[i], CV.residuals[k][i] ) k += 1 #for i in xrange( len( CV.cv_params_set ) ): # print CV.cv_params_set[i], CV.scores[i] print 'best param', CV.best_cv_params print 'best score', CV.best_score print build_pts.shape[1] # ( OMP ) num_dims = 1 order = 3 build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) max_order = build_pts.shape[1] orders = numpy.arange( 1, max_order ) lamda = numpy.array( [0.,1e-3,1e-2,1e-1] ) # note cartesian product takes type from first array in 1d sets # so if I use orders first lamda will be rounded to 0 cv_params_grid_array = cartesian_product( [lamda,orders] ) cv_params_grid = [] for i in xrange( cv_params_grid_array.shape[0] ): cv_params = {} cv_params['solver'] = 2 # OMP cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] ) cv_params_grid.append( cv_params ) print cv_params_grid loo_cv_iterator = LeaveOneOutCrossValidationIterator() #loo_cv_iterator = KFoldCrossValidationIterator( 3 ) CV = GridSearchCrossValidation( loo_cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = False ) CV.run( build_pts, build_vals, cv_params_grid ) k = 0 I = numpy.arange( build_pts.shape[1] ) for cv_params in cv_params_grid: order = cv_params['order'] pce.set_order( order ) V = pce.vandermonde( build_pts ).T for i in xrange( V.shape[0] ): if i == 0 : J = I[1:] elif i == build_pts.shape[1]-1 : J = I[:-1] else: J = numpy.hstack( ( I[:i], I[i+1:] ) ) A = V[J,:] b = build_vals[J,:] b = b.reshape( b.shape[0] ) x, metrics = orthogonal_matching_pursuit( A, b, 0., 1000, 0 ) assert numpy.allclose( ( build_vals[i,0]- numpy.dot( V, x ) )[i], CV.residuals[k][i] ) k += 1 #for i in xrange( len( CV.cv_params_set ) ): # print CV.cv_params_set[i], CV.scores[i] print 'best param', CV.best_cv_params print 'best score', CV.best_score print build_pts.shape[1] def test_omp_choloesky( self ): f_1d = lambda x: x**10 num_dims = 1 order = 20 func_domain = TensorProductDomain( num_dims, [[-1,1]] ) build_pts = numpy.linspace(-.85,.9,14) build_pts = numpy.atleast_2d( build_pts ) build_vals = f_1d( build_pts ).T poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, basis, order, func_domain ) all_train_indices = [] all_validation_indices = [] cv_iterator = LeaveOneOutCrossValidationIterator( build_pts.shape[1] ) for train_indices, validation_indices in cv_iterator: all_train_indices.append( train_indices ) all_validation_indices.append( validation_indices ) vandermonde = pce.vandermonde( build_pts ).T out = orthogonal_matching_pursuit_cholesky( 
vandermonde, build_vals.squeeze(), all_train_indices, all_validation_indices, 0.0, 1000, 0 ) num_steps = out[1].shape[1] # use num_steps -1 bscause leave one out cross validation is # invalid when V is underdterimed which happens when i = num_steps. for i in xrange( num_steps-1 ): I = numpy.asarray( out[1][1,:i+1], dtype = numpy.int32 ) V = vandermonde[:,I] for j in xrange( len( all_validation_indices ) ): J = all_train_indices[j] K = all_validation_indices[j] A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b ) assert numpy.allclose( ( build_vals[K,0] - numpy.dot( V, x )[K,0 ]), out[2][i][j] ) all_train_indices = [] all_validation_indices = [] num_folds = 5 cv_iterator = KFoldCrossValidationIterator( num_folds, build_pts.shape[1] ) for train_indices, validation_indices in cv_iterator: all_train_indices.append( train_indices ) all_validation_indices.append( validation_indices ) vandermonde = pce.vandermonde( build_pts ).T out = orthogonal_matching_pursuit_cholesky( vandermonde, build_vals.squeeze(), all_train_indices, all_validation_indices, 0.0, 1000, 0 ) num_steps = out[1].shape[1] for i in xrange( num_steps-1 ): I = numpy.asarray( out[1][1,:i+1], dtype = numpy.int32 ) V = vandermonde[:,I] for j in xrange( len( all_validation_indices ) ): J = all_train_indices[j] K = all_validation_indices[j] A = V[J,:] b = build_vals[J,:] x = ridge_regression( A, b ) if ( len( I ) <= len( J ) ): assert numpy.allclose( ( build_vals[K,0] - numpy.dot( V, x )[K,0] ), out[2][i][j] ) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
2,013
7,344,394,114,408
ba6a4e2d0926f9a0db7650ca6e863e7606a7cc9d
04da89950f839b2ccc10522061e058dfe6afd4a8
/Packer.py
c4b1d3c44587ab739446dec0a14110030d6eaf01
[]
no_license
Cocobug/MINI
https://github.com/Cocobug/MINI
8c9bf90b0ed29f6d8955490f488e0731d5247d57
9b193d0d6b40aaf46079247695c1f71932ef0144
refs/heads/master
2016-09-03T06:31:19.653880
2011-11-15T11:07:55
2011-11-15T11:07:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding:latin-1 -*- ##################################### # Date: 05/04/10 # # Auteur: Rigaut Maximilien # # Nom: Packer # # Version: 1.0 # # Copyright 2010: Rigaut Maximilien # ##################################### # This file is part of YASMS. # # YASMS is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # YASMS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with YASMS. If not, see <http://www.gnu.org/licenses/>. ################################# from tarfile import open import os nom="YASMS" v=raw_input("Version du programme: ") nom=nom+'_All '+v+'.tar.gz' tFile= open(nom,'w:gz') fichiers=os.listdir('.') fichiers.pop(fichiers.index('Archives')) for fichier in fichiers: tFile.add(fichier,fichier,True) tFile.close() os.rename(nom,'./Archives/'+nom)
UTF-8
Python
false
false
2,011
2,989,297,241,708
fb702299473de4bcae86d59aef083a5014a0adad
42a28c247f89a594144aa8369773e43d8bb098d0
/src_daemon/test_dictate.py
3dfbc92c4939c9e7806262bfd8fbffa64f0b0a07
[]
no_license
gabadie/yotta
https://github.com/gabadie/yotta
d22da520e6bfe2744a0d5134ad6077225f636f37
744f6295a15c4c907778e241cf379cf9ddaddcdf
refs/heads/master
2020-08-04T12:18:39.500233
2014-04-26T21:48:19
2014-04-26T21:48:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import struct from daemon import Daemon import dictate def test_frame_error(): msg = 'hello world' frame = dictate.frame_error(msg) theoric_frame = '' theoric_frame += '\x00\x00' theoric_frame += '\x0B\x00\x00\x00\x00\x00\x00\x00' theoric_frame += struct.pack('{}s'.format(len(msg)), msg) assert len(theoric_frame) == 2 + 8 + len(msg) assert frame == theoric_frame def test_deamon_info(): daemon = Daemon() daemon.computers = 3 daemon.threads = 7 frame = dictate.deamon_info(daemon) theoric_frame = '' theoric_frame += '\x00\x10' theoric_frame += '\x10\x00\x00\x00\x00\x00\x00\x00' theoric_frame += '\x03\x00\x00\x00\x00\x00\x00\x00' theoric_frame += '\x07\x00\x00\x00\x00\x00\x00\x00' assert frame == theoric_frame
UTF-8
Python
false
false
2,014
3,238,405,389,919
ad302ee60c67cc56b1db462c9f5464b316dce74a
6d531163482af8876b79be727f8eb2fdfab73112
/ex33.py
460d6d18cf844d88281651d356bc8a963a3618fb
[]
no_license
joetait/learnpythonthehardway
https://github.com/joetait/learnpythonthehardway
ffe9c85fd834a3130396e049ac394a0f9c6ed537
436718a30e6a0e4d2dee80e6b7a348be4f261371
refs/heads/master
2020-06-04T19:09:04.203151
2014-07-27T20:38:30
2014-07-27T20:38:30
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def list_numbers(length, inc): numbers = [] for i in range (0,length,inc): print "At the top i is %d" % i numbers.append(i) print "Numbers now: ", numbers print "At the bottom i is %d" % i return numbers numbers = list_numbers(20,3) # #def list_numbers(length, inc): # i = 0 # j = inc # numbers = [] # while i < length: # print "At the top i is %d" % i # numbers.append(i) # # i = i + j # print "Numbers now: ", numbers # print "At the bottom i is %d" % i # # return numbers # #numbers = list_numbers(10,2) print "The numbers: " for num in numbers: print num
UTF-8
Python
false
false
2,014
18,683,107,741,556
c8d0a19855709973c906ae4db51f632d7397479c
31b90992af2285159c32e1e389d37efae482563e
/application/logic/web_handlers/add_manager_form_handler.py
b06ce60cc4b41342fd60d151f2a346d2289c4327
[]
no_license
mkorenkov/Pragmatiq
https://github.com/mkorenkov/Pragmatiq
633345350b03bfc77c85d726fe08181af6647780
fb61b9fb2be087d9e9b9ea0307e6d4b27bebd945
refs/heads/master
2020-01-23T05:10:30.548338
2012-02-20T10:17:00
2012-02-20T10:17:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from logic.func import get_prev_pr from logic.models import Salary, Grade, Position, Conclusion from logic.web_handlers.add_form_handler import AddEmployeeForm class AddManagerForm(AddEmployeeForm): type = "manager" form = None flag = 0 def get(self, key): super(AddManagerForm, self).get(key) if self.form.type == 'manager': prev_pr = get_prev_pr(self.form.pr) try: prev_form = prev_pr.forms.filter('type', self.form.type).get() prev_salary = prev_form.get_all_data['salary'] prev_grade = prev_form.get_all_data['grade'] except AttributeError: prev_salary = None prev_grade = None prev_position = self.form.pr.employee.position if prev_salary: salary = Salary(value=prev_salary.value, form=self.form) else: salary = Salary(value='N/A', form=self.form) salary.put() if prev_grade: grade = Grade(value=prev_grade.value, form=self.form) else: grade = Grade(value='N/A', form=self.form) grade.put() if prev_position: position = Position(value=prev_position, form=self.form) else: position = Position(value='N/A', form=self.form) position.put() conclusion = Conclusion(value='meet expectations', form=self.form) conclusion.put() self.redirect('/manager/pr/get/%(type)s/%(key)s' % {'type': self.type, 'key': self.form.pr.key()})
UTF-8
Python
false
false
2,012
17,549,236,380,589
c6a278e80e241b534aa8a48d594277bede06609a
a6b3a096090672d46f754217987fd10deb6780c3
/rSpider_multiThread/testRenrenBrowser.py
7fb417d2d977424943875331101d652e9aab01b0
[]
no_license
jinmfeng/dataBang
https://github.com/jinmfeng/dataBang
7c1e0b8b15952373898247eb0d17641bcfa3896f
20eaa17a498b34fea5431bd899a20fda1f896f6a
refs/heads/master
2021-01-20T21:26:16.500908
2013-03-13T08:38:26
2013-03-13T08:38:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import shutil
import os
import unittest

from renrenBrowser import *


class TestRenrenBrowser(unittest.TestCase):
    def setUp(self):
        self.pwdRoot = './testBrowser'
        self.pwdSave = self.pwdRoot + '/renrenData/pages'
        if os.path.exists(self.pwdSave):
            shutil.rmtree(self.pwdSave)
        self.browser = RenrenBrowser(path=self.pwdRoot)
        #self.browser.login()

    def tearDown(self):
        # self.browser.dispose()
        self.browser = None

    def testProfile(self):
        # myself, timeline ok/unavailable, old style ok/unavailable
        renrenIds = {'233330059', '230760442', '223981104', '410941086', '285060168'}
        self.browser.setLogLevel(10)  # debug
        self.browser.localSave(False)
        for renrenId in renrenIds:
            self.assertNotEqual(self.browser.profile(renrenId), 'timeout')
        self.assertFalse(os.path.exists(self.pwdSave))  # path not exist
        self.browser.localSave(True)
        for renrenId in renrenIds:
            self.assertNotEqual(self.browser.profile(renrenId), 'timeout')
        self.assertEqual(len(os.listdir(self.pwdSave)), len(renrenIds))

    def testFriendList(self):
        # target one page at a time and check htmlStr is not 'timeout'
        # myself, 3+ pages / 2 pages / 1 page / unavailable
        renrenIds = {'233330059', '410941086', '267654044', '285060168', '240303471'}
        self.browser.setLogLevel(10)  # debug
        # target pages 0, 1, 2
        self.browser.localSave(False)
        for targetPage in range(0, 3):
            for renrenId in renrenIds:
                self.assertNotEqual(self.browser.friendList(renrenId, targetPage), 'timeout')
        self.assertFalse(os.path.exists(self.pwdSave))  # path not exist
        self.browser.localSave(True)
        for targetPage in range(0, 3):
            for renrenId in renrenIds:
                self.assertNotEqual(self.browser.friendList(renrenId, targetPage), 'timeout')
            self.assertEqual(len(os.listdir(self.pwdSave)),
                             len(renrenIds) * (targetPage + 1))
        # target all pages and check len(set)
        flist = {'232639310': 35, '242543024': 152, '285060168': 5}
        for item in flist.items():
            self.assertEqual(len(self.browser.friendList(item[0])), item[1])


if __name__ == '__main__':
    suite = unittest.TestSuite()
    suite.addTest(TestRenrenBrowser('testProfile'))
    suite.addTest(TestRenrenBrowser('testFriendList'))
    runner = unittest.TextTestRunner()
    runner.run(suite)
UTF-8
Python
false
false
2,013
7,782,480,754,530
afaed1e84daf4f393f387a0c81a382690fb6037b
f9905ef34784f332deb22da395df037c28c9ef76
/haicheng/cost_model/select.py
82f53a71f6e942a3927e1974a4272b0e77b4bc8d
[]
no_license
batvph00561/harmonyruntime
https://github.com/batvph00561/harmonyruntime
c76fb06eaf3697c351ac538ccfe98c4b7f5a4087
b4df7a045109d19c1133f2225822a3e5f94a0d15
refs/heads/master
2016-09-15T20:54:27.082826
2012-04-18T20:37:01
2012-04-18T20:37:01
33,104,889
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/python
import os
import numpy
import sys
import subprocess

#check arguments
if len(sys.argv) > 1 and len(sys.argv) != 4:
    print "arguments error, usage: ./select.py element_num cta_num thread_num"
    exit()

#read file
#f = open("select_v13.cu")
#read beginning lines
#tmpHeader = []
#for i in range(1,19):
#    tmpHeader.append(f.readline())
#ctaLine = f.readline()
#threadLine = f.readline()
#
#tmpTail = f.readlines()
#f.close()

#output
rmCmd = "rm -rf select.csv"
print rmCmd
os.system(rmCmd)
print "Writing result to select.csv"
output = open("select.csv", "w")
output.close()

#modify cta number and thread number
#assert ctaLine[0:13] == "#define CTAS "
#assert threadLine[0:16] == "#define threads "

#eleNumPowEnd = 18  #256K*1024
eleNumTimeEnd = 40
ctaNumPowEnd = 15  #32K
threadNumTimeEnd = 16  #16*64

#eleNumArray = numpy.power(2, range(0,eleNumPowEnd+1)) * 1024
#eleNumArray = numpy.append(eleNumArray, 400*1024*1024)
eleNumArray = 100*1024*1024 * numpy.arange(5, eleNumTimeEnd + 1)

firstTime = True

#choose start element
if len(sys.argv) > 1:
    eleStart = numpy.where(eleNumArray == int(sys.argv[1]))[0][0]
else:
    eleStart = 0

#modify element number
for eleNum in eleNumArray[eleStart:]:
    # ctaNumArray = numpy.power(2, range(1,ctaNumPowEnd+1))
    #
    # if len(sys.argv) > 1 and firstTime:
    #     ctaStart = numpy.where(ctaNumArray == int(sys.argv[2]))[0][0]
    # else:
    #     ctaStart = 0
    #
    # for ctaNum in ctaNumArray[ctaStart:]:
    #
    #     if ctaNum > eleNum: #should be in range
    #         continue
    #
    #     threadNumArray = 64 * numpy.arange(1, threadNumTimeEnd+1)
    #
    #     if len(sys.argv) > 1 and firstTime:
    #         threadStart = numpy.where(threadNumArray == int(sys.argv[3]))[0][0]
    #         firstTime = False
    #     else:
    #         threadStart = 0
    #     for threadNum in threadNumArray[threadStart:]:
    #
    #         if ctaNum * threadNum > eleNum: #should be in range
    #             break
    #
    #         ctaLine = "#define CTAS " + str(ctaNum) + "\n"
    #         threadLine = "#define threads " + str(threadNum) + "\n"
    #
    #         outf = open("tmp.cu", "w")
    #         outf.writelines(tmpHeader)
    #         outf.write(ctaLine)
    #         outf.write(threadLine)
    #         outf.writelines(tmpTail)
    #         outf.close()
    #
    #         #compile and execute
    #         print "CTAS", ctaNum
    #         print "threads", threadNum
    #         compileCmd = "nvcc tmp.cu -o tmp -arch sm_23 -O3 -lpthread"
    #         print compileCmd
    #         os.system(compileCmd)
    exeCmd = "./select_v13 " + str(eleNum)
    print exeCmd
    out = subprocess.Popen(["./select_v13", str(eleNum)], stdout=subprocess.PIPE)
    output = open("select.csv", "a")
    output.write(out.stdout.read())
    output.close()
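The live part of the loop shells out to ./select_v13 and appends its stdout to select.csv. The same capture-and-append step, isolated for clarity (a sketch in the script's own Python 2 idioms; the binary name comes from the script above):

import subprocess

def run_and_log(ele_num, csv_path="select.csv"):
    # run one benchmark and append its stdout to the results file
    proc = subprocess.Popen(["./select_v13", str(ele_num)],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    with open(csv_path, "a") as results:
        results.write(out)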
UTF-8
Python
false
false
2,012
17,085,379,943,846
b5292b53361c522e246830f8415851d9c2f5784c
f48a2fc8152ebd290c0f96358811087617d0f499
/python_side/Door_serverv0.6.py
4e8f1078593613c0e50ff63fef8b1e8a0e4e22a4
[]
no_license
kism3t1/Door-LCD
https://github.com/kism3t1/Door-LCD
0038ea74decb1478f74e2eaa85e9d0e37a8a0929
93c19015fac048d7a4bf6af2fd73153d300379ad
refs/heads/master
2021-01-19T08:56:06.611141
2012-03-12T18:05:08
2012-03-12T18:05:08
3,400,896
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# TCP server example
import socket
import serial
import time
import datetime
import sys
import argparse

# note: the timestamp is captured once at startup, so every log entry
# below shares the server start time
the_Time = datetime.datetime.now()

if len(sys.argv) != 2:
    print "[+] usage: ./Door_serverv0.6.py <PORT>"
    sys.exit(1)

PORT = int(sys.argv[1])
#HOST = " "

print "The chosen port is " + str(PORT)

ser = serial.Serial('/dev/ttyACM0')

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#server_socket.bind(("", 5000))
server_socket.bind(("", PORT))
server_socket.listen(5)
print "TCPServer Waiting for client on port " + str(PORT)

while 1:
    client_socket, address = server_socket.accept()
    print "I got a connection from ", address
    while 1:
        data = client_socket.recv(512)
        if (data == 'q' or data == 'Q'):
            client_socket.close()
            f = open('doorlog.ian', 'a')
            f.write("Exiting Server @ " + str(the_Time) + "\n")
            f.close()
            exit()
        elif (data == 'out'):
            print "RECEIVED:", data
            ser.write("o")
            f = open('doorlog.ian', 'a')
            f.write("Out of Office @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'in'):
            print "RECEIVED:", data
            ser.write("i")
            f = open('doorlog.ian', 'a')
            f.write("In Office @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'pub'):
            print "RECEIVED:", data
            ser.write("p")
            f = open('doorlog.ian', 'a')
            f.write("Down the Pub @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'yes'):
            print "RECEIVED:", data
            ser.write("y")
            f = open('doorlog.ian', 'a')
            f.write("Come in! @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'meeting'):
            print "RECEIVED:", data
            ser.write("m")
            f = open('doorlog.ian', 'a')
            f.write("Sorry in a Meeting @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'face'):
            print "RECEIVED:", data
            ser.write("f")
            f = open('doorlog.ian', 'a')
            f.write("Displaying the FACE! @ " + str(the_Time) + "\n")
            f.close()
        elif (data == 'close'):
            print "RECEIVED:", data
            client_socket.close()
            f = open('doorlog.ian', 'a')
            f.write("Restarting Connection... @ " + str(the_Time) + "\n")
            f.close()
            print "Restarting connection:"
            #server_socket.bind(("", 5000))
            server_socket.listen(5)
            print "Door LCD: Waiting for Ian's PC on port 5000"
            break  # client socket is closed; go back to accept()
        else:
            print("*")
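Every elif branch above does the same three things: echo the command, write one character to the serial LCD, and append a log line. A table-driven sketch of the same idea, which collapses the repetition (a hypothetical refactor, not code from the Door-LCD repo):

import datetime

COMMANDS = {
    'out':     ('o', 'Out of Office'),
    'in':      ('i', 'In Office'),
    'pub':     ('p', 'Down the Pub'),
    'yes':     ('y', 'Come in!'),
    'meeting': ('m', 'Sorry in a Meeting'),
    'face':    ('f', 'Displaying the FACE!'),
}

def handle(data, ser, logpath='doorlog.ian'):
    # map one command string to its serial character and log message
    if data in COMMANDS:
        char, message = COMMANDS[data]
        ser.write(char)
        with open(logpath, 'a') as f:
            f.write("%s @ %s\n" % (message, datetime.datetime.now()))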
UTF-8
Python
false
false
2,012
14,164,802,159,443
0a56fd023775cd0fa3026345a84f267d0048a909
daf95a08aa12d2251f3991fbe2b678ed910a60ee
/weather.py
ba01d8b1fefc24bada5c916716fe2a3863575a29
[]
no_license
stuycs-softdev-fall-2013/proj2-pd6-06-GoodMorning
https://github.com/stuycs-softdev-fall-2013/proj2-pd6-06-GoodMorning
90843d9fd554aecf56bf8d847414d1e852c5d1c2
0707cbddefce390e3b183cc415b8476fe3d4bd15
refs/heads/master
2021-01-23T11:33:34.232421
2013-12-06T09:00:50
2013-12-06T09:00:50
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import urllib2
import json


def url():
    f = urllib2.urlopen('http://api.wunderground.com/api/987077b70105ec11/hourly/q/NY/New_York_City.json')
    return f

#-----------------USING WUNDERGROUND (weather underground/weather channel) API

def getWeather():
    f = url()
    json_string = f.read()
    parsed_json = json.loads(json_string)
    weather = parsed_json['hourly_forecast'][0]['condition']
    f.close()
    #-----------------returns what the weather is predicted to be (ex: cloudy, rainy, etc.)
    return weather


def getTemp():
    f = url()
    json_string = f.read()
    parsed_json = json.loads(json_string)
    temp_f = parsed_json['hourly_forecast'][0]['temp']['english']
    f.close()
    #-----------------returns the expected temperature in fahrenheit. celsius is possible by replacing 'english' with 'metric'
    return temp_f
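A minimal usage sketch, assuming the Wunderground endpoint and the API key baked into url() still respond (the service has since been retired, so treat this as illustrative):

if __name__ == '__main__':
    # both helpers hit the same hourly-forecast endpoint
    print "NYC forecast: %s, %s F" % (getWeather(), getTemp())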
UTF-8
Python
false
false
2,013
6,021,544,166,369
51413e1aae4c2f0a165afec264ed886492d9738c
6dd1cca222f557eaa87645f9fed90d230a52f40c
/scriptsForPictures/scatterplot200and2000frames.py
2ccb21d109f740ef1baf750dc6a57dee1ade123c
[]
no_license
fherrmannsdoerfer/masterArbeit
https://github.com/fherrmannsdoerfer/masterArbeit
84c546737a4c1a0d63f090c9e6178b62c80db078
d6639759b0b9c66c8c561dad067185e58993a9c4
refs/heads/master
2020-12-24T16:49:58.265911
2013-06-07T16:22:18
2013-06-07T16:22:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np import matplotlib.pyplot as plot y200 = [681.743, 662.12, 931.126, 1214.01, 632.229, 658.116, 603.67, 536.833, 756.438, 1044.05, 653.162, 577.872, 568.438, 562.657, 552.272, 547.224, 532.969, 505.124, 610.332, 540.592, 575.771, 498.912, 489.702, 598.537, 573.327, 584.177, 617.958, 667.46, 568.544, 516.282, 609.562, 690.694, 574.577, 614.993, 604.329, 632.576, 563.449, 558.082, 614.49, 657.338, 631.615, 626.906, 651.686, 770.814, 668.025, 616.958, 697.004, 643.536, 749.92, 722.886, 685.852, 668.536, 725.505, 772.818, 712.879, 653.323, 774.895, 734.905, 779.83, 854.317, 622.252, 756.669, 957.158, 753.328, 833.74, 813.774, 599.949, 688.538, 856.767, 824.431, 785.724, 867.578, 764.84, 761.088, 673.847, 713.189, 879.369, 678.094, 703.111, 758.669, 822.999, 873.861, 835.924, 1095.43, 952.186, 884.578, 936.288, 3362.11, 1185.11, 879.104, 677.031, 982.987, 1209.75, 957.92, 719.284, 1037.5, 796.198, 2422.94, 954.599, 1464.55, 858.934, 924.957, 1067.82, 1012.1, 2190.56, 820.816, 1255.44, 1144.36, 946.304, 954.194, 1062.74, 1467.66, 1051.94, 2419.82, 2860.71, 1053.89, 2073.97, 928.202, 886.938, 1018.56, 1175.71, 1337.36, 3102.44, 977.935, 1081.49, 948.079, 950.438, 918.01, 3888.59, 1102.57, 1697.7, 2793, 1299.08, 1223.99, 1176.7, 993.709, 1006.51, 1123.77, 1141.53, 1083.39, 1472.94, 1088.77, 1170.23, 1537.65, 1155.26, 1300.27, 1359, 2135.16, 1393, 905.614, 1145.05, 1444.29, 983.482, 1083.65, 1220.55, 1516.89, 1199.18, 1163.77, 1923.71, 1301.71, 2516.39, 1129.27, 1301.87, 1280.35, 1702.66, 1814.58, 1651.47, 2336.18, 1504.18, 1430.18, 2332.07, 1167.41, 1581.03, 1002.2, 1243.13, 1058.51, 1782.7, 1225.01, 1195.16, 1577.81, 23929, 1059.58, 5423.47, 1133.51, 1917.53, 1107.96, 1268.91, 3419.2, 1793.77, 4151.62, 1179.79, 2084.39, 1353.24, 1470.79, 1429.52, 1558.47, 1493.52, 3660.16, 1420.78, 1245.21, 1409.37, 1324.64, 1296.32, 2471.82, 2050.03, 1437.92, 1522.4, 3132.82, 10654, 5890.01, 1406.81, 1326.32, 1724.42, 2290.95, 1439.53, 1667.12, 1660.29, 2415.91, 6546.46, 1314.76, 1611.3, 2480.19, 1429.86, 1767.77, 1811.65, 2822.91, 1575.8, 2536.36, 1253.17, 1916.63, 2672.12, 2059.45, 1568, 1854.89, 3843.53, 1704.17, 3445.03, 1636.03, 1704.45, 1580.38, 1731.48, 1789.67, 2557.66, 1619.69, 1669.88, 3076.99, 7281.66, 1289.18, 1581.71, 1881.08, 1586.08, 1663.76, 6455.87, 1681.9, 6393.47, 3048.4, 1675.54, 1656.01, 14749.3, 4676.28, 7223.98, 25183.7, 1731.44, 13572.3, 3387.42, 2375.23, 3240.39, 1677.17, 4616.91, 1705.99, 1395.03, 2109.66, 2563.98, 3359.19, 5614.09, 4099.91, 1918.14, 10484.3, 2730.39, 3659.11, 1783.3, 1583.47, 7363.56, 1916.97, 2073.23, 1948.28, 1712.64, 6930.29, 1635.33, 1912.95, 1688.31, 2097.27, 5737.7, 4376.4, 4476.25, 10203.6, 2008.16, 1867.04, 2074.93, 3604.52, 7713.94, 2263.18, 3496.64, 2106.36, 2389.58, 1941.93, 2181.15, 2003.48, 1856.04, 2415.85, 1977.1, 2613.18, 1983.39, 15753.1, 1729.34, 2011.13, 3859.13, 8179.89, 2221.26, 8125.42, 1771.9, 2080.67, 1921.51, 2127, 1700.51, 2353.38, 2060.66, 18387.5, 1895.56, 2026.48, 4528.17, 7582.23, 2499.08, 2047.94, 2244.21, 2225.63, 4372.16, 5601.77, 2897.22, 5335.34, 2000.38, 13550.7, 5255.65, 10813, 1831.48, 2573.45, 13120.1, 6544.92, 3580.48, 5535.24, 4463.65, 10876.9, 21035.3, 22631.1, 8061.44, 13452.1, 24642.4, 13949.6, 9204.74, 11998.4, 5281.75, 45236.6, 12117.1, 5788.4, 11672.8, 6294.83, 8693.7, 2143.08, 6895.15, 12318.9, 5431.11, 5146.42, 28870, 6040.51, 9489.4, 5968.09, 8144.23, 7511.53, 31740.5, 22570.1, 3891, 2511.15, 5354.78, 18269.1, 16045.1, 13173, 5682.23, 25674, 21055.2, 19824.8, 14339.7, 19345.5, 6500.51, 6589.79, 26822.4, 
42188.3, 6137.9, 6667.73, 13687, 17607.4, 11421.7, 5932.95, 48730.2, 7127.63, 10138.8, 3360.97, 33839.3, 13131.2, 8231.54, 18418.6, 27789.5, 8440.87, 8569.06, 16161.5, 22395.7, 7460.18, 8863.26, 12981.6, 12722.4, 3290.23, 6170.84, 23394.4, 26645.9, 26190.4, 24060.1, 20012.9, 25879.3, 13125.1, 55868.1, 12125.8, 25007.5, 32598.4, 25497.6, 27041, 12496.4, 24173.2, 11790.5, 49739.4, 4375.74, 8834.62, 17607.8, 7815.58, 26741.6, 10198.3, 10660.9, 6647.83, 14379, 24882.1, 6586.66, 35046.4, 10473.1, 37564.5, 35652.6, 27447.7, 29942.6, 25322.6, 2945.71, 34405.9, 14147.6, 10217.4, 59009.5, 9293.24, 49480, 40575.1, 54103.8, 32235.9, 32593.6, 24406.1, 65377, 32905.7, 17155.8, 18469.6, 33690.2, 22991.5, 51052.1, 42759.5, 98931.4, 36313.9, 70043.3, 15018.8, 45743.2, 98736.6, 27154.5, 33297.6, 25277.7, 41649.2, 53341.9, 28213.4, 12058.4, 43461.7, 24195.5, 13411.1, 53510.6, 5408.42, 49844, 22291.3, 37217, 36250, 45040.5, 27934.7, 23350.4, 8606.62, 46089.4, 16645, 21176.8, 5974.27, 4921.36, 39559.4, 58506.5, 55672.2, 28504.1, 26848.4, 16313.3, 16385.4, 24949.8, 25375.1, 43033.6, 38589.5, 138138, 32297.6, 29498.8, 23463.9, 19668.3, 16922.3, 4973.9, 23978.3, 9486.55, 39987.1, 20181.3, 63596.9, 17941.3, 24533.8, 93196.6, 34196.7, 32874.6, 36172.9, 35704.1, 30068.2, 23862.1, 21868.1, 16654.6, 36960.4, 13132.2, 32152.4, 37471.6, 80530.7, 20571.9, 62294.6, 18939, 30203.8, 38578.2, 21315.8, 6796.62, 44202.6, 38294.4, 40568.3, 48245.7, 36564.1, 63809.6, 22523.2, 76868.6, 5760.12, 44153.3, 3575.75, 23618.5, 17583.3, 4078.61, 3555.44, 35145.4, 39034.5, 15989.3, 34611, 33922.7, 20767.8, 76214.6, 132816, 57810.6, 59038.9, 64896.3, 37417.1, 30619.3, 77341.8, 17271.5, 26133, 34230.4, 30024.9, 45931.1, 47610.3, 6123.11, 28327.3, 60511.7, 48278.2, 36342, 20393.3, 41307, 34378, 65206.7, 37869.1, 48936.3, 36284.1, 20644.5, 27136.3, 43090.6, 3856.1, 47390.5, 105371, 95623.8, 31408.5, 86566.1, 40960.1, 22986, 70859.3, 146400, 52548.8, 36374.1, 64135.9, 65109.9, 22198.7, 5575.37, 69587.1, 27239.2, 27162.9, 21177.7, 37809.3, 72692.8, 26366.9, 28650.5, 40022.9, 45985.8, 23640, 45092.6, 83745.1, 5141.75, 49769.8, 56259.1, 46780.2, 59016.2, 3971.98, 6739.78, 21849.9, 55271.8, 69229.7, 56925, 29833, 73973.7, 72531, 38204, 66877.7, 22564.8, 33877.5, 41992.2, 34361.4, 51078.1, 61805.5, 51426.4, 53489.5, 42156.5, 50946.3, 66129, 5645.18, 4823.37, 44501.6, 36930.9, 50654.6, 39622.5, 44066.4, 4062.11, 27244.9, 28766.5, 81498.4, 32875.4, 61151.7, 33294.4, 37596.5, 92853.2, 40586.6, 39932.5, 61394.2, 7799.78, 6237.06, 27655.9, 57931.4, 132553, 3724.75, 57728.8, 75947.5, 7319.06, 3949.84, 53195.7, 7842.14, 77541.6, 42739.8, 88916.4, 42425.2, 63093.3, 57315.8, 52557.8, 56712.7, 50769.3, 100589, 6675.86, 64297.2, 82295.6, 44531.5, 58754.1, 4766.3, 119179, 74638.9, 95997.9, 65025.2, 4721.07, 13539.6, 13102.5, 7485.12, 54090.6, 44410.2, 84027.9, 44325.1, 47587.5, 76094.3, 44969, 35599.5, 86226, 14324.6, 54845, 4426.81, 46756.2, 71578.2, 59334.6, 63169, 49948.4, 91508, 47096.3, 103631, 42901.6, 49046.7, 9835.06, 64919, 49592.1, 50410, 37430.8, 69280.2, 79929.7, 13098.8, 59906.2, 43524.5, 90587.6, 85733.9, 63013.8, 7194.28, 11005.4, 86249.2, 78298, 65777.6, 51196.4, 60964, 90430.5, 86296.1, 13995.2, 63891.8, 62598.2, 44361.1, 64331.6, 56223.4, 5108.35, 163709, 49726.9, 61080.7, 58848, 5871.35, 59436.4, 60737.6, 53703.2, 52377.1, 71293.6, 56901, 53153.6, 48983.7, 63438.6, 59098.7, 81402.2, 49110.1, 4375.64, 43909.8, 61835.9, 15527.8, 13710.3, 82468.9, 60570.7, 17052, 8148.16, 70369.6, 58206.4, 13271.4, 65552.1, 8874.01, 57901.9, 58696.5, 5332.27, 
62792.8, 86875.2, 58215.2, 61148.4, 71872.1, 4876.7, 13308.6, 92341.4, 74564.2, 67693.9, 10326.5, 11598.5, 59704.7, 65194.8, 68453.4, 93051.1, 72639.9, 111978, 62109.5, 5317.88, 57122.6, 72096.1, 48818.8, 59345.4, 12872.2, 10708.1, 63614.7, 71964.5, 120129, 62175.4, 58723.6, 91531.7, 80278.5, 71782.1, 24598.3, 61741.2, 96108.6, 81256.7, 12570.5, 89902.4, 6462.38, 95028.6, 10596.4, 11038.5, 99600.6, 66316.5, 92988, 78046.9, 78903.4, 67743.1, 92541, 81190.7, 75018.5, 7166.41, 23959, 11243.9, 71535.5, 16302.4, 78499.2, 93503.1, 10011.6, 100367, 98180.2, 70130.1, 79870.8, 73093.8, 7829.21, 82850.9, 18861.9, 73479.4, 14041.6, 91267.9, 124262, 32196.9, 21360.6, 10780.4, 119193, 15488.8, 9343.45, 12823.4, 8634.23, 21193.7, 77982.7, 19240.9, 28027.2, 82691.4, 15112.8, 126556, 141808, 13535.2, 12793.3, 29263, 59273.3, 30243.5, 20689.7, 80290.4, 21165.4, 10308.4, 24851.2, 11457.8, 11402.3, 15188.5, 61145.8, 32465.6, 14362.1, 14079.9, 14435.9, 28660.7, 28717.9, 87708.5, 36121.2, 18565.8, 14411.4, 15290.9, 40018.9, 30999.1,] x200 = [476.955, 481.235, 490.79, 490.805, 491.725, 496.71, 498.36, 504.335, 504.89, 506.56, 507.72, 508.22, 508.25, 508.45, 509.09, 509.34, 509.395, 509.715, 509.72, 510.055, 510.155, 510.28, 510.365, 510.385, 510.445, 510.445, 510.455, 510.505, 510.69, 510.72, 510.74, 510.855, 510.865, 511.13, 511.275, 511.315, 511.325, 511.41, 511.48, 512.04, 512.985, 514.06, 515.12, 516.175, 517.245, 518.32, 519.375, 520.435, 521.51, 522.56, 523.635, 524.685, 525.755, 526.82, 527.875, 528.955, 530.015, 531.07, 532.14, 533.195, 534.26, 535.325, 536.39, 537.45, 538.515, 539.575, 540.645, 541.705, 542.77, 543.83, 544.895, 545.96, 547.02, 548.085, 549.15, 550.215, 551.275, 552.34, 553.405, 554.465, 555.53, 556.595, 557.66, 558.72, 559.79, 560.85, 561.915, 562.975, 564.045, 565.105, 566.17, 567.23, 568.295, 569.36, 570.42, 571.49, 572.55, 573.61, 574.675, 575.74, 576.81, 577.865, 578.93, 579.995, 581.06, 582.12, 583.185, 584.255, 585.31, 586.375, 587.44, 588.5, 589.565, 590.63, 591.695, 592.755, 593.82, 594.885, 595.95, 597.01, 598.075, 599.145, 600.2, 601.265, 602.33, 603.395, 604.455, 605.52, 606.585, 607.645, 608.71, 609.775, 610.84, 611.9, 612.965, 614.03, 615.09, 616.155, 617.22, 618.285, 619.345, 620.41, 621.475, 622.54, 623.6, 624.665, 625.73, 626.79, 627.855, 628.92, 629.985, 631.05, 632.115, 633.175, 634.235, 635.3, 636.365, 637.43, 638.49, 639.555, 640.62, 641.685, 642.745, 643.81, 644.875, 645.935, 647, 648.065, 649.13, 650.19, 651.255, 652.325, 653.38, 654.445, 655.51, 656.575, 657.635, 658.7, 659.765, 660.825, 661.89, 662.955, 664.02, 665.085, 666.145, 667.21, 668.275, 669.34, 670.4, 671.465, 672.525, 673.59, 674.655, 675.72, 676.78, 677.845, 678.91, 679.97, 681.035, 682.1, 683.165, 684.23, 685.295, 686.36, 687.415, 688.48, 689.545, 690.61, 691.67, 692.735, 693.8, 694.87, 695.925, 696.99, 698.055, 699.12, 700.18, 701.245, 702.31, 703.37, 704.435, 705.51, 706.56, 707.625, 708.69, 709.755, 710.825, 711.885, 712.945, 714.015, 715.07, 716.145, 717.2, 718.265, 719.33, 720.39, 721.465, 722.515, 723.585, 724.645, 725.71, 726.77, 727.835, 728.915, 729.98, 731.025, 732.095, 733.15, 734.22, 735.28, 736.36, 737.405, 738.485, 739.55, 740.605, 741.67, 742.735, 743.81, 744.85, 745.92, 746.995, 748.045, 749.105, 750.175, 751.27, 752.315, 753.36, 754.425, 755.49, 756.55, 757.625, 758.68, 759.76, 760.815, 761.875, 762.945, 764.015, 765.07, 766.145, 767.2, 768.265, 769.315, 770.385, 771.45, 772.515, 773.57, 774.67, 775.71, 776.765, 777.855, 778.905, 779.955, 781.025, 782.09, 783.14, 784.215, 785.275, 
786.34, 787.4, 788.485, 789.525, 790.6, 791.65, 792.74, 793.79, 794.85, 795.915, 796.97, 798.035, 799.115, 800.18, 801.23, 802.295, 803.415, 804.425, 805.485, 806.54, 807.605, 808.68, 809.745, 810.8, 811.885, 812.97, 813.985, 815.065, 816.135, 817.18, 818.245, 819.31, 820.38, 821.43, 822.51, 823.575, 824.64, 825.685, 826.765, 827.855, 828.915, 829.94, 831.01, 832.075, 833.175, 834.2, 835.28, 836.33, 837.415, 838.595, 839.53, 840.625, 841.65, 842.73, 843.77, 844.845, 845.94, 846.99, 848.115, 849.115, 850.2, 851.3, 852.36, 853.53, 854.51, 855.525, 856.635, 857.595, 858.705, 859.72, 860.87, 861.85, 862.995, 864.02, 865.16, 866.24, 867.175, 868.265, 869.34, 870.395, 871.56, 872.5, 873.765, 874.76, 875.69, 876.915, 877.925, 878.915, 880.06, 881.03, 882.085, 883.12, 884.235, 885.325, 886.365, 887.445, 888.475, 889.505, 890.595, 891.635, 892.705, 893.87, 894.875, 895.955, 897.11, 898.085, 899.18, 900.14, 901.225, 902.295, 903.43, 904.615, 905.555, 906.535, 907.625, 908.725, 909.76, 910.775, 911.885, 912.985, 913.99, 915.165, 916.155, 917.18, 918.48, 919.3, 920.36, 921.44, 922.53, 923.63, 924.825, 925.695, 926.735, 927.885, 928.98, 930.065, 931.05, 932.415, 933.23, 934.255, 935.32, 936.625, 937.395, 938.45, 939.54, 940.68, 941.96, 942.7, 943.975, 944.845, 946.03, 946.965, 948.235, 949.31, 950.21, 951.21, 952.295, 953.43, 954.555, 955.46, 956.675, 957.57, 958.765, 959.945, 960.9, 961.915, 962.92, 964.05, 965.105, 966.13, 967.325, 968.265, 969.38, 970.43, 972.015, 972.63, 974.34, 974.915, 975.89, 976.965, 978.035, 979.35, 980.135, 981.37, 982.105, 983.14, 984.43, 985.365, 986.51, 987.425, 988.66, 989.5, 990.57, 991.685, 992.715, 993.925, 994.915, 996.4, 997.1, 998.12, 999.225, 1000.14, 1001.32, 1002.47, 1003.45, 1004.47, 1005.71, 1007.45, 1008.11, 1008.83, 1009.74, 1010.78, 1012.34, 1013.05, 1014.32, 1015.46, 1016.2, 1017.22, 1018.81, 1019.85, 1020.55, 1021.63, 1022.48, 1023.77, 1025.05, 1026.69, 1026.95, 1028.12, 1028.88, 1030.07, 1031.2, 1032.21, 1033.17, 1034.28, 1035.66, 1036.98, 1037.51, 1038.59, 1039.56, 1040.8, 1042.44, 1043.67, 1044.14, 1044.94, 1046.4, 1047.1, 1048.13, 1049.09, 1050.18, 1051.49, 1053.02, 1054.52, 1054.62, 1055.67, 1056.55, 1058.31, 1058.73, 1059.99, 1060.78, 1061.9, 1063.03, 1063.98, 1065.34, 1066.26, 1067.15, 1068.38, 1069.47, 1070.37, 1071.75, 1072.89, 1074.26, 1075.43, 1075.81, 1076.85, 1077.82, 1078.94, 1080.19, 1081, 1082.3, 1083.1, 1084.27, 1086.06, 1086.29, 1087.69, 1088.71, 1090.29, 1090.72, 1091.68, 1092.69, 1093.92, 1095.44, 1095.94, 1097.02, 1099.92, 1100.72, 1101.9, 1102.31, 1103.04, 1103.39, 1105.58, 1105.68, 1106.79, 1107.79, 1109.02, 1109.69, 1111.98, 1112.17, 1113.56, 1114.01, 1115.12, 1116.18, 1117.39, 1118.29, 1120.55, 1120.82, 1121.58, 1122.54, 1123.67, 1125.4, 1125.94, 1126.9, 1127.83, 1129.01, 1130.05, 1131.84, 1132.09, 1133.18, 1135.2, 1135.24, 1138.06, 1138.71, 1138.73, 1142.22, 1143.07, 1143.1, 1143.53, 1144, 1144.79, 1146.11, 1147.39, 1148.36, 1149.46, 1150.19, 1151.7, 1153.2, 1153.76, 1154.86, 1155.44, 1156.54, 1157.61, 1159.31, 1159.76, 1160.94, 1161.93, 1163.57, 1164.01, 1165.19, 1166.81, 1167.27, 1170.73, 1170.83, 1172.51, 1172.89, 1173.05, 1173.86, 1174.98, 1175.97, 1176.68, 1179.72, 1183.27, 1183.74, 1183.91, 1184.81, 1184.86, 1186.02, 1186.66, 1187.2, 1188.06, 1188.95, 1190.23, 1190.86, 1192.14, 1192.84, 1195.08, 1196.12, 1198.06, 1198.28, 1198.31, 1199.07, 1201.99, 1202.29, 1202.32, 1204.34, 1204.64, 1205.48, 1207.69, 1208.33, 1209.17, 1209.65, 1212.44, 1212.8, 1213.4, 1213.91, 1215.58, 1219.1, 1219.39, 1219.94, 1220.32, 1221.84, 1222.43, 
1223.6, 1223.66, 1226.91, 1228.43, 1230.99, 1231.86, 1232.34, 1232.65, 1232.97, 1233.52, 1234.88, 1235.66, 1235.83, 1237.11, 1237.39, 1239.77, 1242.17, 1243.24, 1243.69, 1243.8, 1244.8, 1245.08, 1245.81, 1247.55, 1247.94, 1249.57, 1250.64, 1251.27, 1252.78, 1255.01, 1255.2, 1259.02, 1259.08, 1260.49, 1261.25, 1261.59, 1264.93, 1267.47, 1267.92, 1269.08, 1269.33, 1272.7, 1272.71, 1272.78, 1273.08, 1273.49, 1274.69, 1275.44, 1278.68, 1281.65, 1282.52, 1282.67, 1284.01, 1284.35, 1284.42, 1286.11, 1288.92, 1289.05, 1289.81, 1289.95, 1290.8, 1292.72, 1292.93, 1294.24, 1295.94, 1296.49, 1297.17, 1297.94, 1298.46, 1303.51, 1303.9, 1307.9, 1308.02, 1308.91, 1311.44, 1312.31, 1312.67, 1313.17, 1314.77, 1315.71, 1319.74, 1320.44, 1320.62, 1321.85, 1322.11, 1322.56, 1323.23, 1328.73, 1328.95, 1335.44, 1337.9, 1341.02, 1342.33, 1343.22, 1347.7, 1348.26, 1348.91, 1350.56, 1351.68, 1356.09, 1356.9, 1358.8, 1361.73, 1361.95, 1363.16, 1365.05, 1365.15, 1367.26, 1369.09, 1369.82, 1372.17, 1374.85, 1375.17, 1380.13, 1382.59, 1384.06, 1390.94, 1398.3, 1399.94, 1401.34, 1401.42, 1404.11, 1407.56, 1409.44, 1409.79, 1410.38, 1412.47, 1414.53, 1415.04, 1415.22, 1415.94, 1424.47, 1424.92, 1425.81, 1426.16, 1426.89, 1427.05, 1428.13, 1428.28, 1431.08, 1448.83, 1462.28, 1467.28, 1472.24, 1475.01, 1481.66, 1491.71, 1492.1, 1493.39, 1501.56, 1508.19, 1510.34, 1517.3, 1522.89, 1525.12, 1527.1, 1529.14, 1530.76, 1536.15, 1540.71, 1548.15, 1550.05, 1553.65, 1555.67, 1557.11, 1564.11, 1575.24, 1581.66, 1583.35, 1604.53, 1605.8, 1649.47, 1656.36, 1657.05, 1657.53, 1664.23, 1664.86, 1672.6, 1675.35, 1703.63, 1705.12, 1705.58, 1708.38, 1714.99, 1751.62, 1796, 1796.85, 1919.04, 1981.48, 1984.73, 1994.78, 2035.6, 2060.94, 2083.54, 2103.58, 2186.93, 2255.43, 2259.26, 2290.61, 2330.15, 2349.48, 2375.01, 2376.64, 2392.32, 2477.42, 2514.92, 2586.97, 2597.65,] y2000 = [463.568, 511.752, 567.601, 601.309, 550.731, 599.666, 647.981, 627.123, 590.324, 705.949, 595.653, 591.492, 623.537, 573.613, 621.538, 582.926, 588.022, 845.963, 560.104, 573.418, 609.648, 570.771, 609.016, 579.097, 599.163, 559.733, 569.81, 556.383, 569.158, 592.739, 603.963, 589.582, 566.231, 598.155, 566.146, 578.214, 605.389, 564.788, 624.991, 584.879, 570.096, 582.281, 570.954, 600.081, 559.388, 564.034, 578.86, 578.628, 586.587, 596.214, 603.566, 578.381, 631.426, 595.221, 656.938, 664.76, 659.244, 709.816, 681.967, 634.885, 611.582, 693.25, 686.668, 783.009, 698.688, 774.203, 806.348, 720.173, 692.94, 675.93, 787.305, 753.821, 726.267, 970.511, 751.047, 1014.65, 715.171, 655.566, 831.972, 735.428, 711.206, 797.938, 878.551, 1032.83, 774.162, 728.909, 774.311, 1006.09, 1889.75, 1101.54, 940.443, 856.049, 1136.42, 1148.29, 819.051, 938.336, 1051.28, 912.42, 972.569, 1157.4, 1223.97, 997.594, 1248.86, 912.128, 886.262, 1576.63, 952.398, 1286.78, 1518.44, 2017.85, 886.082, 1025.62, 979.953, 976.759, 3404.74, 6311.81, 922.492, 2139.62, 900.462, 1163.42, 1144.74, 950.044, 1196.74, 1910.96, 2172.49, 1151.29, 1276.69, 996.497, 1080.86, 1114.81, 1389.23, 1101.42, 982.03, 1024.37, 1583.08, 1518.95, 1480.04, 4811.01, 1337.4, 1109.1, 2356.71, 1094.69, 1041.76, 1434.82, 1177.17, 1736.21, 1654.43, 1409.45, 3658.62, 1182.28, 1538.92, 1334.05, 1191.01, 1247.77, 1686.47, 1302.21, 1288.96, 1168.88, 1224.14, 1252.68, 1170.03, 1456.82, 1255.8, 1238.89, 1168.49, 3919.74, 1259.39, 1478.73, 1269.95, 2264.71, 1428.69, 1249.88, 1278.51, 1374.28, 2934.64, 2264.41, 1353.14, 1904.42, 1499.88, 2761.67, 1557.2, 1570.11, 1469.92, 1355.28, 3266.01, 9290.76, 2061.46, 1278.17, 1538.05, 
1563.53, 3567.94, 1404.62, 1387.06, 1474.33, 1360.49, 1514.48, 2241.55, 2265.28, 1522.08, 4485.8, 1697.74, 1422.07, 2554.5, 1564.96, 1629.62, 1788.88, 2153.64, 1444.51, 1618.68, 1623.03, 1705.84, 6316.03, 3213.73, 2299.24, 1520.91, 1602.02, 1636.37, 1750.13, 4266.15, 1744.13, 1709.58, 1782.51, 8506.23, 1642.07, 3015.08, 2014.97, 1800.11, 1726.63, 1623.83, 2475.96, 1732.28, 1813.73, 2004.2, 3441.54, 1574.1, 3048.79, 3824.85, 2002.55, 2283.71, 2600.16, 2959.94, 2231.45, 4252.78, 1749.33, 1886.15, 3793.61, 2709.95, 2107.37, 4219.59, 4901.02, 4196.81, 4159.07, 2743.17, 4300.86, 3050.44, 2122.53, 2660.65, 2251.86, 2265.21, 1914.6, 2125.44, 3307.03, 2145.59, 5854.42, 3918.05, 7015.5, 2104.95, 2200.78, 1948.92, 6596.31, 3184.72, 1962.57, 2364.54, 1933.57, 2226.78, 3630, 3435.08, 4213.87, 6979.17, 8109.76, 2125.48, 2116.37, 2040.1, 1941.15, 2628.72, 2603.3, 2748.43, 2480.22, 2006.24, 2771.96, 3967.06, 15910.5, 6431.4, 2208.41, 2641.65, 2653.52, 3087.49, 2342.75, 2177.78, 3577.33, 2411.62, 2245.52, 2681.11, 2898.18, 2485.79, 6852.02, 7413.21, 2297.92, 2457.06, 8211.89, 4343.98, 2200.23, 2493.32, 2544.09, 2260.01, 2333.7, 3844.66, 2425.09, 2306.18, 2425.92, 2398.82, 4598.26, 2437.19, 2404.24, 2564.55, 6330.84, 2380.26, 2611.75, 5972.55, 6296.98, 2538.59, 8438.47, 2353, 2577.65, 10310, 8587.21, 2614.1, 2652.69, 4157.58, 8744.91, 17513.2, 3102.7, 8143.23, 2594.11, 2551.47, 2545.2, 5613.47, 2597.11, 2710.99, 2719.59, 7278.92, 11682.5, 12306.7, 9348.77, 17202.4, 5232.14, 11227.3, 17931.5, 6031.79, 2727.55, 28807.7, 27725.5, 25352.1, 12407.8, 12445, 13232, 12018.5, 7898.1, 7592.39, 16800.8, 7594.41, 36919.8, 21013.2, 8322.73, 14266, 12556.4, 8617.49, 12729.9, 8821.11, 43659.7, 17352.7, 8876.03, 30686.5, 17685.4, 24340.2, 33384.2, 26833.3, 18793.6, 9740.08, 8366.67, 11718.3, 17662.8, 13187.3, 12308.6, 22709.2, 12353.4, 20496.1, 12459.3, 24675.2, 18277.8, 35012.6, 17285.2, 18940.9, 17224.5, 12316.5, 17988.7, 15106.7, 7747.36, 16699.2, 8600.27, 13486.3, 15138.3, 3933.47, 27780.4, 23317.6, 20780, 11069.5, 17565.5, 8570.9, 34030.5, 14207.5, 10776, 11939.8, 18661.3, 4964.24, 40415.1, 11049.8, 40990.6, 14262.1, 14561.5, 29623, 19444.3, 29592.1, 32700.6, 13178.4, 17123.2, 13105.6, 21859.1, 38914.3, 21915.6, 29199.7, 20537.5, 25397.5, 25827.6, 32832.4, 11341, 20797.6, 37452.1, 37527.1, 44262.6, 6474.13, 13578.7, 19464.2, 46841.4, 42849.2, 14397.4, 18135, 11456.9, 43806.3, 47240.5, 56625.3, 14890.7, 10559.5, 42045.5, 20768.2, 42092.2, 43346.5, 27798.7, 4791.4, 7449.79, 23062.8, 15236.3, 55853.9, 26656.2, 6539.09, 27542.7, 45573.8, 7763.38, 16015.4, 27404.4, 22970.7, 19585.3, 34021.9, 37479.1, 20168.7, 19232.5, 28777, 28899.6, 22071.4, 26847.4, 56918.6, 17612.9, 17995.1, 12113.1, 44057.8, 11166.1, 18605.7, 58695, 38751, 39060.5, 20573.6, 39451, 22737.3, 6480.64, 32212.7, 27123.6, 16946.6, 17148, 23245.1, 27559.2, 26753.7, 17309.3, 17437.9, 29009.8, 35491.8, 59604.3, 28979, 25495.4, 35429, 46225.1, 40021.2, 37840.9, 13743.8, 22444.2, 35692.1, 36438.8, 16815, 11795.7, 11074.9, 44216.6, 27618, 6458.24, 19643.9, 51121.3, 54627.5, 9311.95, 27720, 44002.2, 36401.6, 24530.1, 19170.1, 35555.9, 29648.5, 59280.4, 19897.6, 9190.2, 58555.4, 47140, 30011, 30704.1, 35339.8, 57121.7, 21056.2, 40998.5, 28413.3, 41793.4, 47192.1, 27407.3, 30047.4, 27120.6, 29118.3, 23938.9, 58925.5, 72058.5, 22873.5, 42101.8, 66972.2, 44823.1, 35764, 34239.8, 29360.9, 76930.4, 33239.8, 60258.3, 18153.6, 28983.3, 48602.4, 40165, 39565.2, 25828.4, 36787.2, 45500.5, 27645.6, 11434.3, 13844.6, 30231.8, 22930.5, 34881.4, 25895.3, 41087.4, 33922.5, 
28840.5, 28979.4, 29940.5, 8378.83, 34045.9, 34945.5, 61958.6, 13666.6, 39247.7, 43760.2, 70173.1, 66980.1, 27152.9, 55580.7, 46298.2, 9292.65, 41173, 33442.3, 29804.2, 31102.9, 36029.9, 33434.6, 80651.7, 17740.9, 59478, 22610.9, 45120.8, 54997.1, 68034.4, 48126.5, 11208.1, 35740.2, 13588.5, 29276.1, 52817, 46777.3, 46040, 54908.9, 49109.5, 31901.2, 62873.5, 66155.9, 39686.1, 57656.1, 43331.8, 48869.3, 51264.9, 62262.8, 29629.6, 51923.4, 30152.5, 9487.38, 32083.1, 36148.5, 38376.8, 13995.1, 63639, 46674.6, 46288.5, 35630.7, 10194.3, 38090.3, 62287, 14768.3, 49471.9, 9263.67, 61216.1, 18063.5, 35784, 50631.8, 46918.3, 12267.8, 48272.2, 41000.3, 54276.7, 51610.7, 60803.1, 39649.4, 39596.7, 49838, 10832.3, 65093.3, 73042, 39173.7, 16413.5, 57140.8, 75723.7, 32271.1, 65812.8, 43370.3, 24131.5, 69167.1, 54257.5, 63354.8, 57821, 11508, 62243.3, 14790.8, 62076.2, 65049.2, 9378.87, 44190.5, 19507.5, 17976.6, 6800.52, 41426.2, 66740.3, 64705.9, 6375.2, 70862, 73881.1, 27408.8, 42709.7, 74539.8, 15954.2, 23547.2, 55779.2, 17418.7, 53234.2, 76335.6, 56133.1, 72035.8, 62793, 52364.6, 81927.1, 63502.9, 19121.8, 52346.3, 60380.5, 23852.3, 65034.8, 65720.5, 55139.4, 19533.1, 57660, 44206.1, 52533.6, 5622.85, 75929.6, 90270.6, 41565, 42164.2, 56284.5, 49377, 32105.1, 77273.2, 41399.8, 16532.9, 76408.1, 79276.5, 52875.6, 55748.7, 56037.4, 60631.4, 42748.1, 68959, 56964, 73782.4, 11853.4, 18037, 92233.3, 60963.8, 71852.8, 68425, 93216.7, 67420.6, 92619.1, 33786.3, 86222.7, 38859.7, 68007.6, 48184.8, 68676.1, 100067, 70234.3, 73904, 94252.5, 21600.5, 59535.8, 71448.6, 53050, 50875.7, 99244.3, 86279.4, 103999, 111737, 73396.5, 71489.1, 17860.2, 53927.1, 103003, 9487.82, 79201.4, 56103.8, 108290, 77176.6, 75248.7, 76368.5, 76435.6, 55491.3, 31986.6, 70160.7, 110679, 22666.9, 60777.2, 77638.4, 59705, 72981.3, 120519, 89567.8, 68281.5, 88196.6, 85647.6, 39182.8, 34051.6, 23978.1, 104582, 49728.6, 21758.3, 108744, 84070.5, 41880.7, 11106, 69714, 75212.7, 37634.2, 77947.3, 88241.8, 81061, 41752.1, 87753.4, 87902.9, 19569.7, 39363.3, 39224.8, 14412.1, 27383.5, 10602.6, 31496.7, 39034.8, 58931.9, 74993.1, 82528.2, 80649, 37035.4, 113668, 57351.6, 55105.1, 21720.9, 108942, 52483.7, 84357, 66011, 125539, 69543.3, 77999, 33902.2, 163735, 225836, 135163, 113768, 116990, 115874, 60565.6, 106437, 51296.9, 118108, 77679.1] x2000=[467.944, 470.328, 474.58, 477.983, 483.731, 487.206, 489.609, 491.522, 492.445, 492.9, 495.144, 501.409, 501.422, 501.734, 501.793, 501.829, 501.852, 501.959, 502.031, 502.117, 502.12, 502.12, 502.121, 502.152, 502.172, 502.205, 502.298, 502.306, 502.359, 502.366, 502.389, 502.461, 502.48, 502.491, 502.526, 502.531, 502.54, 502.589, 502.608, 502.626, 502.654, 502.661, 502.681, 502.697, 502.703, 502.717, 502.727, 503.461, 504.374, 505.354, 506.244, 507.175, 508.091, 509.016, 509.945, 510.871, 511.805, 512.736, 513.659, 514.586, 515.512, 516.45, 517.371, 518.304, 519.227, 520.156, 521.083, 522.01, 522.94, 523.87, 524.795, 525.726, 526.661, 527.58, 528.511, 529.437, 530.365, 531.296, 532.222, 533.154, 534.08, 535.008, 535.936, 536.864, 537.792, 538.721, 539.648, 540.579, 541.505, 542.433, 543.362, 544.292, 545.219, 546.146, 547.075, 548.005, 548.932, 549.86, 550.788, 551.716, 552.644, 553.574, 554.502, 555.43, 556.357, 557.286, 558.216, 559.143, 560.076, 561, 561.929, 562.857, 563.786, 564.713, 565.641, 566.57, 567.497, 568.425, 569.356, 570.282, 571.211, 572.141, 573.07, 573.997, 574.925, 575.852, 576.781, 577.709, 578.64, 579.565, 580.495, 581.426, 582.352, 583.279, 584.207, 585.137, 586.065, 
586.995, 587.922, 588.849, 589.778, 590.708, 591.64, 592.565, 593.49, 594.419, 595.349, 596.276, 597.205, 598.131, 599.062, 599.989, 600.917, 601.845, 602.773, 603.705, 604.632, 605.558, 606.487, 607.417, 608.348, 609.271, 610.201, 611.128, 612.059, 612.985, 613.914, 614.845, 615.77, 616.699, 617.628, 618.558, 619.487, 620.412, 621.343, 622.274, 623.196, 624.125, 625.066, 625.983, 626.909, 627.838, 628.771, 629.699, 630.625, 631.556, 632.482, 633.408, 634.343, 635.264, 636.195, 637.122, 638.052, 638.982, 639.906, 640.834, 641.766, 642.69, 643.62, 644.549, 645.476, 646.412, 647.333, 648.262, 649.192, 650.119, 651.049, 651.977, 652.905, 653.84, 654.76, 655.688, 656.621, 657.552, 658.479, 659.404, 660.331, 661.257, 662.189, 663.117, 664.044, 664.979, 665.901, 666.83, 667.755, 668.686, 669.615, 670.541, 671.472, 672.397, 673.333, 674.253, 675.188, 676.114, 677.047, 677.969, 678.907, 679.825, 680.758, 681.68, 682.609, 683.542, 684.469, 685.395, 686.322, 687.256, 688.191, 689.108, 690.036, 690.972, 691.891, 692.823, 693.755, 694.677, 695.606, 696.544, 697.464, 698.393, 699.326, 700.257, 701.18, 702.109, 703.032, 703.959, 704.904, 705.818, 706.75, 707.678, 708.607, 709.529, 710.461, 711.387, 712.318, 713.245, 714.17, 715.103, 716.046, 716.968, 717.89, 718.813, 719.746, 720.673, 721.615, 722.531, 723.46, 724.401, 725.331, 726.248, 727.182, 728.11, 729.042, 729.966, 730.887, 731.812, 732.737, 733.704, 734.596, 735.533, 736.453, 737.38, 738.307, 739.265, 740.167, 741.095, 742.026, 742.969, 743.881, 744.827, 745.757, 746.664, 747.611, 748.523, 749.532, 750.383, 751.308, 752.255, 753.167, 754.094, 755.023, 755.956, 756.873, 757.802, 758.74, 759.66, 760.631, 761.534, 762.453, 763.372, 764.318, 765.245, 766.164, 767.086, 768.015, 768.943, 769.872, 770.813, 771.747, 772.657, 773.583, 774.516, 775.489, 776.374, 777.305, 778.252, 779.157, 780.081, 781.052, 781.938, 782.875, 783.81, 784.734, 785.679, 786.622, 787.552, 788.477, 789.392, 790.424, 791.261, 792.166, 793.102, 794.042, 794.995, 795.985, 796.854, 797.741, 798.677, 799.662, 800.512, 801.437, 802.434, 803.336, 804.239, 805.211, 806.088, 807.003, 807.993, 808.947, 809.812, 810.729, 811.791, 812.635, 813.513, 814.431, 815.388, 816.326, 817.286, 818.175, 819.114, 820.023, 820.989, 821.859, 822.859, 823.729, 824.688, 825.602, 826.523, 827.43, 828.484, 829.36, 830.359, 831.218, 832.112, 833.04, 834.076, 834.879, 835.802, 836.714, 837.734, 838.605, 839.797, 840.518, 841.378, 842.3, 843.315, 844.285, 845.104, 846.065, 846.957, 847.998, 848.981, 849.74, 850.696, 851.897, 852.584, 853.957, 854.391, 855.315, 856.24, 857.312, 858.086, 859.13, 859.942, 860.893, 861.899, 862.722, 863.978, 864.708, 865.496, 866.477, 867.451, 868.568, 869.326, 870.332, 871.113, 872.133, 873.435, 873.881, 874.776, 875.697, 876.634, 877.598, 878.565, 879.44, 880.463, 881.292, 882.232, 883.312, 884.131, 885.023, 886.003, 887.044, 887.793, 888.726, 889.776, 890.676, 891.508, 892.565, 893.466, 894.297, 895.201, 896.323, 897.109, 898.159, 899.023, 900.198, 900.804, 902.224, 903.196, 903.734, 904.682, 905.488, 906.358, 907.525, 908.263, 909.469, 910.432, 910.993, 912.073, 912.89, 913.831, 914.909, 915.789, 916.826, 917.611, 918.403, 919.481, 920.333, 921.274, 922.148, 923.042, 924.122, 924.924, 925.852, 926.755, 927.818, 928.703, 929.823, 930.617, 931.398, 932.954, 933.398, 934.331, 935.448, 936.054, 937.226, 937.968, 938.959, 939.924, 940.83, 941.924, 942.781, 943.63, 944.438, 945.516, 946.51, 947.25, 948.185, 949.294, 949.966, 950.896, 951.949, 952.771, 953.726, 955.032, 955.719, 
956.497, 957.498, 958.428, 959.332, 960.76, 961.274, 962.475, 963.307, 964.002, 964.881, 965.805, 966.972, 968.33, 968.594, 969.554, 970.499, 971.515, 972.466, 974.137, 974.339, 975.052, 976.289, 977.497, 977.984, 978.995, 981.135, 981.146, 981.566, 982.721, 983.669, 984.82, 985.827, 986.272, 987.497, 988.811, 989.036, 990.112, 991.418, 991.798, 992.95, 994.833, 995.399, 996.67, 997.063, 997.499, 998.242, 999.353, 1000.24, 1001.76, 1002.04, 1003.92, 1003.96, 1004.81, 1006.99, 1007.19, 1007.64, 1008.5, 1010.2, 1010.83, 1011.64, 1012.68, 1013.17, 1014.07, 1015.12, 1016.56, 1017.48, 1017.9, 1018.84, 1020.18, 1021.1, 1021.72, 1022.58, 1023.98, 1024.53, 1025.28, 1026.27, 1028.34, 1028.56, 1029.38, 1029.91, 1031.06, 1031.84, 1032.82, 1033.79, 1034.56, 1035.51, 1037.47, 1037.67, 1038.31, 1039.87, 1041.36, 1041.42, 1042.68, 1043.17, 1044.08, 1046.39, 1046.6, 1046.95, 1049.05, 1050.5, 1050.78, 1050.9, 1051.41, 1052.52, 1054.38, 1054.71, 1055.01, 1055.91, 1057.4, 1058.21, 1060.06, 1060.6, 1062.67, 1062.78, 1062.91, 1063.74, 1064.22, 1065.39, 1066.21, 1066.97, 1068.12, 1069.29, 1069.87, 1073.55, 1073.77, 1073.81, 1074.69, 1075.11, 1076.49, 1079.03, 1079.12, 1079.15, 1080.29, 1080.47, 1081.89, 1082.14, 1083.32, 1086.1, 1086.35, 1087.94, 1088.55, 1090.49, 1091.45, 1092.24, 1093.39, 1093.96, 1094.3, 1094.83, 1095.23, 1095.86, 1096.45, 1097.42, 1097.59, 1098.89, 1100.14, 1100.63, 1102.98, 1103.27, 1104.39, 1104.49, 1105.82, 1106.05, 1107.1, 1107.83, 1110.79, 1111.03, 1111.26, 1111.9, 1112.73, 1115.58, 1116.82, 1117.42, 1117.45, 1117.58, 1119.53, 1120.22, 1122.03, 1122.36, 1122.55, 1122.91, 1124.46, 1124.97, 1127.49, 1128.24, 1128.28, 1129.64, 1129.72, 1130.18, 1131.47, 1132.74, 1134.28, 1137.81, 1137.82, 1138.14, 1138.23, 1140.35, 1140.97, 1144.22, 1144.7, 1146.29, 1146.65, 1148.93, 1149.47, 1150.14, 1150.15, 1150.29, 1152.06, 1154.92, 1159.51, 1159.69, 1161.82, 1161.97, 1163.24, 1163.91, 1165.58, 1166.87, 1167.8, 1170.2, 1171.59, 1173.7, 1174.3, 1175.16, 1177.11, 1177.28, 1178.19, 1178.25, 1182.94, 1185.64, 1186.06, 1186.31, 1187.12, 1187.32, 1188.4, 1193.68, 1195.48, 1197.58, 1197.82, 1200.51, 1206.91, 1207.41, 1209.17, 1217.19, 1220.61, 1221.51, 1227.79, 1228.04, 1228.65, 1229.81, 1230.69, 1234.87, 1235.51, 1236.28, 1237.49, 1239.01, 1240.85, 1242.75, 1243.46, 1246.92, 1250.41, 1251.61, 1267.1, 1274.85, 1275.98, 1276.46, 1277.47, 1280.9, 1282.38, 1283.42, 1285.29, 1286.34, 1286.97, 1289.62, 1292.03, 1300.45, 1301.54, 1308.27, 1310.26, 1310.82, 1316.13, 1317.84, 1331.07, 1333.46, 1337.21, 1345.61, 1345.94, 1350.77, 1363.09, 1364.29, 1380.01, 1389.55, 1416.39, 1442.25, 1452.25, 1452.99, 1467.49, 1470.39, 1473.72, 1478.47, 1498.98, 1550.69, 1573.2, 1597.14, 1629.43, 1630.39, 1643.84, 1648.5, 1660.74, 1663.12, 1663.51, 1671.13, 1673.72, 1674.3, 1714.79, 1731.73, 1734.58, 1756.54, 1769.73, 1893.29, 1913.23, 1947.25, 1953.02, 2083.1, 2202.75, 2316.44,] plot.scatter(x200,y200) plot.scatter(x2000,y2000,color=[1,0,0]) plot.show()
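The two series plotted above span several orders of magnitude on y, so the red 2000-frame points largely hide the blue 200-frame ones at the default linear scale. A labeled, log-scaled variant of the final plotting calls (a sketch only; the source does not state what the axes measure, and x200/y200/x2000/y2000 are assumed to be defined as above):

import matplotlib.pyplot as plot

plot.scatter(x200, y200, label='200 frames')
plot.scatter(x2000, y2000, color=[1, 0, 0], label='2000 frames')
plot.yscale('log')   # y values run from roughly 500 to over 100,000
plot.legend()
plot.show()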
UTF-8
Python
false
false
2,013
3,238,405,380,234
058697d5bec46bab75926c290c8bbb639f86311e
2de33ba731066a63352080dd19da1e4582bb00c4
/my315ok.portlet.logo/my315ok/portlet/logo/widget.py
74837d0a1a7e4f8619a96867216760006f9895e2
[]
no_license
adam139/plonesrc
https://github.com/adam139/plonesrc
58f48e7cdfc8fbed7398011c40649f095df10066
cbf20045d31d13cf09d0a0b2a4fb78b96c464d20
refs/heads/master
2021-01-10T21:36:44.014240
2014-09-09T04:28:04
2014-09-09T04:28:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from OFS.Image import Image
from zope.app.form.browser.textwidgets import FileWidget
from zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile


class ImageWidget(FileWidget):
    """ The standard FileWidget returns a string instead of an IFile inst,
        which means it will always fail schema validation in formlib.
    """

    template = ViewPageTemplateFile('inputwidget.pt')
    displayWidth = 30

    def __call__(self):
        value = self._getFormValue() or None
        return self.template(name=self.context.__name__, value=value)

    def _toFieldValue(self, input):
        value = super(ImageWidget, self)._toFieldValue(input)
        if value is not self.context.missing_value:
            value = Image('image', 'image', value)
        return value

    def hasInput(self):
        return ((self.name + ".used" in self.request.form)
                or (self.name in self.request.form)
                ) and not self.request.form.get(self.name + ".nochange", '')
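A custom formlib widget like this is normally wired in per-field. A sketch of that hookup, under the assumption that zope.app.form's CustomWidgetFactory is available in this stack (the form class, interface, and field name 'logo' are all illustrative, not from the my315ok.portlet.logo package):

from zope.app.form import CustomWidgetFactory
from zope.formlib import form

# hypothetical edit form using the widget above
form_fields = form.Fields(ILogoPortlet)            # ILogoPortlet: assumed schema
form_fields['logo'].custom_widget = CustomWidgetFactory(ImageWidget)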
UTF-8
Python
false
false
2,014
11,905,649,352,884
e7a7e3510dbfc1f059e426d451ec22a185b27b01
a297132838c5a6b436d940db63286896b8e7c829
/configure
cb29f5d380041ca043510ccf56f7c8134a960ecc
[]
no_license
hotgloupi/cof
https://github.com/hotgloupi/cof
799ba000548959e64e00ec27065afa36d1cb1b31
fa6193fe8ea7b9f5c7318ed5b84059785bc718d3
refs/heads/master
2020-04-23T12:23:42.257051
2013-10-22T09:50:07
2013-10-22T09:50:07
171,166,390
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-

import argparse
import os
import pipes
import re
import shutil
import stat
import subprocess
import sys

import cgitb
cgitb.enable(format='text')


def cleanpath(p, **kwargs):
    p = os.path.normpath(p)
    if p.startswith('./'):
        p = p[2:]
    if kwargs.get('replace_home'):
        p = os.path.join(
            '~',
            os.path.relpath(p, start=os.path.expanduser('~')),
        )
    return p.replace('\\', '/')

def cleanabspath(p, **kwargs):
    return cleanpath(os.path.abspath(p), **kwargs)

def cleanjoin(*args, **kwargs):
    return cleanpath(os.path.join(*args), **kwargs)

FATAL = "[ cfg ] FATAL"
ERROR = "[ cfg ] ERROR"
STATUS = "[ cfg ]"

def err(*args, **kwargs):
    kwargs['file'] = sys.stderr
    return print(*args, **kwargs)

def status(*args, **kwargs):
    return print(STATUS, *args, **kwargs)

def error(*args, **kwargs):
    return err(ERROR, *args, **kwargs)

def fatal(*args, **kwargs):
    try:
        err(FATAL, *args, **kwargs)
    finally:
        sys.exit(1)

def which(binary):
    paths = os.environ['PATH'].split(os.path.pathsep)
    for dir_ in paths:
        path = os.path.join(dir_, binary)
        if os.path.exists(path) and os.stat(path)[stat.ST_MODE] & stat.S_IXUSR:
            return path
    if sys.platform == 'win32' and not binary.lower().endswith('.exe'):
        return which(binary + '.exe')
    return None

def cmd(cmd, stdin=b'', cwd=None, env=None):
    sys.stderr.flush()
    p = subprocess.Popen(cmd, cwd=cwd, stdin=subprocess.PIPE, shell=False, env=env)
    p.stdin.write(stdin)
    p.stdin.close()
    p.wait()
    if p.returncode != 0:
        raise Exception("Command failed")

DEBUG = True
VERBOSE = True

ROOT_DIR = cleanpath(os.path.dirname(__file__))
HOME_URL = "http://hotgloupi.fr/tupcfg.html"
TUP_HOME_URL = "http://gittup.org"
PROJECT_CONFIG_DIR_NAME = ".config"
PROJECT_CONFIG_DIR = cleanjoin(ROOT_DIR, PROJECT_CONFIG_DIR_NAME)
PROJECT_CONFIG_FILENAME = "project.py"
TUP_INSTALL_DIR = cleanjoin(PROJECT_CONFIG_DIR, 'tup')
TUP_GIT_URL = "git://github.com/gittup/tup.git"
TUP_WINDOWS_URL = "http://gittup.org/tup/win32/tup-latest.zip"
TUPCFG_INSTALL_DIR = cleanjoin(PROJECT_CONFIG_DIR, 'tupcfg-install')
TUPCFG_GIT_URL = "git://github.com/hotgloupi/tupcfg.git"
TUPCFG_GENERATORS = ['Tup', 'Makefile']


def self_install(args):
    status("Installing tupcfg in", TUPCFG_INSTALL_DIR)
    if not os.path.exists(TUPCFG_INSTALL_DIR):
        os.makedirs(TUPCFG_INSTALL_DIR)
        status("Getting tup from", TUPCFG_GIT_URL)
        cmd(['git', 'clone', TUPCFG_GIT_URL, TUPCFG_INSTALL_DIR])
    else:
        status("Updating tupcfg")
        cmd(['git', 'pull'], cwd=TUPCFG_INSTALL_DIR)
    shutil.rmtree(os.path.join(PROJECT_CONFIG_DIR, 'tupcfg'), ignore_errors=True)
    shutil.copytree(
        os.path.join(TUPCFG_INSTALL_DIR, 'src/tupcfg'),
        os.path.join(PROJECT_CONFIG_DIR, 'tupcfg')
    )

def tup_install(args):
    from tupcfg import platform
    if platform.IS_WINDOWS:
        tup_install_windows(args)
    else:
        tup_install_git(args)
    from tupcfg import tools
    print("Tup installed in", tools.which('tup'))

def tup_install_windows(args):
    import urllib.request as r
    req = r.urlopen(TUP_WINDOWS_URL)
    if not os.path.exists(TUP_INSTALL_DIR):
        os.makedirs(TUP_INSTALL_DIR)
    tarball = os.path.join(TUP_INSTALL_DIR, 'tup.zip')
    with open(tarball, 'wb') as f:
        while True:
            data = req.read(4096)
            if not data:
                break
            f.write(data)
    import zipfile
    with zipfile.ZipFile(tarball) as f:
        f.extractall(TUP_INSTALL_DIR)

def tup_install_git(args):
    from tupcfg import path
    status("Installing tup in", TUP_INSTALL_DIR)
    if not path.exists(TUP_INSTALL_DIR):
        os.makedirs(TUP_INSTALL_DIR)
        status("Getting tup from", TUP_GIT_URL)
        cmd(['git', 'clone', TUP_GIT_URL, TUP_INSTALL_DIR])
    else:
        status("Updating tup")
        cmd(['git', 'pull'], cwd=TUP_INSTALL_DIR)
    tup_shell_bin = path.join(TUP_INSTALL_DIR, "build", "tup")
    if not path.exists(TUP_INSTALL_DIR, "build", "tup"):
        cmd(['sh', 'build.sh'], cwd=TUP_INSTALL_DIR)
    else:
        status("Found shell version of tup at", tup_shell_bin)
    tup_dir = path.join(ROOT_DIR, '.tup')
    if os.path.exists(tup_dir):
        os.rename(tup_dir, tup_dir + '.bak')
    try:
        if not path.exists(TUP_INSTALL_DIR, '.tup'):
            cmd(['./build/tup', 'init'], cwd=TUP_INSTALL_DIR)
        cmd(['./build/tup', 'upd'], cwd=TUP_INSTALL_DIR)
    finally:
        if path.exists(tup_dir + '.bak'):
            os.rename(tup_dir + '.bak', tup_dir)

def prepare_build(args, defines, exports):
    import tupcfg  # Should work at this point
    from tupcfg.path import exists, join, absolute
    build_dir = args.build_dir
    try:
        project = tupcfg.Project(
            ROOT_DIR,
            PROJECT_CONFIG_DIR,
            config_filename=PROJECT_CONFIG_FILENAME,
            new_project_vars=exports,
        )
        env_build_dirs = list(
            d for d in project.env.get('BUILD_DIRECTORIES', [])
            if exists(d, '.tupcfg_build')
        )
        tupcfg.tools.verbose("Found build directories:", ' '.join(env_build_dirs))
        build_dirs = []
        if build_dir is not None:
            build_dirs = [build_dir]
            tup_build_marker = join(build_dir, '.tupcfg_build')
            if not exists(build_dir):
                os.makedirs(build_dir)
                with open(tup_build_marker, 'w') as f:
                    pass
            elif not exists(tup_build_marker):
                fatal('\n'.join([
                    "'%(build_dir)s' does not seem to be a tup build directory:",
                    "\t* Remove this directory",
                    "\t* Touch the file %(tup_build_marker)s",
                ]) % locals())
        else:
            build_dirs = env_build_dirs

        if not build_dirs:
            fatal("No build directory specified on command line. (try -h switch)")

        project.env.project_set(
            'BUILD_DIRECTORIES',
            list(set(build_dirs + env_build_dirs))
        )

        if args.dump_vars:
            status("Project variables:")
            for k, v in project.env.project_vars.items():
                status("\t - %s = %s" % (k, v))

        generators = []
        if args.generator:
            generators.append(args.generator)

        with project:
            for build_dir in build_dirs:
                with project.configure(build_dir, defines, generators) as build:
                    if args.dump_vars:
                        status("Build variables for directory '%s':" % build_dir)
                        build_vars = project.env.build_vars
                        keys = sorted(build_vars.keys())
                        for k in keys:
                            status("\t - %s = %s" % (k, build_vars[k]))
                        continue
                    if args.dump_build:
                        build.dump(project)
                    else:
                        build.execute(project)
    except tupcfg.Project.NeedUserEdit:
        print(
            "Please edit %s and re-run the configure script" % join(
                PROJECT_CONFIG_DIR,
                PROJECT_CONFIG_FILENAME,
                replace_home=True,
            )
        )
        sys.exit(0)

def parse_args():
    def Dir(s):
        if not os.path.isdir(s):
            raise argparse.ArgumentTypeError
        return s
    parser = argparse.ArgumentParser(
        description="Configure your project for tup"
    )
    parser.add_argument('build_dir', action="store",
                        help="Where to build your project", nargs='?')
    parser.add_argument('-D', '--define', action='append',
                        help="Define build specific variables", default=[])
    parser.add_argument('-E', '--export', action='append',
                        help="Define project specific variables", default=[])
    parser.add_argument('-v', '--verbose', action='store_true', help="verbose mode")
    parser.add_argument('-d', '--debug', action='store_true', help="debug mode")
    parser.add_argument('--dump-vars', action='store_true', help="dump variables")
    parser.add_argument('--dump-build', action='store_true',
                        help="dump commands that would be executed")
    parser.add_argument('--install', action='store_true', help="install when needed")
    parser.add_argument('--self-install', action='store_true',
                        help="install (or update) tupcfg")
    parser.add_argument('--tup-install', action='store_true',
                        help="install (or update) tup")
    parser.add_argument('--generator', '-G', default=None,
                        help="Generate build rules for another build system",
                        choices=TUPCFG_GENERATORS)
    return parser, parser.parse_args()

def parse_cmdline_variables(args):
    res = {}
    for arg in args:
        arg = arg.strip()
        if '=' not in arg:
            fatal("'=' not found in define: use %s=true to define a boolean variable" % arg)
        parts = arg.split('=')
        k = parts[0].strip()
        v = '='.join(parts[1:]).strip()
        op = '='
        for p in ['+', ':']:
            if k.endswith(p):
                op = p + '='
                k = k[:-1]
                break
        if v.lower() in ['1', 'true']:
            v = True
        elif v.lower() in ['0', 'false']:
            v = False
        elif v.startswith('['):
            if not v.endswith(']'):
                fatal("Missing ']' when defining %s" % k)
            v = [e.strip() for e in v[1:-1].split(',')]
        res[k] = {
            'op': op,
            'value': v,
        }
    return res

def main():
    parser, args = parse_args()
    DEBUG = args.debug
    VERBOSE = args.verbose
    from os.path import exists, join
    sys.path.insert(0, os.path.join(PROJECT_CONFIG_DIR, 'tupcfg/src'))
    sys.path.insert(0, PROJECT_CONFIG_DIR)
    have_tupcfg = False
    try:
        import tupcfg
        have_tupcfg = True
    except:
        pass
    if args.self_install or (args.install and not have_tupcfg):
        self_install(args)
        try:
            import imp
            file_, pathname, descr = imp.find_module("tupcfg", [PROJECT_CONFIG_DIR])
            tupcfg = imp.load_module("tupcfg", file_, pathname, descr)
        except Exception as e:
            fatal("Sorry, tupcfg installation failed for some reason:", e)
    try:
        import tupcfg
        # Tupcfg will use these functions to log
        tupcfg.tools.status = status
        tupcfg.tools.error = error
        tupcfg.tools.fatal = fatal
        tupcfg.tools.DEBUG = DEBUG
        tupcfg.tools.VERBOSE = VERBOSE or DEBUG
    except ImportError as e:
        if DEBUG is True:
            raise e
        fatal(
            '\n'.join([
                "Cannot find tupcfg module, your options are:",
                "\t* Just use the --self-install flag (installed in %(config_dir)s/tupcfg)",
                "\t* Add it as a submodule: `git submodule add git@github.com:hotgloupi/tupcfg.git %(config_dir)s/tupcfg`",
                "\t* Install it somewhere (see %(home_url)s)",
            ]) % {
                'config_dir': cleanabspath(PROJECT_CONFIG_DIR, replace_home=True),
                'home_url': HOME_URL
            }
        )
    tupcfg.tools.PATH.insert(0, tupcfg.path.absolute(TUP_INSTALL_DIR))
    if args.tup_install or (args.install and not tupcfg.tools.which('tup')):
        tup_install(args)
    if 'Tup' == args.generator and not tupcfg.tools.which('tup'):
        fatal(
            '\n'.join([
                "Cannot find tup binary, your options are:",
                "\t* Just use the --tup-install flag (installed in %(config_dir)s/tup)",
                "\t* Install it somewhere (see %(home_url)s)",
            ]) % {
                'config_dir': cleanabspath(PROJECT_CONFIG_DIR, replace_home=True),
                'home_url': TUP_HOME_URL
            }
        )
    try:
        defines = parse_cmdline_variables(args.define)
        exports = parse_cmdline_variables(args.export)
        prepare_build(args, defines, exports)
    except tupcfg.Env.VariableNotFound as e:
        fatal('\n'.join([
            "Couldn't find any variable '%s', do one of the following:" % e.name,
            "\t* Export it with: `%s=something ./configure`" % e.name,
            "\t* Define it with: `./configure -D%s=something`" % e.name,
            "\t* Define it in your project config file (%s)" % cleanjoin(PROJECT_CONFIG_DIR, PROJECT_CONFIG_FILENAME),
        ]))

if __name__ == '__main__':
    main()
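As a reading aid, here is how parse_cmdline_variables above treats -D/-E arguments: a trailing + or : on the key becomes the += or := operator, 'true'/'false'/'1'/'0' are coerced to booleans, and [a, b] becomes a list (values are hypothetical; key order shown is insertion order, which older interpreters may not preserve):

>>> parse_cmdline_variables(['CXX=clang++', 'PREFIXES+=[a, b]', 'RELEASE=true'])
{'CXX': {'op': '=', 'value': 'clang++'},
 'PREFIXES': {'op': '+=', 'value': ['a', 'b']},
 'RELEASE': {'op': '=', 'value': True}}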
UTF-8
Python
true
false
2,013
9,156,870,294,364
5c67cf78ac17398d66078fcc069ec58af0439e39
8d257076e970099c978d9859c48bb63c8b0460ca
/CPAC/anatpreproc/tests/anatpreproc_test.py
5c7aadece74a812129e6ce7d7bb4d78ff6ae560e
[]
no_license
theresaanna/C-PAC
https://github.com/theresaanna/C-PAC
5e8061ec9626fa8c36675b938ab0aaf60c72cdd6
661dba65bffd6481e096d0da11d6d742d7ab4a55
refs/heads/master
2021-01-17T21:49:35.293968
2012-06-27T16:43:27
2012-06-27T16:43:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
import nibabel as nib

"""
Anatomical Workflow Sanity Checks
=================================

**Class anatPreprocTest**
-------------------------
"""


class anatPreprocTest(object):
    """
    This class defines all the quantitative checks done to the
    inputs and outputs of Anatomical Preprocessing
    """

    def __init__(self, preproc, base_dir, input_anat):
        """
        Constructor to initialize the workflow

        **Parameters**

        *preproc* : (Object) Anatomical workflow object

        *base_dir* : (String) path of output workflow directory

        *input_anat* : (file) Input image to the anatomical workflow
        """

    """
    Setup workflow object
    ---------------------
    """
    def setup(self):
        """
        Set up the Workflow and run

        This method is run once before _each_ test method is executed

        **Example**

        >>> self.preproc.inputs.anat = self.input_anat
        >>> self.preproc.base_dir = self.base_dir
        >>> self.preproc.run()
        """

    """
    Delete workflow object
    ----------------------
    """
    def teardown(self):
        """
        Delete The Workflow Object

        This method is run once after _each_ test method is executed
        """

    """
    Test to verify inputs
    ---------------------
    """
    def inputs_test(self):
        assert False
        """
        Method to check if the input file is a T1 image and in the right format

        **Returns**

        *TRUE* : (boolean) if all the tests pass

        *Error* : (Exception) raised if any of the tests fail

        **Tests**

        - input_anat image should be a nifti file with extension nii.gz or nii
        - input_anat image should be a 3D image, i.e. only one volume
        """

    """
    Test to verify deoblique Image
    ------------------------------
    """
    def anat_deoblique_test(self, deoblique_img):
        assert False
        """
        Method to check if the input file is deobliqued correctly or not

        **Parameters**

        *deoblique_img* : (nifti file) De-obliqued mprage file

        **Returns**

        Same as above method

        **Tests**

        - Compare the headers of input_anat and deoblique_img; the voxel
          offset values will be unequal if the original image is obliqued.
          All other values should remain unchanged.
        - Compare the image data of input_anat and deoblique_img; they
          should remain the same
        """

    """
    Test to verify re-orientation
    -----------------------------
    """
    def anat_reorient_test(self, standard_img, rpi_img):
        assert False
        """
        Method to check if the reorient output is correctly in RPI
        orientation or not

        **Parameters**

        *standard_img* : (nifti file) Standard T1 RPI image in MNI space

        *rpi_img* : (nifti file) RPI output mprage file

        **Returns**

        Same as above method

        **Tests**

        - Compute spatial correlation between standard_img and rpi_img.
          For this, first convert the RPI output image into MNI space and,
          if necessary, resample it to match the voxel dimensions of the
          standard image
        - Compare the image data of rpi_img and input_anat. It should be
          the same.
        """

    """
    Test to verify skullStrip image with normalized/scaled intensities
    -------------------------------------------------------------------
    """
    def anat_skullstrip_test(self, skullstrip_img):
        assert False
        """
        Method to check if the skull stripped image is correct or not

        **Parameters**

        *skullstrip_img* : (nifti file) Skullstrip output image with
        normalized/scaled intensities

        **Returns**

        Same as above method

        **Tests**

        - Since afni scales/normalizes the intensity values it is hard to
          test here. Can be tested in the next step
        """

    """
    Test to verify skullstrip image with original intensity values
    ---------------------------------------------------------------
    """
    def anat_brain_test(self, rpi_img, brain_img):
        assert False
        """
        Method to check if the input file is skull stripped and the
        intensity values are unchanged

        **Parameters**

        *rpi_img* : (nifti file) RPI output mprage file

        *brain_img* : (nifti file) Skull stripped Brain only file

        **Returns**

        Same as above method

        **Tests**

        - Subtract (infile_a - infile_b); this should return a matrix with
          all zero values in the brain area and non-zero values around the
          edge. From the origin, choose a sphere of reasonable diameter and
          check that the intensity values are zero. Then check the edges
          for non-zero values.
        """
=== /Lecture/day3/getters-and-setters-2/main.py · Russ93/dpw (https://github.com/Russ93/dpw) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-01-30 ===
# Russell Schlup
# January 13, 2013
import webapp2


class MainHandler(webapp2.RequestHandler):
    def get(self):
        russell = Transcript()
        russell.grade1 = 20
        # list the grades
        # calculate the averages
        # show the average
        self.response.write(russell.print_num())


app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)


class Transcript(object):  # name fixed from the original "Transscript"
    def __init__(self):
        # attributes that are private should have underscores - convention - tradition
        self.__grade1 = 90
        self.__grades = [self.__grade1, 80, 60]

    @property
    def grade1(self):
        # convention - the property name has no underscores... just tradition
        # this is what tells the computer to associate grade1 with __grade1
        # (the original left this as "pass", so the getter returned None)
        return self.__grade1

    @grade1.setter
    def grade1(self, value):
        self.__grade1 = value

    def calc_average(self):
        total = 0  # renamed from "sum" to avoid shadowing the builtin
        for g in self.__grades:
            total = g + total
        avg = total / len(self.__grades)
        return avg

    def print_num(self):
        return self.__grade1
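With the getter restored, the property behaves the way the comments intend. A quick interactive trace, independent of webapp2 (the values follow from the code above):

t = Transcript()
print t.grade1          # 90, read through the @property getter
t.grade1 = 20           # routed through the @grade1.setter
print t.print_num()     # 20
print t.calc_average()  # (90 + 80 + 60) / 3 -> 76 under integer division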
=== /src/wrangler/queue/fifo.py · HyFrmn/Wrangler (https://github.com/HyFrmn/Wrangler) · Python · UTF-8 · no license · refs/heads/master · last commit 2009-11-10 ===
#!/usr/bin/env python
from wrangler.queue.interface import WranglerQueueInterface


class FIFOQueue(WranglerQueueInterface):
    def __init__(self):
        self.queue = []

    def queue_task(self, task, priority=500):
        self.queue.append(task)

    def next_task(self):
        try:
            task = self.queue.pop(0)
        except IndexError:
            task = -1
        return task

    def remove_task(self, task):
        try:
            self.queue.remove(task)
            return True
        except ValueError:
            return False

    def list(self):
        return self.queue
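Note that list.pop(0) shifts every remaining element, so each dequeue is O(n). If the queue interface allows swapping the container, collections.deque gives the same FIFO behaviour with O(1) popleft(); a sketch, not part of the original module:

from collections import deque

class DequeFIFOQueue(object):
    def __init__(self):
        self.queue = deque()

    def queue_task(self, task, priority=500):
        self.queue.append(task)           # priority is ignored, as in FIFOQueue

    def next_task(self):
        try:
            return self.queue.popleft()   # O(1), unlike list.pop(0)
        except IndexError:
            return -1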
=== /ui/helper/ms_leaves_table.py · suw/sidd (https://github.com/suw/sidd) · Python · UTF-8 · AGPL-3.0-or-later / AGPL-3.0-only · refs/heads/master · last commit 2013-12-01 ===
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
dialog for editing mapping scheme branches
"""
from PyQt4.QtCore import Qt, QVariant, QString, QAbstractTableModel, QModelIndex

from ui.helper.common import build_attribute_tooltip


class MSLeavesTableModel(QAbstractTableModel):
    """ table model supporting visualization of nodes in a mapping scheme tree """

    def __init__(self, values, headers, formats, parser, valid_codes):
        """ constructor """
        super(MSLeavesTableModel, self).__init__()
        self.headers = headers
        self.formats = formats
        self.parser = parser
        self.valid_codes = valid_codes
        self.values = values
        self.do_sort(sortIndex=0)

    def columnCount(self, parent):
        """ number of columns is fixed by the header definition """
        return len(self.headers)

    def rowCount(self, parent):
        """ number of rows same as number of siblings """
        return len(self.values)

    def headerData(self, section, orientation, role):
        """ return data to display for the header row """
        if role == Qt.DisplayRole:
            if orientation == Qt.Horizontal:
                return QString(self.headers[section][0])
            else:
                # no vertical header
                return QVariant()
        elif role == Qt.ToolTipRole:
            return QString(self.headers[section][1])
        else:
            return QVariant()

    def data(self, index, role):
        """ return data to be displayed in a cell """
        row, col = index.row(), index.column()
        value = self.values[row][col]
        if role == Qt.DisplayRole:
            if value is not None:
                return QString(self.formats[col] % value)
            else:
                return QVariant("")
        elif role == Qt.ToolTipRole:
            # construct data for display in tooltip
            if index.column() == 0:
                if value is not None:
                    return build_attribute_tooltip(self.valid_codes,
                                                   self.parser.parse(value))
                else:
                    return QVariant("")
        elif role == Qt.UserRole:
            return index.internalPointer()
        else:
            return QVariant()

    def index(self, row, column, parent):
        """ provide index to data given a cell """
        try:
            node = self.values[row][len(self.headers)]
            return self.createIndex(row, column, node)
        except IndexError:  # narrowed from the original bare except
            return QModelIndex()

    def flags(self, index):
        """ cell condition flag """
        # NOTE:
        # ItemIsEditable also requires the data() and setData() functions
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable

    def sort(self, ncol, order):
        """ sort table """
        # the original allowed ncol == len(self.headers), one past the last column
        if ncol < 0 or ncol >= len(self.headers):
            return
        self.layoutAboutToBeChanged.emit()
        self.do_sort(sortIndex=ncol,
                     reverse_sort=(order == Qt.DescendingOrder))
        self.layoutChanged.emit()

    # internal helper methods
    ###############################

    def do_sort(self, sortIndex=0, reverse_sort=False):
        def sort_key(row):
            return row[sortIndex]
        self.values.sort(key=sort_key, reverse=reverse_sort)
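A minimal wiring sketch for the model, assuming a PyQt4 application; the values/headers/formats shapes and the None parser are hypothetical stand-ins (tooltips on column 0 would need the real parser object):

from PyQt4.QtGui import QApplication, QTableView

app = QApplication([])
headers = [('Value', 'attribute value'), ('Weight', 'percent weight')]  # (label, tooltip)
formats = ['%s', '%.1f']
values = [['W1', 40.0, None], ['W2', 60.0, None]]  # trailing entry is the node pointer
model = MSLeavesTableModel(values, headers, formats, parser=None, valid_codes={})
view = QTableView()
view.setModel(model)
view.show()
app.exec_()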
=== /bin/dump_mongo.py · sunlightlabs/upwardly (https://github.com/sunlightlabs/upwardly) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-05-01 ===
import itertools
import os
import re

from pymongo import Connection
from saucebrush import emitters, filters, sources, stats, run_recipe, Recipe

import settings

PRIMARY_STATE_RE = re.compile('^(.*), ([A-Z]{2})$')


class LocationSource(object):

    fields = (
        'code', 'name', 'primary_state', 'occupation',
        'ffiec_low', 'ffiec_high', 'ffiec_avg',
        'nces_schools', 'oes_median', 'oes_mean',
    )

    def __init__(self):
        self._locations = Connection()[settings.MONGO_DATABASE]['locations']

    def __iter__(self):
        for location in self._locations.find({}):
            if 'code' not in location:
                continue
            record = dict((key, None) for key in self.fields)
            record.update({
                'code': location['code'],
                'name': location['name'],
                'occupation': '00-0000',
            })
            primary_state = location.get('primary_state', None)
            if primary_state is None:
                m = PRIMARY_STATE_RE.match(location['name'])
                if m is not None:
                    primary_state = m.groups()[1]
            record['primary_state'] = primary_state
            if 'ffiec' in location:
                record['ffiec_low'] = location['ffiec']['low']
                record['ffiec_high'] = location['ffiec']['high']
                record['ffiec_avg'] = location['ffiec']['avg']
            if 'nces' in location:
                record['nces_schools'] = location['nces']['schools']
            yield record
            if 'oes' in location:
                for occupation_id, oes in location['oes'].iteritems():
                    record['occupation'] = occupation_id
                    record['oes_median'] = oes['median']
                    record['oes_mean'] = oes['mean']
                    yield record


if __name__ == '__main__':
    csv_path = os.path.join(settings.DATA_DIR, 'k2.csv')
    db_path = os.path.join(settings.DATA_DIR, 'k2.db')

    # if os.path.exists(db_path):
    #     os.unlink(db_path)
    #
    # run_recipe(
    #     LocationSource(),
    #     #emitters.CSVEmitter(open(csv_path, 'w'), fieldnames=LocationSource.fields),
    #     emitters.SqliteEmitter(db_path, 'locations', fieldnames=LocationSource.fields),
    #     #emitters.MongoDBEmitter(settings.MONGO_DATABASE, 'movingup', port=settings.MONGO_PORT)
    #     #emitters.DebugEmitter(),
    # )

    def to_float(s):
        if s is not None:
            try:
                return float(s)
            except ValueError:
                pass

    def fieldnames_iter(fieldnames):
        yield 'occupation'
        for f in fieldnames:
            yield "%s_stddev" % f
            yield "%s_mean" % f

    STATS_FIELDS = ('ffiec_low', 'ffiec_high', 'ffiec_avg',
                    'nces_schools', 'oes_median', 'oes_mean')

    class StatsGenerator(filters.Filter):
        def process_record(self, record):
            occ = record['occupation']
            stats_filters = {}
            for fieldname in STATS_FIELDS:
                stats_filters[fieldname] = stats.StandardDeviation(fieldname)
            run_recipe(
                sources.SqliteSource(db_path,
                                     """SELECT * FROM locations WHERE occupation = ?""",
                                     (occ,)),
                filters.FieldModifier(STATS_FIELDS, to_float),
                Recipe(*stats_filters.values()),
                error_stream=emitters.DebugEmitter(),
            )
            for fieldname, stats_filter in stats_filters.iteritems():
                record['%s_stddev' % fieldname] = stats_filter.value()[0]
                record['%s_mean' % fieldname] = stats_filter.average()
            return record

    stats_path = os.path.join(settings.DATA_DIR, 'k2-stats.csv')
    run_recipe(
        sources.SqliteSource(db_path,
                             """SELECT DISTINCT occupation FROM locations"""),
        StatsGenerator(),
        emitters.CSVEmitter(
            open(stats_path, 'w'),
            fieldnames=[f for f in fieldnames_iter(STATS_FIELDS)],
        ),
        emitters.DebugEmitter(),
        error_stream=emitters.DebugEmitter(),
    )
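Note that __iter__ yields the same record dict repeatedly and mutates it between yields; that is safe here only because saucebrush consumes each record before the next is produced. If the rows were ever collected with list(LocationSource()), later entries would alias earlier ones. A defensive variant of the per-occupation loop (a sketch, not the original behaviour):

if 'oes' in location:
    for occupation_id, oes in location['oes'].iteritems():
        row = dict(record)            # snapshot, so later mutation cannot rewrite earlier rows
        row['occupation'] = occupation_id
        row['oes_median'] = oes['median']
        row['oes_mean'] = oes['mean']
        yield row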
=== /account_tgt/wizard/account_balance.py · Lunchik/openerp_custom_modules (https://github.com/Lunchik/openerp_custom_modules) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-12-01 ===
import base64

from openerp.osv import fields, osv

from ..report.account_balance_report import TrialBalanceReport


class account_balance_report(osv.osv_memory):
    _inherit = "account.balance.report"
    _name = 'account.balance.report.xcel'
    _description = 'Trial Balance Report XCEL'

    def _get_company_ids(self, cr, uid, context=None):
        return self.pool.get('res.company').search(cr, uid, [], context=context)

    _columns = {
        'company_ids': fields.many2many('res.company', 'trial_company_rel',
                                        'trial_id', 'company_id',
                                        string="TGT Entities"),
    }

    _defaults = {
        'company_ids': _get_company_ids,
    }

    def _print_report(self, cr, uid, ids, data, context=None):
        company_ids = self.read(cr, uid, ids, ['company_ids'], context=context)
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form']['company_ids'] = company_ids[0]['company_ids']
        combined = TrialBalanceReport(cr, uid, self.pool, '', context)
        combined.set_context([], data, [])
        report = combined.generate()
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'accounting.report.xcel.download',
            'view_mode': 'form',
            'target': 'new',
            'name': 'Trial Balance Report',
            'datas': data,
            'context': {'r_file': base64.encodestring(report.read())},
        }
        # NOTE: the original ended with a second, unreachable return:
        # return {'type': 'ir.actions.report.xml',
        #         'report_name': 'account.account.balance',
        #         'datas': data}


account_balance_report()
=== /challenge.py · yoland68/email-prediction-challenge (https://github.com/yoland68/email-prediction-challenge) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-11-21 ===
#!/usr/bin/python
# coding=utf-8
"""
USAGE:
    python challenge.py [existing email file name] [predicting email file name]

EXAMPLES:
    python challenge.py db_input.txt prediction_input.txt
"""
import json
import os

from company_class import Company, print_company_pattern, print_company_dict
from employee_class import Employee
from pattern_class import Pattern
import pdb


def main():
    """ Run the program and get the result """
    # Read and parse the first file (existing email addresses)
    try:
        fname = os.sys.argv[1]          # renamed from "file" to avoid shadowing the builtin
        with open(fname, 'r') as f:
            inp = f.read()
        records = None                  # renamed from "dict" to avoid shadowing the builtin
        try:
            records = json.loads(inp)   # dump string into dict {[person name]: [email]}
        except ValueError:
            inp = convert_to_json_string(inp)
            try:
                records = json.loads(inp)
            except ValueError:
                print("Input invalid")
                exit(1)                 # exit code 1 for invalid input
    except IndexError:
        print("First input is invalid")
        print(__doc__)
        exit(1)

    # Loop through the dictionary, add each item as an Employee,
    # and each Employee to a Company
    for name, email in records.iteritems():
        # pdb.set_trace()
        em = Employee(name, email)
        em.join_company()  # puts the Employee into a Company's list and updates its Pattern
    # print_company_pattern()  # Print all the patterns in all the Companies
    # print_company_dict()     # Print all the Companies

    # Next, get the emails the user wants to predict
    try:
        fname2 = os.sys.argv[2]
    except IndexError:
        print("Second input is invalid")
        print(__doc__)
        exit(1)
    with open(fname2, 'r') as f2:
        to_be_predicted = f2.readlines()
    for line in to_be_predicted:
        line = line.strip().replace('"', '')
        name = line.split(', ')[0].lower()
        company_name = line.split(', ')[1].lower()
        result = predict(name, company_name)  # output the Employee's possible email(s)
        print("Name: {0}, \nEmail: {1}\n".format(name, result))


def convert_to_json_string(inp):
    """Convert the db_input (the one with names and emails) to a valid JSON string"""
    return inp.replace(" =>", ':')


def predict(name, company_name):
    """
    Input:  Employee's name and Company's name
    Output: Employee's potential email address, using Company.company_dict
    """
    comp = Company.company_dict.get(company_name)
    if comp:
        # if the Company is found in the master list, use its Pattern list to predict
        return comp.predict(name)
    else:
        return "Domain {0} not found in existing database, can not predict".format(company_name)


## Testing area
if __name__ == "__main__":
    main()
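The " =>" rewrite is the whole parsing trick: Ruby-hash-style input becomes JSON. An illustrative round trip (the sample record is made up, not from the repository):

>>> convert_to_json_string('{"john doe" => "jdoe@example.com"}')
'{"john doe": "jdoe@example.com"}'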
=== /2013/Crypto/1-[CSAWpad - 100 Points]/dec.py · b1acktrac3/CSAW-ctf (https://github.com/b1acktrac3/CSAW-ctf) · Python · UTF-8 · no license · refs/heads/master · last commit 2013-10-25 ===
import string
import os
from hashlib import sha512
from binascii import hexlify


# generates the S-box and S-inverse box, called f and g respectively, using
# SHA-512 as a deterministic random number generator
def genTables(seed="Well one day i'll be a big boy just like manhell"):
    fSub = {}
    gSub = {}
    i = 0
    prng = sha512()
    prng.update(seed)
    seed = prng.digest()
    for el in xrange(256):
        cSeed = ""
        for x in xrange(4):
            cSeed += prng.digest()
            prng.update(str(x))
        prng.update(cSeed)
        fCharSub = [0] * 256
        gCharSub = [0] * 256
        unused = range(256)
        for toUpdate in xrange(256):
            i += 1
            curInd = ord(cSeed[toUpdate]) % len(unused)
            toDo = unused[curInd]
            del unused[curInd]
            fSub[(el, toUpdate)] = toDo
            gSub[(el, toDo)] = toUpdate
    return fSub, gSub


f, g = genTables()


def encrypt(pad, ptext):
    assert len(ptext) <= len(pad)  # if pad < plaintext, bail
    ctext = []
    if type(ptext) == type(""):
        ptext = map(ord, ptext)
    if type(pad) == type(""):
        pad = map(ord, pad)
    for padByte, ptextByte in zip(pad, ptext):
        ctext.append(f[padByte, ptextByte])
    return "".join(map(chr, ctext))


def decrypt(pad, ciphertext):
    assert len(ciphertext) <= len(pad)  # if pad < ciphertext, bail
    ptext = []
    if type(ciphertext) == type(""):
        ciphertext = map(ord, ciphertext)
    if type(pad) == type(""):
        pad = map(ord, pad)
    for padByte, ctextByte in zip(pad, ciphertext):
        ptext.append(g[padByte, ctextByte])
    return "".join(map(chr, ptext))


strings = [
    '794d630169441dbdb788337d40fe245daa63c30e6c80151d4b055c18499a8ac3e5f3b3a8752e95cb36a90f477eb8d7aa7809427dde0f00dc11ab1f78cdf64da55cb75924a2b837d7a239639d89fe2b7bc1415f3542dba748dd40',
    '14a60bb3afbca7da0e8e337de5a3a47ae763a20e8e18695f39450353a2c6a26a6d8635694cbdc34b7d1a543af546b94b6671e67d0c5a8b64db12fe32e275',
    '250d83a7ed103faaca9d786f23a82e8e4473a5938eabd9bd03c3393b812643ea5df835b14c8e5a4b36cdcfd210a82e2c3c71d27d3c47091bdb391f2952b261fde94a4b23238137a4897d1631b4e18d63',
    '68a90beb191f13b621747ab46321a491e71c536b71800b8f5f08996bb433838fe56587f171a759cf1c160b4733a3465f5509ad7d1a89d4b41f631f3c600347a8762141095dad3714027dfc7c894d69fd896b810313259b1a0e941ecb43d6ae1857a465b4ddcdf102b7297763acb0281144b0598c326e871c3a1ad047ad4fea2093a1b734d589b8998175b3',
    '0fc304048469137d0e2f3a71885a5a78e749145510cf2d56157939548bfd5dd7e59dcebc75b678cfeac4cf408fce5dda32c9bfcbfd578bdcb801df32ebf64da365df4b285d5068975137990134bd69991695989b322b0849',
    '254c0bb31453badaca9d060ce5faa45fa66378a6716915473579d3743e315dbedf4d8cf78b93c3267d579247e32c8c7cd3e71e7dda6138a2ab015166fa03f2ce6ab74b89ce561eb16a65990189e169f1c457d9af622ba119a66acedb108fae18825bf3efc0428b9dae250791cb0ea018966e257d601a87f9914d646026eeab5c45cbaedd27e4c47643ab4e25193aa64f79',
    '41cd1c01c62883b2ca71e671dce57e5f96b1610e29507b6c03c38211653284576d4d8cdc967764147d1a0578102cb05f32a73065f11009041fa3cc5f60b24d8c7098598627df37322f814525966acabc99be5303c2322b43ecf358ac8b8541bd82214d1cc042cac3869c54e2964fa376229c2563ba3fd03e2d4d4d441721c60b6d817e034965be28b7d463cf2b97baebfe2729ed2aa41ffe',
    '68c50bd5197bfdbdfa887883783d2455a673a685436915bd72d1af74dffdd2b89df335daee93c36d5f57e147e9a35913d3b3bf33']

printable = string.printable[:-5]

print "".join(string.printable[:-5])
print "".join(string.printable[:-3])
print "".join(string.printable[:-2])
print "".join(string.printable[:-1])
print "".join(string.printable[:0])
print "".join(string.printable)

strings = map(lambda s: s.decode("hex"), strings)
maxl = max(map(len, strings))
pad = [0, ] * maxl

# pos: (string, char)
guess = {0: (1, "G"), 14: (0, " "), 16: (0, "e"), 17: (0, "t"), 20: (0, "e"),
         22: (0, " "), 24: (0, "t"), 34: (0, "n"), 38: (0, "e"), 39: (0, "n"),
         43: (0, " "), 46: (0, " "), 51: (0, " ")}

for pos in range(maxl):
    possible = []
    for i in range(256):
        ok = True
        x = []
        for ctxt in strings:  # renamed from "string" to avoid shadowing the module
            if len(ctxt) <= pos:
                continue
            c = chr(g[i, ord(ctxt[pos])])
            if c not in printable:
                ok = False
                break
            x.append(c)
        if ok and len(x) > 0:
            possible.append((i, x))
    if len(possible) == 1:
        pad[pos] = possible[0][0]
    elif pad[pos] == 0:
        gotone = False
        if pos in guess:
            for padval, solutions in possible:
                if guess[pos][1] in solutions[guess[pos][0]]:
                    pad[pos] = padval
                    gotone = True
                    break
        if not gotone:
            pad[pos] = possible[0][0]

for ctxt in strings:
    print decrypt(pad, ctxt)
=== /test/tests/600.bdm-performance/highest-perf.py · ldm5180/hammerhead (https://github.com/ldm5180/hammerhead) · Python · UTF-8 · LGPL-2.1 and other licenses (non-permissive) · refs/heads/master · last commit 2014-05-28 ===
#!/usr/bin/python
#
# Copyright (c) 2010, Regents of the University of Colorado.
# This work was supported by NASA contracts NNJ05HE10G, NNC06CB40C, and
# NNC07CB47C.
#

import sys
import optparse
import logging
from select import select
import time

# parse options
parser = optparse.OptionParser()
parser.add_option("-r", "--resource", "--resources", dest="resource_name",
                  help="Subscribe to a Resource list.",
                  metavar="HAB-Type.HAB-ID.Node-ID:Resource-ID")
parser.add_option("-s", "--security-dir", dest="security_dir",
                  help="Directory containing security certificates.",
                  metavar="dir", default=None)
parser.add_option("-e", "--require-security", dest="require_security",
                  help="Require secured connections.",
                  action="store_true", default=False)
parser.add_option("-t", "--time-to-run", dest="time_to_run",
                  help="Duration to check", default="60")

(options, args) = parser.parse_args()

logger = logging.getLogger("Bionet Highest BDM Performance")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

from bionet import *

global highest
highest = 0


# bionet callbacks
def cb_lost_hab(hab):
    pass


def cb_new_hab(hab):
    pass


def cb_new_node(node):
    global highest
    hab = bionet_node_get_hab(node)
    if bionet_node_get_num_resources(node):
        for i in range(bionet_node_get_num_resources(node)):
            resource = bionet_node_get_resource_by_index(node, i)
            datapoint = bionet_resource_get_datapoint_by_index(resource, 0)
            value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint))
            val = int(value_str)
            if val > highest:
                highest = val


def cb_lost_node(node):
    pass


def cb_datapoint(datapoint):
    global highest
    value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint))
    val = int(value_str)
    if val > highest:
        highest = val


if options.security_dir != None:
    bionet_init_security(options.security_dir, options.require_security)

bionet_fd = -1
bionet_fd = bionet_connect()
if 0 > bionet_fd:
    logger.error("error connecting to Bionet")
    exit(1)
else:
    # register Bionet callbacks
    pybionet_register_callback_new_hab(cb_new_hab)
    pybionet_register_callback_lost_hab(cb_lost_hab)
    pybionet_register_callback_new_node(cb_new_node)
    pybionet_register_callback_lost_node(cb_lost_node)
    pybionet_register_callback_datapoint(cb_datapoint)

if None == options.resource_name:
    logger.error("Resource name needs to be specified.")
    exit(1)

bionet_subscribe_datapoints_by_name(options.resource_name)

fd_list = []
if bionet_fd != -1:
    fd_list.append(bionet_fd)

started = time.time()
while 1:
    (rr, wr, er) = select(fd_list, [], [], 1)
    for fd in rr:
        if fd == bionet_fd:
            bionet_read()
    if (time.time() - started) >= float(options.time_to_run):
        print "Bionet Data Manager max speed:", highest, "datapoints/sec"
        exit(0)
=== /teststratum.py · dkoh/Stratum-Project (https://github.com/dkoh/Stratum-Project) · Python · UTF-8 · no license · refs/heads/master · last commit 2012-06-24 ===
#!/usr/bin/env python
# encoding: utf-8
"""
teststratum.py

Created by Derek Koh on 2011-07-07.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""

import random, Forest1, math, clusters
from copy import deepcopy
import rpy2.robjects as robjects

random.seed(2222222)

inputdict = {'readDataset': ("vanillapaireddata.csv", 0, -99), 'number_of_trees': 10}
blah = Forest1.ConditionalRandomForest(**inputdict)

## This code does plain CLR on a dataset and gets back the aic and deviance
## (the original repeated this commented-out function twice, verbatim)
#def logisticregressionR(data):
#    data1=zip(*data)
#    features=['col{0}'.format(i) for i in xrange(len(data[0]))]
#    columns=[robjects.FloatVector(col) for col in data1]
#    Rdata = robjects.r['data.frame'](**dict(zip(features,columns)))
#    Rformula = robjects.r['as.formula']('{0} ~ {1} -1'.format(features[-1],reduce(lambda x,y: x + '+' + y, features[:-1] )))
#    rpart_params = {'formula' : Rformula, 'data' : Rdata, 'family' : "binomial"}
#    model=robjects.r.glm(**rpart_params)
#    return model.rx('aic')[0][0],model.rx('deviance')[0][0]

### This function transforms stratum data to its independent variables
#def transformstratum(data, clrformat=0):
#    returndata = deepcopy(data)  # create a copy of input data
#    column_count=len(data[0])
#    column_count_half=column_count/2
#    if clrformat ==0:
#        for row in returndata:
#            for i in range(column_count_half):
#                if row[i] < row[i+column_count_half]:
#                    row[i+column_count_half]=1
#                else: row[i+column_count_half]=0
#    else:
#        for row in returndata:
#            for i in range(column_count_half):
#                row[i+column_count_half]=row[i+column_count_half]-float(row[i])
#    returndata=[row[column_count_half:] + [1] for row in returndata]
#    return returndata

#logisticdata= Forest1.read_data("vanillapaireddata.csv",0,-99)[0]
#logisticdata=transformstratum(logisticdata,1)
#import csv
#outputwriter=csv.writer(open('Rlogisticdata.csv', 'wb'))
#for row in logisticdata:
#    outputwriter.writerow(row)
#print logisticregressionR(logisticdata)
#print "done"

######################
## START OF MAIN SCRIPT
######################
#numberofnodes=10

## The input dataset has 4 columns: the xy of the case and xy for the control
#rawdata, featureNames=Forest.read_data('vanillalogisticdata.csv')
#logisticdata=treepredictstratum.transformstratum(rawdata,1)
#clusters= treepredictstratum.stratumForest(treepredictstratum.transformstratum(rawdata),numberofnodes)

## Perform logistic regression in each of the 10 clusters and then sum up the stats
#finallist=[]
#for key in clusters:
#    if len(clusters[key])>3:
#        finallist.append(logisticregressionR([logisticdata[i] for i in clusters[key]]))
#    else: print 'Less than 3 obs in cluster'

## print out the final results for the number of nodes
#print "Results for {0} nodes".format(numberofnodes)
#print reduce(lambda x, y: (x[0]+y[0],x[1]+y[1]),finallist)

## Calculating the vanilla logistic regression
#print treepredictstratum.logisticregressionR(logisticdata)

# Unused code
#    dummydata1=[[int(random.random()*100) for i in xrange(20)] for j in xrange(len(dummydata))]
#    for i in xrange(len(dummydata)):
#        dummydata1[i][0]=dummydata[i][0]
#        dummydata1[i][1]=dummydata[i][1]
#        dummydata1[i][len(dummydata1[0])/2]=dummydata[i][2]
#        dummydata1[i][len(dummydata1[0])/2+1]=dummydata[i][3]
=== /trychooser_test · pbiggar/trychooser (https://github.com/pbiggar/trychooser) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-08-08 ===
#!/usr/bin/env python

import subprocess
import sys
import re


def run(input):
    proc = subprocess.Popen("./trychooser", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    (stdout, _) = proc.communicate(input)
    return (stdout.split("\n")[-2], stdout)


tests = [
    ('Y', '-b do -p all -u all -t all'),
    ('NYYYYY', '-b do -p all -u all -t all'),
    ('NNYYYY', '-b o -p all -u all -t all'),
    ('NNNYYYY', '-b d -p all -u all -t all'),
    ('NYNYYYYYYYYYYYYY', '-b do -p linux,linux64,macosx64,win32,win64,android,android-armv6,android-noion,ics_armv7a_gecko,panda,unagi -u all -t all'),
    ('NYNNNYYNNNNNNNYY', '-b do -p macosx64,win32 -u all -t all'),
    ('NYNNNNNNYNNNNNYYY', '-b do -p android -u all -t all'),
    ('NYYNNY', '-b do -p all -u none -t all'),
    ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYYYYYYYYYY', '-b do -p all -u reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-3,mochitest-4,mochitest-5,mochitest-6,mochitest-7,mochitest-8,mochitest-bc,mochitest-o -t all'),
    ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYYYNNNNNNY', '-b do -p all -u reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-3,mochitest-4 -t all'),
    ('NYYNYYYYYYYYYYYYYYYYYYYYYYYYYNYYYNNYNNNYYY', '-b do -p all -u reftest,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,reftest-ipc,reftest-no-accel,crashtest,crashtest-1,crashtest-2,crashtest-3,crashtest-ipc,xpcshell,jsreftest,jsreftest-1,jsreftest-2,jsreftest-3,jetpack,marionette,marionette-webapi,mozmill,robocop,mochitest-1,mochitest-2,mochitest-5,mochitest-bc,mochitest-o -t all'),
    ('NYYNYNNNNNNNNNNNNNNYYNNNNYYNNNYYYYYYNNNYYY', '-b do -p all -u xpcshell,jsreftest,marionette,marionette-webapi,mochitest-1,mochitest-2,mochitest-3,mochitest-4,mochitest-5,mochitest-bc,mochitest-o -t all'),
    ('NYYNYYNNNNNNNNYNNNNYYNNNNYYNNYYY', '-b do -p all -u reftest,crashtest,xpcshell,jsreftest,marionette,marionette-webapi,mochitests -t all'),
    ('NYYYNNNN', '-b do -p all -u all -t none'),
    ('NYYYNYYYYYYYYYYYYY', '-b do -p all -u all -t tpn,nochromer,other,dirtypaint,svgr,dromaeojs,xperf,remote-ts,remote-tdhtml,remote-tsvg,remote-tpan,remote-trobopan,remote-trobocheck,remote-troboprovider,remote-trobocheck2,remote-trobocheck3,remote-tp4m_nochrome'),
    ('NYYYNYNNYYYYYNYYYYYYYYYYY', '-b do -p all -u all -t other,dirtypaint,svgr,dromaeojs,xperf,remote-ts,remote-tdhtml,remote-tsvg,remote-tpan,remote-trobopan,remote-trobocheck,remote-trobocheck2,remote-trobocheck3,remote-troboprovider,remote-tp4m_nochrome'),
    ('NYYYNYYYYYYNYNYNNNNNNNNYN', '-b do -p all -u all -t tpn,nochromer,other,dirtypaint,svgr,xperf,remote-troboprovider'),
    ('NYYNYNNNNNNNNNNNNNNNNNNNNNNNNYNN', '-b do -p all -u mochitests -t none'),
]

invalid_tests = [
    ('NYNNNNNNNNNNNNYY', ": try: -b do -p -u all -t all"),  # No platforms
    ('NNNNYYY', ""),  # No builds
    ('NYYNYNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN', ": try: -b do -p all -u -t none"),  # Wanted unittests, none picked
    ('NYYYNYNNNNNNNNYNNNNNNNNNN', ": try: -b do -p all -u all -t"),  # Wanted talos, none picked
]

tests = [(a, 'try: ' + b) for (a, b) in tests]
tests += [(a, 'Invalid selection' + b) for (a, b) in invalid_tests]


def combine(output, input):
    result = ""
    matches = re.findall('.*?\[Ynh\?\]', output, re.M | re.S)
    assert matches is not None
    # assert len(matches) == len(input)
    i = 0
    for match in matches:
        result += match
        if len(input) > i:
            result += " " + input[i]
        i += 1
    return result


for (input, expected) in tests:
    (output, full_output) = run("\n".join(input))
    if output != expected:
        print "Fail:\n\texpected:\n\t\t" + expected + "\n\tgot:\n\t\t" + output
        print combine(full_output, input)
        sys.exit(-1)
    else:
        print "Pass [" + input + "]: '" + expected + "'"
=== /Zope3/tags/before-blow-services-merge/src/zope/app/wfmc/metadirectives.py · pombredanne/zope (https://github.com/pombredanne/zope) · Python · UTF-8 · ZPL-2.1 and other licenses (non-permissive) · refs/heads/master · last commit 2012-11-20 ===
"""ZCML directives for defining privileges. $Id: $ """ import zope.interface import zope.schema import zope.configuration.fields class IdefineXpdl(zope.interface.Interface): file = zope.configuration.fields.Path( title=u"File Name", description=u"The name of the xpdl file to read.", ) process = zope.schema.TextLine( title=u"Process Name", description=u"The name of the process to read.", ) id = zope.schema.Id( title=u"ID", description=(u"The identifier to use for the process. " u"Defaults to the process name."), )
=== /Numbers/Numbers.py · 2gisprojectT/kaygorodov-twitter (https://github.com/2gisprojectT/kaygorodov-twitter) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-11-12 ===
import math


class Numbers:
    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        if c > 0:
            self.c = c
        else:
            self.c = 0

    def sum(self):
        return self.a + self.b + self.c

    def multiplication(self):
        return self.a * self.b * self.c

    def abs_multiplication(self):
        return math.fabs(self.a * self.b * self.c)


num = Numbers(3, 10, -2)
print(num.sum())
print(num.multiplication())
print(num.abs_multiplication())
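Because the constructor clamps negative c to zero, the demo lines print values computed with c == 0. A quick trace:

num = Numbers(3, 10, -2)     # c = -2 is clamped to 0
num.sum()                    # 3 + 10 + 0 -> 13
num.multiplication()         # 3 * 10 * 0 -> 0
num.abs_multiplication()     # fabs(0)    -> 0.0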
=== /addons-extra/olap/cube/levels/level_date.py · oneyoung/openerp (https://github.com/oneyoung/openerp) · Python · UTF-8 · no license · refs/heads/master · last commit 2013-05-24 ===
import sqlalchemy

from level_interface import level_interface
from olap.cube import common
from olap.cube import axis_map

#
# To Be Fixed:
#     Mapping of QX and Month
#


class level_date_month(level_interface):

    def run(self, level, metadata, table):
        col = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                             level.object.column_name)
        col_id = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                                level.object.column_name)
        result = {
            'column': [sqlalchemy.func.date_part('month', col_id)],
            'column_name': [sqlalchemy.func.date_part('month', col)],
            'axis_mapping': [axis_map.column_mapping],
            'where_clause': [sqlalchemy.func.date_part('month', col_id) == level.name],
        }
        return result

    def children(self, level, metadata, table):
        col = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                             level.object.column_name)
        col_id = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                                level.object.column_name)
        # unused; see the "To Be Fixed" note above
        qexpr = (sqlalchemy.literal('Q')
                 + sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER', col_id),
                                       sqlalchemy.types.String)
                 + sqlalchemy.sql.cast(sqlalchemy.func.date_part('month', col_id),
                                       sqlalchemy.types.String))
        return {
            'column': [sqlalchemy.func.date_part('month', col)],
            'column_name': [sqlalchemy.func.date_part('month', col)],
            'axis_mapping': [axis_map.column_mapping],
        }

    def validate(self, level, name):
        return 1


class level_date_year(level_interface):

    def run(self, level, metadata, table):
        col = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                             level.object.column_name)
        col_id = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                                level.object.column_name)
        result = {
            'column': [sqlalchemy.func.date_part('year', col_id)],
            'column_name': [sqlalchemy.func.date_part('year', col)],
            'axis_mapping': [axis_map.column_mapping],
            'where_clause': [sqlalchemy.func.date_part('year', col_id) == level.name],
        }
        return result

    def children(self, level, metadata, table):
        col = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                             level.object.column_name)
        col_id = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                                level.object.column_name)
        return {
            'column': [sqlalchemy.func.date_part('year', col_id)],
            'column_name': [sqlalchemy.func.date_part('year', col)],
            'axis_mapping': [axis_map.column_mapping],
            'where_clause': [],
        }

    def validate(self, level, name):
        return 1


#
# To Do: Create your own axis mapping
#
class level_date_quarter(level_interface):

    def run(self, level, metadata, table):
        quarters = {
            'Q1': [1, 2, 3],
            'Q2': [4, 5, 6],
            'Q3': [7, 8, 9],
            'Q4': [10, 11, 12],
        }
        col = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                             level.object.column_name)
        col_id = common.col_get(sqlalchemy.Table(level.object.table_name, metadata),
                                level.object.column_name)
        qexpr = (sqlalchemy.literal('Q')
                 + sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER', col_id),
                                       sqlalchemy.types.String))
        if not level.name in quarters:
            # the original raised a bare string, which is not a valid exception
            raise ValueError('Quarter should be in Q1,Q2,Q3,Q4 !')
        result = {
            'column': [qexpr],
            'column_name': [qexpr],
            'axis_mapping': [axis_map.column_static],
            'where_clause': [
                (sqlalchemy.func.date_part('month', col_id) == quarters[level.name][0])
                | (sqlalchemy.func.date_part('month', col_id) == quarters[level.name][1])
                | (sqlalchemy.func.date_part('month', col_id) == quarters[level.name][2])
            ],
        }
        return result

    def children(self, level, metadata, table):
        table = sqlalchemy.Table(level.object.table_name, metadata)
        col = common.col_get(table, level.object.column_name)
        col_id = common.col_get(table, level.object.column_name)
        qexpr = (sqlalchemy.literal('Q')
                 + sqlalchemy.sql.cast(sqlalchemy.func.date_part('QUARTER', col_id),
                                       sqlalchemy.types.String))
        return {
            'column': [qexpr],
            'column_name': [qexpr],
            'axis_mapping': [axis_map.column_mapping_value],
        }

    def validate(self, level, name):
        return 1

# vim: ts=4 sts=4 sw=4 si et
=== /src/utils.py · menshen/GetAPKDetails (https://github.com/menshen/GetAPKDetails) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-07-21 ===
#!/usr/bin/env python
# coding:utf-8
# --*-- encoding:utf-8 --*--
'''
Created on 2011-7-7

@author: Evil

export LANG="zh_CN.GB18030"
'''
import os, sys

reload(sys)
sys.setdefaultencoding('utf-8')


def convertFilePath(path):
    # escape the characters that would otherwise break a shell command line
    path = path.replace('(', '\(')
    path = path.replace(' ', '\ ')
    path = path.replace(')', '\)')
    return path
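Escaping only spaces and parentheses by hand misses many other shell metacharacters ($, &, ;, quotes). If the goal is to splice a path into a shell command, the standard library already covers this; a sketch using pipes.quote (Python 2; shlex.quote on Python 3), with a hypothetical helper name:

import pipes

def convert_file_path_quoted(path):
    # quotes the whole argument safely instead of escaping characters one by one
    return pipes.quote(path)

print convert_file_path_quoted("my file (v2).apk")   # -> 'my file (v2).apk'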
=== /pyglossary.pyw · vladimirgegsistem/pyglossary-1 (https://github.com/vladimirgegsistem/pyglossary-1) · Python · UTF-8 · GPL-3.0-or-later / GPL-1.0-or-later / GPL-3.0-only (non-permissive) · refs/heads/master · last commit 2014-11-06 ===
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
## ui_main.py
##
## Copyright © 2008-2010 Saeed Rasooli <[email protected]> (ilius)
## This file is part of PyGlossary project, http://sourceforge.net/projects/pyglossary/
##
## This program is a free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
## If not, see <http://www.gnu.org/licenses/gpl.txt>.

import os, sys, getopt, __builtin__
from pyglossary.glossary import confPath, VERSION
#from pyglossary.text_utils import printAsError ## No red color, plain
from os.path import dirname, join, realpath
from ui.ui_cmd import COMMAND, printAsError, help, parseFormatOptionsStr


def myRaise(File=None):
    i = sys.exc_info()
    if File == None:
        sys.stderr.write('line %s: %s: %s' % (i[2].tb_lineno, i[0].__name__, i[1]))
    else:
        sys.stderr.write('File "%s", line %s: %s: %s' % (File, i[2].tb_lineno, i[0].__name__, i[1]))


def dashToCamelCase(text):  ## converts "hello-PYTHON-user" to "helloPythonUser"
    parts = text.split('-')
    parts[0] = parts[0].lower()
    for i in range(1, len(parts)):
        parts[i] = parts[i].capitalize()
    return ''.join(parts)


use_psyco_file = '%s_use_psyco' % confPath
psyco_found = None
ui_list = ('gtk', 'tk', 'qt')

#print('PyGlossary %s'%VERSION)

if os.path.isfile(use_psyco_file):
    try:
        import psyco
    except ImportError:
        print('Warning: module "psyco" not found. It could speed up execution.')
        psyco_found = False
    else:
        psyco.full()
        print('Using module "psyco" to speed up execution.')
        psyco_found = True

available_options = [
    'version',
    'help',
    'ui=',
    'read-options=',
    'write-options=',
    'read-format=',
    'write-format=',
    'reverse',
    'no-progress-bar',
]
## no-progress-bar only for command line UI
## FIXME: load ui-dependent available options from ui modules (for example ui_cmd.available_options)
## the only problem is that it has to "import gtk" before it gets the "ui_gtk.available_options"

try:
    (options, arguments) = getopt.gnu_getopt(
        sys.argv[1:],
        'vhu:r:w:',
        available_options,
    )
except getopt.GetoptError:
    printAsError(sys.exc_info()[1])
    print 'try: %s --help' % COMMAND
    sys.exit(1)

"""
ui_type: User interface type
Possible values:
    cmd - Command line interface, this ui is automatically selected if you give both input and output files
    gtk - GTK interface
    tk - Tkinter interface
    qt - Qt interface
    auto - Use the first available UI
"""
ui_type = 'auto'

if len(arguments) < 1:  ## open GUI
    ipath = opath = ''
elif len(arguments) == 1:  ## open GUI, in edit mode (if the gui supports it, like DB Editor in ui_gtk)
    ipath = arguments[0]
    opath = ''
else:  ## run the command line interface
    ui_type = 'cmd'
    ipath = arguments[0]
    opath = arguments[1]

read_format = ''    ## only used in ui_cmd for now
write_format = ''   ## only used in ui_cmd for now
read_options = {}   ## only used in ui_cmd for now
write_options = {}  ## only used in ui_cmd for now
reverse = False     ## only used in ui_cmd for now
ui_options = {}

'''
examples for read and write options:
--read-options testOption=stringValue
--read-options enableFoo=True
--read-options fooList=[1,2,3]
--read-options 'fooList=[1, 2, 3]'
--read-options 'testOption=stringValue; enableFoo=True; fooList=[1, 2, 3]'
--read-options 'testOption=stringValue;enableFoo=True;fooList=[1,2,3]'
'''

for (opt, opt_arg) in options:
    if opt in ('-v', '--version'):
        print('PyGlossary %s' % VERSION)
        sys.exit(0)
    elif opt in ('-h', '--help'):
        help()
        sys.exit(0)
    elif opt in ('-u', '--ui'):
        if opt_arg in ui_list:
            ui_type = opt_arg
        else:
            printAsError('invalid ui type %s' % opt_arg)
    elif opt in ('-r', '--read-options'):
        read_options = parseFormatOptionsStr(opt_arg)
    elif opt in ('-w', '--write-options'):
        write_options = parseFormatOptionsStr(opt_arg)
    elif opt == '--read-format':
        read_format = opt_arg
    elif opt == '--write-format':
        write_format = opt_arg
    elif opt == '--reverse':
        reverse = True
    elif opt.startswith('--'):
        ui_options[dashToCamelCase(opt[2:])] = opt_arg  ## opt_arg is not None, UI just ignores None value

## FIXME
## -v (verbose or version?)
## -r (reverse or read-options)

if ui_type == 'cmd':
    from ui import ui_cmd
    sys.exit(ui_cmd.UI(**ui_options).run(
        ipath,
        opath=opath,
        read_format=read_format,
        write_format=write_format,
        read_options=read_options,
        write_options=write_options,
        reverse=reverse,
    ))
else:
    if ui_type == 'auto':
        ui_module = None
        for ui_type2 in ui_list:
            try:
                ui_module = getattr(__import__('ui.ui_%s' % ui_type2), 'ui_%s' % ui_type2)
            except ImportError:
                myRaise()  ## FIXME
            else:
                break
        if ui_module == None:
            printAsError('no user interface module found!')
            sys.exit(1)
    else:
        ui_module = getattr(__import__('ui.ui_%s' % ui_type), 'ui_%s' % ui_type)
    sys.exit(ui_module.UI(**ui_options).run(
        editPath=ipath,
        read_options=read_options,
    ))
## don't forget to append "**options" at every UI.__init__ arguments
=== /para_grading/ML/Tests/test_05_term_extraction.py · YuanhaoSun/PPLearn (https://github.com/YuanhaoSun/PPLearn) · Python · UTF-8 · no license · refs/heads/master · last commit 2011-12-30 ===
from time import time

import numpy as np
from operator import itemgetter
from StringIO import StringIO

from sklearn.datasets import load_files
from sklearn.utils import shuffle
from sklearn.feature_extraction.text import Vectorizer
from sklearn.preprocessing import Normalizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import metrics
from sklearn.externals import joblib
from sklearn.cross_validation import KFold, StratifiedKFold, ShuffleSplit

from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.linear_model.sparse import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
from sklearn.lda import LDA
from sklearn.svm.sparse import LinearSVC, SVC
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier


###############################################################################
# Preprocessing

# Load from raw data

# Load categories
categories = ['nolimitshare', 'notsell', 'notsellnotshare', 'notsharemarketing',
              'sellshare', 'shareforexception', 'shareforexceptionandconsent',
              'shareonlyconsent']
categories3 = ['good', 'neutral', 'bad']

# Load data
# print "Loading privacy policy dataset for categories:"
# print categories if categories else "all"
# data_set = load_files('../../Dataset/ShareStatement/raw', categories=categories,
#                       shuffle=True, random_state=42)
# data_set = load_files('../../Dataset/ShareStatement3/raw', categories=categories3,
#                       shuffle=True, random_state=42)
# print 'data loaded'
# print
# data_set = joblib.load('../../Dataset/test_datasets/data_set_pos_tagged.pkl')

# load from pickle
# load data and initialize classification variables
# data_set = joblib.load('../../Dataset/train_datasets/data_set_origin.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_stemmed.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_lemmatized.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_lemmatized_pos.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_negation_bigram.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_selected.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_term_extracted.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_tagged.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_pos_bagged.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_firstsense.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_internal_sentence_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_corpus_sentence_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_corpus_word_wsd.pkl')
# data_set = joblib.load('../../Dataset/train_datasets/data_set_sem_internal_word_wsd.pkl')
data_set = joblib.load('../../Dataset/train_datasets_3/data_set_origin.pkl')
# data_set = joblib.load('../../Dataset/train_datasets_3/data_set_pos_selected.pkl')
# data_set = joblib.load('../../Dataset/train_datasets_3/data_set_term_extracted.pkl')

categories = data_set.target_names
y = data_set.target

# Extract features
vectorizer = Vectorizer(max_features=10000)

# Engineering nGram
# vectorizer.analyzer.max_n = 2

# Engineering stopword
vectorizer.analyzer.stop_words = set([])
# vectorizer.analyzer.stop_words = set(["amazonnn", "comnn", "incnn", "emcnn", "alexann", "realnetworks",
#     "googlenn", "googlevbp", "linkedinnn", "foxnn", "zyngann", "eann", "yahoorb", "travelzoo",
#     "kalturann", "2cocd", "ign", "blizzardnn", "jobstreetcom", "surveymonkeynn", "microsoftnn",
#     "wraljj", "spenn", "tnn", "mobile", "opendnsnns", "bentleynn", "allvoicesnns", "watsonnn",
#     "dynnn", "aenn", "downn", "jonesnns", "webmnn", "toysrus", "bonnierjjr", "skypenn", "wndnn",
#     "landrovernn", "icuenn", "seinn", "entersectnn", "padealsnns", "acsnns", "enn", "gettynn",
#     "imagesnns", "winampvbp", "lionsgatenn", "opendnnn", "allvoicenn", "padealnn", "imagenn",
#     "jonenn", "acnn", ])
# vectorizer.analyzer.stop_words = set(["amazon", "com", "inc", "emc", "alexa", "realnetworks",
#     "google", "linkedin", "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign",
#     "blizzard", "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile",
#     "opendns", "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus",
#     "bonnier", "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#     "getty", "images", "winamp", "lionsgate", "opendn", "allvoice", "padeal", "image",
#     "getti", "gett", "jone", "ac"])
# vectorizer.analyzer.stop_words = set(["amazon", "com", "inc", "emc", "alexa", "realnetworks",
#     "google", "linkedin", "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign",
#     "blizzard", "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile",
#     "opendns", "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus",
#     "bonnier", "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#     "getty", "images", "winamp", "lionsgate", "opendn", "allvoice", "padeal", "image",
#     "getti", "gett", "jone", "ac", "not"])
# vectorizer.analyzer.stop_words = set(['as', 'of', 'in', 'you', 'rent', 'we', 'the', 'sell',
#     'parties', 'we', 'with', 'not', 'personal', 'third', 'to', 'share', 'your', 'information',
#     'or', ])  # threshold 20 on training set
# vectorizer.analyzer.stop_words = set(["we", "do", "you", "your", "the", "that", "this",
#     "is", "was", "are", "were", "being", "be", "been",
#     "for", "of", "as", "in", "to", "at", "by",
#     # "or", "and",
#     "ve",
#     "amazon", "com", "inc", "emc", "alexa", "realnetworks", "google", "linkedin",
#     "fox", "zynga", "ea", "yahoo", "travelzoo", "kaltura", "2co", "ign", "blizzard",
#     "jobstreetcom", "surveymonkey", "microsoft", "wral", "spe", "t", "mobile", "opendns",
#     "bentley", "allvoices", "watson", "dyn", "ae", "dow", "jones", "webm", "toysrus", "bonnier",
#     "skype", "wnd", "landrover", "icue", "sei", "entersect", "padeals", "acs", "e",
#     "getty", "images", "winamp", "lionsgate", ])

X = vectorizer.fit_transform(data_set.data)
# X = Normalizer(norm="l2", copy=False).transform(X)

# Engineering feature selection
# ch2 = SelectKBest(chi2, k=276)
# X = ch2.fit_transform(X, y)

X = X.toarray()
# X = X.todense()

n_samples, n_features = X.shape
print "n_samples: %d, n_features: %d" % (n_samples, n_features)
print

###############################################################################
# Test classifier using n runs of K-fold Cross Validation

X_orig = X
y_orig = y

clf = BernoulliNB(alpha=.1)  # used for grading classification
# clf = RidgeClassifier(tol=1e-1)
print clf

num_run = 50

# lists to hold all n*k data
f1_total = []
f5_total = []
acc_total = []
pre_total = []
rec_total = []

# n runs of K-fold
for i in range(num_run):
    X, y = shuffle(X_orig, y_orig, random_state=(i + 50))
    # Setup 10-fold cross validation
    num_fold = 10
    kf = KFold(n_samples, k=num_fold, indices=True)
    # Initialize variables for counting the average
    f1_all = []
    f5_all = []
    acc_all = []
    pre_all = []
    rec_all = []
    # Test for 10 rounds using the results from 10-fold cross validation
    for train_index, test_index in kf:
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # fit and predict
        clf.fit(X_train, y_train)
        pred = clf.predict(X_test)
        # metrics
        acc_score = metrics.zero_one_score(y_test, pred)
        pre_score = metrics.precision_score(y_test, pred)
        rec_score = metrics.recall_score(y_test, pred)
        acc_all.append(acc_score)
        pre_all.append(pre_score)
        rec_all.append(rec_score)
    # put the lists into numpy arrays for calculating the results
    acc_all_array = np.asarray(acc_all)
    pre_all_array = np.asarray(pre_all)
    rec_all_array = np.asarray(rec_all)
    # add to the total k*n data set
    acc_total += acc_all
    pre_total += pre_all
    rec_total += rec_all
    # Result for each run
    print ((2 * pre_all_array.mean() * rec_all_array.mean())
           / (rec_all_array.mean() + pre_all_array.mean()))

# put the total k*n lists into numpy arrays for calculating the overall results
acc_total_array = np.asarray(acc_total)
pre_total_array = np.asarray(pre_total)
rec_total_array = np.asarray(rec_total)

# Report f1 and f0.5 using the final precision and recall for consistency
print "Overall precision: %0.5f (+/- %0.2f)" % (pre_total_array.mean(), pre_total_array.std() / 2)
print "Overall recall: %0.5f (+/- %0.2f)" % (rec_total_array.mean(), rec_total_array.std() / 2)
# print (2*pre_total_array.mean()*rec_total_array.mean())/(rec_total_array.mean()+pre_total_array.mean())
print "Overall f1-score: %0.5f" % ((2 * pre_total_array.mean() * rec_total_array.mean())
                                   / (rec_total_array.mean() + pre_total_array.mean()))
print "Overall f0.5-score: %0.5f" % ((1.25 * pre_total_array.mean() * rec_total_array.mean())
                                     / (rec_total_array.mean() + 0.25 * pre_total_array.mean()))
=== /docweb/context_processors.py · pv/pydocweb (https://github.com/pv/pydocweb) · Python · UTF-8 · no license · refs/heads/master · last commit 2013-01-24 ===
import pydocweb.docweb.models as models


def media_url(request):
    site = models.Site.objects.get_current()
    sites = models.Site.objects.all()
    from django.conf import settings
    return {'MEDIA_URL': settings.MEDIA_URL,
            'SITE_PREFIX': settings.SITE_PREFIX,
            'OTHER_SITES': [s for s in sites if s != site],
            'CURRENT_SITE': site}
=== /scripts/divzero.py · odewahn/kids_code_ipython_test (https://github.com/odewahn/kids_code_ipython_test) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-06-30 ===
num1 = input("First number: ")
num2 = input("Second number: ")
print "Result:", num1 / num2
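Entering 0 for the second number raises ZeroDivisionError, which, given the filename, is likely the point of the script. For completeness, a guarded variant (an assumption about intent, not the original):

num1 = input("First number: ")
num2 = input("Second number: ")
if num2 == 0:
    print "Cannot divide by zero"
else:
    print "Result:", num1 / num2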
=== /src/util/filetools.py · pingansdaddy/newtempo (https://github.com/pingansdaddy/newtempo) · Python · UTF-8 · no license · refs/heads/master · last commit 2014-10-15 ===
# coding:utf-8
import os
import time


class GrowingFile(object):

    def __init__(self, fn):
        self._fn = fn
        self._fd = os.open(self._fn, os.O_RDONLY)
        self._max_size = 1024 * 4

    def run(self):
        while True:
            buf = os.read(self._fd, self._max_size)
            if not buf:
                # nothing new yet: sleep before polling again
                # (the original slept only after a successful read,
                # busy-spinning on an empty file)
                time.sleep(0.01)
                continue


class Tailer(object):
    """
    Similar to the tail command: follow a file and emit its new lines.
    """

    def __init__(self, filename):
        self._fn = filename
        self._fd = os.open(self._fn, os.O_RDONLY)
        _stat = os.stat(self._fn)
        self.st_ino = _stat.st_ino
        os.lseek(self._fd, _stat.st_size, os.SEEK_SET)

    def handle_line(self, content):
        """ override in child class """
        pass

    def do_rotate(self):
        try:
            os.close(self._fd)
            self._fd = os.open(self._fn, os.O_RDONLY)
        except Exception:
            pass

    def handle_fd_changed(self):
        c_ino = self.st_ino
        count = 0
        while c_ino == self.st_ino:
            try:
                c_ino = os.stat(self._fn).st_ino
                count += 1
            except OSError:
                time.sleep(1)
            if count > 5:
                break
        else:
            # the inode changed: the file was rotated, so reopen it
            self.st_ino = c_ino
            self.do_rotate()

    def readline(self):
        buf = []
        while True:
            c = os.read(self._fd, 1)
            if c and c != '\n':
                buf.append(c)
            else:
                return ''.join(buf) if buf else None

    def advance(self):
        line = self.readline()
        if not line:
            self.handle_fd_changed()
            line = self.readline()
        if line:
            self.handle_line(line)
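A minimal usage sketch for Tailer: subclass it to override handle_line, then poll advance() in a loop, since the class provides no loop of its own (the log path is hypothetical):

import time

class PrintingTailer(Tailer):
    def handle_line(self, content):
        print content                     # replace with real processing

t = PrintingTailer('/var/log/app.log')    # hypothetical path
while True:
    t.advance()                           # emits newly appended lines via handle_line
    time.sleep(0.5)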
UTF-8
Python
false
false
2,014
7,722,351,234,936
92b8449d352aef4603d031d47ad5bbdcde352357
fff1836a8a75148d6ca306f688d93c36d6a5f8eb
/pyFlashCopy.py
59abdb99882f5e8713b69ec2f88db53332256bcd
[ "MIT" ]
permissive
rmamba/pyFlashOnPI
https://github.com/rmamba/pyFlashOnPI
c37598acf3098b398910fec213983fcf231336da
eb4315c2248b8d9d15e2f6c8d67da9cb20f0c23f
refs/heads/master
2020-05-18T05:27:32.115855
2014-03-15T18:20:46
2014-03-15T18:20:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin python
# Filename: pyFlashCopy.py
'''
Created on 13 Mar 2014

@author: [email protected]
'''

import sys, os.path, json

if __name__ == '__main__':
    size = None
    targetOffset = 0
    sourceOffset = 0
    bAppend = False
    fileIn = None
    fileOut = None
    routerSection = None
    routerParams = None
    bOW = False

    try:
        if len(sys.argv) < 3:
            print "Not enough parameters!"
            raise SystemExit
        fileIn = sys.argv[1]
        if not os.path.isfile(fileIn):
            print "File does not exist!"
            raise SystemExit
        fileOut = sys.argv[2]  # second positional argument: the output file

        for arg in sys.argv:
            if (arg == '--append') or (arg == '-a'):
                bAppend = True
            if arg.startswith('--size=') or arg.startswith('-si='):
                tmp = arg.split('=')
                size = int(tmp[1])
            if arg.startswith('--sourceOffset=') or arg.startswith('-so='):
                tmp = arg.split('=')
                sourceOffset = int(tmp[1])
            if arg.startswith('--targetOffset=') or arg.startswith('-to='):
                tmp = arg.split('=')
                targetOffset = int(tmp[1])
            if arg.startswith('--router=') or arg.startswith('-r=') or arg.startswith('-ow='):
                if arg.startswith('-ow='):
                    # overwrite data in fIn!!!
                    # -ow=WR741ND:v43:MAC 0xFFFFFFFFFFFF,PIN 12345678
                    bOW = True
                # Copy section from fIn to fOut
                # -r=WR741ND:v43:uboot,rootfs
                tmp = arg.split('=')
                tmp2 = tmp[1].split(":")
                if len(tmp2) != 3:
                    print "Invalid router parameter!"
                    raise SystemExit
                j = open('romlayouts.json', 'r')
                jsn = j.read()
                j.close()
                jsn = json.loads(jsn)
                if not tmp2[0] in jsn:
                    print "Unknown router!"
                    raise SystemExit
                jsn = jsn[tmp2[0]]
                if not tmp2[1] in jsn:
                    print "Unknown router version!"
                    raise SystemExit
                jsn = jsn[tmp2[1]]
                if arg.startswith('-ow='):
                    routerParams = jsn["DATA"]
                else:
                    routerParams = jsn
                routerSection = tmp2[2].split(",")
                #if not tmp2[2] in jsn:
                #    print "Unknown router section!"
                #    raise SystemExit

        fIn = open(fileIn, "r+b")
        if size is None:
            fIn.seek(0, 2)
            size = fIn.tell() - sourceOffset
        fIn.seek(sourceOffset)

        if bAppend:
            if not os.path.isfile(fileOut):
                print "File does not exist!"
                raise SystemExit
            fOut = open(fileOut, "ab")
            fOut.seek(0, 2)
            readData = 0
            while readData < size:
                data = fIn.read(1024)
                fOut.write(data)
                readData = readData + 1024
            fOut.close()
        elif bOW:
            for section in routerSection:
                tmp = section.split('#')
                print tmp
                fIn.seek(int(routerParams[tmp[0]]["offset"], 0))
                if tmp[1].startswith('0x'):
                    # hex data
                    dat = tmp[1][2:].decode("hex")
                    fIn.write(bytearray(dat))
                else:
                    # string
                    fIn.write(bytearray(tmp[1]))
        else:
            if not os.path.isfile(fileOut):
                print "File does not exist!"
                raise SystemExit
            if routerSection is None:
                if targetOffset > 0:
                    # inject data
                    fOut = open(fileOut, "r+b")
                    fOut.seek(targetOffset)
                else:
                    fOut = open(fileOut, "wb")
                readData = 0
                chunk = 4096
                while readData < size:
                    data = fIn.read(chunk)
                    fOut.write(data)
                    readData = readData + chunk
                    if size - readData < chunk:
                        chunk = size - readData
                fOut.close()
            else:
                fOut = open(fileOut, "r+b")
                for sec in routerSection:
                    fIn.seek(int(routerParams[sec]["offset"]))
                    fOut.seek(int(routerParams[sec]["offset"]))
                    size = int(routerParams[sec]["size"])
                    readData = 0
                    chunk = 4096
                    while readData < size:
                        data = fIn.read(chunk)
                        fOut.write(data)
                        readData = readData + chunk
                        if size - readData < chunk:
                            chunk = size - readData
                fOut.close()
        fIn.close()
    except Exception, e:
        print "Error: " + str(e)
    #finally:
    #    if f != None:
    #        f.close()
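For orientation, a few illustrative invocations of the script above; this is a sketch inferred from the argument parsing, the file names are made up, and the router id comes from the comments in the code:

# python pyFlashCopy.py dump.bin out.bin --size=65536 -so=512
# python pyFlashCopy.py dump.bin out.bin -r=WR741ND:v43:uboot,rootfs
# python pyFlashCopy.py dump.bin "-ow=WR741ND:v43:MAC 0x001122334455"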
UTF-8
Python
false
false
2,014
1,571,958,045,207
9134ce796d86db25b8a6b37db8c6e8ee5e298d78
f84157562e81ea8bd1d42a2764377a149b9e55ce
/py/src/thriftex/server/service.py
672e28ec92e6bd9a5c34bb86be55cec496213f68
[]
no_license
PKUbuntu/thriftex
https://github.com/PKUbuntu/thriftex
d3fc0aa95e4f07944222c1ec07f65c740283799a
54869b099a28d274e12b15e1147ece724730380f
refs/heads/master
2020-06-05T08:36:56.883174
2014-05-26T16:13:19
2014-05-26T16:13:19
20,174,312
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import logging
import threading

from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.server.TNonblockingServer import TNonblockingServer

class ServiceEx(object):
    """Xoa core service."""

    def __init__(self, port=9090):
        """Initialize port."""
        self._port = port
        self._processor = None
        self._server_transport = None
        self._transport_factory = None
        self._protocol_factory = None
        self._server = None
        self._thread_number = 16
        self._service_thread = None
        self.logger = logging.getLogger('thriftex')

    def get_port(self):
        return self._port

    def set_processor(self, processor):
        self._processor = processor

    def set_thread_number(self, number):
        self._thread_number = number

    def run(self):
        if not self._processor:
            self.logger.error("Handler/processor has not been initialized!")
            sys.exit(-1)
        self._server_transport = TSocket.TServerSocket(port=self._port)
        self._protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
        self._server = TNonblockingServer(self._processor,
                                          self._server_transport,
                                          self._protocol_factory)
        self._server.setNumThreads(self._thread_number)
        self.logger.info("service begins to run on port [%d]" % (self._port,))
        self._server.serve()

    def get_running_status(self):
        return self._service_thread.is_alive()

    def start(self):
        self._service_thread = threading.Thread(target=self.run)
        self._service_thread.start()

    def stop(self):
        if self._service_thread is not None:
            self.logger.info("stop service")
            self._server.stop()
            self._service_thread.join()
            self._server.close()
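A minimal usage sketch; Echo and EchoHandler are hypothetical stand-ins for a Thrift-generated service module and its handler implementation, since the real IDL is not part of this file:

from echo.Echo import Processor  # hypothetical generated code

class EchoHandler(object):
    def echo(self, msg):  # method defined by the hypothetical IDL
        return msg

service = ServiceEx(port=9090)
service.set_processor(Processor(EchoHandler()))
service.set_thread_number(8)
service.start()   # serves on a background thread via run()
# ... later ...
service.stop()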
UTF-8
Python
false
false
2,014
6,554,120,104,357
55b4de3e256598d523c9afcae9430af179c56262
533d165f83bf7fc9eb1609c1fe4e3ba30fe8ee25
/PLeetcode/com/cx363/UniquePaths.py
451cec0e95496e9e32fd968b0f8d47c550b9c1cd
[ "GPL-2.0-only" ]
non_permissive
Cunqi/leetcode
https://github.com/Cunqi/leetcode
2c5cd009ef23daeba0b596e2dc24d2a9e75f0463
110158b376d03884bf93d95d22698eba27b3f74a
refs/heads/master
2021-01-11T11:01:09.983944
2014-11-24T20:04:22
2014-11-24T20:04:22
24,578,354
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Solution:
    # @return an integer
    def uniquePaths(self, m, n):
        dp = [[0 for col in range(n)] for row in range(m)]
        # set all elements in the first column to 1
        for row in range(m):
            dp[row][0] = 1
        # set all elements in the first row to 1
        for col in range(n):
            dp[0][col] = 1
        for row in range(1, m):
            for col in range(1, n):
                dp[row][col] = dp[row - 1][col] + dp[row][col - 1]
        return dp[m - 1][n - 1]

solution = Solution()
print(solution.uniquePaths(2, 2))
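The DP above counts lattice paths, and the same value has a closed form: a path is a choice of which m-1 of the m+n-2 moves go down, i.e. C(m+n-2, m-1). A quick cross-check sketch:

from math import factorial

def unique_paths_closed_form(m, n):
    # binomial coefficient C(m+n-2, m-1)
    return factorial(m + n - 2) // (factorial(m - 1) * factorial(n - 1))

assert unique_paths_closed_form(2, 2) == 2  # matches uniquePaths(2, 2)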
UTF-8
Python
false
false
2,014
1,305,670,104,091
dedfb376d8b477e95d6c4f77a107f4842bcf6582
151f26c650d4253acfabb239f2c87c908f9233f1
/test_exception_else.py
60cb1020fffc31bb5b6c28c73746198f5735bc05
[ "MIT" ]
permissive
pankajanand18/python-tests
https://github.com/pankajanand18/python-tests
19fabe4474e0e95abe02dc42f3c70ba08e87aa68
3e58a274463692a2552efd3a05393683567dac6e
refs/heads/master
2021-01-15T21:50:24.462289
2014-11-16T08:17:56
2014-11-16T08:17:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
data = {'Hello': 'Data'}

try:
    # data.Hello = 'hello'
    print 'hello'
except Exception as ex:
    raise
else:
    print "reached else"
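Since the except clause re-raises, the else branch here is reached only when the try body succeeds; a compact illustration of try/except/else semantics:

def classify(d):
    try:
        value = d['Hello']
    except KeyError:
        return 'missing'
    else:
        # runs only when the try block raised nothing
        return 'found: %s' % value

assert classify({'Hello': 'Data'}) == 'found: Data'
assert classify({}) == 'missing'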
UTF-8
Python
false
false
2,014
12,386,685,701,846
f58373c7fe64bd9c93ab2825f063e45861d71b2a
354391e671da5cabcacffb1bd8c47a04414b0950
/city.py
6d5555ba06c9b21b62d99e1023a667156d242552
[]
no_license
Skycker/round-city-round
https://github.com/Skycker/round-city-round
e143be6fae8ff3711df99fb3a9d306a266906a36
a855abe4ac2a6524ff4316d60cf54d278eac7a51
refs/heads/master
2020-05-18T16:24:05.548735
2013-12-08T18:29:14
2013-12-08T18:29:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import os
import random

import pygame
from pygame.locals import *

import inputbox
from prices import Price_dict, Price_list
from classes import *

# Game window size
W_WIDTH = 800
W_HEIGH = 600

# Color palette
green = (0, 128, 0)
teal = (0, 128, 128)
white = (255, 255, 255)
black = (0, 0, 0)

# Maximum amount of money
MAX_MONEY = 99999

# Positions for rendering texts on screen
MONEY_TEXT_POS = (660, 40)
TIME_TEXT_POS = (695, 13)
ALERT_POS = (625, 3)
FINAL_TEXT_POS = (40, 30)

WIN_TEXT = 'Поздравляю! Вы скопили нужную сумму, кружок выкупил салон связи и устроил свою жизнь'
LOSE_TEXT = 'Вы проиграли, кружка уволили, вся его жизнь пошла под откос. Вы подвели кружка'

# In-game time runs 60*TIME_RAPID times faster than real time.
# Set the initial values right away.
# minute_step_able permits the hour increment; without it the clock
# could be incremented more than once instead of exactly once.
TIME_RAPID = 2
START_MINUTES = 00
START_HOURS = 10
minute_inc_counter = 0
minute_step_able = True
minutes = START_MINUTES
hours = START_HOURS

# How long a notification stays on screen (in iterations)
ALERT_TIME = 500

# Vertices where citizens must not be at night
NIGHT_TABOO_PLACES = [2,3,4,5,6,7,8,9,10,11,12,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34]

# Player constants
player_name = ''    # name
FINE = 500          # fine for being away from the workplace
MAX_FINES_QUO = 15  # maximum number of fines before game over
MEAL_PRICE = 50     # price of buying food from the barman

pygame.init()
screen = pygame.display.set_mode((W_WIDTH, W_HEIGH))

# Load the background images
background = pygame.image.load('background.png').convert()
start_background = pygame.image.load('start_background.png').convert()
end_background = pygame.image.load('half_life_3_hope.png').convert()
night_cover = pygame.image.load('night_cover.png').convert_alpha()

# Set up the soundtracks and their handling
directory = os.getcwd() + '/soundtracks'
files_list = os.listdir(directory)
songs_list = []
for file_name in files_list:
    if file_name.endswith('.mp3'):
        songs_list.append('soundtracks/' + file_name)

# Create the custom SONG_END event.
# It signals us when a track has finished playing.
SONG_END = pygame.USEREVENT + 1
pygame.mixer.music.set_endevent(SONG_END)

# Soundtrack selection function
def start_next_song():
    if songs_list:
        next_song = random.choice(songs_list)  # pick a random track...
        print(next_song)
        pygame.mixer.music.load(next_song)     # ...and load it
        pygame.mixer.music.set_volume(0.2)
        pygame.mixer.music.play()

# Text rendering function
def set_text(surface, text='', size=50, text_pos=(0, 0), alias=True, color=black):
    font = pygame.font.Font(None, size)
    text_obj = font.render(text, alias, color)
    surface.blit(text_obj, text_pos)

# Function for showing messages to the player
def alert(text, color=black):
    x = W_WIDTH - len(text)*8 - 5
    y = ALERT_POS[1]
    pos = [x, y]
    set_text(screen, text, 20, pos)

# Is it night in the game?
def is_night():
    if 22 <= hours or 0 <= hours < 6:
        return True
    else:
        return False

def is_working_day():
    if 8 <= hours < 22:
        return True
    else:
        return False

def unless_player_press_smth():
    """'Pauses' the game until the player presses a button,
    and handles those presses."""
    while True:
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                # Fullscreen or windowed mode?
                if event.key == K_f:
                    pygame.display.set_mode((W_WIDTH, W_HEIGH), pygame.FULLSCREEN)
                    screen.blit(start_background, (0, 0))
            if event.type == MOUSEMOTION:
                for btn in buttons:
                    if btn.rect.collidepoint(pygame.mouse.get_pos()):
                        btn.changeState(1)
                    else:
                        btn.changeState(0)
            elif event.type == MOUSEBUTTONDOWN:
                if new_game:
                    if btn_start_game.rect.collidepoint(pygame.mouse.get_pos()):
                        global player_name
                        player_name = inputbox.ask(screen, "Your name?")
                        return
                    if btn_exit.rect.collidepoint(pygame.mouse.get_pos()):
                        pygame.quit()
                        sys.exit()
                if not new_game:
                    if btn_finish_game.rect.collidepoint(pygame.mouse.get_pos()):
                        return
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        for btn in buttons:
            btn.rander(screen)
            set_text(screen, btn.text, 20, (btn.x+27, btn.y+7), 1, btn.text_color)
        pygame.display.flip()

while True:
    # Start the game...
    new_game = True
    pygame.display.set_caption('Игра')
    screen.blit(start_background, (0, 0))

    # menu buttons
    btn_start_game = Button(W_WIDTH/2 - 30, W_HEIGH/2 - 60, "Старт")
    btn_exit = Button(W_WIDTH/2 - 30, W_HEIGH/2, "Выход")
    buttons = [btn_start_game, btn_exit]

    start_next_song()
    unless_player_press_smth()
    pygame.display.set_caption('Играет ' + player_name)

    minutes = START_MINUTES
    hours = START_HOURS

    # Initialize the player object
    hero = Hero(18, 270, 0.25)

    # Initialize the townsfolk
    # (the starting position is picked at random from the vertices of the corresponding graph)
    person1 = Citizen(random.randint(0, 34), 0.1)
    person2 = Citizen(random.randint(0, 34), 0.1)
    person3 = Citizen(random.randint(0, 34), 0.1)
    person4 = Citizen(random.randint(0, 34), 0.1)
    person5 = Citizen(random.randint(0, 34), 0.1)
    person6 = FastСitizen(random.randint(0, 34), 0.15)
    person7 = FastСitizen(random.randint(0, 34), 0.15)
    citizens = [person1, person2, person3, person4, person5, person6, person7]
    neighbour = Neighbour()

    # create the barman; his starting position depends on the time
    if is_working_day():
        barman = Barman(6)  # at work
    else:
        barman = Barman(0)  # at home

    pygame.mouse.set_visible(False)
    hungry_time = [random.randint(0, 24) for x in range(random.randint(3, 5))]

    # Event loop
    while True:
        screen.blit(background, (0, 0))  # so the sprites don't pile up

        # If it's night, darken the screen
        if is_night():
            screen.blit(night_cover, (0, 0))

        for event in pygame.event.get():
            if event.type == QUIT:
                # handle the click on the window close button
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                # Fullscreen or windowed mode?
                if event.key == K_f:
                    pygame.display.set_mode((W_WIDTH, W_HEIGH), pygame.FULLSCREEN)
                    screen.blit(background, (0, 0))
                elif event.key == K_ESCAPE:
                    pygame.display.set_mode((W_WIDTH, W_HEIGH))
                    screen.blit(background, (0, 0))
                if (event.key == K_e and hero.is_near_barman()
                        and barman.is_at_workplace()):
                    hero.hungry = False
                    hero.money -= MEAL_PRICE
                    hero.alert_text = 'Вы купили поесть'
                    hero.alert_times = ALERT_TIME
            # If the soundtrack has finished, call start_next_song()
            if event.type == SONG_END:
                start_next_song()

        # Movement of the protégé
        keys = pygame.key.get_pressed()
        if keys[K_LEFT]:
            hero.x -= hero.step
        if keys[K_RIGHT]:
            hero.x += hero.step
        if keys[K_UP]:
            hero.y -= hero.step
        if keys[K_DOWN]:
            hero.y += hero.step
        #set_text(screen, str(hero.x) + '|' + str(hero.y))

        # Notifications about the protégé's actions
        if hero.alert_text:
            alert(hero.alert_text)
            hero.alert_times -= 1
            if hero.alert_times == 0:
                hero.alert_text = ''

        # The protégé's hunger
        if hours == 0 and minutes == 0:
            hungry_time = [random.randint(0, 24) for x in range(random.randint(3, 5))]
        if hours in hungry_time and minutes == 00:
            hero.hungry = True
        if hero.hungry:
            hero.alert_text = 'Вы голодны. Поешьте в кафе'
        if hero.fines >= MAX_FINES_QUO - 3:
            hero.alert_text = ('Еще ' + str(MAX_FINES_QUO - hero.fines) + ' штраф и вас уволят')
        if hero.is_near_barman() and barman.is_at_workplace():
            if not hero.alert_times:
                hero.alert_text = 'Нажмите E, что бы поесть'
                hero.alert_times = int(ALERT_TIME/5)

        # Movement of the persons (the blue and orange circles)
        for person in citizens:
            if person.is_at_point(person.target_x, person.target_y):
                if not is_working_day():
                    # at night, keep re-rolling until a non-taboo vertex is chosen
                    while True:
                        person.target_vert = random.choice(person.Links[person.target_vert])
                        if person.target_vert in NIGHT_TABOO_PLACES:
                            continue
                        else:
                            break
                else:
                    person.target_vert = random.choice(person.Links[person.target_vert])
                person.target_x = person.Verts[person.target_vert][0]
                person.target_y = person.Verts[person.target_vert][1]
            person.make_step()

        # Citizens at the circle's workplace in the shop
        for person in citizens:
            if (person.is_at_point(person.Verts[26][0], person.Verts[26][1]) or
                    person.is_at_point(person.Verts[27][0], person.Verts[27][1])):
                if hero.is_at_workplace():
                    if not person.is_paid_for:
                        key = random.choice(Price_list)
                        hero.money += Price_dict[key]
                        person.is_paid_for = True
                        person.alert_times = ALERT_TIME
                        if hero.money > MAX_MONEY:
                            hero.money = MAX_MONEY
                else:
                    hero.alert_text = 'Не было на месте. Штраф!'
                    hero.alert_times = ALERT_TIME
                    hero.money -= FINE
                    hero.fines += 1
                    if hero.money < -MAX_MONEY:
                        hero.money = -MAX_MONEY

            # Display notifications from citizens
            if person.is_paid_for:
                if not hero.alert_text:  # stopgap so the text outputs don't overlap;
                    alert(key)           # the whole alert output system ought to be rewritten
                person.alert_times -= 1
                if person.alert_times == 0:
                    person.is_paid_for = False

        # Game over conditions
        if hero.money == MAX_MONEY:
            win = True
            break
        if hero.fines == MAX_FINES_QUO:
            win = False
            break

        # The neighbour's life cycle
        if (hours == 12 or hours == 18) and minutes == 0:
            neighbour.direction = 'toward'
        if neighbour.is_at_point(neighbour.Verts[5][0], neighbour.Verts[5][1]):
            neighbour.direction = 'back'
        if (neighbour.is_at_point(neighbour.Verts[0][0], neighbour.Verts[0][1])
                and neighbour.direction == 'back'):
            neighbour.direction = 'stay'
            neighbour.target_vert = 0
        if neighbour.direction != 'stay':
            if neighbour.is_at_point(neighbour.target_x, neighbour.target_y):
                if neighbour.direction == 'toward':
                    neighbour.target_vert += 1
                else:
                    neighbour.target_vert -= 1
                neighbour.target_x = neighbour.Verts[neighbour.target_vert][0]
                neighbour.target_y = neighbour.Verts[neighbour.target_vert][1]
            neighbour.make_step()

        # The barman's life cycle
        if hours == 8 and minutes == 0:
            barman.direction = 'toward'
            barman.target_vert = 0
        if hours == 22 and minutes == 0:
            barman.direction = 'back'
            barman.target_vert = 6
        if (barman.is_at_point(barman.Verts[6][0], barman.Verts[6][1])
                and barman.direction == 'toward'):
            barman.direction = 'stay'
        if (barman.is_at_point(barman.Verts[0][0], barman.Verts[0][1])
                and barman.direction == 'back'):
            barman.direction = 'stay'
        if barman.direction != 'stay':
            if barman.is_at_point(barman.target_x, barman.target_y):
                if barman.direction == 'toward':
                    barman.target_vert += 1
                else:
                    barman.target_vert -= 1
                barman.target_x = barman.Verts[barman.target_vert][0]
                barman.target_y = barman.Verts[barman.target_vert][1]
            barman.make_step()

        # Computing the in-game time
        minutes = (START_MINUTES + (TIME_RAPID*pygame.time.get_ticks()//1000) % 60
                   - minute_inc_counter*START_MINUTES)
        if minutes == 59:
            if minute_step_able:
                hours += 1
                minute_step_able = False
                minute_inc_counter += 1
                if hours == 24:
                    hours = 0
        if minutes == 1:
            minute_step_able = True

        # Render money and time on screen
        # build a string for pretty money output
        money_string = ('$' + '0'*(len(str(MAX_MONEY)) - len(str(abs(hero.money))))
                        + str(abs(hero.money)))
        if hero.money < 0:
            money_string = '-' + money_string
        set_text(screen, money_string, 50, MONEY_TEXT_POS, True, green)

        # build a string for pretty time output
        if not hours // 10:
            time_string = '0' + str(hours)
        else:
            time_string = str(hours)
        time_string += ':'
        if not minutes // 10:
            time_string += '0' + str(minutes)
        else:
            time_string += str(minutes)
        set_text(screen, time_string, 50, TIME_TEXT_POS, True, teal)

        # Draw the sprites into the buffer
        hero.rander(screen)
        for person in citizens:
            person.rander(screen)
        neighbour.rander(screen)
        barman.rander(screen)

        # Flip the buffer onto the screen
        pygame.display.flip()

    new_game = False
    screen.blit(end_background, (0, 0))
    if win:
        final_text = WIN_TEXT
        size = 23
    else:
        final_text = LOSE_TEXT
        size = 25
    set_text(screen, final_text, size, FINAL_TEXT_POS, True, white)
    btn_finish_game = Button(W_WIDTH/2 - 30, W_HEIGH/2 - 60, "Жду...")
    buttons = [btn_finish_game, btn_exit]
    pygame.mouse.set_visible(True)
    unless_player_press_smth()
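The clock bookkeeping above (minute_step_able, minute_inc_counter) exists only to avoid double hour increments; the same accelerated clock can be derived arithmetically. A sketch of that idea, not the game's actual code:

# derive the in-game (hours, minutes) directly from elapsed real milliseconds
def game_clock(ticks_ms, time_rapid=2, start_hours=10, start_minutes=0):
    total_minutes = start_hours * 60 + start_minutes + time_rapid * ticks_ms // 1000
    return (total_minutes // 60) % 24, total_minutes % 60

assert game_clock(0) == (10, 0)
assert game_clock(30000) == (11, 0)  # 30 real seconds -> 60 game minutes at TIME_RAPID=2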
UTF-8
Python
false
false
2,013
3,341,484,567,910
e5169a6084764982aeff521db9ecff79b5d61708
d84922ed03e9d01c84e92b384f20b87edb3ece6d
/src/cog_abm/ML/core.py
d2efc70efd70e2b9c96aa0a04ce151c8c6316974
[ "BSD-3-Clause" ]
permissive
plewczynski/cog-abm
https://github.com/plewczynski/cog-abm
b1d37cf70f4a1d005e424a085159818dd776bd5e
6f6450141a996b067d3a396d47f4386215a4042c
refs/heads/master
2021-05-27T11:33:24.433639
2012-08-25T21:01:37
2012-08-25T21:01:37
765,520
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Most useful things connected with ML """ import math from itertools import izip from random import shuffle from scipy.io.arff import loadarff from cog_abm.extras.tools import flatten class Classifier(object): def classify(self, sample): pass def classify_pval(self, sample): """ Returns tuple with class and probability of sample belonging to it """ pass def class_probabilities(self, sample): """ Returns dict with mapping class->probability that sample belongs to it """ pass def train(self, samples): pass def clone(self): """ Returns copy of classifier. This is default implementation. Should be overriden in subclasses. @rtype: Classifier @return: New instance of classifier. """ import copy return copy.deepcopy(self) class Attribute(object): ID = None """ This class field is for id when putting some conversion method in dict """ def get_value(self, value): ''' value is inner representation ''' pass def set_value(self, value): ''' value is outer representation ''' return value def __eq__(self, other): return self.ID == other.ID class NumericAttribute(Attribute): ID = "NumericAttribute" def get_value(self, value): return value class NominalAttribute(Attribute): ID = "NominalAttribute" def __init__(self, symbols): """ Symbols should be strings! For example Orange doesn't support any other format """ symbols = [str(s) for s in symbols] self.symbols = tuple(s for s in symbols) self.mapping = dict(reversed(x) for x in enumerate(self.symbols)) self.tmp_rng = set(xrange(len(self.symbols))) def get_symbol(self, idx): return self.symbols[idx] def get_idx(self, symbol): return self.mapping[str(symbol)] def get_value(self, value): return self.get_symbol(value) def set_value(self, value): return self.set_symbol(value) def set_symbol(self, symbol): return self.get_idx(symbol) def __eq__(self, other): return super(NominalAttribute, self).__eq__(other) and \ set(self.symbols) == set(other.symbols) class Sample(object): def __init__(self, values, meta=None, cls=None, cls_meta=None, dist_fun=None, last_is_class=False, cls_idx=None): self.values = values[:] self.meta = meta or [NumericAttribute() for _ in values] if last_is_class or cls_idx is not None: if last_is_class: cls_idx = -1 self.cls_meta = self.meta[cls_idx] self.cls = self.values[cls_idx] self.meta = self.meta[:] del self.values[cls_idx], self.meta[cls_idx] else: self.cls = cls self.cls_meta = cls_meta if dist_fun is None and \ all(attr.ID == NumericAttribute.ID for attr in self.meta): self.dist_fun = euclidean_distance else: self.dist_fun = dist_fun def get_cls(self): if self.cls_meta is None or self.cls is None: return None return self.cls_meta.get_value(self.cls) def get_values(self): return [m.get_value(v) for v, m in izip(self.values, self.meta)] def distance(self, other): return self.dist_fun(self, other) def __eq__(self, other): return self.cls == other.cls and self.cls_meta == other.cls_meta and \ self.meta == other.meta and self.values == other.values def __hash__(self): return 3 * hash(tuple(self.values)) + 5 * hash(self.cls) def __str__(self): return "({0}, {1})".format(str(self.get_values()), self.get_cls()) def __repr__(self): return str(self) def copy_basic(self): return Sample(self.values, self.meta, dist_fun=self.dist_fun) def copy_full(self): return Sample(self.values, self.meta, self.cls, self.cls_meta, self.dist_fun) def copy_set_cls(self, cls, meta): s = self.copy_basic() s.cls_meta = meta s.cls = meta.set_value(cls) return s #Sample distance functions def euclidean_distance(sx, sy): return math.sqrt(math.fsum([ (x - y) * (x - 
y) for x, y in izip(sx.get_values(), sy.get_values()) ])) def load_samples_arff(file_name, last_is_class=False, look_for_cls=True): a_data, a_meta = loadarff(file_name) names = a_meta.names() attr = {"nominal": lambda attrs: NominalAttribute(attrs), "numeric": lambda _: NumericAttribute()} gen = (a_meta[n] for n in names) meta = [attr[a[0]](a[1]) for a in gen] cls_idx = None if look_for_cls: for i, name in enumerate(names): if a_meta[name][0] == "nominal" and name.lower() == "class": cls_idx = i break def create_sample(s): values = [mi.set_value(vi) for mi, vi in izip(meta, s)] return \ Sample(values, meta, last_is_class=last_is_class, cls_idx=cls_idx) return [create_sample(s) for s in a_data] def split_data(data, train_ratio=2. / 3.): """ data - samples to split into two sets: train and test train_ratio - real number in [0,1] returns (train, test) - pair of data sets """ tmp = [s for s in data] shuffle(tmp) train = [s for i, s in enumerate(tmp) if i < train_ratio * len(tmp)] test = [s for i, s in enumerate(tmp) if i >= train_ratio * len(tmp)] return (train, test) def split_data_cv(data, folds=8): """ data - samples to split into two sets *folds* times returns [(train, test), ...] - list of pairs of data sets """ tmp = [s for s in data] shuffle(tmp) N = len(tmp) M = N / folds overflow = N % folds splits = [] i = 0 while i < N: n = M if overflow > 0: overflow -= 1 n += 1 split = tmp[i:i + n] splits.append(split) i += n return [(flatten(splits[:i] + splits[i + 1:]), splits[i]) for i in xrange(folds)]
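The split helpers at the end of this module are easy to exercise; a small usage sketch with hypothetical numeric samples:

data = [Sample([float(i), float(i % 3)]) for i in range(24)]

train, test = split_data(data, train_ratio=2. / 3.)   # ~16 train, ~8 test
folds = split_data_cv(data, folds=8)                  # 8 (train, test) pairs
for train_i, test_i in folds:
    assert len(train_i) + len(test_i) == len(data)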
UTF-8
Python
false
false
2,012
9,526,237,511,057
d9ca70df01fdc5490e6111aa2559f2b30dd91dc6
e71423f70e13c99e9d7432946d5cb6b4318eb20e
/core/netbase/mainbranch/Tribler/Main/Dialogs/RemoveTorrent.py
527cf544aa16d0aeafd03ea7ec22b2bac79d5d06
[ "LicenseRef-scancode-unknown-license-reference", "LGPL-2.1-only", "LGPL-2.1-or-later", "LicenseRef-scancode-other-copyleft", "MIT", "GPL-1.0-or-later", "LicenseRef-scancode-free-unknown", "LGPL-2.0-or-later", "LGPL-3.0-or-later", "IJG", "LicenseRef-scancode-proprietary-license", "OpenSSL", "LicenseRef-scancode-python-cwi", "GPL-2.0-only", "LGPL-3.0-only", "Apache-2.0", "GPL-2.0-or-later", "GPL-3.0-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "WxWindows-exception-3.1" ]
non_permissive
bi0shacker001/Criminality
https://github.com/bi0shacker001/Criminality
8778ad1e48d655e0e43a8d05ec93a89298b8514a
85b8162989200a67e7157f66a6c22eac64f9cc7c
refs/heads/master
2016-09-11T06:24:25.832669
2014-06-21T01:01:36
2014-06-21T01:01:36
19,465,343
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# Written by Niels Zeilemaker
# see LICENSE.txt for license information
import wx
import os
import sys

from Tribler.Main.vwxGUI.widgets import _set_font, BetterText as StaticText, \
    EditText
from Tribler.Main.vwxGUI.GuiUtility import GUIUtility
from Tribler.Core.TorrentDef import TorrentDef
from Tribler.Core.simpledefs import TRIBLER_TORRENT_EXT
from threading import Event
from Tribler.Main.Dialogs.GUITaskQueue import GUITaskQueue
from Tribler.Main.vwxGUI import forceWxThread
from traceback import print_exc
from Tribler.community.channel.community import ChannelCommunity

class RemoveTorrent(wx.Dialog):
    def __init__(self, parent, torrent):
        canEdit = canComment = False
        if torrent.hasChannel():
            state, iamModerator = torrent.channel.getState()
            canEdit = state >= ChannelCommunity.CHANNEL_OPEN

        height = 125
        if canEdit:
            height = 200

        wx.Dialog.__init__(self, parent, -1, 'Are you sure you want to remove this torrent?', size=(600, height))

        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.Add(wx.StaticBitmap(self, -1, wx.ArtProvider.GetBitmap(wx.ART_QUESTION, wx.ART_MESSAGE_BOX)), 0, wx.RIGHT, 10)

        vSizer = wx.BoxSizer(wx.VERTICAL)
        firstLine = StaticText(self, -1, "Delete '%s' from disk, or just remove it from your downloads?" % torrent.name)
        _set_font(firstLine, fontweight=wx.FONTWEIGHT_BOLD)
        firstLine.SetMinSize((1, -1))
        vSizer.Add(firstLine, 0, wx.EXPAND | wx.BOTTOM, 3)
        vSizer.Add(StaticText(self, -1, "Removing from disk will move the selected item to your trash."), 0, wx.EXPAND)

        vSizer.AddStretchSpacer()

        self.newName = None
        if canEdit:
            vSizer.Add(StaticText(self, -1, "While we're at it, can you improve the name of this torrent?"), 0, wx.EXPAND | wx.BOTTOM, 3)
            self.newName = EditText(self, torrent.name)
            vSizer.Add(self.newName, 0, wx.EXPAND)
            vSizer.AddStretchSpacer()

        bSizer = wx.BoxSizer(wx.HORIZONTAL)
        bSizer.AddStretchSpacer()
        bSizer.Add(wx.Button(self, wx.ID_CANCEL), 0, wx.RIGHT, 3)
        bSizer.Add(wx.Button(self, wx.ID_DEFAULT, 'Only delete from downloads'), 0, wx.RIGHT, 3)
        bSizer.Add(wx.Button(self, wx.ID_DELETE, 'Also delete from disk'))

        vSizer.Add(bSizer, 0, wx.ALIGN_RIGHT | wx.TOP, 7)
        hSizer.Add(vSizer, 1, wx.EXPAND)

        border = wx.BoxSizer()
        border.Add(hSizer, 1, wx.ALL | wx.EXPAND, 10)

        self.Bind(wx.EVT_BUTTON, lambda event: self.EndModal(event.GetId()))
        self.SetSizer(border)
        self.Layout()
        self.CenterOnParent()
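Because every button click calls EndModal(event.GetId()), ShowModal() returns the id of the pressed button. A minimal caller sketch (torrent is assumed to exist; the branch bodies are placeholders):

dlg = RemoveTorrent(None, torrent)
buttonId = dlg.ShowModal()
if buttonId == wx.ID_DELETE:
    pass  # remove the download and move its data to trash
elif buttonId == wx.ID_DEFAULT:
    pass  # remove from downloads only
dlg.Destroy()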
UTF-8
Python
false
false
2,014
9,998,683,873,262
f92b63b8700d532a4724ae21fa1afe6f2b66a1e1
3e8f7ba07f34e2e440f796e2c85e5b072f201aeb
/recommender.py
a68dad881b1d59dbd2686e987cf28f3d2adb59d1
[ "MIT" ]
permissive
kavinyao/SKBPR
https://github.com/kavinyao/SKBPR
57cc977c1abb2fccf47edef15cf999a6ecb5beb3
305aeb846ee89234d8eae3b73452c2fdad2496b4
refs/heads/master
2021-01-01T18:37:11.779651
2013-05-30T15:51:57
2013-05-30T15:51:57
9,961,167
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Keyword Recommenders. """ import math import config import random from utils import timeit from collections import defaultdict class NonStatisticalMixin(object): def reset(self): pass def round_statistics(self): pass def experiment_statistics(self): pass class RandomRecommender(NonStatisticalMixin): def __init__(self, dbm, *ignored): """ @param dbm a DatabaseManager @param limit the (maximal) number of recommended products at a time """ self.limit = 0 self.dbm = dbm self.all_products = [] def __str__(self): return 'Random Recommender[N=%d]' % self.limit def set_limit(self, limit): self.limit = limit @timeit def preprocess(self, query_train_table): # retrieve all products at once as there aren't many (< 4000) query = '''SELECT DISTINCT pageinfo FROM visit WHERE pagetype = 'product' AND pageinfo != '' AND pageinfo != 'null' AND userid IN ( SELECT user_id FROM %s )''' % query_train_table self.all_products = [(row['pageinfo'], 1.0) for row in self.dbm.get_rows(query)] def recommend(self, query): return random.sample(self.all_products, self.limit) class HottestRecommender(NonStatisticalMixin): def __init__(self, dbm, *ignored): """ @param dbm a DatabaseManager @param limit the (maximal) number of recommended products at a time """ self.limit = 0 self.dbm = dbm self.recommend_list = [] def __str__(self): return 'Hottest Recommender[N=%d]' % self.limit def set_limit(self, limit): self.limit = limit @timeit def preprocess(self, query_train_table): for row in self.dbm.get_rows('''SELECT pageinfo, COUNT(id) count FROM visit WHERE pagetype = 'product' AND pageinfo != '' AND userid IN ( SELECT user_id FROM %s ) GROUP BY pageinfo ORDER BY count DESC LIMIT %d''' % (query_train_table, self.limit)): self.recommend_list.append((row['pageinfo'], row['count'])) #print self.recommend_list def recommend(self, query): return self.recommend_list class KeywordRecommender(object): def __init__(self, dbm, ws, rm): """ Make sure to source rec_tables.sql before using this class. 
@param dbm a DatabaseManager @param ws a WordSegmenter @param rm a RelevanceMeasure """ self.limit = 0 self.dbm = dbm self.ws = ws self.rm = rm self.reset() def reset(self): self._related_product_cache = {} self._not_enough_recs = 0 self._round_results = [] def set_limit(self, limit): self.limit = limit def __str__(self): return 'Keyword Recommender with %s[N=%d]' % (self.rm, self.limit) def preprocess(self, query_train_table): self.query_train_table = query_train_table # empty cache so that cache from last round does not interfere with next round self._related_product_cache = {} self._not_enough_recs = 0 self.dbm.begin() self.dbm.query('TRUNCATE TABLE keyword'); self.dbm.query('TRUNCATE TABLE keyword_query'); self.dbm.query('TRUNCATE TABLE keyword_product_weight'); # these methods can be overridden by sub-classes self._build_keyword_product_mapping() self._build_product_keyword_mapping() self._measure_relevance() self.dbm.commit() @timeit def _build_keyword_product_mapping(self): self.keyword_count = defaultdict(int) self.keyword_product_count = defaultdict(lambda: defaultdict(int)) for qrow in self.dbm.get_rows('SELECT id, query FROM %s' % self.query_train_table): # GROUP_CONCAT returns a comma-separeted string products = [(qprow['product_name'], qprow['sequences']) for qprow in self.dbm.get_rows('SELECT product_name, GROUP_CONCAT(sequence) AS sequences FROM query_product WHERE query_id = %s GROUP BY product_name', (qrow['id'],))] # remove duplicate keywords = set(self.ws.segment(qrow['query'])) for kw in keywords: self.keyword_count[kw] += 1 # store keyword-query relations in db self.dbm.insert('INSERT INTO keyword_query (keyword, query_id) VALUES (%s, %s)', (kw, qrow['id'])) for p, sequences in products: # get product sequence in this session count = self.get_browse_count(sequences) # update keyword_product_count for kw in keywords: self.keyword_product_count[kw][p] += count def get_browse_count(self, sequences): """Overrideable by sub-class. 
Multiple browses in a session always count 1.""" return 1 @timeit def _build_product_keyword_mapping(self): # construct product_keyword_count # it's actually equivalent to keyword_product_count, but can let compute # related_product_count faster self.product_keyword_count = defaultdict(dict) for kw, dt in self.keyword_product_count.iteritems(): for p, c in dt.iteritems(): self.product_keyword_count[p][kw] = c @timeit def _measure_relevance(self): # calculate keyword-product relevance all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product') for keyword, count in self.keyword_count.iteritems(): # will be used for statistics self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count)) related_product_number = len(self.keyword_product_count[keyword].keys()) related_product_count = sum(self.keyword_product_count[keyword].values()) for product, count in self.keyword_product_count[keyword].iteritems(): related_keyword_number = len(self.product_keyword_count[product].keys()) related_keyword_count = sum(self.product_keyword_count[product].values()) # delegate to sub-classes relevance = self.rm.get_relevance(count, (related_product_number, related_product_count), (related_keyword_number, related_keyword_count), all_product_number) self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance)) def round_statistics(self): """Get number of query, keywords, products, keyword-product relations of current round.""" n_query = self.dbm.get_value("SELECT COUNT(*) FROM %s" % self.query_train_table) n_keyword = self.dbm.get_value("SELECT COUNT(*) FROM keyword") n_product = self.dbm.get_value("SELECT COUNT(DISTINCT product) FROM keyword_product_weight") n_relation = self.dbm.get_value("SELECT COUNT(*) FROM keyword_product_weight") self._round_results.append((n_query, self._not_enough_recs, n_keyword, n_product, n_relation)) if config.verbose: print 'Round statistics: query: %d (not enough %d), keyword: %d, product: %d, relation: %d, A/M: %.2f%%' % (n_query, self._not_enough_recs, n_keyword, n_product, n_relation, 100.0*n_relation / (n_keyword*n_product)) def experiment_statistics(self): # stands for: query, not-enough, keyword, product, relation, a/m sums = [0, 0, 0, 0, 0, 0] for data in self._round_results: for i in range(5): sums[i] += data[i] sums[5] += 100.0*data[4]/(data[2]*data[3]) n = float(len(self._round_results)) n_query, not_enough_recs, n_keyword, n_product, n_relation, am = [s/n for s in sums] print 'Experiment statistics:\nquery: %.2f (not enough %.2f), keyword: %.2f, product: %.2f, relation: %.2f, A/M: %.2f%%' % (n_query, not_enough_recs, n_keyword, n_product, n_relation, am) def recommend(self, query): keywords = self.ws.segment(query) product_weight = defaultdict(float) # gather product weights for kw in keywords: for product, weight in self.__fetch_related_products(kw): product_weight[product] += weight # convert dict to list for sorting product_weight_list = [item for item in product_weight.iteritems()] product_weight_list.sort(key=lambda t: t[1], reverse=True) if len(product_weight_list) < self.limit: self._not_enough_recs += 1 return product_weight_list[:self.limit] def __fetch_related_products(self, keyword): if not self._related_product_cache.has_key(keyword): self._related_product_cache[keyword] = [(row['product'], row['weight']) for row in self.dbm.get_rows('SELECT product, weight FROM keyword_product_weight WHERE keyword = %s', (keyword,))] return 
self._related_product_cache[keyword] class KeywordRecommenderHottestFallback(KeywordRecommender): """A recommender which uses KeywordRecommender's recommendations first, but turns to HottestRecommender if its recommendations are not enough.""" def __init__(self, *args): """Identical to that of KeywordRecommender""" super(KeywordRecommenderHottestFallback, self).__init__(*args) self.hottest_recommender = HottestRecommender(*args) def __str__(self): return 'Keyword Recommender with Hottest Recommender fallback with %s[N=%d]' % (self.rm, self.limit) def set_limit(self, limit): self.hottest_recommender.set_limit(limit) super(KeywordRecommenderHottestFallback, self).set_limit(limit) def preprocess(self, query_train_table): super(KeywordRecommenderHottestFallback, self).preprocess(query_train_table) self.hottest_recommender.preprocess(query_train_table) def recommend(self, query): recommendations = super(KeywordRecommenderHottestFallback, self).recommend(query) num_rec = len(recommendations) if num_rec == self.limit: return recommendations # ask HottestRecommender for more # note that create list in order not to break HottestRecommender.recommend_list hot_recommendations = self.hottest_recommender.recommend(query)[:self.limit-num_rec] # ensure hot_recommendations's weight is no greater than any from keyword recommendations max_hot_rec_weight = hot_recommendations[0][1] min_key_rec_weight = recommendations[-1][1] if num_rec > 0 else max_hot_rec_weight recommendations.extend((t[0], 1.0*min_key_rec_weight*t[1]/max_hot_rec_weight) for t in hot_recommendations) return recommendations from operator import mul def product(numbers): return reduce(mul, numbers) class LinearSequenceKeywordRecommender(KeywordRecommender): """A tentative method using heuristic information of sequence distribution.""" def _heuristic_weight(self, sequence): #return -math.log(abs(sequence-26)+1, 2)/8.0 + 1.125 return -math.log(abs(sequence-3)+1, 2)/8.0 + 1.125 def get_browse_count(self, sequences): seqs = sequences.split(',') #return sum(self._heuristic_weight(int(seq)) for seq in seqs) #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * math.log(len(seqs)) return product(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs) #return sum(self._heuristic_weight(int(seq)) for seq in seqs) * len(seqs) def __str__(self): return 'Linear Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit) class WeightedSequenceRelevanceMixin(object): @timeit def _measure_relevance(self): # calculate keyword-product relevance all_product_number = self.dbm.get_value('SELECT COUNT(DISTINCT product_name) FROM query_product') for keyword, count in self.keyword_count.iteritems(): self.dbm.insert('INSERT INTO keyword (keyword, count) VALUES (%s, %s)', (keyword, count)) related_product_number = len(self.keyword_product_count[keyword].keys()) related_product_count = sum(self.keyword_product_count[keyword].values()) for product, count in self.keyword_product_count[keyword].iteritems(): related_keyword_number = len(self.product_keyword_count[product].keys()) related_keyword_count = sum(self.product_keyword_count[product].values()) # get average sequence from database # TODO: very inefficient, get a group all average sequences for a keyword at once #avg_sequence = self.dbm.get_value('select avg(sequence) from query_product where product_name = %s AND query_id in (select query_id from keyword_query where keyword = %s)', (product, keyword)) avg_sequence = 1 relevance = self.rm.get_relevance(count, (related_product_number, 
related_product_count), (related_keyword_number, related_keyword_count), all_product_number, avg_sequence) # sub-class can override sequence_weight # relevance *= self.sequence_weight(avg_sequence) self.dbm.insert('INSERT INTO keyword_product_weight (keyword, product, weight) VALUES (%s, %s, %s)', (keyword, product, relevance)) def sequence_weight(self, avg_sequence): return 1 # ensure WSRM._measure_relevance will be called with putting it before KeywordRecommender # ref: http://python-history.blogspot.com/2010/06/method-resolution-order.html class SequenceKeywordRecommender(WeightedSequenceRelevanceMixin, LinearSequenceKeywordRecommender): """This recommender weights browse count by distribution of sequence.""" @timeit def preprocess(self, query_train_table): # first, get sequence distribution max_occurrence = self.dbm.get_value('SELECT MAX(c) FROM (SELECT sequence, COUNT(sequence) c FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence) T' % query_train_table) self.sequence_dist = {row['sequence']: float(row['ratio']) for row in self.dbm.get_rows('SELECT sequence, COUNT(sequence)/%d ratio FROM query_product WHERE query_id IN (SELECT id FROM %s) GROUP BY sequence' % (max_occurrence,query_train_table))} self.pivot_seq = max(self.sequence_dist.iteritems(), key=lambda t:t[1])[0] # then, call KeywordRecommender's preprocess super(SequenceKeywordRecommender, self).preprocess(query_train_table) def _heuristic_weight(self, sequence): weight = self.sequence_dist[sequence] if self.pivot_seq-sequence < 0: return weight return 1 + weight def __str__(self): return 'Sequenced Keyword Recommender with %s[N=%d]' % (self.rm, self.limit) class RelevanceMeasure(object): """Defines the RelevanceMeasure interface.""" def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args): """ @param count number of times the keyword visit the product @param related_product_info the tuple (related_product_number, related_product_count) @param related_keyword_info the tuple (related_keyword_number, related_keyword_count) @param all_product_number number of all products """ raise NotImplemented class BCMeasure(RelevanceMeasure): def get_relevance(self, count, *ignored): return count def __str__(self): return 'BC' class BCIPFMeasure(RelevanceMeasure): def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args): ipf = math.log(1.0 * all_product_number / related_product_info[0]) return count * ipf def __str__(self): return 'BC-IPF' class BFIPFMeasure(RelevanceMeasure): def get_relevance(self, count, related_product_info, related_keyword_info, all_product_number, *args): bf = 1.0 * count / related_keyword_info[1] ipf = math.log(1.0 * all_product_number / related_product_info[0]) return bf * ipf def __str__(self): return 'BF-IPF' if __name__ == '__main__': import config from database import DatabaseManager from word_segment import SpaceWordSegmenter dbm = DatabaseManager(config.DB_HOST, config.DB_USER, config.DB_PASSWORD, config.DB_NAME) try: word_segmenter = SpaceWordSegmenter() rmeasure = BCIPFMeasure() recommender = KeywordRecommender(10, dbm, word_segmenter, rmeasure) recommender.preprocess('query_train') finally: dbm.close()
UTF-8
Python
false
false
2,013
2,302,102,501,858
e1736db4ea74d81da3a493c4a91cdb9c41759ac6
1e04894995fcee6e296cf45909c224f75db7676f
/custom_comments/models.py
50f90c6a8d9f8ff6badf6db37d571ca03a8385fc
[]
no_license
minrivertea/chinanews
https://github.com/minrivertea/chinanews
bdb8b61dc1ebc61149ab22bf65e09db49421d716
f70d7cb769c3d4541b98eb14491c273cec77045b
refs/heads/master
2016-09-16T14:52:09.292931
2011-12-12T10:44:56
2011-12-12T10:44:56
2,842,670
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.db import models
from django.contrib.comments.models import Comment
from users.models import Person

class CommentWithVote(Comment):
    votes = models.PositiveIntegerField(default=0)
    voters = models.ManyToManyField(Person)
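A hedged usage sketch; person is assumed to be an existing users.models.Person instance, and the pk is illustrative:

comment = CommentWithVote.objects.get(pk=1)
if not comment.voters.filter(pk=person.pk).exists():
    comment.voters.add(person)   # record the voter to prevent double voting
    comment.votes += 1
    comment.save()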
UTF-8
Python
false
false
2,011
9,594,956,959,187
5aa274f0688ef1b16558046be8f26bc8f547bdff
04830630f4042d47ec243b8c78c8a194c45cb5c8
/mysqlsub/utils.py
c51497d5b28630124d339d05b663d8eae9d6c8ba
[]
no_license
yelu/mysqlsub
https://github.com/yelu/mysqlsub
b13f7a9c1c29d0dfc210bd1890229d09e6ef2268
16697b698be9bcd9e56c6069b026542333dd5b28
refs/heads/master
2021-01-20T11:11:35.375739
2013-03-24T13:31:02
2013-03-24T13:31:02
7,915,225
0
1
null
false
2020-07-24T05:59:59
2013-01-30T13:39:06
2013-12-09T13:49:12
2013-03-24T13:31:13
208
2
1
1
Python
false
false
'''
Created on 2013-3-1

@author: yelu01
'''

from mysql.connector.utils import *
import struct
import datetime
import decimal


def read_signed_int(buf, size, big_endian=True):
    """Read a signed integer of `size` bytes.

    Returns a tuple (truncated buffer, int).
    """
    if big_endian:
        endian = '>'
    else:
        endian = '<'
    if size == 1:
        res = struct.unpack(endian + 'b', buf[:1])[0]
        return (buf[1:], res)
    elif size == 2:
        res = struct.unpack(endian + 'h', buf[:2])[0]
        return (buf[2:], res)
    elif size == 3:
        a, b, c = struct.unpack("BBB", buf[:3])
        if big_endian:
            res = (a << 16) + (b << 8) + c
        else:
            res = a + (b << 8) + (c << 16)
        # sign-extend the 24-bit value
        if res >= 0x800000:
            res -= 0x1000000
        return (buf[3:], res)
    elif size == 4:
        res = struct.unpack(endian + 'i', buf[:4])[0]
        return (buf[4:], res)
    elif size == 8:
        res = struct.unpack(endian + 'q', buf[:8])[0]
        return (buf[8:], res)
    else:
        raise ValueError("unsupported size: %d" % size)


def read_unsigned_int(buf, size, big_endian=False):
    """Read an unsigned integer of `size` bytes.

    Returns a tuple (truncated buffer, int).
    """
    if big_endian:
        endian = '>'
    else:
        endian = '<'
    if size == 1:
        res = struct.unpack(endian + 'B', buf[:1])[0]
        return (buf[1:], res)
    elif size == 2:
        res = struct.unpack(endian + 'H', buf[:2])[0]
        return (buf[2:], res)
    elif size == 3:
        a, b, c = struct.unpack("BBB", buf[:3])
        if big_endian:
            res = (a << 16) + (b << 8) + c
        else:
            res = a + (b << 8) + (c << 16)
        return (buf[3:], res)
    elif size == 4:
        res = struct.unpack(endian + 'I', buf[:4])[0]
        return (buf[4:], res)
    elif size == 8:
        res = struct.unpack(endian + 'Q', buf[:8])[0]
        return (buf[8:], res)
    else:
        raise ValueError("unsupported size: %d" % size)


def read_float(buf):
    res = struct.unpack("<f", buf[:4])[0]
    return (buf[4:], res)


def read_double(buf):
    res = struct.unpack("<d", buf[:8])[0]
    return (buf[8:], res)


def read_lc_pascal_string(buf, size):
    '''Read a length-coded (Pascal-style) string: the string is preceded
    by `size` bytes holding its length.'''
    head, length = read_unsigned_int(buf, size)
    head, res = read_bytes(head, length)
    return (head, res)


def read_lc_pascal_string_decoded(buf, size, charset):
    '''Read a length-coded (Pascal-style) string and decode it with `charset`.'''
    head, res = read_lc_pascal_string(buf, size)
    res = res.decode(charset)
    return (head, res)


def read_bits(head, bytes, bits):
    """Read a MySQL BIT value as a string of '0'/'1' characters."""
    resp = ""
    for byte in range(0, bytes):
        current_byte = ""
        head, data = read_unsigned_int(head, 1)
        if byte == 0:
            if bytes == 1:
                end = bits
            else:
                end = bits % 8
                if end == 0:
                    end = 8
        else:
            end = 8
        for bit in range(0, end):
            if data & (1 << bit):
                current_byte += "1"
            else:
                current_byte += "0"
        resp += current_byte[::-1]
    return (head, resp)


def read_datetime(head):
    head, value = read_unsigned_int(head, 8)
    date = value / 1000000
    time = value % 1000000
    date = datetime.datetime(
        year=int(date / 10000),
        month=int((date % 10000) / 100),
        day=int(date % 100),
        hour=int(time / 10000),
        minute=int((time % 10000) / 100),
        second=int(time % 100))
    return (head, date)


def read_time(head):
    head, time = read_unsigned_int(head, 3)
    date = datetime.time(
        hour=int(time / 10000),
        minute=int((time % 10000) / 100),
        second=int(time % 100))
    return (head, date)


def read_date(head):
    head, time = read_unsigned_int(head, 3)
    date = datetime.date(
        year=(time & (((1 << 15) - 1) << 9)) >> 9,
        month=(time & (((1 << 4) - 1) << 5)) >> 5,
        day=(time & ((1 << 5) - 1))
    )
    return (head, date)


def read_new_decimal(head, precision, decimals):
    '''Read MySQL's new decimal format, introduced in MySQL 5.'''
    # https://github.com/jeremycole/mysql_binlog/blob/master/lib/mysql_binlog/binlog_field_parser.rb
    digits_per_integer = 9
    compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]
    integral = (precision - decimals)
    uncomp_integral = int(integral / digits_per_integer)
    uncomp_fractional = int(decimals / digits_per_integer)
    comp_integral = integral - (uncomp_integral * digits_per_integer)
    comp_fractional = decimals - (uncomp_fractional * digits_per_integer)

    # Support negative numbers.
    # The sign is encoded in the high bit of the first byte,
    # and that bit is also used in the value itself.
    value = struct.unpack('B', head[:1])[0]
    res, mask = ["", 0] if (value & 0x80 != 0) else ["-", -1]
    head = struct.pack('B', value ^ 0x80) + head[1:]

    size = compressed_bytes[comp_integral]
    if size > 0:
        head, value = read_unsigned_int(head, size, True)
        res += str(value ^ mask)

    for i in range(0, uncomp_integral):
        head, value = read_signed_int(head, 4, True)
        res += str(value ^ mask)

    res += "."

    for i in range(0, uncomp_fractional):
        head, value = read_signed_int(head, 4, True)
        res += str(value ^ mask)

    size = compressed_bytes[comp_fractional]
    if size > 0:
        head, value = read_signed_int(head, size, True)
        res += str(value ^ mask)

    return decimal.Decimal(res)
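The date bit layout that read_date unpacks (day in bits 0-4, month in bits 5-8, year from bit 9 up) can be sanity-checked in isolation:

# pack 2013-03-24 into the 3-byte date layout and unpack it again
packed = (2013 << 9) | (3 << 5) | 24
assert ((packed >> 9), (packed >> 5) & 0xF, packed & 0x1F) == (2013, 3, 24)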
UTF-8
Python
false
false
2,013
10,565,619,574,107
60463f787f344fe460f55d89eeb31e531aef667c
646849715e3042a1e9b718b7ab2f2fdfe3560140
/Code/pyuppaal/tests/ulp/test_basic.py
55bb48b25de6ca4babc3c7b857c236b654b6dad7
[ "GPL-3.0-only" ]
non_permissive
jlandersen/sw10
https://github.com/jlandersen/sw10
45af85426b06ebdcc03a03a06e16c729eea13feb
8624f40dbc62304b01b190ba79a6c0e818e9d1e2
refs/heads/master
2020-05-31T01:46:01.478127
2013-06-25T19:39:26
2013-06-25T19:39:26
8,028,829
0
1
null
false
2013-06-12T14:10:15
2013-02-05T12:44:03
2013-06-12T14:10:14
2013-06-12T14:10:14
5,412
null
1
0
Java
null
null
#!/usr/bin/python import sys import os import unittest from pyuppaal.ulp import lexer, parser, expressionParser, node class TestBasicParsing(unittest.TestCase): def test_parse_declarations(self): test_file = open(os.path.join(os.path.dirname(__file__), 'test_simple_declarations.txt'), "r") lex = lexer.lexer pars = parser.Parser(test_file.read(), lex) res = pars.AST.children #pars.AST.visit() declvisitor = parser.DeclVisitor(pars) self.assertEqual(declvisitor.variables, [('a', 'TypeInt', [], 0), ('b', 'TypeBool', [], False), ('b1', 'TypeBool', [], False), ('b2', 'TypeBool', [], False)]) self.assertEqual(len(declvisitor.clocks), 1) self.assertEqual(declvisitor.clocks[0][0], 'c') self.assertEqual(declvisitor.channels, [('d', [])]) self.assertEqual(declvisitor.urgent_channels, [('e', [])]) self.assertEqual(declvisitor.broadcast_channels, [('f', [])]) self.assertEqual(declvisitor.urgent_broadcast_channels, [('g', [])]) def test_parse_declarations2(self): test_file = open(os.path.join(os.path.dirname(__file__), 'test_simple_declarations2.txt'), "r") lex = lexer.lexer pars = parser.Parser(test_file.read(), lex) res = pars.AST.children pars.AST.visit() declvisitor = parser.DeclVisitor(pars) self.assertEqual(res[7].type, 'VarDecl') self.assertEqual(res[7].leaf.type, 'TypeInt') self.assertEqual(res[7].children[0].type, 'Identifier') self.assertEqual(res[7].children[0].leaf, 'lalala') self.assertEqual(res[7].children[0].children[0].type, 'Assignment') self.assertEqual(res[7].children[0].children[0].leaf.type, 'Identifier') self.assertEqual(res[7].children[0].children[0].leaf.leaf, 'lalala') self.assertEqual(res[12].type, 'VarDecl') self.assertEqual(res[12].leaf.type, 'TypeBool') self.assertEqual(res[12].children[0].type, 'Identifier') self.assertEqual(res[12].children[0].leaf, 'msg') self.assertEqual(res[12].children[0].children[0].type, 'Index') self.assertEqual(res[12].children[0].children[1].type, 'Index') self.assertEqual(declvisitor.variables[0], ('L', 'TypeInt', [], 0)) #self.assertEqual(declvisitor.variables[1], ('lalala', 'int', [], _)) self.assertEqual(declvisitor.variables[1][0], 'lalala') self.assertEqual(declvisitor.variables[1][1], 'TypeInt') self.assertEqual(declvisitor.variables[1][2], []) self.assertEqual(declvisitor.variables[1][3].type, 'Expression') self.assertEqual(declvisitor.variables[1][3].children[0].type, 'Number') self.assertEqual(declvisitor.variables[1][3].children[0].leaf, 3) self.assertEqual(declvisitor.variables[3][0], 'lock') self.assertEqual(declvisitor.variables[3][1], 'TypeBool') self.assertEqual(declvisitor.variables[3][2], []) self.assertEqual(declvisitor.variables[3][3].type, 'Expression') self.assertEqual(declvisitor.variables[3][3].children[0].type, 'False') self.assertEqual(declvisitor.variables[4][0], 'lock2') self.assertEqual(declvisitor.variables[4][1], 'TypeBool') self.assertEqual(declvisitor.variables[4][2], []) self.assertEqual(declvisitor.variables[4][3].children[0].type, 'True') self.assertEqual(declvisitor.clocks, [('time', 10), ('y1', 10), ('y2', 10), ('y3', 10), ('y4', 10)]) self.assertEqual(declvisitor.channels, [('take', []), ('release', [])]) def test_parse_empty_query(self): lex = lexer.lexer pars = parser.Parser("", lex) self.assertEqual(len(pars.AST.children), 0) def test_parse_array(self): test_file = open(os.path.join(os.path.dirname(__file__), 'test_array.txt'), "r") lex = lexer.lexer pars = parser.Parser(test_file.read(), lex) self.assertEqual(len(pars.AST.children), 7) #TODO add more asserts res = pars.AST.children #pars.AST.visit() 
        self.assertEqual(res[0].children[0].children[0].type, "Index")
        self.assertEqual(res[1].children[0].children[0].type, "Index")
        self.assertEqual(res[2].children[0].children[0].type, "Index")
        self.assertEqual(res[3].children[0].children[0].type, "Index")
        self.assertEqual(res[4].children[0].children[0].type, "Index")
        self.assertEqual(res[6].children[0].children[0].type, "Index")
        self.assertEqual(res[6].children[0].children[1].type, "Index")

        #mchro 07-04-2011: don't allow empty brackets, it's not a valid expression
        #myParser = testParser(lexer.lexer)
        #res = myParser.parse("a[]")
        #self.assertEqual(res.type, "Identifier")
        #self.assertEqual(len(res.children), 0)

    def test_struct(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_struct.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(len(pars.AST.children), 1)
        #TODO add more asserts

    def test_parse_typedef_simple(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_typedef_simple.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        pars.AST.visit()
        self.assertEqual(len(pars.AST.children), 4)
        self.assertEqual(pars.AST.type, "RootNode")
        self.assertEqual(pars.AST.children[0].type, "NodeTypedef")
        self.assertEqual(pars.AST.children[0].leaf, "id_t")
        self.assertEqual(pars.AST.children[0].children[0].type, "TypeInt")
        self.assertEqual(pars.AST.children[1].type, "NodeTypedef")
        self.assertEqual(pars.AST.children[1].leaf, "id_t")
        self.assertEqual(pars.AST.children[1].children[0].type, "TypeInt")
        self.assertEqual(pars.AST.children[1].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].leaf, 0)
        self.assertEqual(pars.AST.children[1].children[0].children[1].type, "Expression")
        self.assertEqual(pars.AST.children[1].children[0].children[1].children[0].leaf, 4)
        self.assertEqual(pars.AST.children[2].type, "NodeTypedef")
        self.assertEqual(pars.AST.children[2].leaf, "id_t")
        self.assertEqual(pars.AST.children[2].children[0].type, "TypeInt")
        self.assertEqual(pars.AST.children[2].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[2].children[0].children[1].type, "Expression")
        self.assertEqual(pars.AST.children[2].children[0].children[1].children[0].leaf, 4)
        self.assertEqual(pars.AST.children[3].type, "NodeTypedef")
        self.assertEqual(pars.AST.children[3].leaf, "id_t")
        self.assertEqual(pars.AST.children[3].children[0].type, "TypeInt")
        self.assertEqual(pars.AST.children[3].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[3].children[0].children[1].type, "Expression")
        self.assertEqual(pars.AST.children[3].children[0].children[1].children[0].leaf, 4)
        self.assertEqual(len(pars.typedefDict), 1)
        self.assertTrue('id_t' in pars.typedefDict)

    def test_parse_typedef(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_typedef.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        pars.AST.visit()
        self.assertEqual(len(pars.AST.children), 8)
        self.assertEqual(len(pars.typedefDict), 4)
        self.assertTrue('myStructType' in pars.typedefDict)
        self.assertTrue('adr' in pars.typedefDict)
        self.assertTrue('DBMClock' in pars.typedefDict)
        self.assertTrue('clock' in pars.typedefDict)
        ctype = pars.typedefDict['clock']
        self.assertEqual(ctype.type, 'NodeTypedef')
        self.assertEqual(ctype.leaf, 'clock')
        self.assertEqual(len(ctype.children), 1)
        self.assertEqual(ctype.children[0], pars.typedefDict['DBMClock'])
        declvisitor = parser.DeclVisitor(pars)
        #XXX parses too deeply into structs!
        self.assertEqual(len(declvisitor.variables), 5)
        pars.AST.visit()
        print declvisitor.variables
        varnames = [x for (x, _, _, _) in declvisitor.variables]
        self.assertTrue('m' in varnames)
        self.assertTrue(('m', 'myStructType', [], None) in declvisitor.variables)
        self.assertTrue('n' in varnames)
        self.assertTrue(('n', 'adr', [], None) in declvisitor.variables)
        self.assertTrue('n2' in varnames)
        for (x, _, _, initval) in declvisitor.variables:
            if x == "n2":
                self.assertEqual(initval.type, "Expression")
                self.assertEqual(initval.children[0].type, "Number")
                self.assertEqual(initval.children[0].leaf, 3)
        self.assertTrue('c' in varnames)
        self.assertTrue(('c', 'DBMClock', [], None) in declvisitor.variables)
        #XXX parses too deeply into structs!
        #self.assertFalse('a' in varnames)

    def test_parse_brackets(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_brackets.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)

    def test_comments(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_comments.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(pars.AST.type, "RootNode")
        self.assertEqual(pars.AST.children[0].type, "VarDecl")
        self.assertEqual(pars.AST.children[1].type, "Function")
        self.assertEqual(pars.AST.children[1].children[0].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].type, "Divide")
        self.assertEqual(len(pars.AST.children), 2)

    def test_operators(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_operators.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(pars.AST.type, "RootNode")
        self.assertEqual(pars.AST.children[0].type, "VarDecl")
        self.assertEqual(pars.AST.children[1].type, "Function")
        self.assertEqual(pars.AST.children[1].children[0].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[1].children[0].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[1].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[1].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[1].children[1].children[0].children[0].type, "Minus")
        self.assertEqual(pars.AST.children[1].children[2].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[2].children[0].children[0].type, "Times")
        self.assertEqual(pars.AST.children[1].children[3].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[3].children[0].children[0].type, "Divide")
        self.assertEqual(pars.AST.children[1].children[4].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[4].children[0].children[0].type, "UnaryMinus")
        self.assertEqual(pars.AST.children[1].children[5].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[5].children[0].children[0].type, "Minus")
        self.assertEqual(pars.AST.children[1].children[5].children[0].children[0].children[0].type, "UnaryMinus")
        self.assertEqual(pars.AST.children[1].children[6].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[6].children[0].children[0].type, "Minus")
        self.assertEqual(pars.AST.children[1].children[6].children[0].children[0].children[0].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[1].children[7].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[7].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[7].children[0].children[0].children[0].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[1].children[8].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[8].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[8].children[0].children[0].children[0].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[1].children[9].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].children[0].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[1].children[9].children[0].children[0].children[1].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[1].children[10].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].children[0].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[1].children[10].children[0].children[0].children[1].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[1].children[11].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[11].children[0].children[0].type, "Minus")
        self.assertEqual(pars.AST.children[1].children[11].children[0].children[0].children[0].type, "MinusMinusPost")
        self.assertEqual(pars.AST.children[1].children[12].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].type, "Minus")
        self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].children[0].type, "MinusMinusPost")
        self.assertEqual(pars.AST.children[1].children[12].children[0].children[0].children[1].type, "MinusMinusPre")
        self.assertEqual(pars.AST.children[1].children[13].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[13].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[13].children[0].children[0].children[0].type, "MinusMinusPost")
        self.assertEqual(pars.AST.children[1].children[14].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[14].children[0].children[0].type, "Plus")
        self.assertEqual(pars.AST.children[1].children[14].children[0].children[0].children[0].type, "MinusMinusPre")
        self.assertEqual(pars.AST.children[1].children[15].type, "Assignment")
        self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].type, "Modulo")
        self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[0].type, "Identifier")
        self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[0].leaf, "a")
        self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[1].type, "Identifier")
        self.assertEqual(pars.AST.children[1].children[15].children[0].children[0].children[1].leaf, "a")
        #TODO add more operators
        pars.AST.visit()
        self.assertEqual(len(pars.AST.children), 2)

    def test_parse_assignments(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_assignments.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(pars.AST.type, "RootNode")
        self.assertEqual(pars.AST.children[0].type, "VarDecl")
        self.assertEqual(pars.AST.children[1].type, "VarDecl")
        self.assertEqual(pars.AST.children[2].type, "Function")
        self.assertEqual(pars.AST.children[2].children[0].type, "Assignment")
        self.assertEqual(pars.AST.children[2].children[0].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[2].children[0].children[0].children[0].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[2].children[1].type, "Assignment")
        self.assertEqual(pars.AST.children[2].children[1].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[2].children[1].children[0].children[0].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[2].children[2].type, "Assignment")
        self.assertEqual(pars.AST.children[2].children[2].children[0].type, "Expression")
        self.assertEqual(pars.AST.children[2].children[2].children[0].children[0].type, "MinusMinusPre")
        self.assertEqual(pars.AST.children[2].children[3].type, "Assignment")
        self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].type, "Times")
        self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].children[0].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[2].children[3].children[0].children[0].children[1].type, "PlusPlusPost")
        self.assertEqual(pars.AST.children[2].children[4].type, "Assignment")
        self.assertEqual(pars.AST.children[2].children[4].children[0].children[0].type, "Times")
        self.assertEqual(pars.AST.children[2].children[4].children[0].children[0].children[0].type, "Times")
        self.assertEqual(pars.AST.children[2].children[4].children[0].
                         children[0].children[0].children[0].type, "PlusPlusPre")
        self.assertEqual(pars.AST.children[2].children[4].children[0].
                         children[0].children[0].children[1].type, "PlusPlusPost")
        self.assertEqual(len(pars.AST.children), 3)

    def test_parse_for_loop(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_for_loop.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(len(pars.AST.children), 1)
        #TODO add more asserts

    def test_parse_while_loop(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_while_loop.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(len(pars.AST.children), 1)
        #TODO add more asserts

    def test_parse_do_while_loop(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_do_while_loop.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(len(pars.AST.children), 1)
        #TODO add more asserts

    def test_parse_simple_function(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_simple_function.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        self.assertEqual(len(pars.AST.children), 3)
        #TODO add more asserts

    def test_parse_expression(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("")  #should not fail
        self.assertFalse(res)
        res = parser.parse(" ")  #should not fail
        self.assertFalse(res)

        res = parser.parse("5")
        self.assertEqual(res.type, "Number")
        self.assertEqual(res.leaf, 5)

        res = parser.parse("5 > 5")
        self.assertEqual(res.type, "Greater")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 5)

        res = parser.parse("5 != 5")
        #res.visit()
        self.assertEqual(res.type, "NotEqual")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 5)

        res = parser.parse("!True")
        self.assertEqual(res.type, "UnaryNot")
        self.assertEqual(res.children[0].type, 'True')

        res = parser.parse("5 && 4")
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 4)

        res = parser.parse("5 and 4")
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 4)

        res = parser.parse("!(5 && 4)")
        self.assertEqual(res.type, "UnaryNot")
        self.assertEqual(res.children[0].type, "And")
        self.assertEqual(res.children[0].children[0].type, "Number")
        self.assertEqual(res.children[0].children[0].leaf, 5)
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 4)

        res = parser.parse("not (5 && 4)")
        self.assertEqual(res.type, "UnaryNot")
        self.assertEqual(res.children[0].type, "And")
        self.assertEqual(res.children[0].children[0].type, "Number")
        self.assertEqual(res.children[0].children[0].leaf, 5)
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 4)

        res = parser.parse("5 || 4")
        self.assertEqual(res.type, "Or")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 4)

        res = parser.parse("5 or 4")
        self.assertEqual(res.type, "Or")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 5)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 4)

        res = parser.parse("5 < 5 and 4 > 3")
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "Less")
        self.assertEqual(res.children[0].children[0].type, "Number")
        self.assertEqual(res.children[0].children[0].leaf, 5)
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 5)

        res = parser.parse("3 * 2 + 4")
        self.assertEqual(res.type, "Plus")
        self.assertEqual(res.children[0].type, "Times")
        self.assertEqual(res.children[0].children[0].type, "Number")
        self.assertEqual(res.children[0].children[0].leaf, 3)
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 2)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 4)

        res = parser.parse("Viking1.safe and Viking2.safe")  #TODO add struct support
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "Viking1")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, "safe")
        self.assertEqual(res.children[1].type, "Identifier")
        self.assertEqual(res.children[1].leaf, "Viking2")
        self.assertEqual(res.children[1].children[0].type, "Identifier")
        self.assertEqual(res.children[1].children[0].leaf, "safe")

        res = parser.parse(
            "Viking1.safe and Viking2.safe and Viking3.safe and Viking4.safe")
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "And")
        self.assertEqual(res.children[1].type, "Identifier")
        self.assertEqual(res.children[1].leaf, "Viking4")
        self.assertEqual(res.children[1].children[0].type, "Identifier")
        self.assertEqual(res.children[1].children[0].leaf, "safe")
        self.assertEqual(res.children[0].children[0].type, "And")
        self.assertEqual(res.children[0].children[1].type, "Identifier")
        self.assertEqual(res.children[0].children[1].leaf, "Viking3")
        self.assertEqual(res.children[0].children[1].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[1].children[0].leaf, "safe")
        self.assertEqual(res.children[0].children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].children[0].leaf, "Viking1")
        self.assertEqual(res.children[0].children[0].children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].children[0].children[0].leaf, "safe")
        self.assertEqual(res.children[0].children[0].children[1].type, "Identifier")
        self.assertEqual(res.children[0].children[0].children[1].leaf, "Viking2")
        self.assertEqual(res.children[0].children[0].children[1].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].children[1].children[0].leaf, "safe")

        res = parser.parse("N - 1")
        self.assertEqual(res.type, "Minus")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, 'N')
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 1)

        res = parser.parse("f() == 2")
        self.assertEqual(res.type, "Equal")
        self.assertEqual(res.children[0].type, "FunctionCall")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, "f")
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 2)

        res = parser.parse("dbm.isEmpty()")
        self.assertEqual(res.type, "FunctionCall")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "dbm")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, "isEmpty")

    def test_parse_expression2(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("(N - 0 - 1)")
        self.assertEqual(res.type, "Minus")
        self.assertEqual(res.children[0].type, "Minus")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, 'N')
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 1)

        res = parser.parse("-42")
        self.assertEqual(res.type, "UnaryMinus")
        self.assertEqual(res.children[0].type, "Number")
        self.assertEqual(res.children[0].leaf, 42)

        res = parser.parse("-(42+1)")
        self.assertEqual(res.type, "UnaryMinus")
        self.assertEqual(res.children[0].type, "Plus")
        self.assertEqual(res.children[0].children[0].type, "Number")
        self.assertEqual(res.children[0].children[0].leaf, 42)
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 1)

        res = parser.parse("N- 0- 1")
        self.assertEqual(res.type, "Minus")
        self.assertEqual(res.children[0].type, "Minus")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, 'N')
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 1)

        res = parser.parse("N-0-1")
        self.assertEqual(res.type, "Minus")
        self.assertEqual(res.children[0].type, "Minus")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, 'N')
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 1)

        res = parser.parse("(x == 5 && y == 4)")
        self.assertEqual(res.type, "And")
        self.assertEqual(res.children[0].type, "Equal")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, 'x')
        self.assertEqual(res.children[0].children[1].type, "Number")
        self.assertEqual(res.children[0].children[1].leaf, 5)
        self.assertEqual(res.children[1].children[0].type, "Identifier")
        self.assertEqual(res.children[1].children[0].leaf, 'y')
        self.assertEqual(res.children[1].children[1].type, "Number")
        self.assertEqual(res.children[1].children[1].leaf, 4)

        res = parser.parse("True")
        self.assertEqual(res.type, "True")
        res = parser.parse("true")
        res.visit()
        self.assertEqual(res.type, "True")

        res = parser.parse("x[0][1] == True")
        self.assertEqual(res.type, "Equal")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, 'x')
        self.assertEqual(res.children[0].children[0].type, "Index")
        self.assertEqual(res.children[0].children[0].leaf.type, 'Number')
        self.assertEqual(res.children[0].children[0].leaf.leaf, 0)
        self.assertEqual(res.children[0].children[1].type, "Index")
        self.assertEqual(res.children[0].children[1].leaf.type, 'Number')
        self.assertEqual(res.children[0].children[1].leaf.leaf, 1)
        self.assertEqual(res.children[1].type, "True")

        res = parser.parse("msg[ 0 ][ N - 0 - 1 ] == True")
        self.assertEqual(res.type, "Equal")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, 'msg')
        self.assertEqual(res.children[0].children[0].type, "Index")
        self.assertEqual(res.children[0].children[0].leaf.type, 'Number')
        self.assertEqual(res.children[0].children[0].leaf.leaf, 0)
        self.assertEqual(res.children[0].children[1].type, "Index")
        index2 = res.children[0].children[1].leaf
        self.assertEqual(index2.type, 'Minus')
        self.assertEqual(index2.children[0].type, 'Minus')
        self.assertEqual(index2.children[0].children[0].type, 'Identifier')
        self.assertEqual(index2.children[0].children[0].leaf, 'N')
        self.assertEqual(index2.children[0].children[1].type, 'Number')
        self.assertEqual(index2.children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].type, "True")

    def test_parse_expression3(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("(x == true) && (0 > N-0-1)")
        self.assertEqual(res.type, 'And')
        self.assertEqual(len(res.children), 2)
        self.assertEqual(res.children[0].type, 'Equal')
        self.assertEqual(res.children[0].children[0].type, 'Identifier')
        self.assertEqual(res.children[0].children[0].leaf, 'x')
        self.assertEqual(res.children[0].children[1].type, 'True')
        self.assertEqual(res.children[1].type, 'Greater')
        self.assertEqual(res.children[1].children[0].type, 'Number')
        self.assertEqual(res.children[1].children[0].leaf, 0)
        self.assertEqual(res.children[1].children[1].type, 'Minus')
        self.assertEqual(res.children[1].children[1].children[0].type, 'Minus')
        self.assertEqual(res.children[1].children[1].children[0].children[0].type, 'Identifier')
        self.assertEqual(res.children[1].children[1].children[0].children[0].leaf, 'N')
        self.assertEqual(res.children[1].children[1].children[0].children[1].type, 'Number')
        self.assertEqual(res.children[1].children[1].children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].children[1].children[1].type, 'Number')
        self.assertEqual(res.children[1].children[1].children[1].leaf, 1)

        res = parser.parse("x == true && (0 > N-0-1)")
        self.assertEqual(res.type, 'And')
        self.assertEqual(len(res.children), 2)
        self.assertEqual(res.children[0].type, 'Equal')
        self.assertEqual(res.children[0].children[0].type, 'Identifier')
        self.assertEqual(res.children[0].children[0].leaf, 'x')
        self.assertEqual(res.children[0].children[1].type, 'True')
        self.assertEqual(res.children[1].type, 'Greater')
        self.assertEqual(res.children[1].children[0].type, 'Number')
        self.assertEqual(res.children[1].children[0].leaf, 0)
        self.assertEqual(res.children[1].children[1].type, 'Minus')
        self.assertEqual(res.children[1].children[1].children[0].type, 'Minus')
        self.assertEqual(res.children[1].children[1].children[0].children[0].type, 'Identifier')
        self.assertEqual(res.children[1].children[1].children[0].children[0].leaf, 'N')
        self.assertEqual(res.children[1].children[1].children[0].children[1].type, 'Number')
        self.assertEqual(res.children[1].children[1].children[0].children[1].leaf, 0)
        self.assertEqual(res.children[1].children[1].children[1].type, 'Number')
        self.assertEqual(res.children[1].children[1].children[1].leaf, 1)

    def test_parse_expression4(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("x' == 0")
        res.visit()
        self.assertEqual(res.type, 'Equal')
        self.assertEqual(res.children[0].type, 'ClockRate')
        self.assertEqual(res.children[0].leaf, 'x')
        self.assertEqual(res.children[1].type, 'Number')
        self.assertEqual(res.children[1].leaf, 0)

        res = parser.parse("y >= 5 && x' == 0")
        res.visit()
        self.assertEqual(res.type, 'And')
        self.assertEqual(len(res.children), 2)
        self.assertEqual(res.children[0].type, 'GreaterEqual')
        self.assertEqual(res.children[0].children[0].type, 'Identifier')
        self.assertEqual(res.children[0].children[0].leaf, 'y')
        self.assertEqual(res.children[0].children[1].type, 'Number')
        self.assertEqual(res.children[0].children[1].leaf, 5)
        self.assertEqual(res.children[1].type, 'Equal')
        self.assertEqual(res.children[1].children[0].type, 'ClockRate')
        self.assertEqual(res.children[1].children[0].leaf, 'x')
        self.assertEqual(res.children[1].children[1].type, 'Number')
        self.assertEqual(res.children[1].children[1].leaf, 0)

    def test_parse_func_with_params(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("ishit(4)")
        self.assertEqual(res.type, "FunctionCall")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "ishit")
        #parameters
        self.assertEqual(len(res.leaf), 1)
        self.assertEqual(res.leaf[0].type, "Number")
        self.assertEqual(res.leaf[0].leaf, 4)

        res = parser.parse("cache.ishit(4)")
        self.assertEqual(res.type, "FunctionCall")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "cache")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, "ishit")
        #parameters
        self.assertEqual(len(res.leaf), 1)
        self.assertEqual(res.leaf[0].type, "Number")
        self.assertEqual(res.leaf[0].leaf, 4)

        res = parser.parse("cache.ishit(acc)")
        self.assertEqual(res.type, "FunctionCall")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "cache")
        self.assertEqual(res.children[0].children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].leaf, "ishit")
        #parameters
        self.assertEqual(len(res.leaf), 1)
        self.assertEqual(res.leaf[0].type, "Identifier")
        self.assertEqual(res.leaf[0].leaf, "acc")

        res = parser.parse("ishit(4, 5, x, True, a.b.c)")
        res.visit()
        self.assertEqual(res.type, "FunctionCall")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, "ishit")
        #parameters
        self.assertEqual(len(res.leaf), 5)
        self.assertEqual(res.leaf[0].type, "Number")
        self.assertEqual(res.leaf[0].leaf, 4)
        self.assertEqual(res.leaf[1].type, "Number")
        self.assertEqual(res.leaf[1].leaf, 5)
        self.assertEqual(res.leaf[2].type, "Identifier")
        self.assertEqual(res.leaf[2].leaf, "x")
        self.assertEqual(res.leaf[3].type, "True")
        self.assertEqual(res.leaf[4].type, "Identifier")
        self.assertEqual(res.leaf[4].leaf, "a")
        self.assertEqual(res.leaf[4].children[0].type, "Identifier")
        self.assertEqual(res.leaf[4].children[0].leaf, "b")
        self.assertEqual(res.leaf[4].children[0].children[0].type, "Identifier")
        self.assertEqual(res.leaf[4].children[0].children[0].leaf, "c")

    def test_parse_array_index_expression(self):
        parser = testParser(lexer.lexer)
        res = parser.parse("a[1] == 2")
        #parser = testParser(lexer.lexer)
        #res = pars.parse()
        #res.visit()
        self.assertEqual(res.type, "Equal")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].children[0].type, "Index")
        self.assertEqual(res.children[0].children[0].leaf.type, "Number")
        self.assertEqual(res.children[0].children[0].leaf.leaf, 1)
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 2)

        res = parser.parse("N-1")
        self.assertEqual(res.type, "Minus")
        self.assertEqual(res.children[0].type, "Identifier")
        self.assertEqual(res.children[0].leaf, 'N')
        self.assertEqual(res.children[1].type, "Number")
        self.assertEqual(res.children[1].leaf, 1)

    def test_parse_extern(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_extern.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        res = pars.AST.children
        #pars.AST.visit()
        declvisitor = parser.DeclVisitor(pars)

    def test_parse_extern2(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_extern2.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        res = pars.AST.children
        pars.AST.visit()
        declvisitor = parser.DeclVisitor(pars)
        self.assertTrue('TestExternalLattice' in pars.externList)
        self.assertEqual(declvisitor.get_type('mylat'), 'TestExternalLattice')

    def test_parse_extern3(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_extern3.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        res = pars.AST.children
        pars.AST.visit()
        declvisitor = parser.DeclVisitor(pars)
        self.assertTrue('WideningIntRange' in pars.externList)
        self.assertEqual(declvisitor.get_type('x'), 'WideningIntRange')
        wideningIntRangeTypeNode = pars.typedefDict['WideningIntRange']
        print "typedefdict:"
        wideningIntRangeTypeNode.visit()
        self.assertEqual(wideningIntRangeTypeNode.leaf.type, "Identifier")
        self.assertEqual(wideningIntRangeTypeNode.leaf.leaf, "WideningIntRange")
        self.assertEqual(len(wideningIntRangeTypeNode.children), 1)
        self.assertEqual(wideningIntRangeTypeNode.children[0].type, 'FunctionCall')
        parameters = wideningIntRangeTypeNode.children[0].leaf
        self.assertEqual(len(parameters), 4)
        self.assertEqual(parameters[0].leaf, 1)
        self.assertEqual(parameters[1].leaf, 2)
        self.assertEqual(parameters[2].leaf, 3)
        self.assertEqual(parameters[3].leaf, 9)
        #self.assertTrue(False)

    def test_parse_extern_dbm(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_extern_dbm.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        res = pars.AST.children
        #pars.AST.visit()
        declvisitor = parser.DeclVisitor(pars)
        #print declvisitor.variables
        self.assertEqual(len(declvisitor.variables), 5)
        self.assertEqual(declvisitor.variables[0], ('dbm', 'DBMFederation', [], None))
        self.assertEqual(declvisitor.variables[1], ('dbm.x', 'DBMClock', [], None))
        self.assertEqual(declvisitor.variables[2], ('dbm.c', 'DBMClock', [], None))
        self.assertEqual(declvisitor.variables[3][0], 'dbm.y')  #('dbm.y', 'DBMClock', [10])
        self.assertEqual(declvisitor.variables[3][1], 'DBMClock')
        self.assertEqual(len(declvisitor.variables[3][2]), 1)
        self.assertEqual(declvisitor.variables[3][2][0].children[0].leaf, 10)
        self.assertEqual(declvisitor.variables[4][0], 'dbm.z')  #('dbm.z', 'DBMClock', [10, 20])
        self.assertEqual(declvisitor.variables[4][1], 'DBMClock')
        self.assertEqual(len(declvisitor.variables[4][2]), 2)
        self.assertEqual(declvisitor.variables[4][2][0].children[0].leaf, 10)
        self.assertEqual(declvisitor.variables[4][2][1].children[0].leaf, 20)

    def test_parse_constants(self):
        test_file = open(os.path.join(os.path.dirname(__file__),
                                      'test_parse_constants.txt'), "r")
        lex = lexer.lexer
        pars = parser.Parser(test_file.read(), lex)
        res = pars.AST.children
        pars.AST.visit()
        declvisitor = parser.DeclVisitor(pars)
        inorder = ["a", "b", "c", "d"]
        #should return the constants in file order
        self.assertEqual(declvisitor.constants.keys(), inorder)


#TODO clean this up a bit
class myToken:
    type = None

    def __init__(self, type):
        self.type = type


class testParser:
    currentToken = None
    lex = None

    def __init__(self, lexer):
        self.lex = lexer

    def parse(self, text):
        self.lex.input(text)
        self.currentToken = self.lex.token()
        exParser = expressionParser.ExpressionParser(self.lex, self)
        return exParser.parse()

    def parseNumber(self):
        n = node.Node('Number', [], self.currentToken.value)
        self.accept('NUMBER')
        return n

    def parseIdentifierComplex(self):
        n = node.Node('Identifier', [], self.currentToken.value)
        self.accept('IDENTIFIER')
        p = n
        #chain dotted accesses as nested Identifier nodes: a.b.c => a -> b -> c
        while self.currentToken.type == 'DOT':
            self.accept('DOT')
            element = node.Node('Identifier', [], self.currentToken.value)
            self.accept('IDENTIFIER')
            p.children = [element]
            p = element
        return n

    def accept(self, expectedTokenType):
        if self.currentToken.type == expectedTokenType:
            self.currentToken = self.lex.token()
            if self.currentToken is None:
                t = myToken('UNKNOWN')
                self.currentToken = t
        else:
            self.error('at token %s on line %d: Expected %s but was %s' %
                       (self.currentToken.value, self.currentToken.lineno,
                        expectedTokenType, self.currentToken.type))

    def error(self, msg):
        raise Exception("Parser error " + msg)


if __name__ == '__main__':
    unittest.main()
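# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the testParser
# helper above is enough to parse a standalone expression and walk the AST
# interactively.  Only names already used by the tests (`lexer`, `type`,
# `leaf`, `children`) are assumed here.
#
#   p = testParser(lexer.lexer)
#   ast = p.parse("3 * 2 + 4")
#   print ast.type                  # "Plus" -- lowest-precedence operator on top
#   print ast.children[0].type      # "Times" binds tighter, so it sits below
#   print ast.children[1].leaf      # 4
# ---------------------------------------------------------------------------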
UTF-8
Python
false
false
2,013
3,959,959,863,001
647044170106f0a9c3f6d9261fb50edb471e0d64
7b9e0f3d6be02ff0f2be23e9944441b4ca68751a
/ass4/project/utils.py
03be7a7f6bed01d284c0cf2369e4678430544ed6
[]
no_license
darora/cs3245
https://github.com/darora/cs3245
a8a0bc4feba4b820ae114c853b4f0f4ad848e8ab
cd8428de243baf0a987821c73f5d304be59e2697
refs/heads/master
2020-05-15T03:04:23.286813
2013-03-29T15:43:55
2013-03-29T15:43:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from contextlib import contextmanager

__author__ = 'darora'


@contextmanager
def ignored(*exceptions):
    try:
        yield
    except exceptions:
        pass
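# Illustrative usage sketch (not part of the original file): `ignored`
# replaces a try/except/pass block; only the exception types passed in are
# suppressed, anything else still propagates.
#
#   import os
#   with ignored(OSError):
#       os.remove('does_not_exist.tmp')   # OSError is swallowed silently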
UTF-8
Python
false
false
2,013
11,390,253,279,109
fe65cd8f3b93f98ea1b0e5274dc20803bd8bef14
12bcd942e8523212132cbae470f0d6cdf192664b
/dbhelper.py
2eac9e82e805a8520c65cf71e418b68ca40d39e1
[]
no_license
icbmike/magicTracker
https://github.com/icbmike/magicTracker
db69bde44e263926e3b20ffc5f47b74d53c1bb29
53ec5013f5f8984f19e89f4f3e63d8763ddfd185
refs/heads/master
2020-05-03T17:11:48.652845
2013-06-30T05:11:25
2013-06-30T05:11:25
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sqlite3

instance = None


def get_DBHelper_instance():
    class DBHelper(object):
        def __init__(self):
            #open the connection
            conn = sqlite3.connect('magic.db')
            #create the tables if they don't exist
            cursor = conn.cursor()
            cursor.execute("""CREATE TABLE IF NOT EXISTS decks (
                                  name TEXT,
                                  version INTEGER,
                                  color TEXT,
                                  creator TEXT,
                                  wins INTEGER,
                                  losses INTEGER,
                                  PRIMARY KEY (name, version));""")
            cursor.execute("""CREATE TABLE IF NOT EXISTS games (
                                  dateTime DATETIME PRIMARY KEY,
                                  winningPlayer TEXT,
                                  winningDeckName TEXT,
                                  winningDeckVersion INTEGER,
                                  winningMulligans INTEGER,
                                  losingPlayer TEXT,
                                  losingDeckName TEXT,
                                  losingDeckVersion INTEGER,
                                  losingMulligans INTEGER
                              );""")
            conn.commit()
            conn.close()

        def addDeck(self, name, version, color, creator):
            #open the connection
            with sqlite3.connect('magic.db') as conn:
                cursor = conn.cursor()
                cursor.execute("""INSERT INTO decks (
                                      name, version, color, creator, wins, losses
                                  ) VALUES (?, ?, ?, ?, 0, 0);""",
                               (name, version, color, creator))
                conn.commit()

    global instance
    if instance is None:
        instance = DBHelper()
    return instance
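# Illustrative usage sketch (not part of the original file): the module is
# meant to be used through the lazy singleton accessor; the deck values are
# made up for the example.
#
#   helper = get_DBHelper_instance()
#   helper.addDeck('Burn', 1, 'Red', 'mike')
#   assert helper is get_DBHelper_instance()   # the same instance is reused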
UTF-8
Python
false
false
2,013
1,726,576,862,282
f4f4e838de265a12b14adad8ee4fd531e1c780ce
a744c3a78f6b625c4052162132351966d878d54e
/pybo.wsgi
2b4a8c7b3c14030fb4aec254abf2b717f9e6227e
[]
no_license
lksh/pybo
https://github.com/lksh/pybo
58eb17d6e4fb0d3fe69a29e2f700935667f87881
a9f9e3054bbdfe19fc68d86c26be6f33392b2fa1
refs/heads/master
2016-03-30T07:06:23.522052
2012-06-08T10:44:17
2012-06-08T10:44:17
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
import sys

path = '/var/www'
if path not in sys.path:
    sys.path.append(path)
sys.path.append('/var/www/pybo')

os.environ['DJANGO_SETTINGS_MODULE'] = 'pybo.settings'

import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
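# Illustrative local smoke test (not part of the original file): the same
# `application` object can be served by the stdlib WSGI server instead of
# mod_wsgi, assuming the Django settings above import cleanly.
#
#   from wsgiref.simple_server import make_server
#   make_server('127.0.0.1', 8000, application).serve_forever()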
UTF-8
Python
false
false
2,012
8,985,071,621,829
de486564463a83fcfc71edccc9df3c20d6f91cb1
a911232b8fc7592c25128877408741519bfeda74
/engine/storage.py
22e5e3295294441d2a2eea0faae0619d0a1f105d
[ "GPL-3.0-only" ]
non_permissive
lotem/rime.py
https://github.com/lotem/rime.py
355c09fee0f4649432f17f1cea52ca04c6498a57
4f6c532f3311d9494e74b905dee6872068c23449
refs/heads/master
2021-01-22T07:39:14.089224
2011-06-18T07:34:36
2011-06-18T07:34:36
367,290
5
2
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:

import os
import sqlite3
import sys
import time


def debug(*what):
    print >> sys.stderr, u'[DEBUG]: ', u' '.join(map(unicode, what))

# sql for global tables

INIT_ZIME_DB_SQL = """
CREATE TABLE IF NOT EXISTS setting_paths (
    id INTEGER PRIMARY KEY,
    path TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS setting_values (
    path_id INTEGER,
    value TEXT
);
CREATE TABLE IF NOT EXISTS phrases (
    id INTEGER PRIMARY KEY,
    phrase TEXT UNIQUE
);
"""

QUERY_SETTING_SQL = """
SELECT value FROM setting_values
WHERE path_id IN (SELECT id FROM setting_paths WHERE path = :path);
"""

QUERY_SETTING_ITEMS_SQL = """
SELECT path, value FROM setting_paths, setting_values
WHERE path LIKE :pattern AND id = path_id;
"""

QUERY_SETTING_PATH_SQL = """
SELECT id FROM setting_paths WHERE path = :path;
"""

ADD_SETTING_PATH_SQL = """
INSERT INTO setting_paths VALUES (NULL, :path);
"""

ADD_SETTING_VALUE_SQL = """
INSERT INTO setting_values VALUES (:path_id, :value);
"""

UPDATE_SETTING_VALUE_SQL = """
UPDATE setting_values SET value = :value WHERE path_id == :path_id;
"""

CLEAR_SETTING_VALUE_SQL = """
DELETE FROM setting_values
WHERE path_id IN (SELECT id FROM setting_paths WHERE path LIKE :path);
"""

CLEAR_SETTING_PATH_SQL = """
DELETE FROM setting_paths WHERE path LIKE :path;
"""

QUERY_SCHEMA_LIST_SQL = """
SELECT substr(path, length('SchemaList/') + 1), value
FROM setting_paths p LEFT JOIN setting_values v ON p.id = v.path_id
WHERE path LIKE 'SchemaList/%';
"""

QUERY_DICT_PREFIX_SQL = """
SELECT substr(path, 1, length(path) - length('/Dict')), value
FROM setting_paths p LEFT JOIN setting_values v ON p.id = v.path_id
WHERE path LIKE '%/Dict';
"""

QUERY_PHRASE_SQL = """
SELECT id FROM phrases WHERE phrase = :phrase;
"""

ADD_PHRASE_SQL = """
INSERT INTO phrases VALUES (NULL, :phrase);
"""

# dict specific sql

CREATE_DICT_SQL = """
CREATE TABLE IF NOT EXISTS %(prefix)s_keywords (
    keyword TEXT
);
CREATE TABLE IF NOT EXISTS %(prefix)s_keys (
    id INTEGER PRIMARY KEY,
    ikey TEXT UNIQUE
);
CREATE TABLE IF NOT EXISTS %(prefix)s_stats (
    sfreq INTEGER,
    ufreq INTEGER
);
INSERT INTO %(prefix)s_stats VALUES (0, 0);
CREATE TABLE IF NOT EXISTS %(prefix)s_unigram (
    id INTEGER PRIMARY KEY,
    p_id INTEGER,
    okey TEXT,
    sfreq INTEGER,
    ufreq INTEGER
);
CREATE UNIQUE INDEX IF NOT EXISTS %(prefix)s_entry_idx
ON %(prefix)s_unigram (p_id, okey);
CREATE TABLE IF NOT EXISTS %(prefix)s_ku (
    k_id INTEGER,
    u_id INTEGER,
    PRIMARY KEY (k_id, u_id)
);
CREATE TABLE IF NOT EXISTS %(prefix)s_bigram (
    e1 INTEGER,
    e2 INTEGER,
    bfreq INTEGER,
    PRIMARY KEY (e1, e2)
);
CREATE TABLE IF NOT EXISTS %(prefix)s_kb (
    k_id INTEGER,
    b_id INTEGER,
    PRIMARY KEY (k_id, b_id)
);
"""

DROP_DICT_SQL = """
DROP TABLE IF EXISTS %(prefix)s_keywords;
DROP TABLE IF EXISTS %(prefix)s_keys;
DROP TABLE IF EXISTS %(prefix)s_stats;
DROP INDEX IF EXISTS %(prefix)s_entry_idx;
DROP TABLE IF EXISTS %(prefix)s_unigram;
DROP TABLE IF EXISTS %(prefix)s_ku;
DROP TABLE IF EXISTS %(prefix)s_bigram;
DROP TABLE IF EXISTS %(prefix)s_kb;
"""

LIST_KEYWORDS_SQL = """
SELECT keyword FROM %(prefix)s_keywords;
"""

QUERY_KEY_SQL = """
SELECT id FROM %(prefix)s_keys WHERE ikey = :ikey;
"""

ADD_KEY_SQL = """
INSERT INTO %(prefix)s_keys VALUES (NULL, :ikey);
"""

QUERY_STATS_SQL = """
SELECT sfreq + ufreq AS freq, ufreq FROM %(prefix)s_stats;
"""

UPDATE_SFREQ_TOTAL_SQL = """
UPDATE %(prefix)s_stats SET sfreq = sfreq + :n;
"""

UPDATE_UFREQ_TOTAL_SQL = """
UPDATE %(prefix)s_stats SET ufreq = ufreq + :n;
"""

QUERY_UNIGRAM_SQL = """
SELECT phrase, okey, u.id, sfreq + ufreq AS freq, ufreq
FROM %(prefix)s_unigram u, %(prefix)s_ku ku, %(prefix)s_keys k, phrases p
WHERE ikey = :ikey AND k.id = k_id AND u_id = u.id AND p_id = p.id
ORDER BY freq DESC;
"""

UNIGRAM_EXIST_SQL = """
SELECT id FROM %(prefix)s_unigram WHERE p_id = :p_id AND okey = :okey;
"""

ADD_UNIGRAM_SQL = """
INSERT INTO %(prefix)s_unigram VALUES (NULL, :p_id, :okey, :freq, 0);
"""

INC_SFREQ_SQL = """
UPDATE %(prefix)s_unigram SET sfreq = sfreq + :freq WHERE id = :id;
"""

INC_UFREQ_SQL = """
UPDATE %(prefix)s_unigram SET ufreq = ufreq + :freq WHERE id = :id;
"""

QUERY_BIGRAM_SQL = """
SELECT e1, e2, bfreq AS freq
FROM %(prefix)s_bigram b, %(prefix)s_kb kb, %(prefix)s_keys k
WHERE ikey = :ikey AND k.id = k_id AND b_id = b.rowid
ORDER BY freq;
"""

QUERY_BIGRAM_BY_ENTRY_SQL = """
SELECT e2, bfreq FROM %(prefix)s_bigram WHERE e1 = :e1;
"""

BIGRAM_EXIST_SQL = """
SELECT rowid FROM %(prefix)s_bigram WHERE e1 = :e1 AND e2 = :e2;
"""

ADD_BIGRAM_SQL = """
INSERT INTO %(prefix)s_bigram VALUES (:e1, :e2, 1);
"""

INC_BFREQ_SQL = """
UPDATE %(prefix)s_bigram SET bfreq = bfreq + :freq WHERE e1 = :e1 AND e2 = :e2;
"""

QUERY_KB_SQL = """
SELECT rowid FROM %(prefix)s_kb WHERE k_id = :k_id AND b_id = :b_id;
"""

ADD_KB_SQL = """
INSERT INTO %(prefix)s_kb VALUES (:k_id, :b_id);
"""

ADD_KEYWORD_SQL = """
INSERT INTO %(prefix)s_keywords VALUES (:keyword);
"""

ADD_KU_SQL = """
INSERT INTO %(prefix)s_ku VALUES (:k_id, :u_id);
"""

QUERY_USER_FREQ_SQL = """
SELECT phrase, ufreq, okey
FROM %(prefix)s_unigram u LEFT JOIN phrases p ON p_id = p.id
WHERE ufreq > 0
"""

QUERY_USER_GRAM_SQL = """
SELECT p1.phrase, p2.phrase, bfreq, u1.okey, u2.okey
FROM %(prefix)s_bigram b,
     %(prefix)s_unigram u1 LEFT JOIN phrases p1 ON u1.p_id = p1.id,
     %(prefix)s_unigram u2 LEFT JOIN phrases p2 ON u2.p_id = p2.id
WHERE e1 = u1.id AND e2 = u2.id AND bfreq > 0
"""

UPDATE_USER_FREQ_SQL = """
UPDATE OR IGNORE %(prefix)s_unigram SET ufreq = ufreq + :freq
WHERE p_id IN (SELECT id FROM phrases WHERE phrase = :phrase) AND okey = :okey;
"""


def _generate_dict_specific_sql(db, prefix_args):
    db._create_dict_sql = CREATE_DICT_SQL % prefix_args
    db._drop_dict_sql = DROP_DICT_SQL % prefix_args
    db._list_keywords_sql = LIST_KEYWORDS_SQL % prefix_args
    db._add_keyword_sql = ADD_KEYWORD_SQL % prefix_args
    db._query_key_sql = QUERY_KEY_SQL % prefix_args
    db._add_key_sql = ADD_KEY_SQL % prefix_args
    db._query_stats_sql = QUERY_STATS_SQL % prefix_args
    db._update_ufreq_total_sql = UPDATE_UFREQ_TOTAL_SQL % prefix_args
    db._update_sfreq_total_sql = UPDATE_SFREQ_TOTAL_SQL % prefix_args
    db._query_unigram_sql = QUERY_UNIGRAM_SQL % prefix_args
    db._unigram_exist_sql = UNIGRAM_EXIST_SQL % prefix_args
    db._add_unigram_sql = ADD_UNIGRAM_SQL % prefix_args
    db._inc_sfreq_sql = INC_SFREQ_SQL % prefix_args
    db._inc_ufreq_sql = INC_UFREQ_SQL % prefix_args
    db._add_ku_sql = ADD_KU_SQL % prefix_args
    db._query_bigram_sql = QUERY_BIGRAM_SQL % prefix_args
    db._query_bigram_by_entry_sql = QUERY_BIGRAM_BY_ENTRY_SQL % prefix_args
    db._bigram_exist_sql = BIGRAM_EXIST_SQL % prefix_args
    db._add_bigram_sql = ADD_BIGRAM_SQL % prefix_args
    db._inc_bfreq_sql = INC_BFREQ_SQL % prefix_args
    db._query_kb_sql = QUERY_KB_SQL % prefix_args
    db._add_kb_sql = ADD_KB_SQL % prefix_args
    db._query_user_freq_sql = QUERY_USER_FREQ_SQL % prefix_args
    db._query_user_gram_sql = QUERY_USER_GRAM_SQL % prefix_args
    db._update_user_freq_sql = UPDATE_USER_FREQ_SQL % prefix_args


class DB:

    UNIG_LIMIT = 1000
    BIG_LIMIT = 50
    FLUSH_INTERVAL = 2 * 60  # 2 minutes

    __last_flush_time = 0
    __conn = None

    @classmethod
    def open(cls, db_file, read_only=False):
        #debug('opening db file:', db_file)
        if cls.__conn:
            return
        cls.__conn = sqlite3.connect(db_file)
        cls.read_only = read_only
        if not read_only:
            cls.__conn.executescript(INIT_ZIME_DB_SQL)
            cls.flush(True)

    @classmethod
    def read_setting(cls, key):
        r = cls.__conn.execute(QUERY_SETTING_SQL, {'path': key}).fetchone()
        return r[0] if r else None

    @classmethod
    def read_setting_list(cls, key):
        r = cls.__conn.execute(QUERY_SETTING_SQL, {'path': key}).fetchall()
        return [x[0] for x in r]

    @classmethod
    def read_setting_items(cls, key):
        r = cls.__conn.execute(QUERY_SETTING_ITEMS_SQL,
                               {'pattern': key + '%'}).fetchall()
        return [(x[0][len(key):], x[1]) for x in r]

    @classmethod
    def add_setting(cls, key, value):
        if cls.read_only:
            return False
        path_id = cls.__get_or_insert_setting_path(key)
        args = {'path_id': path_id, 'value': value}
        cls.__conn.execute(ADD_SETTING_VALUE_SQL, args)
        return True

    @classmethod
    def update_setting(cls, key, value):
        if cls.read_only:
            return False
        path_id = cls.__get_or_insert_setting_path(key)
        args = {'path_id': path_id, 'value': value}
        if cls.read_setting(key) is None:
            cls.__conn.execute(ADD_SETTING_VALUE_SQL, args)
        else:
            cls.__conn.execute(UPDATE_SETTING_VALUE_SQL, args)
        return True

    @classmethod
    def __get_or_insert_setting_path(cls, path):
        cur = cls.__conn.cursor()
        args = {'path': path}
        r = cur.execute(QUERY_SETTING_PATH_SQL, args).fetchone()
        if r:
            return r[0]
        else:
            cur.execute(ADD_SETTING_PATH_SQL, args)
            return cur.lastrowid

    @classmethod
    def clear_setting(cls, path):
        cur = cls.__conn.cursor()
        cur.execute(CLEAR_SETTING_VALUE_SQL, {'path': path})
        cur.execute(CLEAR_SETTING_PATH_SQL, {'path': path})

    @classmethod
    def flush(cls, immediate=False):
        now = time.time()
        if immediate or now - cls.__last_flush_time > cls.FLUSH_INTERVAL:
            cls.__conn.commit()
            cls.__last_flush_time = now

    def __init__(self, name):
        self.__name = name
        self.__section = '%s/' % name
        prefix_args = {'prefix': self.read_config_value('Dict')}
        _generate_dict_specific_sql(self, prefix_args)
        # for recovery from learning accidental user input
        self.__pending_updates = []

    def recreate_tables(self):
        cur = DB.__conn.cursor()
        cur.executescript(self._drop_dict_sql)
        cur.executescript(self._create_dict_sql)

    def read_config_value(self, key):
        return DB.read_setting(self.__section + key)

    def read_config_list(self, key):
        return DB.read_setting_list(self.__section + key)

    def list_keywords(self):
        return [x[0] for x in
                DB.__conn.execute(self._list_keywords_sql, ()).fetchall()]

    def lookup_freq_total(self):
        self.proceed_pending_updates()
        r = DB.__conn.execute(self._query_stats_sql).fetchone()
        return r

    def lookup_unigram(self, key):
        #print 'lookup_unigram:', key
        args = {'ikey': key}
        r = DB.__conn.execute(self._query_unigram_sql, args).fetchmany(DB.UNIG_LIMIT)
        return r

    def lookup_bigram(self, key):
        #print 'lookup_bigram:', key
        args = {'ikey': key}
        r = DB.__conn.execute(self._query_bigram_sql, args).fetchmany(DB.BIG_LIMIT)
        return r

    def lookup_bigram_by_entry(self, e):
        #print 'lookup_bigram_by_entry:', unicode(e)
        args = {'e1': e.get_eid()}
        r = DB.__conn.execute(self._query_bigram_by_entry_sql,
                              args).fetchmany(DB.BIG_LIMIT)
        return r

    def update_freq_total(self, n):
        #print 'update_freq_total:', n
        self.__pending_updates.append(lambda: self.__update_ufreq_total(n))

    def update_unigram(self, e):
        #print 'update_unigram:', unicode(e)
        self.__pending_updates.append(lambda: self.__update_unigram(e))

    def update_bigram(self, a, b, indexer):
        #print 'update_bigram:', unicode(a), unicode(b)
        self.__pending_updates.append(lambda: self.__update_bigram(a, b, indexer))

    def proceed_pending_updates(self):
        if self.__pending_updates:
            for f in self.__pending_updates:
                f()
            self.__pending_updates = []

    def cancel_pending_updates(self):
        if self.__pending_updates:
            self.__pending_updates = []

    def __update_ufreq_total(self, n):
        if DB.read_only:
            return
        args = {'n': n}
        DB.__conn.execute(self._update_ufreq_total_sql, args)
        DB.flush()

    def __update_unigram(self, e):
        if DB.read_only:
            return
        args = {'id': e.get_eid(), 'freq': 1}
        DB.__conn.execute(self._inc_ufreq_sql, args)

    def __update_bigram(self, a, b, indexer):
        if DB.read_only:
            return
        cur = DB.__conn.cursor()
        args = {'e1': a.get_eid(), 'e2': b.get_eid(), 'freq': 1}
        if cur.execute(self._bigram_exist_sql, args).fetchone():
            cur.execute(self._inc_bfreq_sql, args)
        else:
            cur.execute(self._add_bigram_sql, args)
        # generate ikey-bigram index
        b_id = cur.execute(self._bigram_exist_sql, args).fetchone()[0]
        okey = u' '.join([a.get_okey(), b.get_okey()])
        k_ids = [self.__get_or_insert_key(k) for k in indexer(okey)]
        for k_id in k_ids:
            self.__add_kb(k_id, b_id)

    def __get_or_insert_key(self, key):
        cur = DB.__conn.cursor()
        args = {'ikey': key}
        r = None
        while not r:
            r = cur.execute(self._query_key_sql, args).fetchone()
            if not r:
                cur.execute(self._add_key_sql, args)
        return r[0]

    def __add_kb(self, k_id, b_id):
        args = {'k_id': k_id, 'b_id': b_id}
        if not DB.__conn.execute(self._query_kb_sql, args).fetchone():
            DB.__conn.execute(self._add_kb_sql, args)

    # used by zimedb-admin.py

    @classmethod
    def get_schema_list(cls):
        schema_list = DB.__conn.execute(QUERY_SCHEMA_LIST_SQL).fetchall()
        return schema_list

    @classmethod
    def get_installed_dicts(cls):
        prefixes = DB.__conn.execute(QUERY_DICT_PREFIX_SQL).fetchall()
        return prefixes

    def drop_tables(self, compact=False):
        DB.__conn.executescript(self._drop_dict_sql)

    @classmethod
    def compact(cls):
        DB.__conn.execute("""VACUUM;""")

    def add_keywords(self, keywords):
        args = [{'keyword': k} for k in keywords]
        DB.__conn.executemany(self._add_keyword_sql, args)

    def add_phrases(self, phrase_table, indexer, reporter=None):
        '''Batch-add phrases and build the key index with the given indexer.'''
        # first pass: look up phrase ids, insert phrases that are missing
        phrase_id = dict()
        missing_phrases = set()
        for (k, freq) in phrase_table:
            phrase = k[0]
            p_id = self.__get_phrase_id(phrase)
            if p_id:
                phrase_id[phrase] = p_id
            else:
                missing_phrases.add(phrase)
        if missing_phrases:
            table = [{'phrase': p} for p in missing_phrases]
            DB.__conn.executemany(ADD_PHRASE_SQL, table)
            for phrase in missing_phrases:
                p_id = self.__get_phrase_id(phrase)
                if p_id:
                    phrase_id[phrase] = p_id
        # second pass: accumulate frequencies and create unigrams
        unigram_freq = dict()
        total = 0
        for (k, freq) in phrase_table:
            if k in unigram_freq:
                unigram_freq[k] += freq
            else:
                unigram_freq[k] = freq
            total += freq
        increment = list()
        missing_unigrams = set()
        for (phrase, okey), freq in unigram_freq.iteritems():
            p_id = phrase_id[phrase]
            u_id = self.__get_unigram_id(p_id, okey)
            if u_id:
                # existing unigram: accumulate its frequency
                if freq > 0:
                    increment.append({'id': u_id, 'freq': freq})
            else:
                missing_unigrams.add((phrase, okey))
            if reporter:
                reporter(phrase, okey)
        if missing_unigrams:
            table = [{'p_id': phrase_id[k[0]], 'okey': k[1], 'freq': unigram_freq[k]}
                     for k in missing_unigrams]
            DB.__conn.executemany(self._add_unigram_sql, table)
        if increment:
            DB.__conn.executemany(self._inc_sfreq_sql, increment)
        if total > 0:
            self.__inc_freq_total(total)
        # build the ikey index
        key_id = dict()
        missing_keys = set()
        missing_ku_links = set()
        for (phrase, okey) in missing_unigrams:
            u_id = self.__get_unigram_id(phrase_id[phrase], okey)
            if not u_id:
                # shouldn't happen!
                continue
            for key in indexer(okey):
                missing_ku_links.add((key, u_id))
                k_id = self.__get_key_id(key)
                if k_id:
                    key_id[key] = k_id
                else:
                    missing_keys.add(key)
        if missing_keys:
            table = [{'ikey': k} for k in missing_keys]
            DB.__conn.executemany(self._add_key_sql, table)
            for key in missing_keys:
                k_id = self.__get_key_id(key)
                if k_id:
                    key_id[key] = k_id
        if missing_ku_links:
            table = [{'k_id': key_id[k], 'u_id': u} for (k, u) in missing_ku_links]
            DB.__conn.executemany(self._add_ku_sql, table)

    def __get_phrase_id(self, phrase):
        args = {'phrase': phrase}
        r = DB.__conn.execute(QUERY_PHRASE_SQL, args).fetchone()
        return r[0] if r else None

    def __get_key_id(self, key):
        args = {'ikey': key}
        r = DB.__conn.execute(self._query_key_sql, args).fetchone()
        return r[0] if r else None

    def __get_unigram_id(self, p_id, okey):
        args = {'p_id': p_id, 'okey': okey}
        r = DB.__conn.execute(self._unigram_exist_sql, args).fetchone()
        return r[0] if r else None

    def __inc_freq_total(self, n):
        args = {'n': n}
        DB.__conn.execute(self._update_sfreq_total_sql, args)

    def dump_user_freq(self):
        return DB.__conn.execute(self._query_user_freq_sql).fetchall()

    def dump_user_gram(self):
        return DB.__conn.execute(self._query_user_gram_sql).fetchall()

    def restore_user_freq(self, freq_table):
        cur = DB.__conn.cursor()
        unigram_freq = dict()
        for (u, n) in freq_table:
            if u in unigram_freq:
                unigram_freq[u] += n
            else:
                unigram_freq[u] = n
        table = list()
        total_increment = 0
        for (phrase, okey), n in unigram_freq.iteritems():
            table.append({'phrase': phrase, 'okey': okey, 'freq': n})
            total_increment += n
        cur.executemany(self._update_user_freq_sql, table)
        if total_increment > 0:
            cur.execute(self._update_ufreq_total_sql, {'n': total_increment})

    def restore_user_gram(self, freq_table, indexer):
        cur = DB.__conn.cursor()
        bigram_freq = dict()
        for (a, b, n) in freq_table:
            k = (a, b)
            if k in bigram_freq:
                bigram_freq[k] += n
            else:
                bigram_freq[k] = n
        missing = list()
        increment = list()
        for ((phrase1, okey1), (phrase2, okey2)), n in bigram_freq.iteritems():
            p1 = self.__get_phrase_id(phrase1)
            if not p1:
                continue
            e1 = self.__get_unigram_id(p1, okey1)
            if not e1:
                continue
            p2 = self.__get_phrase_id(phrase2)
            if not p2:
                continue
            e2 = self.__get_unigram_id(p2, okey2)
            if not e2:
                continue
            args = {'e1': e1, 'e2': e2, 'freq': n,
                    'okey': u' '.join([okey1, okey2])}
            if cur.execute(self._bigram_exist_sql, args).fetchone():
                increment.append(args)
            else:
                missing.append(args)
        cur.executemany(self._inc_bfreq_sql, increment)
        cur.executemany(self._add_bigram_sql, missing)
        # generate ikey-bigram index
        for args in missing:
            b_id = cur.execute(self._bigram_exist_sql, args).fetchone()[0]
            k_ids = [self.__get_or_insert_key(k) for k in indexer(args['okey'])]
            for k_id in k_ids:
                self.__add_kb(k_id, b_id)
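# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the schema name
# 'Pinyin' and the db path are hypothetical).  All connections go through the
# class-level DB.open(); per-schema instances only add the table prefix read
# from the '<schema>/Dict' setting.
#
#   DB.open('/path/to/zime.db')
#   db = DB('Pinyin')
#   total_freq, user_freq = db.lookup_freq_total()
#   rows = db.lookup_unigram(u'ni hao')   # (phrase, okey, id, freq, ufreq) rows
# ---------------------------------------------------------------------------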
UTF-8
Python
false
false
2,011
18,279,380,837,036
91b639d79ce4dbdb97b612eb3a75f83e74cea2f6
bd9dcfb3fecce1786470570866cd527bd8016f66
/export_layers/pylibgimpplugin/pylibgimp.py
0ec90fb256710c3e680fd051104124c093694eed
[ "GPL-3.0-only" ]
non_permissive
ncornette/gimp-plugin-export-layers
https://github.com/ncornette/gimp-plugin-export-layers
c96812cfb0736695002623cd435be76262896dcc
5cb46039c164c9796b4bb9f6ad5e3ea59a7a1f80
refs/heads/master
2021-01-23T15:42:20.363509
2014-11-10T22:55:31
2014-11-10T22:55:31
26,461,215
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#-------------------------------------------------------------------------------
#
# This file is part of pylibgimpplugin.
#
# Copyright (C) 2014 khalim19 <[email protected]>
#
# pylibgimpplugin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pylibgimpplugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pylibgimpplugin.  If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------

"""
This module defines functions dealing with GIMP objects (images, layers, etc.)
not defined in the Python API for GIMP plug-ins.
"""

#===============================================================================

from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

str = unicode

#===============================================================================

from contextlib import contextmanager

import gimp
import gimpenums

#===============================================================================

pdb = gimp.pdb

#===============================================================================
# Functions
#===============================================================================

@contextmanager
def undo_group(image):
    """
    Wrap the enclosing block of code into one GIMP undo group for the
    specified image.

    Use this function as a context manager:

      with undo_group(image):
        # do stuff
    """
    pdb.gimp_image_undo_group_start(image)
    try:
        yield
    finally:
        pdb.gimp_image_undo_group_end(image)


def merge_layer_group(image, layer_group):
    """
    Merge layers in the specified layer group belonging to the specified image
    into one layer.

    This function can handle both top-level and nested layer groups.
    """
    if not pdb.gimp_item_is_group(layer_group):
        raise TypeError("not a layer group")

    with undo_group(image):
        orig_parent_and_pos = ()
        if layer_group.parent is not None:
            # Nested layer group
            orig_parent_and_pos = (layer_group.parent,
                                   pdb.gimp_image_get_item_position(image, layer_group))
            pdb.gimp_image_reorder_item(image, layer_group, None, 0)

        orig_layer_visibility = [layer.visible for layer in image.layers]
        for layer in image.layers:
            layer.visible = False
        layer_group.visible = True

        merged_layer_group = pdb.gimp_image_merge_visible_layers(
            image, gimpenums.EXPAND_AS_NECESSARY)

        for layer, orig_visible in zip(image.layers, orig_layer_visibility):
            layer.visible = orig_visible

        if orig_parent_and_pos:
            pdb.gimp_image_reorder_item(image, merged_layer_group,
                                        orig_parent_and_pos[0],
                                        orig_parent_and_pos[1])

    return merged_layer_group


def is_layer_inside_image(image, layer):
    """
    Return True if the layer is inside the image canvas (partially or
    completely).  Return False if the layer is completely outside the image
    canvas.
    """
    return ((-image.width < layer.offsets[0] < image.width) and
            (-image.height < layer.offsets[1] < image.height))


def remove_all_layers(image):
    """
    Remove all layers from the specified image.
    """
    for layer in image.layers:
        pdb.gimp_image_remove_layer(image, layer)


def remove_all_channels(image):
    """
    Remove all channels from the specified image.
    """
    for channel in image.channels:
        pdb.gimp_image_remove_channel(image, channel)


def remove_all_paths(image):
    """
    Remove all paths (vectors) from the specified image.
    """
    for path in image.vectors:
        pdb.gimp_image_remove_vectors(image, path)


def remove_all_items(image):
    """
    Remove all items (layers, channels, paths) from the specified image.
    """
    remove_all_layers(image)
    remove_all_channels(image)
    remove_all_paths(image)


def duplicate(image, remove_items=False):
    """
    Duplicate the specified image.

    If `remove_items` is true, remove all items (layers, channels, paths) from
    the duplicated image.
    """
    image_new = pdb.gimp_image_duplicate(image)
    if remove_items:
        remove_all_items(image_new)
    return image_new
UTF-8
Python
false
false
2,014