\")\n\n\t\t\telif self.path == \"/compressed/deflate\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Encoding', 'deflate')\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\n\t\t\t\tinb = b\"Root OK?\"\n\t\t\t\tcobj = zlib.compressobj(wbits=-zlib.MAX_WBITS)\n\t\t\t\tt1 = cobj.compress(inb) + cobj.flush()\n\t\t\t\tself.wfile.write(t1)\n\n\t\t\telif self.path == \"/compressed/gzip\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Encoding', 'gzip')\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(gzip.compress(b\"Root OK?\"))\n\n\t\t\telif self.path == \"/json/invalid\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT\")\n\n\t\t\telif self.path == \"/json/valid\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b'{\"oh\" : \"hai\"}')\n\n\t\t\telif self.path == \"/json/no-coding\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b'{\"oh\" : \"hai\"}')\n\n\t\t\telif self.path == \"/filename/path-only.txt\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename/path-only-trailing-slash/\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename/content-disposition\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=lolercoaster.txt\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/path-only.txt\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/content-disposition\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=lolercoaster.txt\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/content-disposition-html-suffix\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=lolercoaster.html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/content-disposition-quotes-1\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename='lolercoaster.html'\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/content-disposition-quotes-2\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=\\'lolercoaster.html\\'\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/content-disposition-quotes-spaces-1\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename='loler coaster.html'\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == 
\"/filename_mime/content-disposition-quotes-spaces-2\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=\\\"loler coaster.html\\\"\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/filename_mime/explicit-html-mime\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-Disposition', \"filename=lolercoaster.html\")\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"LOLWAT?\")\n\n\t\t\telif self.path == \"/redirect/bad-1\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/redirect/bad-2\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tself.send_header('location', \"bad-2\")\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/redirect/bad-3\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tself.send_header('location', \"gopher://www.google.com\")\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/redirect/from-1\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tself.send_header('location', \"to-1\")\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/redirect/to-1\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"Redirect-To-1\")\n\n\t\t\telif self.path == \"/redirect/from-2\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tself.send_header('uri', \"to-2\")\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/redirect/to-2\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"Redirect-To-2\")\n\n\t\t\telif self.path == \"/redirect/from-3\":\n\t\t\t\tself.send_response(302)\n\t\t\t\tnewurl = \"http://{}:{}\".format(self.server.server_address[0], self.server.server_address[1])\n\t\t\t\tself.send_header('uri', newurl)\n\t\t\t\tself.end_headers()\n\n\t\t\telif self.path == \"/password/expect\":\n\t\t\t\t# print(\"Password\")\n\t\t\t\t# print(self.headers)\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\n\t\t\t\tif not 'Authorization' in self.headers:\n\t\t\t\t\tself.wfile.write(b\"Password not sent!!\")\n\t\t\t\t\treturn\n\n\t\t\t\tval = self.headers['Authorization']\n\t\t\t\tpassval = val.split(\" \")[-1]\n\t\t\t\tpassstr = base64.b64decode(passval)\n\n\t\t\t\tif passstr == b'lol:wat':\n\t\t\t\t\tself.wfile.write(b\"Password Ok?\")\n\t\t\t\telse:\n\t\t\t\t\tself.wfile.write(b\"Password Bad!\")\n\n\t\t\telif self.path == \"/content/have-title\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"I can haz title?This page has a title!\")\n\n\t\t\telif self.path == \"/content/no-title\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"This page has no title. 
Sadface.jpg\")\n\n\t\t\telif self.path == \"/binary_ctnt\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"image/jpeg\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"Binary!\\x00\\x01\\x02\\x03\")\n\n\t\t\telif self.path == \"/binary_ctnt\":\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"image/jpeg\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"Binary!\\x00\\x01\\x02\\x03\")\n\n\t\t\t##################################################################################################################################\n\t\t\t# Cookie stuff\n\t\t\t##################################################################################################################################\n\n\t\t\telif self.path == '/cookie_test':\n\t\t\t\tcook = cookies.SimpleCookie()\n\t\t\t\tcook['cookie_test_key'] = cookie_key\n\t\t\t\tcook['cookie_test_key']['path'] = \"/\"\n\t\t\t\tcook['cookie_test_key']['domain'] = \"\"\n\t\t\t\texpiration = datetime.datetime.now() + datetime.timedelta(days=30)\n\t\t\t\tcook['cookie_test_key'][\"expires\"] = expiration.strftime(\"%a, %d-%b-%Y %H:%M:%S PST\")\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\n\t\t\t\tself.send_header('Set-Cookie', cook['cookie_test_key'].OutputString())\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"CF Cookie Test\")\n\n\t\t\telif self.path == '/cookie_require':\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\t\t\t\t\tif cook_key == 'cookie_test_key' and cook_value == cookie_key:\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"Cookie forwarded properly!\")\n\t\t\t\t\t\treturn\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(b\"Cookie is missing\")\n\n\n\n\t\t\t##################################################################################################################################\n\t\t\t# Sucuri validation\n\t\t\t##################################################################################################################################\n\n\n\n\n\n\t\t\telif self.path == '/sucuri_shit_3':\n\t\t\t\t# I'd like to get this down to just 2 requests (cookie bounce, and fetch).\n\t\t\t\t# Doing that requires pulling html content out of chromium, though.\n\t\t\t\t# Annoying.\n\t\t\t\tnonlocal sucuri_reqs_3\n\t\t\t\tsucuri_reqs_3 += 1\n\n\t\t\t\tif sucuri_reqs_3 > 3:\n\t\t\t\t\traise RuntimeError(\"Too many requests to sucuri_shit_3 (%s)!\" % sucuri_reqs_3)\n\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\n\t\t\t\t\tif cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':\n\t\t\t\t\t\t# if cook['']\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"At target preemptive Sucuri page!Preemptive waf circumvented OK (p3)?\")\n\n\t\t\t\t\t\treturn\n\n\n\t\t\t\tcontainer_dir = os.path.dirname(__file__)\n\t\t\t\tfpath = os.path.join(container_dir, \"waf_garbage\", 'sucuri_garbage.html')\n\t\t\t\twith 
open(fpath, \"rb\") as fp:\n\t\t\t\t\tplain_contents = fp.read()\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(plain_contents)\n\n\t\t\telif self.path == '/sucuri_shit_2':\n\t\t\t\t# This particular path is the one we should already have a cookie for.\n\t\t\t\t# As such, we expect one request only\n\t\t\t\tnonlocal sucuri_reqs_2\n\t\t\t\tsucuri_reqs_2 += 1\n\n\t\t\t\tif sucuri_reqs_2 > 1:\n\t\t\t\t\traise RuntimeError(\"Too many requests to sucuri_shit_2 (%s)!\" % sucuri_reqs_2)\n\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\n\t\t\t\t\tif cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':\n\t\t\t\t\t\t# if cook['']\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"At target preemptive Sucuri page!Preemptive waf circumvented OK (p2)?\")\n\n\t\t\t\t\t\treturn\n\n\n\t\t\t\tcontainer_dir = os.path.dirname(__file__)\n\t\t\t\tfpath = os.path.join(container_dir, \"waf_garbage\", 'sucuri_garbage.html')\n\t\t\t\twith open(fpath, \"rb\") as fp:\n\t\t\t\t\tplain_contents = fp.read()\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(plain_contents)\n\n\t\t\telif self.path == '/sucuri_shit':\n\t\t\t\tnonlocal sucuri_reqs_1\n\t\t\t\tsucuri_reqs_1 += 1\n\n\t\t\t\tif sucuri_reqs_1 > 4:\n\t\t\t\t\traise RuntimeError(\"Too many requests to sucuri_shit (%s)!\" % sucuri_reqs_1)\n\n\t\t\t\t# print(\"Fetch for \", self.path)\n\t\t\t\t# print(\"Cookies:\", self.headers.get_all('Cookie', failobj=[]))\n\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\n\t\t\t\t\tif cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478':\n\t\t\t\t\t\t# if cook['']\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"At target Sucuri page!Sucuri Redirected OK?\")\n\n\t\t\t\t\t\treturn\n\n\n\t\t\t\tcontainer_dir = os.path.dirname(__file__)\n\t\t\t\tfpath = os.path.join(container_dir, \"waf_garbage\", 'sucuri_garbage.html')\n\t\t\t\twith open(fpath, \"rb\") as fp:\n\t\t\t\t\tplain_contents = fp.read()\n\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(plain_contents)\n\n\t\t\t##################################################################################################################################\n\t\t\t# Cloudflare validation\n\t\t\t##################################################################################################################################\n\n\t\t\telif self.path == '/cloudflare_under_attack_shit_2':\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\n\t\t\t\t\tif cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:\n\t\t\t\t\t\t# if 
cook['']\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"At target CF page!CF Redirected OK?\")\n\n\t\t\t\t\t\treturn\n\n\t\t\t\tcontainer_dir = os.path.dirname(__file__)\n\t\t\t\tfpath = os.path.join(container_dir, \"waf_garbage\", 'cf_js_challenge_03_12_2018.html')\n\t\t\t\twith open(fpath, \"rb\") as fp:\n\t\t\t\t\tplain_contents = fp.read()\n\n\t\t\t\tself.server_version = \"cloudflare is garbage\"\n\t\t\t\tself.send_response(503)\n\t\t\t\tself.send_header('Server', \"cloudflare is garbage\")\n\t\t\t\tself.send_header('Content-type','text/html')\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(plain_contents)\n\n\t\t\telif self.path == '/cloudflare_under_attack_shit':\n\t\t\t\tif self.headers.get_all('Cookie', failobj=[]):\n\t\t\t\t\tcook = self.headers.get_all('Cookie', failobj=[])[0]\n\n\t\t\t\t\tcook_key, cook_value = cook.split(\"=\", 1)\n\n\t\t\t\t\tif cook_key == 'cloudflare_validate_key' and cook_value == cookie_key:\n\t\t\t\t\t\t# if cook['']\n\t\t\t\t\t\tself.send_response(200)\n\t\t\t\t\t\tself.send_header('Content-type', \"text/html\")\n\t\t\t\t\t\tself.end_headers()\n\t\t\t\t\t\tself.wfile.write(b\"At target CF page!CF Redirected OK?\")\n\n\t\t\t\t\t\treturn\n\n\t\t\t\tcontainer_dir = os.path.dirname(__file__)\n\t\t\t\tfpath = os.path.join(container_dir, \"waf_garbage\", 'cf_js_challenge_03_12_2018.html')\n\t\t\t\twith open(fpath, \"rb\") as fp:\n\t\t\t\t\tplain_contents = fp.read()\n\n\t\t\t\tself.server_version = \"cloudflare is garbage\"\n\t\t\t\tself.send_response(503)\n\t\t\t\tself.send_header('Server', \"cloudflare is garbage\")\n\t\t\t\tself.send_header('Content-type','text/html')\n\t\t\t\tself.end_headers()\n\t\t\t\tself.wfile.write(plain_contents)\n\n\t\t\telif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594':\n\n\t\t\t\tcook = cookies.SimpleCookie()\n\t\t\t\tcook['cloudflare_validate_key'] = cookie_key\n\t\t\t\tcook['cloudflare_validate_key']['path'] = \"/\"\n\t\t\t\tcook['cloudflare_validate_key']['domain'] = \"\"\n\t\t\t\texpiration = datetime.datetime.now() + datetime.timedelta(days=30)\n\t\t\t\tcook['cloudflare_validate_key'][\"expires\"] = expiration.strftime(\"%a, %d-%b-%Y %H:%M:%S PST\")\n\t\t\t\tself.send_response(200)\n\t\t\t\tself.send_header('Content-type', \"text/html\")\n\n\t\t\t\tself.send_header('Set-Cookie', cook['cloudflare_validate_key'].OutputString())\n\t\t\t\tself.end_headers()\n\n\t\t\t\tbody = \"Setting cookies.\"\n\t\t\t\tself.wfile.write(body.encode(\"utf-8\"))\n\n\n\n\n\t\t\t##################################################################################################################################\n\t\t\t# Handle requests for an unknown path\n\t\t\t##################################################################################################################################\n\n\t\t\telse:\n\t\t\t\ttest_context.assertEqual(self.path, \"This shouldn't happen!\")\n\n\n\n\t\tdef do_GET(self):\n\t\t\t# Process an HTTP GET request and return a response with an HTTP 200 status.\n\t\t\tlog.info(\"Request for URL path: '%s'\", self.path)\n\t\t\t# print(\"Headers: \", self.headers)\n\t\t\t# print(\"Cookie(s): \", self.headers.get_all('Cookie', failobj=[]))\n\n\t\t\ttry:\n\t\t\t\treturn self._get_handler()\n\t\t\texcept Exception as e:\n\t\t\t\tlog.error(\"Exception in handler!\")\n\t\t\t\tfor line in 
traceback.format_exc().split(\"\\n\"):\n\t\t\t\t\tlog.error(line)\n\t\t\t\traise e\n\n\treturn MockServerRequestHandler\n\ndef get_free_port():\n\ts = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n\ts.bind(('localhost', 0))\n\taddress, port = s.getsockname()\n\ts.close()\n\treturn port\n\n\ndef start_server(assertion_class,\n\t\t\tfrom_wg,\n\t\t\tport_override = None,\n\t\t\tis_chromium = None,\n\t\t\tis_selenium_garbage_chromium = False,\n\t\t\tis_annoying_pjs = False,\n\t\t\tskip_header_checks = False\n\t\t):\n\n\t# Configure mock server.\n\tif port_override:\n\t\tmock_server_port = port_override\n\telse:\n\t\tmock_server_port = get_free_port()\n\n\texpected_headers = dict(from_wg.browserHeaders)\n\tprint(from_wg)\n\tprint(expected_headers)\n\tassert isinstance(expected_headers, dict)\n\n\tcaptured_server = capture_expected_headers(\n\t\t\texpected_headers = expected_headers,\n\t\t\ttest_context = assertion_class,\n\t\t\tis_chromium = is_chromium,\n\t\t\tis_selenium_garbage_chromium = is_selenium_garbage_chromium,\n\t\t\tis_annoying_pjs = is_annoying_pjs,\n\t\t\tskip_header_checks = skip_header_checks\n\t\t)\n\tretries = 4\n\n\tfor x in range(retries + 1):\n\t\ttry:\n\t\t\tmock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server)\n\t\t\tbreak\n\t\texcept OSError:\n\t\t\ttime.sleep(0.2)\n\t\t\tif x >= retries:\n\t\t\t\traise\n\n\t# Start running mock server in a separate thread.\n\t# Daemon threads automatically shut down when the main process exits.\n\tmock_server_thread = Thread(target=mock_server.serve_forever)\n\tmock_server_thread.setDaemon(True)\n\tmock_server_thread.start()\n\n\treturn mock_server_port, mock_server, mock_server_thread\n\n\n\nif __name__ == '__main__':\n\n\twg = WebRequest.WebGetRobust()\n\tsrv = start_server(\n\t\tassertion_class = None,\n\t\tfrom_wg = wg,\n\t\tskip_header_checks = True)\n\n\tprint(\"running server on port: \", srv)\n\twhile 1:\n\t\ttime.sleep(1)\n"},"avg_line_length":{"kind":"number","value":32.9293739968,"string":"32.929374"},"max_line_length":{"kind":"number","value":165,"string":"165"},"alphanum_fraction":{"kind":"number","value":0.6406531806,"string":"0.640653"},"count_classes":{"kind":"number","value":17801,"string":"17,801"},"score_classes":{"kind":"number","value":0.8677065561784061,"string":"0.867707"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":7644,"string":"7,644"},"score_documentation":{"kind":"number","value":0.37260541067511577,"string":"0.372605"}}},{"rowIdx":3706,"cells":{"hexsha":{"kind":"string","value":"b963e6196b8baa521ce89adb40142bf81a9183a6"},"size":{"kind":"number","value":3770,"string":"3,770"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"calcgrades.py"},"max_stars_repo_name":{"kind":"string","value":"qrowsxi/calcgrades"},"max_stars_repo_head_hexsha":{"kind":"string","value":"93c71c1afef8dde5174726ae1702b71ccba633de"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"calcgrades.py"},"max_issues_repo_name":{"kind":"string","value":"qrowsxi/calcgrades"},"max_issues_repo_head_hexsha":{"kind":"string","value":"93c71c1afef8dde5174726ae1702b71ccba633de"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"calcgrades.py"},"max_forks_repo_name":{"kind":"string","value":"qrowsxi/calcgrades"},"max_forks_repo_head_hexsha":{"kind":"string","value":"93c71c1afef8dde5174726ae1702b71ccba633de"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import csv\nimport math\nimport numpy as np\nimport pandas\nimport scipy.optimize\nimport sys\nimport argparse\n\n\ndef ineq_constraint_1(v):\n return np.array([vi for vi in v])\n\n\ndef ineq_constraint_2(v):\n return np.array([-vi + 30 for vi in v])\n\n\nclass WeightAverage:\n\n def __init__(self, mean, csv):\n self.df = pandas.read_csv(csv)\n self.course = self.df['name']\n self.expected_mean = mean\n self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]\n self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])\n self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]\n self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]\n self.tot_credits = sum(self.owned_credits) + sum(self.credits)\n\n def weight_average(self, v):\n term1 = 0\n term2 = 0\n for i in range(0, len(self.owned_grades)):\n term1 = term1 + self.owned_grades[i] * self.owned_credits[i]\n for i in range(0, len(v)):\n term2 = term2 + v[i] * self.credits[i]\n return (term1 + term2) / self.tot_credits\n\n def eq_constraint(self, v):\n return self.weight_average(v) - self.expected_mean\n\n def solve(self):\n cons = (\n {'type': 'eq', 'fun': self.eq_constraint},\n {'type': 'ineq', 'fun': ineq_constraint_1},\n {'type': 'ineq', 'fun': ineq_constraint_2})\n res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)\n if not res.success:\n return None\n return res.x\n\n\ndef error_no_solution():\n print(\"Mean not possible with current vote :(\")\n exit(0)\n\n\ndef output_result(solver, sol):\n avg = solver.weight_average(sol)\n df = solver.df\n print(f\"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110\")\n if sol is None:\n print(\"Not Possible with current grades :(\")\n exit()\n for index, row in df.query('grade > 0').iterrows():\n print(f\"'{row['name']}', credits: {row['credits']}, grade {row['grade']}\")\n i = 0\n for index, row in df.query('grade == 0').iterrows():\n print(f\"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}\")\n i += 1\n return 0\n\n\ndef main():\n name = \"calcGrades\"\n description = \"\"\"CalcGrades is an utility which purpose is to compute the minimum\n grades required to get a certain weight average of the grades over the credits,\n given the desired 
output and the grades already owned.\"\"\"\n parser = argparse.ArgumentParser(name, description=description)\n parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')\n parser.add_argument('--file',dest='file', default='courses.csv', type=str,\n help='path to the csv file containing the courses (default: courses.csv)')\n parser.add_argument('--floor', default=False, action='store_true',\n help='apply floor operation instead of round to solution')\n parser.add_argument('--ceil', default=False, action='store_true',\n help='apply ceil operation instead of round to solution')\n args = parser.parse_args()\n mean = args.mean\n courses = args.file\n solver = WeightAverage(mean, courses)\n sol = solver.solve()\n if sol is None:\n error_no_solution()\n if args.ceil:\n sol = [math.ceil(x) for x in sol]\n elif args.floor:\n sol = [math.floor(x) for x in sol]\n else:\n sol = [round(x) for x in sol]\n output_result(solver, sol)\n return 0\n\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":35.5660377358,"string":"35.566038"},"max_line_length":{"kind":"number","value":116,"string":"116"},"alphanum_fraction":{"kind":"number","value":0.6092838196,"string":"0.609284"},"count_classes":{"kind":"number","value":1464,"string":"1,464"},"score_classes":{"kind":"number","value":0.3883289124668435,"string":"0.388329"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":991,"string":"991"},"score_documentation":{"kind":"number","value":0.2628647214854111,"string":"0.262865"}}},{"rowIdx":3707,"cells":{"hexsha":{"kind":"string","value":"b9652ceb78b45d3bef98c61d48e3cd4630133615"},"size":{"kind":"number","value":19317,"string":"19,317"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"sdk/python/pulumi_google_native/testing/v1/test_matrix.py"},"max_stars_repo_name":{"kind":"string","value":"AaronFriel/pulumi-google-native"},"max_stars_repo_head_hexsha":{"kind":"string","value":"75d1cda425e33d4610348972cd70bddf35f1770d"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":44,"string":"44"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-04-18T23:00:48.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-14T17:43:15.000Z"},"max_issues_repo_path":{"kind":"string","value":"sdk/python/pulumi_google_native/testing/v1/test_matrix.py"},"max_issues_repo_name":{"kind":"string","value":"AaronFriel/pulumi-google-native"},"max_issues_repo_head_hexsha":{"kind":"string","value":"75d1cda425e33d4610348972cd70bddf35f1770d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":354,"string":"354"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-04-16T16:48:39.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T17:16:39.000Z"},"max_forks_repo_path":{"kind":"string","value":"sdk/python/pulumi_google_native/testing/v1/test_matrix.py"},"max_forks_repo_name":{"kind":"string","value":"AaronFriel/pulumi-google-native"},"max_forks_repo_head_hexsha":{"kind":"string","value":"75d1cda425e33d4610348972cd70bddf35f1770d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":8,"string":"8"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-04-24T17:46:51.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-01-05T10:40:21.000Z"},"content":{"kind":"string","value":"# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi SDK Generator. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom ... import _utilities\nfrom . import outputs\nfrom ._enums import *\nfrom ._inputs import *\n\n__all__ = ['TestMatrixArgs', 'TestMatrix']\n\n@pulumi.input_type\nclass TestMatrixArgs:\n def __init__(__self__, *,\n environment_matrix: pulumi.Input['EnvironmentMatrixArgs'],\n result_storage: pulumi.Input['ResultStorageArgs'],\n test_specification: pulumi.Input['TestSpecificationArgs'],\n client_info: Optional[pulumi.Input['ClientInfoArgs']] = None,\n fail_fast: Optional[pulumi.Input[bool]] = None,\n flaky_test_attempts: Optional[pulumi.Input[int]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None):\n \"\"\"\n The set of arguments for constructing a TestMatrix resource.\n :param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on.\n :param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written.\n :param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test.\n :param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test.\n :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.\n :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. 
Default is 0, which implies no reruns.\n :param pulumi.Input[str] project: The cloud project that owns the test matrix.\n \"\"\"\n pulumi.set(__self__, \"environment_matrix\", environment_matrix)\n pulumi.set(__self__, \"result_storage\", result_storage)\n pulumi.set(__self__, \"test_specification\", test_specification)\n if client_info is not None:\n pulumi.set(__self__, \"client_info\", client_info)\n if fail_fast is not None:\n pulumi.set(__self__, \"fail_fast\", fail_fast)\n if flaky_test_attempts is not None:\n pulumi.set(__self__, \"flaky_test_attempts\", flaky_test_attempts)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if request_id is not None:\n pulumi.set(__self__, \"request_id\", request_id)\n\n @property\n @pulumi.getter(name=\"environmentMatrix\")\n def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']:\n \"\"\"\n The devices the tests are being executed on.\n \"\"\"\n return pulumi.get(self, \"environment_matrix\")\n\n @environment_matrix.setter\n def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']):\n pulumi.set(self, \"environment_matrix\", value)\n\n @property\n @pulumi.getter(name=\"resultStorage\")\n def result_storage(self) -> pulumi.Input['ResultStorageArgs']:\n \"\"\"\n Where the results for the matrix are written.\n \"\"\"\n return pulumi.get(self, \"result_storage\")\n\n @result_storage.setter\n def result_storage(self, value: pulumi.Input['ResultStorageArgs']):\n pulumi.set(self, \"result_storage\", value)\n\n @property\n @pulumi.getter(name=\"testSpecification\")\n def test_specification(self) -> pulumi.Input['TestSpecificationArgs']:\n \"\"\"\n How to run the test.\n \"\"\"\n return pulumi.get(self, \"test_specification\")\n\n @test_specification.setter\n def test_specification(self, value: pulumi.Input['TestSpecificationArgs']):\n pulumi.set(self, \"test_specification\", value)\n\n @property\n @pulumi.getter(name=\"clientInfo\")\n def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]:\n \"\"\"\n Information about the client which invoked the test.\n \"\"\"\n return pulumi.get(self, \"client_info\")\n\n @client_info.setter\n def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]):\n pulumi.set(self, \"client_info\", value)\n\n @property\n @pulumi.getter(name=\"failFast\")\n def fail_fast(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.\n \"\"\"\n return pulumi.get(self, \"fail_fast\")\n\n @fail_fast.setter\n def fail_fast(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"fail_fast\", value)\n\n @property\n @pulumi.getter(name=\"flakyTestAttempts\")\n def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]:\n \"\"\"\n The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. 
Default is 0, which implies no reruns.\n \"\"\"\n return pulumi.get(self, \"flaky_test_attempts\")\n\n @flaky_test_attempts.setter\n def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]):\n pulumi.set(self, \"flaky_test_attempts\", value)\n\n @property\n @pulumi.getter\n def project(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n The cloud project that owns the test matrix.\n \"\"\"\n return pulumi.get(self, \"project\")\n\n @project.setter\n def project(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"project\", value)\n\n @property\n @pulumi.getter(name=\"requestId\")\n def request_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"request_id\")\n\n @request_id.setter\n def request_id(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"request_id\", value)\n\n\nclass TestMatrix(pulumi.CustomResource):\n @overload\n def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,\n environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,\n fail_fast: Optional[pulumi.Input[bool]] = None,\n flaky_test_attempts: Optional[pulumi.Input[int]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None,\n result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,\n test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,\n __props__=None):\n \"\"\"\n Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.\n Auto-naming is currently not supported for this resource.\n Note - this resource's API doesn't support deletion. When deleted, the resource will persist\n on Google Cloud even though it will be deleted from Pulumi state.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test.\n :param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on.\n :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.\n :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. 
Default is 0, which implies no reruns.\n :param pulumi.Input[str] project: The cloud project that owns the test matrix.\n :param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written.\n :param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test.\n \"\"\"\n ...\n @overload\n def __init__(__self__,\n resource_name: str,\n args: TestMatrixArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n \"\"\"\n Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.\n Auto-naming is currently not supported for this resource.\n Note - this resource's API doesn't support deletion. When deleted, the resource will persist\n on Google Cloud even though it will be deleted from Pulumi state.\n\n :param str resource_name: The name of the resource.\n :param TestMatrixArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,\n environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,\n fail_fast: Optional[pulumi.Input[bool]] = None,\n flaky_test_attempts: Optional[pulumi.Input[int]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None,\n result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,\n test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,\n __props__=None):\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = TestMatrixArgs.__new__(TestMatrixArgs)\n\n __props__.__dict__[\"client_info\"] = client_info\n if environment_matrix is None and not opts.urn:\n raise TypeError(\"Missing required property 'environment_matrix'\")\n __props__.__dict__[\"environment_matrix\"] = environment_matrix\n __props__.__dict__[\"fail_fast\"] = fail_fast\n __props__.__dict__[\"flaky_test_attempts\"] = flaky_test_attempts\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"request_id\"] = request_id\n if result_storage is None and not opts.urn:\n raise TypeError(\"Missing required property 'result_storage'\")\n __props__.__dict__[\"result_storage\"] = result_storage\n if 
test_specification is None and not opts.urn:\n raise TypeError(\"Missing required property 'test_specification'\")\n __props__.__dict__[\"test_specification\"] = test_specification\n __props__.__dict__[\"invalid_matrix_details\"] = None\n __props__.__dict__[\"outcome_summary\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"test_executions\"] = None\n __props__.__dict__[\"test_matrix_id\"] = None\n __props__.__dict__[\"timestamp\"] = None\n super(TestMatrix, __self__).__init__(\n 'google-native:testing/v1:TestMatrix',\n resource_name,\n __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix':\n \"\"\"\n Get an existing TestMatrix resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = TestMatrixArgs.__new__(TestMatrixArgs)\n\n __props__.__dict__[\"client_info\"] = None\n __props__.__dict__[\"environment_matrix\"] = None\n __props__.__dict__[\"fail_fast\"] = None\n __props__.__dict__[\"flaky_test_attempts\"] = None\n __props__.__dict__[\"invalid_matrix_details\"] = None\n __props__.__dict__[\"outcome_summary\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"result_storage\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"test_executions\"] = None\n __props__.__dict__[\"test_matrix_id\"] = None\n __props__.__dict__[\"test_specification\"] = None\n __props__.__dict__[\"timestamp\"] = None\n return TestMatrix(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter(name=\"clientInfo\")\n def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']:\n \"\"\"\n Information about the client which invoked the test.\n \"\"\"\n return pulumi.get(self, \"client_info\")\n\n @property\n @pulumi.getter(name=\"environmentMatrix\")\n def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']:\n \"\"\"\n The devices the tests are being executed on.\n \"\"\"\n return pulumi.get(self, \"environment_matrix\")\n\n @property\n @pulumi.getter(name=\"failFast\")\n def fail_fast(self) -> pulumi.Output[bool]:\n \"\"\"\n If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.\n \"\"\"\n return pulumi.get(self, \"fail_fast\")\n\n @property\n @pulumi.getter(name=\"flakyTestAttempts\")\n def flaky_test_attempts(self) -> pulumi.Output[int]:\n \"\"\"\n The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.\n \"\"\"\n return pulumi.get(self, \"flaky_test_attempts\")\n\n @property\n @pulumi.getter(name=\"invalidMatrixDetails\")\n def invalid_matrix_details(self) -> pulumi.Output[str]:\n \"\"\"\n Describes why the matrix is considered invalid. 
Only useful for matrices in the INVALID state.\n \"\"\"\n return pulumi.get(self, \"invalid_matrix_details\")\n\n @property\n @pulumi.getter(name=\"outcomeSummary\")\n def outcome_summary(self) -> pulumi.Output[str]:\n \"\"\"\n Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED.\n \"\"\"\n return pulumi.get(self, \"outcome_summary\")\n\n @property\n @pulumi.getter\n def project(self) -> pulumi.Output[str]:\n \"\"\"\n The cloud project that owns the test matrix.\n \"\"\"\n return pulumi.get(self, \"project\")\n\n @property\n @pulumi.getter(name=\"resultStorage\")\n def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']:\n \"\"\"\n Where the results for the matrix are written.\n \"\"\"\n return pulumi.get(self, \"result_storage\")\n\n @property\n @pulumi.getter\n def state(self) -> pulumi.Output[str]:\n \"\"\"\n Indicates the current progress of the test matrix.\n \"\"\"\n return pulumi.get(self, \"state\")\n\n @property\n @pulumi.getter(name=\"testExecutions\")\n def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]:\n \"\"\"\n The list of test executions that the service creates for this matrix.\n \"\"\"\n return pulumi.get(self, \"test_executions\")\n\n @property\n @pulumi.getter(name=\"testMatrixId\")\n def test_matrix_id(self) -> pulumi.Output[str]:\n \"\"\"\n Unique id set by the service.\n \"\"\"\n return pulumi.get(self, \"test_matrix_id\")\n\n @property\n @pulumi.getter(name=\"testSpecification\")\n def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']:\n \"\"\"\n How to run the test.\n \"\"\"\n return pulumi.get(self, \"test_specification\")\n\n @property\n @pulumi.getter\n def timestamp(self) -> pulumi.Output[str]:\n \"\"\"\n The time this test matrix was initially created.\n \"\"\"\n return pulumi.get(self, \"timestamp\")\n\n"},"avg_line_length":{"kind":"number","value":50.4360313316,"string":"50.436031"},"max_line_length":{"kind":"number","value":458,"string":"458"},"alphanum_fraction":{"kind":"number","value":0.676709634,"string":"0.67671"},"count_classes":{"kind":"number","value":18864,"string":"18,864"},"score_classes":{"kind":"number","value":0.9765491535952787,"string":"0.976549"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":15506,"string":"15,506"},"score_decorators":{"kind":"number","value":0.8027126365377647,"string":"0.802713"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":9637,"string":"9,637"},"score_documentation":{"kind":"number","value":0.49888699073355075,"string":"0.498887"}}},{"rowIdx":3708,"cells":{"hexsha":{"kind":"string","value":"b965c021bcb2dac479172708e85ad9ed89f09ef2"},"size":{"kind":"number","value":5427,"string":"5,427"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"View/View.py"},"max_stars_repo_name":{"kind":"string","value":"MoriokaReimen/ConfigHeaderGenerator"},"max_stars_repo_head_hexsha":{"kind":"string","value":"73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"View/View.py"},"max_issues_repo_name":{"kind":"string","value":"MoriokaReimen/ConfigHeaderGenerator"},"max_issues_repo_head_hexsha":{"kind":"string","value":"73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"View/View.py"},"max_forks_repo_name":{"kind":"string","value":"MoriokaReimen/ConfigHeaderGenerator"},"max_forks_repo_head_hexsha":{"kind":"string","value":"73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import tkinter as tk\nimport tkinter.messagebox\nfrom Control import Control\n\nclass View:\n def __init__(self, control : Control.Control):\n self.control = control\n\n # Init Window\n self.root = tk.Tk()\n self.root.title(u\"Header File Generator\")\n self.root.geometry(\"700x800\")\n\n self.config_frame = tk.Frame(self.root)\n\n # Config Table\n lb_symbol = tk.Label(self.config_frame, width = 20)\n lb_symbol[\"text\"] = \"Symbol\"\n lb_symbol.grid(row = 0, column = 0)\n\n lb_description = tk.Label(self.config_frame, width = 40)\n lb_description[\"text\"] = \"Detail\"\n lb_description.grid(row = 0, column = 1)\n\n lb_enable = tk.Label(self.config_frame, width = 10)\n lb_enable[\"text\"] = \"Enable\"\n lb_enable.grid(row = 0, column = 2)\n\n for i, config in enumerate(self.control.getConfigs()):\n symbol_entry = tk.Entry(self.config_frame, width=20)\n symbol_entry.insert(tk.END, config.symbol)\n symbol_entry.config(state = tk.DISABLED)\n symbol_entry.config(disabledforeground = \"black\", disabledbackground = \"white\")\n symbol_entry.grid(row= i + 1, column = 0)\n\n detail_entry = tk.Entry(self.config_frame, width=40)\n detail_entry.insert(tk.END, config.detail)\n detail_entry.config(state = tk.DISABLED)\n detail_entry.config(disabledforeground = \"black\", disabledbackground = \"white\")\n detail_entry.grid(row= i + 1, column = 1)\n\n bt_enable = tk.Button(self.config_frame, text=\"ON\", width= 5)\n bt_enable[\"text\"] = \"ON\" if config.enable else \"OFF\"\n color = \"green\" if config.enable else \"red\"\n bt_enable.config(bg=color, activebackground = color)\n bt_enable[\"command\"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button)\n bt_enable.grid(row = i + 1, column = 2)\n self.config_frame.pack(side=tk.TOP, anchor=tk.NW)\n\n\n self.value_config_frame = tk.Frame(self.root)\n\n # Config Table\n lb_symbol = tk.Label(self.value_config_frame, width = 20)\n lb_symbol[\"text\"] = \"Symbol\"\n lb_symbol.grid(row = 0, column = 0)\n\n lb_description = tk.Label(self.value_config_frame, width = 40)\n lb_description[\"text\"] = \"Detail\"\n lb_description.grid(row = 0, column = 1)\n\n lb_value = tk.Label(self.value_config_frame, width = 10)\n lb_value[\"text\"] = \"Value\"\n lb_value.grid(row = 0, column = 2)\n\n lb_enable = tk.Label(self.value_config_frame, width = 10)\n lb_enable[\"text\"] = \"Enable\"\n 
lb_enable.grid(row = 0, column = 3)\n\n for i, val_config in enumerate(self.control.getValConfigs()):\n symbol_entry = tk.Entry(self.value_config_frame, width=20)\n symbol_entry.insert(tk.END, val_config.symbol)\n symbol_entry.config(state = tk.DISABLED)\n symbol_entry.config(disabledforeground = \"black\", disabledbackground = \"white\")\n symbol_entry.grid(row= i + 1, column = 0)\n\n detail_entry = tk.Entry(self.value_config_frame, width=40)\n detail_entry.insert(tk.END, val_config.detail)\n detail_entry.config(state = tk.DISABLED)\n detail_entry.config(disabledforeground = \"black\", disabledbackground = \"white\")\n detail_entry.grid(row= i + 1, column = 1)\n\n value_entry = tk.Entry(self.value_config_frame, width=10)\n value_entry.insert(tk.END, val_config.value)\n value_entry.config(state = tk.DISABLED)\n value_entry.config(disabledforeground = \"black\", disabledbackground = \"white\")\n value_entry.grid(row= i + 1, column = 2)\n\n bt_enable = tk.Button(self.value_config_frame, text=\"ON\", width= 5)\n bt_enable[\"text\"] = \"ON\" if val_config.enable else \"OFF\"\n color = \"green\" if val_config.enable else \"red\"\n bt_enable.config(bg=color, activebackground = color)\n bt_enable[\"command\"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button)\n bt_enable.grid(row = i + 1, column = 3)\n self.value_config_frame.pack(side=tk.TOP, anchor=tk.W)\n\n # Generator Button\n self.bt_generate = tk.Button(self.root)\n self.bt_generate[\"text\"] = \"Generate Header\"\n self.bt_generate[\"command\"] = self.generateHeader\n self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE)\n\n def start(self):\n self.root.mainloop()\n\n def generateHeader(self):\n self.control.generateHeader()\n tk.messagebox.showinfo(\"Header Generator Info\", \"Generated:{0}\".format(self.control.header_config.path))\n\n def update(self):\n pass\n\n def toggle_config_enable(self, id, button : tk.Button):\n config = self.control.getConfigs()[id]\n config.enable = not config.enable\n button[\"text\"] = \"ON\" if config.enable else \"OFF\"\n color = \"green\" if config.enable else \"red\"\n button.config(bg=color, activebackground = color)\n \n def toggle_val_config_enable(self, id, button : tk.Button):\n val_config = self.control.getValConfigs()[id]\n val_config.enable = not val_config.enable\n button[\"text\"] = \"ON\" if val_config.enable else \"OFF\"\n color = \"green\" if val_config.enable else \"red\"\n button.config(bg=color, activebackground = 
color)\n"},"avg_line_length":{"kind":"number","value":43.0714285714,"string":"43.071429"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6294453658,"string":"0.629445"},"count_classes":{"kind":"number","value":5350,"string":"5,350"},"score_classes":{"kind":"number","value":0.9858116823290952,"string":"0.985812"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":463,"string":"463"},"score_documentation":{"kind":"number","value":0.08531416989128432,"string":"0.085314"}}},{"rowIdx":3709,"cells":{"hexsha":{"kind":"string","value":"b9669e29ffa745ca4256305d7461bcbe497cc930"},"size":{"kind":"number","value":1428,"string":"1,428"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tests/bugs/core_3355_test.py"},"max_stars_repo_name":{"kind":"string","value":"FirebirdSQL/firebird-qa"},"max_stars_repo_head_hexsha":{"kind":"string","value":"96af2def7f905a06f178e2a80a2c8be4a4b44782"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-02-05T11:37:13.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-05T11:37:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"tests/bugs/core_3355_test.py"},"max_issues_repo_name":{"kind":"string","value":"FirebirdSQL/firebird-qa"},"max_issues_repo_head_hexsha":{"kind":"string","value":"96af2def7f905a06f178e2a80a2c8be4a4b44782"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-09-03T11:47:00.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-09-03T12:42:10.000Z"},"max_forks_repo_path":{"kind":"string","value":"tests/bugs/core_3355_test.py"},"max_forks_repo_name":{"kind":"string","value":"FirebirdSQL/firebird-qa"},"max_forks_repo_head_hexsha":{"kind":"string","value":"96af2def7f905a06f178e2a80a2c8be4a4b44782"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-06-30T14:14:16.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-06-30T14:14:16.000Z"},"content":{"kind":"string","value":"#coding:utf-8\n#\n# id: bugs.core_3355\n# title: Wrong comparsion of DATE and TIMESTAMP if index is used\n# decription: \n# tracker_id: CORE-3355\n# min_versions: ['2.1.5']\n# versions: 3.0\n# qmid: None\n\nimport pytest\nfrom firebird.qa import db_factory, isql_act, Action\n\n# version: 3.0\n# resources: None\n\nsubstitutions_1 = []\n\ninit_script_1 = \"\"\"create table tdate (id integer not null primary key, val date);\ncreate index tdateix1 on tdate (val);\ncommit;\ninsert into tdate values (0, '1997-12-31');\ninsert into tdate values (1, '1998-01-01');\ninsert into tdate values (2, 
'1998-01-02');\ninsert into tdate values (3, '1998-01-03');\ninsert into tdate values (4, '1998-01-04');\ninsert into tdate values (5, '1998-01-05');\ncommit;\n\"\"\"\n\ndb_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)\n\ntest_script_1 = \"\"\"select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';\nselect count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';\n\"\"\"\n\nact_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)\n\nexpected_stdout_1 = \"\"\"\n COUNT\n=====================\n 1\n\n\n COUNT\n=====================\n 5\n\n\"\"\"\n\n@pytest.mark.version('>=3.0')\ndef test_1(act_1: Action):\n act_1.expected_stdout = expected_stdout_1\n act_1.execute()\n assert act_1.clean_stdout == act_1.clean_expected_stdout\n\n"},"avg_line_length":{"kind":"number","value":25.0526315789,"string":"25.052632"},"max_line_length":{"kind":"number","value":95,"string":"95"},"alphanum_fraction":{"kind":"number","value":0.6414565826,"string":"0.641457"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":183,"string":"183"},"score_decorators":{"kind":"number","value":0.12815126050420167,"string":"0.128151"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":953,"string":"953"},"score_documentation":{"kind":"number","value":0.6673669467787114,"string":"0.667367"}}},{"rowIdx":3710,"cells":{"hexsha":{"kind":"string","value":"b967ba0197b144171458b230c2dfe31844ba0b72"},"size":{"kind":"number","value":5231,"string":"5,231"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"dags/download_decrypt_transfer_files.py"},"max_stars_repo_name":{"kind":"string","value":"hms-dbmi/bch-pic-sure-airflow-dags"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0c1e6f07da4e270581942e551ac30284474921d4"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"dags/download_decrypt_transfer_files.py"},"max_issues_repo_name":{"kind":"string","value":"hms-dbmi/bch-pic-sure-airflow-dags"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0c1e6f07da4e270581942e551ac30284474921d4"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"dags/download_decrypt_transfer_files.py"},"max_forks_repo_name":{"kind":"string","value":"hms-dbmi/bch-pic-sure-airflow-dags"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0c1e6f07da4e270581942e551ac30284474921d4"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\n@author: anilkdegala\n\"\"\"\nimport os\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom datetime import date, timedelta, datetime\nfrom collections import OrderedDict \nfrom scripts.dag_pebbles import DagPebbles\nfrom airflow.configuration import conf\nfrom scripts.configurations import *\nfrom airflow.operators.dummy_operator import DummyOperator\n\ndefault_args = {\n \"owner\": \"anilkdegala\",\n \"depends_on_past\": True,\n \"max_active_runs\": 1,\n \"start_date\": datetime(2015, 6, 1),\n \"is_active\": True,\n \"is_paused_upon_creation\": False,\n}\n\n\ndef begin_pipeline(**kwargs):\n print(\"begin_pipeline:\")\n files = kwargs['dag_run'].conf.get('files')\n \n download_decrypt_arguments = ''\n transfer_arguments_list = []\n for f in files:\n print(\"download_decrypt_transfer_files: file: \", f['name'], ', location: ', f['path']) \n output = f['name']+','+f['path']+','+f['final_name']\n download_decrypt_arguments = download_decrypt_arguments + \" \" + output\n transfer_arguments_list.append(DATA_LOCATION + \"/\"+f['final_name'])\n \n transfer_arguments = \",\".join(transfer_arguments_list)\n print(\"final download_decrypt_arguments: \",download_decrypt_arguments)\n print(\"final transfer_arguments: \",transfer_arguments) \n kwargs[\"ti\"].xcom_push(key=\"download_decrypt_arguments\", value=download_decrypt_arguments)\n kwargs[\"ti\"].xcom_push(key=\"transfer_arguments\", value=transfer_arguments)\n \n \ndef pipeline_enable_check(**kwargs):\n dp = DagPebbles()\n if dp.pipeline_enable_check('DATA_LOAD'):\n return \"pipeline_check_passed\"\n else:\n return \"pipeline_check_skipped\" \n\ndef pipeline_check_passed(**kwargs):\n print(\"pipeline_check_passed:\") \n\ndef end_pipeline(**kwargs):\n print(\"end_pipeline:\")\n\n\ndef pipeline_check_skipped(**kwargs):\n print(\"pipeline_check_skipped:\") \n \ndef cleanup(**kwargs):\n dp = DagPebbles()\n print(\"cleanup\") \n\n \ndef notify(**kwargs):\n dp = DagPebbles()\n print(\"notify\") \n\n \ndef end(**kwargs):\n dp = DagPebbles()\n print(\"end\") \n\nwith DAG( \"DOWNLOAD_DECRYPT_TRANSFER\",\n description=\"Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)\",\n default_args=default_args,\n schedule_interval=None,\n catchup=False,\n orientation=\"TB\",\n tags=['Utils'],\n dagrun_timeout=timedelta(hours=240)\n ) as dag:\n \n\n t_pipeline_begin = PythonOperator(\n task_id=\"begin_pipeline\",\n python_callable=begin_pipeline,\n provide_context=True,\n dag=dag,\n )\n \n t_check_pipeline = BranchPythonOperator(\n task_id=\"check_pipeline\",\n python_callable=pipeline_enable_check,\n provide_context=True,\n dag=dag,\n )\n \n t_pipeline_check_passed = PythonOperator(\n task_id=\"pipeline_check_passed\",\n python_callable=pipeline_check_passed,\n provide_context=True,\n dag=dag,\n )\n \n t_pipeline_check_skipped = PythonOperator(\n task_id=\"pipeline_check_skipped\",\n python_callable=pipeline_check_skipped,\n provide_context=True,\n dag=dag,\n )\n \n download_files_cmd = \"/opt/bitnami/airflow/airflow-data/scripts/download_files.sh \"+\"{{ ti.xcom_pull(key='download_decrypt_arguments')}}\" \n t_download_files = BashOperator(\n task_id='download_files',\n bash_command=download_files_cmd,\n 
dag=dag) \n \n decrypt_files_cmd = \"/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh \"+\"{{ ti.xcom_pull(key='download_decrypt_arguments')}} \"\n t_decrypt_files = BashOperator(\n task_id='decrypt_files',\n bash_command=decrypt_files_cmd,\n dag=dag) \n \n transfer_files_cmd = \"/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl \"+\"{{ ti.xcom_pull(key='transfer_arguments')}} \"\n t_transfer_files = BashOperator(\n task_id='transfer_files',\n bash_command=transfer_files_cmd,\n dag=dag) \n \n t_end_pipeline = PythonOperator(\n task_id=\"end_pipeline\",\n python_callable=end_pipeline,\n provide_context=True,\n trigger_rule=\"none_failed\",\n dag=dag,\n )\n \n t_notify = PythonOperator(\n task_id=\"send_notifications\",\n python_callable=notify,\n provide_context=True,\n trigger_rule=\"none_failed\",\n dag=dag,\n )\n \n t_cleanup = PythonOperator(\n task_id=\"cleanup\",\n python_callable=cleanup,\n provide_context=True,\n trigger_rule=\"none_failed\",\n dag=dag,\n )\n \n t_end = PythonOperator(\n task_id=\"end\",\n python_callable=end,\n provide_context=True,\n trigger_rule=\"none_failed\",\n dag=dag,\n )\n \n \n t_pipeline_begin >> t_check_pipeline\n t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline \n t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline \n \n t_end_pipeline >> t_cleanup >> t_notify >> t_end\n"},"avg_line_length":{"kind":"number","value":30.7705882353,"string":"30.770588"},"max_line_length":{"kind":"number","value":171,"string":"171"},"alphanum_fraction":{"kind":"number","value":0.6641177595,"string":"0.664118"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1239,"string":"1,239"},"score_documentation":{"kind":"number","value":0.2368571974765819,"string":"0.236857"}}},{"rowIdx":3711,"cells":{"hexsha":{"kind":"string","value":"b96834dcae4311b040352e86ae4bdc019619193a"},"size":{"kind":"number","value":7518,"string":"7,518"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"keystone-moon/keystone/endpoint_policy/controllers.py"},"max_stars_repo_name":{"kind":"string","value":"hashnfv/hashnfv-moon"},"max_stars_repo_head_hexsha":{"kind":"string","value":"daaba34fa2ed4426bc0fde359e54a5e1b872208c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"keystone-moon/keystone/endpoint_policy/controllers.py"},"max_issues_repo_name":{"kind":"string","value":"hashnfv/hashnfv-moon"},"max_issues_repo_head_hexsha":{"kind":"string","value":"daaba34fa2ed4426bc0fde359e54a5e1b872208c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"keystone-moon/keystone/endpoint_policy/controllers.py"},"max_forks_repo_name":{"kind":"string","value":"hashnfv/hashnfv-moon"},"max_forks_repo_head_hexsha":{"kind":"string","value":"daaba34fa2ed4426bc0fde359e54a5e1b872208c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-03-21T11:38:30.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-03-21T11:38:30.000Z"},"content":{"kind":"string","value":"# Copyright 2014 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom keystone.common import controller\nfrom keystone.common import dependency\nfrom keystone import notifications\n\n\n@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')\nclass EndpointPolicyV3Controller(controller.V3Controller):\n collection_name = 'endpoints'\n member_name = 'endpoint'\n\n def __init__(self):\n super(EndpointPolicyV3Controller, self).__init__()\n notifications.register_event_callback(\n 'deleted', 'endpoint', self._on_endpoint_delete)\n notifications.register_event_callback(\n 'deleted', 'service', self._on_service_delete)\n notifications.register_event_callback(\n 'deleted', 'region', self._on_region_delete)\n notifications.register_event_callback(\n 'deleted', 'policy', self._on_policy_delete)\n\n def _on_endpoint_delete(self, service, resource_type, operation, payload):\n self.endpoint_policy_api.delete_association_by_endpoint(\n payload['resource_info'])\n\n def _on_service_delete(self, service, resource_type, operation, payload):\n self.endpoint_policy_api.delete_association_by_service(\n payload['resource_info'])\n\n def _on_region_delete(self, service, resource_type, operation, payload):\n self.endpoint_policy_api.delete_association_by_region(\n payload['resource_info'])\n\n def _on_policy_delete(self, service, resource_type, operation, payload):\n self.endpoint_policy_api.delete_association_by_policy(\n payload['resource_info'])\n\n @controller.protected()\n def create_policy_association_for_endpoint(self, context,\n policy_id, endpoint_id):\n \"\"\"Create an association between a policy and an endpoint.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_endpoint(endpoint_id)\n self.endpoint_policy_api.create_policy_association(\n policy_id, endpoint_id=endpoint_id)\n\n @controller.protected()\n def check_policy_association_for_endpoint(self, context,\n policy_id, endpoint_id):\n \"\"\"Check an association between a policy and an endpoint.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_endpoint(endpoint_id)\n self.endpoint_policy_api.check_policy_association(\n policy_id, endpoint_id=endpoint_id)\n\n 
@controller.protected()\n def delete_policy_association_for_endpoint(self, context,\n policy_id, endpoint_id):\n \"\"\"Delete an association between a policy and an endpoint.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_endpoint(endpoint_id)\n self.endpoint_policy_api.delete_policy_association(\n policy_id, endpoint_id=endpoint_id)\n\n @controller.protected()\n def create_policy_association_for_service(self, context,\n policy_id, service_id):\n \"\"\"Create an association between a policy and a service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.endpoint_policy_api.create_policy_association(\n policy_id, service_id=service_id)\n\n @controller.protected()\n def check_policy_association_for_service(self, context,\n policy_id, service_id):\n \"\"\"Check an association between a policy and a service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.endpoint_policy_api.check_policy_association(\n policy_id, service_id=service_id)\n\n @controller.protected()\n def delete_policy_association_for_service(self, context,\n policy_id, service_id):\n \"\"\"Delete an association between a policy and a service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.endpoint_policy_api.delete_policy_association(\n policy_id, service_id=service_id)\n\n @controller.protected()\n def create_policy_association_for_region_and_service(\n self, context, policy_id, service_id, region_id):\n \"\"\"Create an association between a policy and region+service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.catalog_api.get_region(region_id)\n self.endpoint_policy_api.create_policy_association(\n policy_id, service_id=service_id, region_id=region_id)\n\n @controller.protected()\n def check_policy_association_for_region_and_service(\n self, context, policy_id, service_id, region_id):\n \"\"\"Check an association between a policy and region+service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.catalog_api.get_region(region_id)\n self.endpoint_policy_api.check_policy_association(\n policy_id, service_id=service_id, region_id=region_id)\n\n @controller.protected()\n def delete_policy_association_for_region_and_service(\n self, context, policy_id, service_id, region_id):\n \"\"\"Delete an association between a policy and region+service.\"\"\"\n self.policy_api.get_policy(policy_id)\n self.catalog_api.get_service(service_id)\n self.catalog_api.get_region(region_id)\n self.endpoint_policy_api.delete_policy_association(\n policy_id, service_id=service_id, region_id=region_id)\n\n @controller.protected()\n def get_policy_for_endpoint(self, context, endpoint_id):\n \"\"\"Get the effective policy for an endpoint.\"\"\"\n self.catalog_api.get_endpoint(endpoint_id)\n ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)\n # NOTE(henry-nash): since the collection and member for this class is\n # set to endpoints, we have to handle wrapping this policy entity\n # ourselves.\n self._add_self_referential_link(context, ref)\n return {'policy': ref}\n\n # NOTE(henry-nash): As in the catalog controller, we must ensure that the\n # legacy_endpoint_id does not escape.\n\n @classmethod\n def filter_endpoint(cls, ref):\n if 'legacy_endpoint_id' in ref:\n ref.pop('legacy_endpoint_id')\n return ref\n\n @classmethod\n def wrap_member(cls, context, ref):\n ref = 
cls.filter_endpoint(ref)\n return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)\n\n @controller.protected()\n def list_endpoints_for_policy(self, context, policy_id):\n \"\"\"List endpoints with the effective association to a policy.\"\"\"\n self.policy_api.get_policy(policy_id)\n refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)\n return EndpointPolicyV3Controller.wrap_collection(context, refs)\n"},"avg_line_length":{"kind":"number","value":45.0179640719,"string":"45.017964"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.6996541633,"string":"0.699654"},"count_classes":{"kind":"number","value":6754,"string":"6,754"},"score_classes":{"kind":"number","value":0.8983772279861665,"string":"0.898377"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":6827,"string":"6,827"},"score_decorators":{"kind":"number","value":0.9080872572492684,"string":"0.908087"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1723,"string":"1,723"},"score_documentation":{"kind":"number","value":0.22918329342910349,"string":"0.229183"}}},{"rowIdx":3712,"cells":{"hexsha":{"kind":"string","value":"b96893ff0c22487256e91c812d37a56c2c479eb3"},"size":{"kind":"number","value":11886,"string":"11,886"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/nibetaseries/cli/run.py"},"max_stars_repo_name":{"kind":"string","value":"ipacheco-uy/NiBetaSeries"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3d8716552f22f925524d80af9aace09469c22d4d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-10-03T21:20:48.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-10-03T21:20:48.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/nibetaseries/cli/run.py"},"max_issues_repo_name":{"kind":"string","value":"ipacheco-uy/NiBetaSeries"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3d8716552f22f925524d80af9aace09469c22d4d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/nibetaseries/cli/run.py"},"max_forks_repo_name":{"kind":"string","value":"ipacheco-uy/NiBetaSeries"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3d8716552f22f925524d80af9aace09469c22d4d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nModule that contains the command line app.\n\nWhy does this file exist, and why not put this in __main__?\n\n You might be tempted to import things from __main__ later, but that will cause\n problems: the code will get executed twice:\n\n - When you run `python -m nibetaseries` 
python will execute\n ``__main__.py`` as a script. That means there won't be any\n ``nibetaseries.__main__`` in ``sys.modules``.\n - When you import __main__ it will get executed again (as a module) because\n there's no ``nibetaseries.__main__`` in ``sys.modules``.\n\n Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration\n\"\"\"\nfrom __future__ import absolute_import\nimport os\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom glob import glob\nfrom multiprocessing import cpu_count\nfrom nipype import config as ncfg\n\n\ndef get_parser():\n \"\"\"Build parser object\"\"\"\n from ..__init__ import __version__\n import sys\n\n verstr = 'nibs v{}'.format(__version__)\n\n parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('bids_dir', help='The directory with the input dataset '\n 'formatted according to the BIDS standard.')\n parser.add_argument('derivatives_pipeline', help='The pipeline that contains '\n 'minimally preprocessed img, brainmask, and confounds.tsv')\n parser.add_argument('output_dir', help='The directory where the output directory '\n 'and files should be stored. If you are running group level analysis '\n 'this folder should be prepopulated with the results of the'\n 'participant level analysis.')\n parser.add_argument('analysis_level', choices=['participant', 'group'],\n help='Level of the analysis that will be performed '\n 'Multiple participant level analyses can be run independently '\n '(in parallel) using the same output_dir')\n parser.add_argument('-v', '--version', action='version',\n version=verstr)\n\n # Atlas Arguments (Required Options)\n atlas_args = parser.add_argument_group('Required Atlas Arguments')\n atlas_args.add_argument('-a', '--atlas-img', action='store',\n required=('-l' in sys.argv or '--atlas-lut' in sys.argv),\n help='input atlas nifti where each voxel within a \"region\" '\n 'is labeled with the same integer and there is a unique '\n 'integer associated with each region of interest.')\n atlas_args.add_argument('-l', '--atlas-lut', action='store',\n required=('-a' in sys.argv or '--atlas-img' in sys.argv),\n help='atlas look up table (tsv) formatted with the columns: '\n 'index, regions which correspond to the regions in the '\n 'nifti file specified by --atlas-img.')\n\n # preprocessing options\n proc_opts = parser.add_argument_group('Options for processing')\n proc_opts.add_argument('--estimator', default='lss',\n choices=['lss', 'lsa'],\n help='beta series modeling method')\n proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,\n help='select a smoothing kernel (mm)')\n proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,\n default=0.0078125, help='high pass filter (Hz)')\n proc_opts.add_argument('-c', '--confounds', help='The confound column names '\n 'that are to be included in nuisance regression. 
'\n 'write the confounds you wish to include separated by a space',\n nargs=\"+\")\n proc_opts.add_argument('--hrf-model', default='glover',\n choices=['glover', 'spm', 'fir',\n 'glover + derivative',\n 'glover + derivative + dispersion',\n 'spm + derivative',\n 'spm + derivative + dispersion'],\n help='convolve your regressors '\n 'with one of the following hemodynamic response functions')\n proc_opts.add_argument('--fir-delays', default=None,\n nargs='+', type=int, help='FIR delays in volumes',\n metavar='VOL')\n proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '\n 'are stored (i.e. non-essential files). '\n 'This directory can be deleted once you are reasonably '\n 'certain nibs finished as expected.')\n\n # Image Selection options\n image_opts = parser.add_argument_group('Options for selecting images')\n parser.add_argument('--participant-label', nargs=\"+\",\n help='The label(s) of the participant(s) '\n 'that should be analyzed. The label '\n 'corresponds to sub- from the BIDS spec '\n '(so it does not include \"sub-\"). If this parameter is not '\n 'provided all subjects should be analyzed. Multiple '\n 'participants can be specified with a space separated list.')\n image_opts.add_argument('--session-label', action='store',\n default=None, help='select a session to analyze')\n image_opts.add_argument('-t', '--task-label', action='store',\n default=None, help='select a specific task to be processed')\n image_opts.add_argument('--run-label', action='store',\n default=None, help='select a run to analyze')\n image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',\n choices=['MNI152NLin2009cAsym'],\n help='select a bold derivative in a specific space to be used')\n image_opts.add_argument('--description-label', action='store',\n default=None, help='select a bold file with particular '\n '`desc` label to process')\n image_opts.add_argument('--exclude-description-label', action='store_true',\n default=False, help='exclude this `desc` label from nibetaseries')\n\n # performance options\n g_perfm = parser.add_argument_group('Options to handle performance')\n g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,\n help='maximum number of threads across all processes')\n g_perfm.add_argument('--use-plugin', action='store', default=None,\n help='nipype plugin configuration file')\n\n # misc options\n misc = parser.add_argument_group('misc options')\n misc.add_argument('--graph', action='store_true', default=False,\n help='generates a graph png of the workflow')\n\n return parser\n\n\ndef main():\n from ..workflows.base import init_nibetaseries_participant_wf\n\n # get commandline options\n opts = get_parser().parse_args()\n\n # check inputs\n if (opts.hrf_model == 'fir') and (opts.fir_delays is None):\n raise ValueError('If the FIR HRF model is selected, '\n 'FIR delays must be provided.')\n\n # Set up directories\n # TODO: set up some sort of versioning system\n bids_dir = os.path.abspath(opts.bids_dir)\n\n derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)\n\n output_dir = os.path.abspath(opts.output_dir)\n os.makedirs(output_dir, exist_ok=True)\n\n log_dir = os.path.join(output_dir, 'logs')\n os.makedirs(log_dir, exist_ok=True)\n\n if opts.work_dir:\n work_dir = os.path.abspath(opts.work_dir)\n else:\n work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')\n\n os.makedirs(work_dir, exist_ok=True)\n\n # only for a subset of subjects\n if 
opts.participant_label:\n subject_list = opts.participant_label\n # for all subjects\n else:\n subject_dirs = glob(os.path.join(bids_dir, \"sub-*\"))\n subject_list = [subject_dir.split(\"-\")[-1] for subject_dir in subject_dirs]\n\n # Nipype plugin configuration\n # Load base plugin_settings from file if --use-plugin\n if opts.use_plugin is not None:\n from yaml import load as loadyml\n with open(opts.use_plugin) as f:\n plugin_settings = loadyml(f)\n plugin_settings.setdefault('plugin_args', {})\n else:\n # Defaults\n plugin_settings = {\n 'plugin': 'MultiProc',\n 'plugin_args': {\n 'raise_insufficient': False,\n 'maxtasksperchild': 1,\n }\n }\n\n # Resource management options\n # Note that we're making strong assumptions about valid plugin args\n # This may need to be revisited if people try to use batch plugins\n nthreads = plugin_settings['plugin_args'].get('n_procs')\n # Permit overriding plugin config with specific CLI options\n if nthreads is None or opts.nthreads is not None:\n nthreads = opts.nthreads\n if nthreads is None or nthreads < 1:\n nthreads = cpu_count()\n plugin_settings['plugin_args']['n_procs'] = nthreads\n\n # Nipype config (logs and execution)\n ncfg.update_config({\n 'logging': {'log_directory': log_dir,\n 'log_to_file': True},\n 'execution': {'crashdump_dir': log_dir,\n 'crashfile_format': 'txt',\n 'parameterize_dirs': False},\n })\n\n # running participant level\n if opts.analysis_level == \"participant\":\n nibetaseries_participant_wf = init_nibetaseries_participant_wf(\n estimator=opts.estimator,\n atlas_img=os.path.abspath(opts.atlas_img),\n atlas_lut=os.path.abspath(opts.atlas_lut),\n bids_dir=bids_dir,\n derivatives_pipeline_dir=derivatives_pipeline_dir,\n exclude_description_label=opts.exclude_description_label,\n fir_delays=opts.fir_delays,\n hrf_model=opts.hrf_model,\n high_pass=opts.high_pass,\n output_dir=output_dir,\n run_label=opts.run_label,\n selected_confounds=opts.confounds,\n session_label=opts.session_label,\n smoothing_kernel=opts.smoothing_kernel,\n space_label=opts.space_label,\n subject_list=subject_list,\n task_label=opts.task_label,\n description_label=opts.description_label,\n work_dir=work_dir,\n )\n\n if opts.graph:\n nibetaseries_participant_wf.write_graph(graph2use='colored',\n format='svg',\n simple_form=True)\n try:\n nibetaseries_participant_wf.run(**plugin_settings)\n except RuntimeError as e:\n if \"Workflow did not execute cleanly\" in str(e):\n print(\"Workflow did not execute cleanly\")\n else:\n raise e\n\n elif opts.analysis_level == \"group\":\n raise NotImplementedError('group analysis not currently implemented')\n\n\ndef init():\n if __name__ == \"__main__\":\n raise RuntimeError(\"NiBetaSeries/cli/run.py should not be run directly;\\n\"\n \"Please `pip install` NiBetaSeries and use the `nibs` 
command\")\n\n\ninit()\n"},"avg_line_length":{"kind":"number","value":46.6117647059,"string":"46.611765"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.5954063604,"string":"0.595406"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":4930,"string":"4,930"},"score_documentation":{"kind":"number","value":0.4147736833249201,"string":"0.414774"}}},{"rowIdx":3713,"cells":{"hexsha":{"kind":"string","value":"b9693ae1ef191dd2735a2abba99bb1bc689af26f"},"size":{"kind":"number","value":2727,"string":"2,727"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"custom_components/senz/config_flow.py"},"max_stars_repo_name":{"kind":"string","value":"astrandb/senz_hass"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6725d37fd9c6d250ac10a16e68c56908bf1c8404"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-01-15T09:55:58.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-10T10:13:35.000Z"},"max_issues_repo_path":{"kind":"string","value":"custom_components/senz/config_flow.py"},"max_issues_repo_name":{"kind":"string","value":"astrandb/senz_hass"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6725d37fd9c6d250ac10a16e68c56908bf1c8404"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-01-15T19:41:28.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T16:01:47.000Z"},"max_forks_repo_path":{"kind":"string","value":"custom_components/senz/config_flow.py"},"max_forks_repo_name":{"kind":"string","value":"astrandb/senz_hass"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6725d37fd9c6d250ac10a16e68c56908bf1c8404"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"Config flow for SENZ WiFi.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nimport voluptuous as vol\nfrom homeassistant.components import persistent_notification\nfrom homeassistant.data_entry_flow import FlowResult\nfrom homeassistant.helpers import config_entry_oauth2_flow\n\nfrom .const import DOMAIN\nfrom .pysenz import PreAPI\n\n\nclass OAuth2FlowHandler(\n config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN\n):\n \"\"\"Config flow to handle SENZ WiFi OAuth2 authentication.\"\"\"\n\n DOMAIN = DOMAIN\n\n @property\n def logger(self) -> logging.Logger:\n \"\"\"Return logger.\"\"\"\n return logging.getLogger(__name__)\n\n @property\n 
def extra_authorize_data(self) -> dict:\n \"\"\"Extra data that needs to be appended to the authorize url.\"\"\"\n return {\n \"scope\": \"restapi offline_access\",\n }\n\n async def async_step_reauth(\n self, entry: dict[str, Any] | None = None\n ) -> FlowResult:\n \"\"\"Perform reauth upon an API authentication error.\"\"\"\n\n self.entry = entry\n\n persistent_notification.async_create(\n self.hass,\n f\"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.\",\n \"Senz re-authentication\",\n \"senz_reauth\",\n )\n return await self.async_step_reauth_confirm()\n\n async def async_step_reauth_confirm(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n \"\"\"Dialog that informs the user that reauth is required.\"\"\"\n if user_input is None:\n return self.async_show_form(\n step_id=\"reauth_confirm\",\n description_placeholders={\"account\": self.entry[\"auth_implementation\"]},\n data_schema=vol.Schema({}),\n errors={},\n )\n\n persistent_notification.async_dismiss(self.hass, \"senz_reauth\")\n return await self.async_step_user()\n\n async def async_oauth_create_entry(self, data: dict) -> dict:\n \"\"\"Create an oauth config entry or update existing entry for reauth.\"\"\"\n\n pre_api = PreAPI(self.hass)\n resp = await pre_api.getAccount(data[\"token\"][\"access_token\"])\n account = resp[\"userName\"]\n\n existing_entry = await self.async_set_unique_id(account)\n if existing_entry:\n self.hass.config_entries.async_update_entry(existing_entry, data=data)\n await self.hass.config_entries.async_reload(existing_entry.entry_id)\n return self.async_abort(reason=\"reauth_successful\")\n return self.async_create_entry(title=account, data=data)\n"},"avg_line_length":{"kind":"number","value":34.5189873418,"string":"34.518987"},"max_line_length":{"kind":"number","value":184,"string":"184"},"alphanum_fraction":{"kind":"number","value":0.6710671067,"string":"0.671067"},"count_classes":{"kind":"number","value":2363,"string":"2,363"},"score_classes":{"kind":"number","value":0.8665199853318665,"string":"0.86652"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":321,"string":"321"},"score_decorators":{"kind":"number","value":0.11771177117711772,"string":"0.117712"},"count_async_functions":{"kind":"number","value":1829,"string":"1,829"},"score_async_functions":{"kind":"number","value":0.6707004033736707,"string":"0.6707"},"count_documentation":{"kind":"number","value":708,"string":"708"},"score_documentation":{"kind":"number","value":0.25962596259625964,"string":"0.259626"}}},{"rowIdx":3714,"cells":{"hexsha":{"kind":"string","value":"b9697b05a9b44247d80463465fa92118d707fb98"},"size":{"kind":"number","value":6465,"string":"6,465"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"astropy_helpers/git_helpers.py"},"max_stars_repo_name":{"kind":"string","value":"bsipocz/astropy-helpers"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4999df1cfb6a5022347b0cef9caf8a556517c625"},"max_stars_repo_licenses":{"kind":"list like","value":["PSF-2.0","BSD-2-Clause","BSD-3-Clause"],"string":"[\n \"PSF-2.0\",\n \"BSD-2-Clause\",\n 
\"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":9,"string":"9"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-12-06T13:12:33.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-10-05T12:47:15.000Z"},"max_issues_repo_path":{"kind":"string","value":"astropy_helpers/git_helpers.py"},"max_issues_repo_name":{"kind":"string","value":"bsipocz/astropy-helpers"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4999df1cfb6a5022347b0cef9caf8a556517c625"},"max_issues_repo_licenses":{"kind":"list like","value":["PSF-2.0","BSD-2-Clause","BSD-3-Clause"],"string":"[\n \"PSF-2.0\",\n \"BSD-2-Clause\",\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-11-28T17:20:27.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-12-09T18:44:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"astropy_helpers/git_helpers.py"},"max_forks_repo_name":{"kind":"string","value":"bsipocz/astropy-helpers"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4999df1cfb6a5022347b0cef9caf8a556517c625"},"max_forks_repo_licenses":{"kind":"list like","value":["PSF-2.0","BSD-2-Clause","BSD-3-Clause"],"string":"[\n \"PSF-2.0\",\n \"BSD-2-Clause\",\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-11-28T17:04:22.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-10-19T13:12:34.000Z"},"content":{"kind":"string","value":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nUtilities for retrieving revision information from a project's git repository.\n\"\"\"\n\n# Do not remove the following comment; it is used by\n# astropy_helpers.version_helpers to determine the beginning of the code in\n# this module\n\n# BEGIN\n\nimport locale\nimport os\nimport subprocess\nimport warnings\n\n\ndef _decode_stdio(stream):\n try:\n stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'\n except ValueError:\n stdio_encoding = 'utf-8'\n\n try:\n text = stream.decode(stdio_encoding)\n except UnicodeDecodeError:\n # Final fallback\n text = stream.decode('latin1')\n\n return text\n\n\ndef update_git_devstr(version, path=None):\n \"\"\"\n Updates the git revision string if and only if the path is being imported\n directly from a git working copy. This ensures that the revision number in\n the version string is accurate.\n \"\"\"\n\n try:\n # Quick way to determine if we're in git or not - returns '' if not\n devstr = get_git_devstr(sha=True, show_warning=False, path=path)\n except OSError:\n return version\n\n if not devstr:\n # Probably not in git so just pass silently\n return version\n\n if 'dev' in version: # update to the current git revision\n version_base = version.split('.dev', 1)[0]\n devstr = get_git_devstr(sha=False, show_warning=False, path=path)\n\n return version_base + '.dev' + devstr\n else:\n # otherwise it's already the true/release version\n return version\n\n\ndef get_git_devstr(sha=False, show_warning=True, path=None):\n \"\"\"\n Determines the number of revisions in this repository.\n\n Parameters\n ----------\n sha : bool\n If True, the full SHA1 hash will be returned. 
Otherwise, the total\n count of commits in the repository will be used as a \"revision\n number\".\n\n show_warning : bool\n If True, issue a warning if git returns an error code, otherwise errors\n pass silently.\n\n path : str or None\n If a string, specifies the directory to look in to find the git\n repository. If `None`, the current working directory is used, and must\n be the root of the git repository.\n If given a filename it uses the directory containing that file.\n\n Returns\n -------\n devversion : str\n Either a string with the revision number (if `sha` is False), the\n SHA1 hash of the current commit (if `sha` is True), or an empty string\n if git version info could not be identified.\n\n \"\"\"\n\n if path is None:\n path = os.getcwd()\n\n if not os.path.isdir(path):\n path = os.path.abspath(os.path.dirname(path))\n\n if sha:\n # Faster for getting just the hash of HEAD\n cmd = ['rev-parse', 'HEAD']\n else:\n cmd = ['rev-list', '--count', 'HEAD']\n\n def run_git(cmd):\n try:\n p = subprocess.Popen(['git'] + cmd, cwd=path,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n stdout, stderr = p.communicate()\n except OSError as e:\n if show_warning:\n warnings.warn('Error running git: ' + str(e))\n return (None, b'', b'')\n\n if p.returncode == 128:\n if show_warning:\n warnings.warn('No git repository present at {0!r}! Using '\n 'default dev version.'.format(path))\n return (p.returncode, b'', b'')\n if p.returncode == 129:\n if show_warning:\n warnings.warn('Your git looks old (does it support {0}?); '\n 'consider upgrading to v1.7.2 or '\n 'later.'.format(cmd[0]))\n return (p.returncode, stdout, stderr)\n elif p.returncode != 0:\n if show_warning:\n warnings.warn('Git failed while determining revision '\n 'count: {0}'.format(_decode_stdio(stderr)))\n return (p.returncode, stdout, stderr)\n\n return p.returncode, stdout, stderr\n\n returncode, stdout, stderr = run_git(cmd)\n\n if not sha and returncode == 128:\n # git returns 128 if the command is not run from within a git\n # repository tree. In this case, a warning is produced above but we\n # return the default dev version of '0'.\n return '0'\n elif not sha and returncode == 129:\n # git returns 129 if a command option failed to parse; in\n # particular this could happen in git versions older than 1.7.2\n # where the --count option is not supported\n # Also use --abbrev-commit and --abbrev=0 to display the minimum\n # number of characters needed per-commit (rather than the full hash)\n cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']\n returncode, stdout, stderr = run_git(cmd)\n # Fall back on the old method of getting all revisions and counting\n # the lines\n if returncode == 0:\n return str(stdout.count(b'\\n'))\n else:\n return ''\n elif sha:\n return _decode_stdio(stdout)[:40]\n else:\n return _decode_stdio(stdout).strip()\n\n\n# This function is tested but it is only ever executed within a subprocess when\n# creating a fake package, so it doesn't get picked up by coverage metrics.\ndef _get_repo_path(pathname, levels=None): # pragma: no cover\n \"\"\"\n Given a file or directory name, determine the root of the git repository\n this path is under. 
If given, this won't look any higher than ``levels``\n (that is, if ``levels=0`` then the given path must be the root of the git\n repository and is returned if so.\n\n Returns `None` if the given path could not be determined to belong to a git\n repo.\n \"\"\"\n\n if os.path.isfile(pathname):\n current_dir = os.path.abspath(os.path.dirname(pathname))\n elif os.path.isdir(pathname):\n current_dir = os.path.abspath(pathname)\n else:\n return None\n\n current_level = 0\n\n while levels is None or current_level <= levels:\n if os.path.exists(os.path.join(current_dir, '.git')):\n return current_dir\n\n current_level += 1\n if current_dir == os.path.dirname(current_dir):\n break\n\n current_dir = os.path.dirname(current_dir)\n\n return None\n"},"avg_line_length":{"kind":"number","value":33.324742268,"string":"33.324742"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.6120649652,"string":"0.612065"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":3176,"string":"3,176"},"score_documentation":{"kind":"number","value":0.49126063418406807,"string":"0.491261"}}},{"rowIdx":3715,"cells":{"hexsha":{"kind":"string","value":"b96b280416f0d557826ffa670a7914f2d45e5fc5"},"size":{"kind":"number","value":526,"string":"526"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/sot_talos_balance/test/test_feet_admittance.py"},"max_stars_repo_name":{"kind":"string","value":"imaroger/sot-talos-balance"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5e56700b4e105273ecf6feb3474789beac469a77"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/sot_talos_balance/test/test_feet_admittance.py"},"max_issues_repo_name":{"kind":"string","value":"imaroger/sot-talos-balance"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5e56700b4e105273ecf6feb3474789beac469a77"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/sot_talos_balance/test/test_feet_admittance.py"},"max_forks_repo_name":{"kind":"string","value":"imaroger/sot-talos-balance"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5e56700b4e105273ecf6feb3474789beac469a77"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"'''Test feet admittance control'''\nfrom 
sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient\n\ntry:\n # Python 2\n input = raw_input # noqa\nexcept NameError:\n pass\n\nrun_test('appli_feet_admittance.py')\n\nrun_ft_calibration('robot.ftc')\ninput(\"Wait before running the test\")\n\nprint('Set saturation value')\nrunCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')\n\ninput(\"Wait before dumping the data\")\n\nrunCommandClient('dump_tracer(robot.tracer)')\n"},"avg_line_length":{"kind":"number","value":25.0476190476,"string":"25.047619"},"max_line_length":{"kind":"number","value":97,"string":"97"},"alphanum_fraction":{"kind":"number","value":0.752851711,"string":"0.752852"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":266,"string":"266"},"score_documentation":{"kind":"number","value":0.5057034220532319,"string":"0.505703"}}},{"rowIdx":3716,"cells":{"hexsha":{"kind":"string","value":"b96bb8e94e8bbfe556cc0ad3a314b6991573aa47"},"size":{"kind":"number","value":544,"string":"544"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tests/test_db.py"},"max_stars_repo_name":{"kind":"string","value":"davebryson/py-tendermint"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ec6a38a54950d9841759b0f2ed93659b58948a03"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":24,"string":"24"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-08-18T20:36:27.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-03-27T08:55:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"tests/test_db.py"},"max_issues_repo_name":{"kind":"string","value":"davebryson/py-tendermint"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ec6a38a54950d9841759b0f2ed93659b58948a03"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":6,"string":"6"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2017-10-14T05:50:34.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-06-03T08:39:49.000Z"},"max_forks_repo_path":{"kind":"string","value":"tests/test_db.py"},"max_forks_repo_name":{"kind":"string","value":"davebryson/py-tendermint"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ec6a38a54950d9841759b0f2ed93659b58948a03"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":5,"string":"5"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-01-09T11:07:06.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-06-02T14:34:34.000Z"},"content":{"kind":"string","value":"import os\n\nfrom tendermint.db import VanillaDB\nfrom tendermint.utils import home_dir\n\ndef test_database():\n dbfile = 
home_dir('temp', 'test.db')\n db = VanillaDB(dbfile)\n\n db.set(b'dave',b'one')\n result = db.get(b'dave')\n assert(b'one' == result)\n\n db.set(b'dave',b'two')\n result = db.get(b'dave')\n assert(b'two' == result)\n\n assert(None == db.get(b'doesntexist'))\n assert(db.exists(b'dave'))\n\n db.delete(b'dave')\n assert(db.exists(b'dave') == False)\n\n if os.path.exists(dbfile):\n os.remove(dbfile)\n"},"avg_line_length":{"kind":"number","value":20.9230769231,"string":"20.923077"},"max_line_length":{"kind":"number","value":42,"string":"42"},"alphanum_fraction":{"kind":"number","value":0.6213235294,"string":"0.621324"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":102,"string":"102"},"score_documentation":{"kind":"number","value":0.1875,"string":"0.1875"}}},{"rowIdx":3717,"cells":{"hexsha":{"kind":"string","value":"b96d766a7c5eab27eb3785b1277b6beccda7c9ed"},"size":{"kind":"number","value":1446,"string":"1,446"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"auth/tests/test_views.py"},"max_stars_repo_name":{"kind":"string","value":"asb29/Redundant"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ee816fd41f9217610bd11f757cf9175288723c70"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"auth/tests/test_views.py"},"max_issues_repo_name":{"kind":"string","value":"asb29/Redundant"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ee816fd41f9217610bd11f757cf9175288723c70"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"auth/tests/test_views.py"},"max_forks_repo_name":{"kind":"string","value":"asb29/Redundant"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ee816fd41f9217610bd11f757cf9175288723c70"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.test import TestCase\nfrom django.test import Client\n\n\nclass RegisterTestCase(TestCase):\n def test_register(self):\n c = Client()\n\n # on success redirects to /\n response = c.post('/accounts/register/', {\n 'username': 'asdas',\n 'password1': 'asdasdasd12',\n 'password2': 'asdasdasd12'\n })\n self.assertRedirects(response, '/')\n\n # passwords don't match\n response = c.post('/accounts/register/', {\n 'username': 'asdasdasd1',\n 'password1': 'asdasdasd1',\n 'password2': 'asdasdasd2'\n })\n self.assertEquals(response.status_code, 200)\n\n # username is empty\n 
response = c.post('/accounts/register/', {\n 'username': '',\n 'password1': 'asdasdasd12',\n 'password2': 'asdasdasd12'\n })\n self.assertEquals(response.status_code, 200)\n\n # no password\n response = c.post('/accounts/register/', {\n 'username': 'asdasdasd',\n 'password1': '',\n 'password2': ''\n })\n self.assertEquals(response.status_code, 200)\n\n # username and password are similar\n response = c.post('/accounts/register/', {\n 'username': 'asdasdasd0',\n 'password1': 'asdasdasd1',\n 'password2': 'asdasdasd1'\n })\n self.assertEquals(response.status_code, 200)\n"},"avg_line_length":{"kind":"number","value":30.125,"string":"30.125"},"max_line_length":{"kind":"number","value":52,"string":"52"},"alphanum_fraction":{"kind":"number","value":0.5352697095,"string":"0.53527"},"count_classes":{"kind":"number","value":1379,"string":"1,379"},"score_classes":{"kind":"number","value":0.9536652835408023,"string":"0.953665"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":533,"string":"533"},"score_documentation":{"kind":"number","value":0.3686030428769018,"string":"0.368603"}}},{"rowIdx":3718,"cells":{"hexsha":{"kind":"string","value":"b96f6c5854c1e905c9ad5d8f08d016972c710a1f"},"size":{"kind":"number","value":4134,"string":"4,134"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"projects/OneNet/onenet/head.py"},"max_stars_repo_name":{"kind":"string","value":"iFighting/OneNet"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6e33b46d2aa13131262833c75f0fd1c3d224ef03"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-06-16T01:31:17.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-25T15:27:28.000Z"},"max_issues_repo_path":{"kind":"string","value":"projects/OneNet/onenet/head.py"},"max_issues_repo_name":{"kind":"string","value":"xieenze/OneNet"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3b06ad6832727cef4c0262389de4cdbb2a666197"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"projects/OneNet/onenet/head.py"},"max_forks_repo_name":{"kind":"string","value":"xieenze/OneNet"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3b06ad6832727cef4c0262389de4cdbb2a666197"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-02-04T06:38:42.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-02-04T06:38:42.000Z"},"content":{"kind":"string","value":"#\n# Modified by Peize Sun\n# Contact: sunpeize@foxmail.com\n#\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\"\"\"\nOneNet Transformer class.\n\nCopy-paste from torch.nn.Transformer with modifications:\n * positional encodings are passed in MHattention\n * extra LN at the end of encoder is removed\n * decoder returns a stack of activations from all decoding layers\n\"\"\"\nimport copy\nimport math\nfrom typing import Optional, List\n\nimport torch\nfrom torch import nn, Tensor\nimport torch.nn.functional as F\n\nfrom detectron2.modeling.poolers import ROIPooler, cat\nfrom detectron2.structures import Boxes\nfrom .deconv import CenternetDeconv\n\nclass Head(nn.Module):\n\n def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):\n super().__init__()\n \n # Build heads.\n num_classes = cfg.MODEL.OneNet.NUM_CLASSES\n d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]\n activation = cfg.MODEL.OneNet.ACTIVATION\n\n self.deconv = CenternetDeconv(cfg, backbone_shape)\n \n self.num_classes = num_classes\n self.d_model = d_model\n self.num_classes = num_classes\n self.activation = _get_activation_fn(activation)\n\n self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)\n self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)\n self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1) \n \n # Init parameters.\n prior_prob = cfg.MODEL.OneNet.PRIOR_PROB\n self.bias_value = -math.log((1 - prior_prob) / prior_prob)\n self._reset_parameters()\n\n def _reset_parameters(self):\n # init all parameters.\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n # initialize the bias for focal loss.\n if p.shape[-1] == self.num_classes:\n nn.init.constant_(p, self.bias_value)\n \n def forward(self, features_list):\n \n features = self.deconv(features_list)\n locations = self.locations(features)[None] \n\n feat = self.activation(self.feat1(features))\n \n class_logits = self.cls_score(feat)\n pred_ltrb = F.relu(self.ltrb_pred(feat))\n pred_bboxes = self.apply_ltrb(locations, pred_ltrb)\n\n return class_logits, pred_bboxes\n \n def apply_ltrb(self, locations, pred_ltrb): \n \"\"\"\n :param locations: (1, 2, H, W)\n :param pred_ltrb: (N, 4, H, W) \n \"\"\"\n\n pred_boxes = torch.zeros_like(pred_ltrb)\n pred_boxes[:,0,:,:] = locations[:,0,:,:] - pred_ltrb[:,0,:,:] # x1\n pred_boxes[:,1,:,:] = locations[:,1,:,:] - pred_ltrb[:,1,:,:] # y1\n pred_boxes[:,2,:,:] = locations[:,0,:,:] + pred_ltrb[:,2,:,:] # x2\n pred_boxes[:,3,:,:] = locations[:,1,:,:] + pred_ltrb[:,3,:,:] # y2\n\n return pred_boxes \n \n @torch.no_grad()\n def locations(self, features, stride=4):\n \"\"\"\n Arguments:\n features: (N, C, H, W)\n Return:\n locations: (2, H, W)\n \"\"\"\n\n h, w = features.size()[-2:]\n device = features.device\n \n shifts_x = torch.arange(\n 0, w * stride, step=stride,\n dtype=torch.float32, device=device\n )\n shifts_y = torch.arange(\n 0, h * stride, step=stride,\n dtype=torch.float32, device=device\n )\n shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2 \n \n locations = locations.reshape(h, w, 2).permute(2, 0, 1)\n \n return locations\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not 
{activation}.\")\n"},"avg_line_length":{"kind":"number","value":32.296875,"string":"32.296875"},"max_line_length":{"kind":"number","value":94,"string":"94"},"alphanum_fraction":{"kind":"number","value":0.6008708273,"string":"0.600871"},"count_classes":{"kind":"number","value":3152,"string":"3,152"},"score_classes":{"kind":"number","value":0.7624576681180455,"string":"0.762458"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":859,"string":"859"},"score_decorators":{"kind":"number","value":0.2077890662796323,"string":"0.207789"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":830,"string":"830"},"score_documentation":{"kind":"number","value":0.200774068698597,"string":"0.200774"}}},{"rowIdx":3719,"cells":{"hexsha":{"kind":"string","value":"b96fae5c29fd446ea7199733a629bbe0f6190046"},"size":{"kind":"number","value":49876,"string":"49,876"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"mermaid/utils.py"},"max_stars_repo_name":{"kind":"string","value":"HastingsGreer/mermaid"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bd13c5fc427eb8cd9054973a8eaaeb302078182d"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":120,"string":"120"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-10-29T23:53:02.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T02:59:58.000Z"},"max_issues_repo_path":{"kind":"string","value":"mermaid/utils.py"},"max_issues_repo_name":{"kind":"string","value":"AlexanderChristgau/mermaid"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ba07883cc3cb5982e4655048a434b4495cb49c6d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":10,"string":"10"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-11-05T09:28:35.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-01-09T19:12:51.000Z"},"max_forks_repo_path":{"kind":"string","value":"mermaid/utils.py"},"max_forks_repo_name":{"kind":"string","value":"AlexanderChristgau/mermaid"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ba07883cc3cb5982e4655048a434b4495cb49c6d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":19,"string":"19"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-11-10T13:34:39.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-13T20:30:10.000Z"},"content":{"kind":"string","value":"\"\"\"Various utility functions.\n\n.. todo::\n Reorganize this package in a more meaningful way.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n# from builtins import str\n# from builtins import range\nimport torch\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Variable\nfrom .libraries.modules.stn_nd import STN_ND_BCXYZ\nfrom .data_wrapper import AdaptVal\nfrom .data_wrapper import MyTensor\nfrom . 
import smoother_factory as sf\nfrom .data_wrapper import USE_CUDA\n\nimport numpy as np\nfrom . import finite_differences as fd\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom . import module_parameters as pars\n\nfrom .spline_interpolation import SplineInterpolation_ND_BCXYZ\n\nimport os\n\ntry:\n from .libraries.functions.nn_interpolation import get_nn_interpolation\nexcept ImportError:\n print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '\n 'Some functionality may not be available.')\n\n\ndef my_hasnan(x):\n \"\"\"Check if any input elements are NaNs.\n\n :param x: numpy array\n :return: True if NaNs are present, False else\n \"\"\"\n return (x != x).any()\n\n\ndef create_symlink_with_correct_ext(sf, tf):\n abs_s = os.path.abspath(sf)\n ext_s = os.path.splitext(abs_s)[1]\n\n abs_t = os.path.abspath(tf)\n root_t,ext_t = os.path.splitext(abs_t)\n\n abs_t_with_right_ext = root_t + ext_s\n\n if os.path.isfile(abs_t_with_right_ext):\n if os.path.samefile(abs_s,abs_t_with_right_ext):\n # nothing to do here, these are already the same file\n return\n else:\n os.remove(abs_t_with_right_ext)\n\n # now we can do the symlink\n os.symlink(abs_s,abs_t_with_right_ext)\n\n\ndef combine_dict(d1,d2):\n \"\"\"Creates a dictionary which has entries from both of them.\n\n :param d1: dictionary 1\n :param d2: dictionary 2\n :return: resulting dictionary\n \"\"\"\n d = d1.copy()\n d.update(d2)\n return d\n\n\ndef get_parameter_list_from_parameter_dict(pd):\n \"\"\"Takes a dictionary which contains key value pairs for model parameters and converts it into a list of\n parameters that can be used as an input to an optimizer.\n\n :param pd: parameter dictionary\n :return: list of parameters\n \"\"\"\n pl = []\n for key in pd:\n pl.append(pd[key])\n return pl\n\n\ndef get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):\n \"\"\"Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys\n based on memory id.\n\n :param pd: parameter dictionary\n :return: tuple of (parameter_list, name_dictionary)\n \"\"\"\n\n par_to_name_dict = dict()\n pl = []\n for key in pd:\n pl.append(pd[key])\n par_to_name_dict[pd[key]] = key\n return pl, par_to_name_dict\n\n\ndef remove_infs_from_variable(v):\n # 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor\n # 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor\n # 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor\n\n # todo: maybe find a cleaner way of handling this\n # this is to make sure that subsequent sums work (hence will be smaller than it could be,\n # but values of this size should not occur in practice anyway\n sz = v.size()\n reduction_factor = np.prod(np.array(sz))\n condition = True\n\n if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32:\n return torch.clamp(v,\n min=(np.asscalar(np.finfo('float32').min))/reduction_factor,\n max=(np.asscalar(np.finfo('float32').max))/reduction_factor)\n elif v.data.dtype == torch.DoubleTensor or type(v.data) == torch.cuda.DoubleTensor:\n return torch.clamp(v,\n min=(np.asscalar(np.finfo('float64').min))/reduction_factor,\n max=(np.asscalar(np.finfo('float64').max))/reduction_factor)\n elif v.data.dtype == torch.HalfTensor or type(v.data) == torch.cuda.HalfTensor:\n return torch.clamp(v,\n min=(np.asscalar(np.finfo('float16').min))/reduction_factor,\n max=(np.asscalar(np.finfo('float16').max))/reduction_factor)\n else:\n raise ValueError('Unknown data 
type: ' + str( type(v.data)))\n\n\ndef lift_to_dimension(A, dim):\n \"\"\"Creates a view of A of dimension dim (by adding dummy dimensions if necessary).\n\n :param A: numpy array\n :param dim: desired dimension of view\n :return: returns view of A of appropriate dimension\n \"\"\"\n\n current_dim = len(A.shape)\n if current_dim > dim:\n raise ValueError('Can only add dimensions, but not remove them')\n\n if current_dim == dim:\n return A\n else:\n return A.reshape([1]*(dim-current_dim)+list(A.shape))\n\n\ndef get_dim_of_affine_transform(Ab):\n \"\"\"Returns the number of dimensions corresponding to an affine transformation of the\n form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply\n [a1;a2;a3;b], i.e., all columns stacked on top of each other.\n\n :param Ab: parameter vector\n :return: dimensionality of transform (1,2,or 3)\n \"\"\"\n nr = len(Ab)\n if nr==2:\n return 1\n elif nr==6:\n return 2\n elif nr==12:\n return 3\n else:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n\ndef set_affine_transform_to_identity(Ab):\n \"\"\"Sets the affine transformation as given by the column vector Ab to the identity transform.\n\n :param Ab: Affine parameter vector (will be overwritten with the identity transform)\n :return:\n \"\"\"\n dim = get_dim_of_affine_transform(Ab)\n\n if dim==1:\n Ab.zero_()\n Ab[0]=1.\n elif dim==2:\n Ab.zero_()\n Ab[0]=1.\n Ab[3]=1.\n elif dim==3:\n Ab.zero_()\n Ab[0]=1.\n Ab[4]=1.\n Ab[8]=1.\n else:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n\ndef set_affine_transform_to_identity_multiN(Ab):\n \"\"\"Set the affine transforms to the identity (in the case of arbitrary batch size).\n\n :param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.\n :return:\n \"\"\"\n sz = Ab.size()\n nr_of_images = sz[0]\n for nrI in range(nr_of_images):\n set_affine_transform_to_identity(Ab[nrI, :])\n\n\ndef get_inverse_affine_param(Ab):\n \"\"\"Computes inverse of affine transformation.\n\n Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb\n\n :param Ab: B x pars (batch size x param. vector)\n :return: Inverse of affine parameters\n \"\"\"\n\n dim =0\n if Ab.shape[1] == 2:\n dim = 1\n elif Ab.shape[1] == 6:\n dim = 2\n elif Ab.shape[1] == 12:\n dim = 3\n\n if dim not in [1, 2, 3]:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2)\n Ab_inv = torch.zeros_like(Ab)\n\n for n in range(Ab.shape[0]):\n tm_inv = torch.inverse(Ab[n, :, :dim])\n Ab_inv[n, :, :dim] = tm_inv\n Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim])\n\n inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1)\n return inv_affine_param\n\n\ndef update_affine_param(Ab, Cd):\n \"\"\"Update affine parameters.\n\n Formally: C(Ax+b)+d = CAx+Cb+d\n\n :param Ab: B x pars (batch size x param. 
vector)\n :return: Updated affine parameters\n \"\"\"\n\n dim = 0\n if Ab.shape[1]==2:\n dim = 1\n elif Ab.shape[1]==6:\n dim = 2\n elif Ab.shape[1]==12:\n dim = 3\n\n if dim not in [1, 2, 3]:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2)\n Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2)\n\n updated_param = torch.zeros_like(Ab)\n for n in range(Ab.shape[0]):\n tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim])\n updated_param[n,:,:dim] = tm_param\n updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim]\n updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1)\n return updated_param\n\n\ndef apply_affine_transform_to_map(Ab,phi):\n \"\"\"Applies an affine transform to a map.\n\n :param Ab: affine transform parameter column vector\n :param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)\n :return: returns transformed map\n \"\"\"\n sz = phi.size()\n\n dim = len(sz) - 1\n if dim not in [1,2,3]:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n phiR = MyTensor(sz).zero_().type_as(phi)\n\n if dim == 1:\n phiR = phi * Ab[0] + Ab[1]\n elif dim == 2:\n phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1\n phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2\n elif dim == 3:\n phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]\n phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]\n phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]\n else:\n raise ValueError('Only supports dimensions 1, 2, and 3.')\n\n return phiR\n\n\ndef apply_affine_transform_to_map_multiNC(Ab,phi):\n \"\"\"Applies an affine transform to maps (for arbitrary batch size).\n\n :param Ab: affine transform parameter column vectors (batch size x param. vector)\n :param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)\n :return: returns transformed maps\n \"\"\"\n sz = phi.size()\n dim = get_dim_of_affine_transform(Ab[0,:])\n nr_of_images = Ab.size()[0]\n\n if nr_of_images != sz[0]:\n raise ValueError('Incompatible number of affine transforms')\n if dim != len(sz)-2:\n raise ValueError('Incompatible number of affine transforms')\n\n phiR = MyTensor(sz).zero_().type_as(phi)\n for nrI in range(nr_of_images):\n phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])\n\n return phiR\n\n\ndef compute_normalized_gaussian(X, mu, sig):\n \"\"\"Computes a normalized Gaussian.\n\n :param X: map with coordinates at which to evaluate\n :param mu: array indicating the mean\n :param sig: array indicating the standard deviations for the different dimensions\n :return: Normalized Gaussian evaluated at coordinates in X\n\n Example::\n\n >>> mu, sig = [1,1], [1,1]\n >>> X = [0,0]\n >>> print(compute_normalized_gaussian(X, mu, sig)\n\n \"\"\"\n dim = len(mu)\n if dim == 1:\n g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.)))\n g = g/g.sum()\n return g\n elif dim == 2:\n g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.))\n - np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)))\n g = g/g.sum()\n return g\n elif dim == 3:\n g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.))\n -np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))\n -np.power(X[2,:, :, :] - mu[2], 2.) 
/ (2 * np.power(sig[2], 2.)))\n g = g / g.sum()\n return g\n else:\n raise ValueError('Can only compute Gaussians in dimensions 1-3')\n\n\ndef _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):\n\n if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n raise ValueError('Currently only orders 0 to 9 are supported')\n\n if spline_order == 0:\n stn = STN_ND_BCXYZ(spacing,\n zero_boundary,\n use_bilinear=False,\n use_01_input=use_01_input)\n elif spline_order == 1:\n stn = STN_ND_BCXYZ(spacing,\n zero_boundary,\n use_bilinear=True,\n use_01_input=use_01_input)\n else:\n stn = SplineInterpolation_ND_BCXYZ(spacing,\n spline_order)\n\n I1_warped = stn(I0, phi)\n\n return I1_warped\n\n\ndef _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):\n\n if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n raise ValueError('Currently only orders 0 to 9 are supported')\n\n if spline_order == 0:\n stn = STN_ND_BCXYZ(spacing,\n zero_boundary,\n use_bilinear=False,\n use_01_input=use_01_input)\n elif spline_order == 1:\n stn = STN_ND_BCXYZ(spacing,\n zero_boundary,\n use_bilinear=True,\n use_01_input=use_01_input)\n else:\n stn = SplineInterpolation_ND_BCXYZ(spacing,\n spline_order)\n\n I1_warped = stn(I0, phi)\n\n return I1_warped\n\n\ndef _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):\n\n if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:\n raise ValueError('Currently only orders 0 to 9 are supported')\n\n if spline_order == 0:\n # return get_warped_label_map(I0,phi,spacing)\n stn = STN_ND_BCXYZ(spacing,\n zero_boundary,\n use_bilinear=False,\n use_01_input=use_01_input)\n elif spline_order == 1:\n stn = STN_ND_BCXYZ(spacing,zero_boundary,\n use_bilinear=True,\n use_01_input=use_01_input)\n else:\n stn = SplineInterpolation_ND_BCXYZ(spacing,\n spline_order)\n\n I1_warped = stn(I0, phi)\n\n return I1_warped\n\n\ndef compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):\n \"\"\"Warps image.\n\n :param I0: image to warp, image size XxYxZ\n :param phi: map for the warping, size dimxXxYxZ\n :param spacing: image spacing [dx,dy,dz]\n :return: returns the warped image of size XxYxZ\n \"\"\"\n\n # implements this by creating a different view (effectively adding dimensions)\n Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))),\n phi.view(torch.Size([1] + list(phi.size()))),\n spacing,\n spline_order,\n zero_boundary,\n use_01_input)\n return Iw.view(I0.size())\n\n\ndef compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):\n \"\"\"Warps image.\n\n :param I0: image to warp, image size BxCxXxYxZ\n :param phi: map for the warping, size BxdimxXxYxZ\n :param spacing: image spacing [dx,dy,dz]\n :return: returns the warped image of size BxCxXxYxZ\n \"\"\"\n\n dim = I0.dim()-2\n if dim == 1:\n return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)\n elif dim == 2:\n return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)\n elif dim == 3:\n return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)\n else:\n raise ValueError('Images can only be warped in dimensions 1 to 3')\n\n\ndef _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):\n \"\"\"Computes spacing for the 
low-res parametrization from image spacing.\n\n :param spacing: image spacing\n :param sz: size of image\n :param lowResSize: size of low re parameterization\n :return: returns spacing of low res parameterization\n \"\"\"\n #todo: check that this is the correct way of doing it\n return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1)\n\n\ndef _get_low_res_size_from_size(sz, factor):\n \"\"\"Returns the corresponding low-res size from a (high-res) sz.\n\n :param sz: size (high-res)\n :param factor: low-res factor (needs to be <1)\n :return: low res size\n \"\"\"\n if (factor is None) or (factor >= 1):\n print('WARNING: Could not compute low_res_size as factor was ' + str(factor))\n return np.array(sz)\n else:\n low_res_sz = np.array(sz)\n low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')\n\n return low_res_sz\n\n\ndef _compute_low_res_image(I, spacing, low_res_size, spline_order):\n import mermaid.image_sampling as IS\n sampler = IS.ResampleImage()\n low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order)\n return low_res_image\n\n\ndef individual_parameters_to_model_parameters(ind_pars):\n model_pars = dict()\n\n if type(ind_pars) == type(dict()):\n # should already be in the right format\n model_pars = ind_pars\n else:\n # if ind_pars is not a dictionary assume that they come from the optimizer\n # (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'\n for par in ind_pars:\n model_pars[par['name']] = par['model_params']\n\n return model_pars\n\n\ndef compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):\n \"\"\"Computes the vector momentum from the scalar momentum: :math:`m=\\\\lambda\\\\nabla I`.\n\n :param lam: scalar momentum, BxCxXxYxZ\n :param I: image, BxCxXxYxZ\n :param sz: size of image\n :param spacing: spacing of image\n :return: returns the vector momentum\n \"\"\"\n nrOfI = sz[0] # number of images\n m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI) # attention that the second dimension here is image dim, not nrOfC\n nrOfC = sz[1]\n for c in range(nrOfC): # loop over all the channels and add the results\n m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],\n I[:, c, ...],\n nrOfI,\n sz[2::],\n spacing)\n return m\n\n\ndef compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):\n \"\"\"Computes the vector momentum from the scalar momentum: :math:`m=\\\\lambda\\\\nabla I`.\n\n :param lam: scalar momentum, batchxXxYxZ\n :param I: image, batchXxYxZ\n :param sz: size of image\n :param spacing: spacing of image\n :return: returns the vector momentum\n \"\"\"\n fdt = fd.FD_torch(spacing)\n dim = len(sz)\n m = create_ND_vector_field_variable_multiN(sz, nrOfI)\n if dim == 1:\n m[:, 0, :] = fdt.dXc(I)*lam\n elif dim == 2:\n m[:, 0, :, :] = fdt.dXc(I)*lam\n m[:, 1, :, :] = fdt.dYc(I)*lam\n elif dim == 3:\n m[:, 0, :, :, :] = fdt.dXc(I)*lam\n m[:, 1, :, :, :] = fdt.dYc(I)*lam\n m[:, 2, :, :, :] = fdt.dZc(I)*lam\n else:\n raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')\n return m\n\n\ndef create_ND_vector_field_variable_multiN(sz, nr_of_images=1):\n \"\"\"\n Create vector field torch Variable of given size\n\n :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)\n :param nrOfI: number of images\n :return: returns vector field of size nrOfIxdimxXxYxZ\n \"\"\"\n dim = len(sz)\n csz = np.array(sz) # just to make sure it is a numpy array\n csz 
= np.array([nr_of_images, dim]+list(csz))\n return MyTensor(*(csz.tolist())).normal_(0., 1e-7)\n\n\ndef create_ND_vector_field_variable(sz):\n \"\"\"Create vector field torch Variable of given size.\n\n :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)\n :return: returns vector field of size dimxXxYxZ\n \"\"\"\n dim = len(sz)\n csz = np.array(sz) # just to make sure it is a numpy array\n csz = np.array([dim]+list(csz))\n return MyTensor(*(csz.tolist())).normal_(0.,1e-7)\n\n\ndef create_vector_parameter(nr_of_elements):\n \"\"\"Creates a vector parameters with a specified number of elements.\n\n :param nr_of_elements: number of vector elements\n :return: returns the parameter vector\n \"\"\"\n return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7))\n\n\ndef create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False):\n \"\"\"Create vector field torch Parameter of given size.\n\n :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)\n :param nrOfI: number of images\n :return: returns vector field of size nrOfIxdimxXxYxZ\n \"\"\"\n dim = len(sz)\n csz = np.array(sz) # just to make sure it is a numpy array\n csz = np.array([nrOfI, dim]+list(csz))\n if get_field_from_external_network:\n tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7)\n tmp.requires_grad = True\n else:\n tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))\n return tmp\n\n\ndef create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False):\n \"\"\"\n Create vector field torch Parameter of given size\n\n :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)\n :param nrOfI: number of images\n :return: returns vector field of size nrOfIxdimxXxYxZ\n \"\"\"\n nr_of_mg_weights = len(gaussian_std_weights)\n csz = np.array(sz) # just to make sure it is a numpy array\n csz = np.array([nrOfI,nr_of_mg_weights]+list(csz))\n weights = torch.empty(*csz)\n # set the default\n if sched =='w_K_w':\n gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]\n for g in range(nr_of_mg_weights):\n weights[:, g, ...] 
= gaussian_std_weights[g]\n tmp = AdaptVal(weights)\n\n if get_preweight_from_network:\n tmp.requires_grad = True\n else:\n tmp = Parameter(tmp)\n return tmp\n\ndef create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):\n \"\"\"\n Create vector field torch Parameter of given size\n\n :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)\n :param nrOfI: number of images\n :param nrOfC: number of channels\n :return: returns vector field of size nrOfIxnrOfCxXxYxZ\n \"\"\"\n\n csz = np.array(sz) # just to make sure it is a numpy array\n csz = np.array([nrOfI,nrOfC]+list(csz))\n return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))\n\ndef centered_identity_map_multiN(sz, spacing, dtype='float32'):\n \"\"\"\n Create a centered identity map (shifted so it is centered around 0)\n\n :param sz: size of an image in BxCxXxYxZ format\n :param spacing: list with spacing information [sx,sy,sz]\n :param dtype: numpy data-type ('float32', 'float64', ...)\n :return: returns the identity map\n \"\"\"\n dim = len(sz) - 2\n nrOfI = sz[0]\n\n if dim == 1:\n id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype)\n elif dim == 2:\n id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype)\n elif dim == 3:\n id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype)\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n\n for n in range(nrOfI):\n id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype)\n\n return id\n\n\ndef identity_map_multiN(sz,spacing,dtype='float32'):\n \"\"\"\n Create an identity map\n\n :param sz: size of an image in BxCxXxYxZ format\n :param spacing: list with spacing information [sx,sy,sz]\n :param dtype: numpy data-type ('float32', 'float64', ...)\n :return: returns the identity map\n \"\"\"\n dim = len(sz)-2\n nrOfI = int(sz[0])\n\n if dim == 1:\n id = np.zeros([nrOfI,1,sz[2]],dtype=dtype)\n elif dim == 2:\n id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype)\n elif dim == 3:\n id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype)\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n\n for n in range(nrOfI):\n id[n,...] 
= identity_map(sz[2::],spacing,dtype=dtype)\n\n return id\n\n\ndef centered_identity_map(sz, spacing, dtype='float32'):\n \"\"\"\n Returns a centered identity map (with 0 in the middle) if the sz is odd\n Otherwise shifts everything by 0.5*spacing\n\n :param sz: just the spatial dimensions, i.e., XxYxZ\n :param spacing: list with spacing information [sx,sy,sz]\n :param dtype: numpy data-type ('float32', 'float64', ...)\n :return: returns the identity map of dimension dimxXxYxZ\n \"\"\"\n dim = len(sz)\n if dim == 1:\n id = np.mgrid[0:sz[0]]\n elif dim == 2:\n id = np.mgrid[0:sz[0], 0:sz[1]]\n elif dim == 3:\n id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n\n # now get it into range [0,(sz-1)*spacing]^d\n id = np.array(id.astype(dtype))\n if dim == 1:\n id = id.reshape(1, sz[0]) # add a dummy first index\n\n for d in range(dim):\n id[d] *= spacing[d]\n if sz[d]%2==0:\n #even\n id[d] -= spacing[d]*(sz[d]//2)\n else:\n #odd\n id[d] -= spacing[d]*((sz[d]+1)//2)\n\n\n # and now store it in a dim+1 array\n if dim == 1:\n idnp = np.zeros([1, sz[0]], dtype=dtype)\n idnp[0, :] = id[0]\n elif dim == 2:\n idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)\n idnp[0, :, :] = id[0]\n idnp[1, :, :] = id[1]\n elif dim == 3:\n idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)\n idnp[0, :, :, :] = id[0]\n idnp[1, :, :, :] = id[1]\n idnp[2, :, :, :] = id[2]\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')\n\n return idnp\n\n# \n# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):\n# \"\"\"\n# Returns a centered identity map (with 0 in the middle) if the sz is odd\n# Otherwise shifts everything by 0.5*spacing\n# \n# :param sz: just the spatial dimensions, i.e., XxYxZ\n# :param spacing: list with spacing information [sx,sy,sz]\n# :param dtype: numpy data-type ('float32', 'float64', ...)\n# :return: returns the identity map of dimension dimxXxYxZ\n# \"\"\"\n# dim = len(sz)\n# if dim == 1:\n# id = np.mgrid[0:sz[0]]\n# elif dim == 2:\n# id = np.mgrid[0:sz[0], 0:sz[1]]\n# elif dim == 3:\n# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]\n# else:\n# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n# \n# min_spacing = np.min(spacing)\n# spacing_ratio = spacing/min_spacing\n# \n# \n# # now get it into range [0,(sz-1)*spacing]^d\n# id = np.array(id.astype(dtype))\n# if dim == 1:\n# id = id.reshape(1, sz[0]) # add a dummy first index\n# \n# for d in range(dim):\n# id[d] *= spacing[d]\n# if sz[d]%2==0:\n# #even\n# id[d] -= spacing[d]*(sz[d]//2)\n# else:\n# #odd\n# id[d] -= spacing[d]*((sz[d]+1)//2)\n# \n# # and now store it in a dim+1 array and rescale by the ratio\n# if dim == 1:\n# idnp = np.zeros([1, sz[0]], dtype=dtype)\n# idnp[0, :] = id[0] * spacing_ratio[0]\n# elif dim == 2:\n# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)\n# idnp[0, :, :] = id[0] * spacing_ratio[0]\n# idnp[1, :, :] = id[1] * spacing_ratio[1]\n# elif dim == 3:\n# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)\n# idnp[0, :, :, :] = id[0] * spacing_ratio[0]\n# idnp[1, :, :, :] = id[1] * spacing_ratio[1]\n# idnp[2, :, :, :] = id[2] * spacing_ratio[2]\n# else:\n# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')\n# \n# return idnp\n#\n# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):\n# if do_transform:\n# min_spacing = np.min(spacing)\n# spacing_ratio 
=min_spacing/spacing\n# dim = spacing.size\n# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))\n# sp_sz = [1]+[dim] +[1]*dim\n# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)\n# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]\n# else:\n# new_var_list = var_list\n# return new_var_list\n\n# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):\n# if do_transform:\n# min_spacing = np.min(spacing)\n# spacing_ratio =spacing/min_spacing\n# dim = spacing.size\n# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))\n# sp_sz = [1]+[dim] +[1]*dim\n# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)\n# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]\n# else:\n# new_var_list = var_list\n# return new_var_list\n#\n\n\n\n\n\n\ndef identity_map(sz,spacing,dtype='float32'):\n \"\"\"\n Returns an identity map.\n\n :param sz: just the spatial dimensions, i.e., XxYxZ\n :param spacing: list with spacing information [sx,sy,sz]\n :param dtype: numpy data-type ('float32', 'float64', ...)\n :return: returns the identity map of dimension dimxXxYxZ\n \"\"\"\n dim = len(sz)\n if dim==1:\n id = np.mgrid[0:sz[0]]\n elif dim==2:\n id = np.mgrid[0:sz[0],0:sz[1]]\n elif dim==3:\n id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]]\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n\n # now get it into range [0,(sz-1)*spacing]^d\n id = np.array( id.astype(dtype) )\n if dim==1:\n id = id.reshape(1,sz[0]) # add a dummy first index\n\n for d in range(dim):\n id[d]*=spacing[d]\n\n #id[d]*=2./(sz[d]-1)\n #id[d]-=1.\n\n # and now store it in a dim+1 array\n if dim==1:\n idnp = np.zeros([1, sz[0]], dtype=dtype)\n idnp[0,:] = id[0]\n elif dim==2:\n idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)\n idnp[0,:, :] = id[0]\n idnp[1,:, :] = id[1]\n elif dim==3:\n idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype)\n idnp[0,:, :, :] = id[0]\n idnp[1,:, :, :] = id[1]\n idnp[2,:, :, :] = id[2]\n else:\n raise ValueError('Only dimensions 1-3 are currently supported for the identity map')\n\n return idnp\n\ndef omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05):\n \"\"\"generate a smooth weight mask for the omt \"\"\"\n dim = len(img_sz)\n mask_sz = [1,1]+ list(img_sz)\n mask = AdaptVal(torch.ones(*mask_sz))*mask_value\n if dim ==2:\n mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1\n elif dim==3:\n mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1\n sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)\n mask = sm.smooth(mask)\n return mask.detach()\n\n\ndef momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2):\n \"\"\"generate a smooth weight mask for the omt \"\"\"\n dim = len(img_sz)\n mask_sz = [1,1]+ list(img_sz)\n mask = AdaptVal(torch.zeros(*mask_sz))\n if dim ==2:\n mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1\n elif dim==3:\n mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1\n sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)\n mask = sm.smooth(mask)\n if pow ==2:\n mask = mask**2\n if pow ==3:\n mask = mask*mask*mask\n return mask\n\n# def compute_omt_const(stds,param,dim):\n# omt_power = param['forward_model']['smoother']['omt_power']\n# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']\n# min_std = torch.min(stds)\n# max_std = torch.max(stds)\n# omt_const = 
torch.abs(torch.log(max_std/stds))**omt_power\n# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)\n# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)\n# sz = [1]+ [len(stds)] +[1]*(dim+1)\n# return omt_const.view(*sz)\n\n\n\ndef get_single_gaussian_smoother(gaussian_std,sz,spacing):\n s_m_params = pars.ParameterDict()\n s_m_params['smoother']['type'] = 'gaussian'\n s_m_params['smoother']['gaussian_std'] = gaussian_std\n s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params)\n return s_m\n\n\n\ndef get_warped_label_map(label_map, phi, spacing, sched='nn'):\n if sched == 'nn':\n warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True)\n # check if here should be add assert\n assert abs(torch.sum(warped_label_map.data -warped_label_map.data.round()))< 0.1, \"nn interpolation is not precise\"\n else:\n raise ValueError(\" the label warping method is not implemented\")\n\n return warped_label_map\n\n\ndef t2np(v):\n \"\"\"\n Takes a torch array and returns it as a numpy array on the cpu\n\n :param v: torch array\n :return: numpy array\n \"\"\"\n\n return (v.detach()).cpu().numpy()\n\n\n\ndef cxyz_to_xyzc( v ):\n \"\"\"\n Takes a torch array and returns it as a numpy array on the cpu\n\n :param v: torch array\n :return: numpy array\n \"\"\"\n dim = len(v.shape)-2\n if dim ==2:\n v = v.permute(0,2,3,1)\n if dim ==3:\n v = v.permute(0,2,3,4,1)\n return v\n\n\n\n\ndef get_scalar(v):\n if isinstance(v, float):\n return v\n elif isinstance(v, np.ndarray) and v.size == 1:\n return float(v)\n\n\ndef checkNan(x):\n \"\"\"\"\n input should be list of Variable\n \"\"\"\n return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x]\n\n\ndef noramlized_spacing_to_smallest(spacing):\n min_sp = np.min(spacing)\n spacing[spacing>min_sp]=min_sp\n return spacing\n\n\ndef time_warped_function(f):\n def __time_warped_function(input=None):\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n\n start.record()\n output = f(input)\n end.record()\n\n # Waits for everything to finish running\n torch.cuda.synchronize()\n\n print(start.elapsed_time(end))\n return output\n\n return __time_warped_function\n\ndef interoplate_boundary_right(tensor):\n dim = len(tensor.shape)-2\n if dim==1:\n tensor[:,:,-1]= tensor[:,:-2]+ tensor[:,:-2]-tensor[:,:-3]\n if dim==2:\n tensor[:, :, -1,:] = tensor[:, :,-2,:] + tensor[:, :,-2,:] - tensor[:, :,-3,:]\n tensor[:, :, :,-1] = tensor[:, :, :,-2] + tensor[:, :, :,-2] - tensor[:, :, :,-3]\n if dim==3:\n tensor[:, :,:, -1,:, :] = tensor[:, :, -2, :] + tensor[:, :, -2, :] - tensor[:, :, -3, :]\n tensor[:, :,:, :, -1, :] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3]\n tensor[:, :,:, :, :, -1] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3]\n\ndef get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):\n \"\"\"\n\n :param I: B C X Y Z\n :param spacing: spx spy spz\n :param desiredSize: B C X Y Z\n :param spline_order:\n :param zero_boundary:\n :param identity_map:\n :return:\n \"\"\"\n if spacing is None:\n img_sz = I.shape[2:]\n spacing = 1. 
/ (np.array(img_sz) - 1)\n if identity_map is not None: # todo will remove, currently fix for symmetric training\n if I.shape[0] != identity_map.shape[0]:\n n_batch = I.shape[0]\n desiredSize = desiredSize.copy()\n desiredSize[0] = n_batch\n identity_map = identity_map[:n_batch]\n resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order,\n zero_boundary=zero_boundary, identity_map=identity_map)\n return resampled\n\ndef resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):\n \"\"\"\n Resample an image to a given desired size\n\n :param I: Input image (expected to be of BxCxXxYxZ format)\n :param spacing: array describing the spatial spacing\n :param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)\n :return: returns a tuple: the downsampled image, the new spacing after downsampling\n \"\"\"\n desiredSize = desiredSize[2:]\n\n is_numpy = False\n if not isinstance(I, torch.Tensor):\n I = torch.Tensor(I)\n is_numpy = True\n sz = np.array(list(I.size()))\n # check that the batch size and the number of channels is the same\n nrOfI = sz[0]\n nrOfC = sz[1]\n\n desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))\n\n newspacing = spacing * ((sz[2::].astype('float') - 1.) / (\n desiredSizeNC[2::].astype('float') - 1.)) ###########################################\n if identity_map is not None:\n idDes = identity_map\n else:\n idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))\n # now use this map for resampling\n ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)\n\n return ID if not is_numpy else ID.numpy(), newspacing\n\ndef get_res_size_from_size(sz, factor):\n \"\"\"\n Returns the corresponding low-res size from a (high-res) sz\n :param sz: size (high-res)\n :param factor: low-res factor (needs to be <1)\n :return: low res size\n \"\"\"\n if (factor is None):\n print('WARNING: Could not compute low_res_size as factor was ' + str(factor))\n return sz\n else:\n lowResSize = np.array(sz)\n if not isinstance(factor, list):\n lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')\n else:\n lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')\n\n if lowResSize[-1] % 2 != 0:\n lowResSize[-1] -= 1\n print(\n '\\n\\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\\n\\n')\n\n return lowResSize\n\ndef get_res_spacing_from_spacing(spacing, sz, lowResSize):\n \"\"\"\n Computes spacing for the low-res parameterization from image spacing\n :param spacing: image spacing\n :param sz: size of image\n :param lowResSize: size of low re parameterization\n :return: returns spacing of low res parameterization\n \"\"\"\n # todo: check that this is the correct way of doing it\n return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)\n\n\n\n\n\n\n\n\n\n\n########################################## Adaptive Net ###################################################3\ndef space_normal(tensors, std=0.1):\n \"\"\"\n space normalize for the net kernel\n :param tensor:\n :param mean:\n :param std:\n :return:\n \"\"\"\n if isinstance(tensors, Variable):\n space_normal(tensors.data, std=std)\n return tensors\n for n in range(tensors.size()[0]):\n for c in range(tensors.size()[1]):\n dim = tensors[n][c].dim()\n sz = tensors[n][c].size()\n mus = np.zeros(dim)\n stds = std * np.ones(dim)\n print('WARNING: What should the 
spacing be here? Needed for new identity map code')\n raise ValueError('Double check the spacing here before running this code')\n spacing = np.ones(dim)\n centered_id = centered_identity_map(sz,spacing)\n g = compute_normalized_gaussian(centered_id, mus, stds)\n tensors[n,c] = torch.from_numpy(g)\n\n\ndef weights_init_uniform(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.uniform(m.weight.data, 0.038, 0.042)\n elif classname.find('Linear') != -1:\n init.uniform(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n space_normal(m.weight.data)\n elif classname.find('Linear') != -1:\n space_normal(m.weight.data)\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\ndef weights_init_rd_normal(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.normal(m.weight.data)\n elif classname.find('Linear') != -1:\n init.normal(m.weight.data)\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.xavier_normal(m.weight.data, gain=1)\n elif classname.find('Linear') != -1:\n init.xavier_normal(m.weight.data, gain=1)\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n # print(classname)\n if classname.find('Conv') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef weights_init_orthogonal(m):\n classname = m.__class__.__name__\n print(classname)\n if classname.find('Conv') != -1:\n init.orthogonal(m.weight.data, gain=1)\n elif classname.find('Linear') != -1:\n init.orthogonal(m.weight.data, gain=1)\n elif classname.find('BatchNorm2d') != -1:\n init.uniform(m.weight.data, 1.0, 0.02)\n init.constant(m.bias.data, 0.0)\n\n\ndef init_weights(net, init_type='normal'):\n print('initialization method [%s]' % init_type)\n if init_type == 'rd_normal':\n net.apply(weights_init_rd_normal)\n elif init_type == 'normal':\n net.apply(weights_init_normal)\n elif init_type == 'uniform':\n net.apply(weights_init_uniform)\n elif init_type == 'xavier':\n net.apply(weights_init_xavier)\n elif init_type == 'kaiming':\n net.apply(weights_init_kaiming)\n elif init_type == 'orthogonal':\n net.apply(weights_init_orthogonal)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\n\ndef organize_data(moving, target, sched='depth_concat'):\n if sched == 'depth_concat':\n input = torch.cat([moving, target], dim=1)\n elif sched == 'width_concat':\n input = torch.cat((moving, target), dim=3)\n elif sched == 'list_concat':\n input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0)\n elif sched == 'difference':\n input = moving-target\n return input\n\n\ndef bh(m,gi,go):\n print(\"Grad Input\")\n print((torch.sum(gi[0].data), torch.sum(gi[1].data)))\n 
print(\"Grad Output\")\n print(torch.sum(go[0].data))\n return gi[0], gi[1], gi[2]\n\n\nclass ConvBnRel(nn.Module):\n # conv + bn (optional) + relu\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,\n bn=False, reverse=False, bias=False):\n super(ConvBnRel, self).__init__()\n padding = int((kernel_size - 1) // 2) if same_padding else 0\n if not reverse:\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)\n else:\n self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias)\n #y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta\n #When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants.\n self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None\n if active_unit == 'relu':\n self.active_unit = nn.ReLU(inplace=True)\n elif active_unit == 'elu':\n self.active_unit = nn.ELU(inplace=True)\n else:\n self.active_unit = None\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.active_unit is not None:\n x = self.active_unit(x)\n return x\n\nclass FcRel(nn.Module):\n # fc+ relu(option)\n def __init__(self, in_features, out_features, active_unit='relu'):\n super(FcRel, self).__init__()\n self.fc = nn.Linear(in_features, out_features)\n if active_unit == 'relu':\n self.active_unit = nn.ReLU(inplace=True)\n elif active_unit == 'elu':\n self.active_unit = nn.ELU(inplace=True)\n else:\n self.active_unit = None\n\n def forward(self, x):\n x = self.fc(x)\n if self.active_unit is not None:\n x = self.active_unit(x)\n return x\n\n\nclass AdpSmoother(nn.Module):\n \"\"\"\n a simple conv. 
implementation, generate displacement field\n \"\"\"\n def __init__(self, inputs, dim, net_sched=None):\n # settings should include [using_bias, using bn, using elu]\n # inputs should be a dictionary could contain ['s'],['t']\n super(AdpSmoother, self).__init__()\n self.dim = dim\n self.net_sched = 'm_only'\n self.s = inputs['s'].detach()\n self.t = inputs['t'].detach()\n self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True)\n self.get_net_sched()\n #self.net.register_backward_hook(bh)\n\n def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5):\n # return the self.net and self.net_input\n padding_size = (kernel_size-1)//2\n if self.net_sched == 'm_only':\n if debugging:\n self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2)\n else:\n net = \\\n [ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),\n ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]\n if using_sigmoid:\n net += [nn.Sigmoid()]\n self.net = nn.Sequential(*net)\n\n elif self.net_sched =='m_f_s':\n if debugging:\n self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)\n else:\n net = \\\n [ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),\n ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]\n if using_sigmoid:\n net += [nn.Sigmoid()]\n self.net = nn.Sequential(*net)\n\n elif self.net_sched == 'm_d_s':\n if debugging:\n self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)\n else:\n net = \\\n [ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),\n ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]\n if using_sigmoid:\n net += [nn.Sigmoid()]\n self.net = nn.Sequential(*net)\n\n elif self.net_sched == 'm_f_s_t':\n if debugging:\n self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False)\n else:\n net = \\\n [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),\n ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]\n if using_sigmoid:\n net += [nn.Sigmoid()]\n self.net = nn.Sequential(*net)\n elif self.net_sched == 'm_d_s_f_t':\n if debugging:\n self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False)\n else:\n net = \\\n [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),\n ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]\n if using_sigmoid:\n net += [nn.Sigmoid()]\n self.net = nn.Sequential(*net)\n\n\n def prepare_data(self, m, new_s):\n input=None\n if self.net_sched == 'm_only':\n input = m\n elif self.net_sched == 'm_f_s':\n input = organize_data(m,self.s,sched='depth_concat')\n elif self.net_sched == 'm_d_s':\n input = organize_data(m, new_s, sched='depth_concat')\n elif self.net_sched == 'm_f_s_t':\n input = organize_data(m, self.s, sched='depth_concat')\n input = organize_data(input, self.t, sched='depth_concat')\n elif self.net_sched == 'm_f_s_t':\n input = organize_data(m, self.s, sched='depth_concat')\n input = organize_data(input, self.t, sched='depth_concat')\n elif self.net_sched == 'm_d_s_f_t':\n input = organize_data(m, new_s, sched='depth_concat')\n input = organize_data(input, self.t, 
sched='depth_concat')\n\n return input\n\n def forward(self, m,new_s=None):\n m = m * self.mask\n input = self.prepare_data(m,new_s)\n x= input\n x = self.net(x)\n return x\n\n\n\n\n\n"},"avg_line_length":{"kind":"number","value":34.805303559,"string":"34.805304"},"max_line_length":{"kind":"number","value":130,"string":"130"},"alphanum_fraction":{"kind":"number","value":0.6026746331,"string":"0.602675"},"count_classes":{"kind":"number","value":6600,"string":"6,600"},"score_classes":{"kind":"number","value":0.13232817387120058,"string":"0.132328"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":17362,"string":"17,362"},"score_documentation":{"kind":"number","value":0.3481032961745128,"string":"0.348103"}}},{"rowIdx":3720,"cells":{"hexsha":{"kind":"string","value":"b96fca03cef0164231c4fa09bc83db6c5b2aa7db"},"size":{"kind":"number","value":1093,"string":"1,093"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"examples/io/plot_read_evoked.py"},"max_stars_repo_name":{"kind":"string","value":"fmamashli/mne-python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"52f064415e7c9fa8fe243d22108dcdf3d86505b9"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-01-04T08:45:56.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-05-19T12:25:59.000Z"},"max_issues_repo_path":{"kind":"string","value":"examples/io/plot_read_evoked.py"},"max_issues_repo_name":{"kind":"string","value":"fmamashli/mne-python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"52f064415e7c9fa8fe243d22108dcdf3d86505b9"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":28,"string":"28"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-05-07T00:58:34.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-08-29T23:02:17.000Z"},"max_forks_repo_path":{"kind":"string","value":"examples/io/plot_read_evoked.py"},"max_forks_repo_name":{"kind":"string","value":"fmamashli/mne-python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"52f064415e7c9fa8fe243d22108dcdf3d86505b9"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-01-28T13:48:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-07-10T16:02:11.000Z"},"content":{"kind":"string","value":"\"\"\"\n==================================\nReading and writing an evoked file\n==================================\n\nThis script shows how to read and write evoked datasets.\n\"\"\"\n# Author: Alexandre Gramfort \n#\n# License: BSD (3-clause)\n\nfrom mne import read_evokeds\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = 
sample.data_path()\n\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\n\n# Reading\ncondition = 'Left Auditory'\nevoked = read_evokeds(fname, condition=condition, baseline=(None, 0),\n proj=True)\n\n###############################################################################\n# Show result as a butterfly plot:\n# By using exclude=[] bad channels are not excluded and are shown in red\nevoked.plot(exclude=[], time_unit='s')\n\n# Show result as a 2D image (x: time, y: channels, color: amplitude)\nevoked.plot_image(exclude=[], time_unit='s')\n\n###############################################################################\n# Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked\n# responses to a file.\n"},"avg_line_length":{"kind":"number","value":29.5405405405,"string":"29.540541"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.5690759378,"string":"0.569076"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":751,"string":"751"},"score_documentation":{"kind":"number","value":0.687099725526075,"string":"0.6871"}}},{"rowIdx":3721,"cells":{"hexsha":{"kind":"string","value":"b970d836b7397be4bc4d63762c0eec8adfb90a91"},"size":{"kind":"number","value":611,"string":"611"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"source/monkeyPatches/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"lukaszgo1/nvda"},"max_stars_repo_head_hexsha":{"kind":"string","value":"38a2efd1e1bff7db4471cb7afa03ab1590b7adef"},"max_stars_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n \"bzip2-1.0.6\"\n]"},"max_stars_count":{"kind":"number","value":19,"string":"19"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-05-11T05:15:31.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-17T12:40:10.000Z"},"max_issues_repo_path":{"kind":"string","value":"source/monkeyPatches/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"lukaszgo1/nvda"},"max_issues_repo_head_hexsha":{"kind":"string","value":"38a2efd1e1bff7db4471cb7afa03ab1590b7adef"},"max_issues_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n \"bzip2-1.0.6\"\n]"},"max_issues_count":{"kind":"number","value":307,"string":"307"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2015-08-27T11:22:33.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-29T10:43:34.000Z"},"max_forks_repo_path":{"kind":"string","value":"source/monkeyPatches/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"lukaszgo1/nvda"},"max_forks_repo_head_hexsha":{"kind":"string","value":"38a2efd1e1bff7db4471cb7afa03ab1590b7adef"},"max_forks_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n 
\"bzip2-1.0.6\"\n]"},"max_forks_count":{"kind":"number","value":14,"string":"14"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-03-28T07:31:49.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-30T04:56:35.000Z"},"content":{"kind":"string","value":"# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2021 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nfrom . import wxMonkeyPatches\r\n\r\n\r\napplyWxMonkeyPatches = wxMonkeyPatches.apply\r\n\r\n\r\ndef applyMonkeyPatches():\r\n\t# Apply several monkey patches to comtypes\r\n\t# F401 - imported but unused: Patches are applied during import\r\n\tfrom . import comtypesMonkeyPatches # noqa: F401\r\n\r\n\t# Apply patches to Enum, prevent cyclic references on ValueError during construction\r\n\tfrom . import enumPatches\r\n\tenumPatches.replace__new__()\r\n"},"avg_line_length":{"kind":"number","value":30.55,"string":"30.55"},"max_line_length":{"kind":"number","value":86,"string":"86"},"alphanum_fraction":{"kind":"number","value":0.7610474632,"string":"0.761047"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":387,"string":"387"},"score_documentation":{"kind":"number","value":0.6333878887070377,"string":"0.633388"}}},{"rowIdx":3722,"cells":{"hexsha":{"kind":"string","value":"b970f8ccb56e24dd8d65fd92869bbf7790f6e611"},"size":{"kind":"number","value":5298,"string":"5,298"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"yt_dlp/extractor/ninenow.py"},"max_stars_repo_name":{"kind":"string","value":"nxtreaming/yt-dlp"},"max_stars_repo_head_hexsha":{"kind":"string","value":"385ffb467b2285e85a2a5495b90314ba1f8e0700"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-01-06T22:09:50.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-12T22:26:22.000Z"},"max_issues_repo_path":{"kind":"string","value":"yt_dlp/extractor/ninenow.py"},"max_issues_repo_name":{"kind":"string","value":"nxtreaming/yt-dlp"},"max_issues_repo_head_hexsha":{"kind":"string","value":"385ffb467b2285e85a2a5495b90314ba1f8e0700"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-02-25T08:20:18.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-17T16:16:20.000Z"},"max_forks_repo_path":{"kind":"string","value":"yt_dlp/extractor/ninenow.py"},"max_forks_repo_name":{"kind":"string","value":"nxtreaming/yt-dlp"},"max_forks_repo_head_hexsha":{"kind":"string","value":"385ffb467b2285e85a2a5495b90314ba1f8e0700"},"max_forks_repo_licenses":{"kind":"list 
like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-02-19T08:59:13.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-06T16:11:21.000Z"},"content":{"kind":"string","value":"from .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n float_or_none,\n smuggle_url,\n str_or_none,\n try_get,\n unified_strdate,\n unified_timestamp,\n)\n\n\nclass NineNowIE(InfoExtractor):\n IE_NAME = '9now.com.au'\n _VALID_URL = r'https?://(?:www\\.)?9now\\.com\\.au/(?:[^/]+/){2}(?P[^/?#]+)'\n _GEO_COUNTRIES = ['AU']\n _TESTS = [{\n # clip\n 'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',\n 'md5': '17cf47d63ec9323e562c9957a968b565',\n 'info_dict': {\n 'id': '16801',\n 'ext': 'mp4',\n 'title': 'St. Kilda\\'s Joey Montagna on the potential for a player\\'s strike',\n 'description': 'Is a boycott of the NAB Cup \"on the table\"?',\n 'uploader_id': '4460760524001',\n 'upload_date': '20160713',\n 'timestamp': 1468421266,\n },\n 'skip': 'Only available in Australia',\n }, {\n # episode\n 'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',\n 'only_matching': True,\n }, {\n # DRM protected\n 'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1',\n 'only_matching': True,\n }, {\n # episode of series\n 'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3',\n 'info_dict': {\n 'id': '6249614030001',\n 'title': 'Episode 3',\n 'ext': 'mp4',\n 'season_number': 3,\n 'episode_number': 3,\n 'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.',\n 'uploader_id': '4460760524001',\n 'timestamp': 1619002200,\n 'upload_date': '20210421',\n },\n 'expected_warnings': ['Ignoring subtitle tracks'],\n 'params':{\n 'skip_download': True,\n }\n }]\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n page_data = self._parse_json(self._search_regex(\n r'window\\.__data\\s*=\\s*({.*?});', webpage,\n 'page data', default='{}'), display_id, fatal=False)\n if not page_data:\n page_data = self._parse_json(self._parse_json(self._search_regex(\n r'window\\.__data\\s*=\\s*JSON\\.parse\\s*\\(\\s*(\".+?\")\\s*\\)\\s*;',\n webpage, 'page data'), display_id), display_id)\n\n for kind in ('episode', 'clip'):\n current_key = page_data.get(kind, {}).get(\n 'current%sKey' % kind.capitalize())\n if not current_key:\n continue\n cache = page_data.get(kind, {}).get('%sCache' % kind, {})\n if not cache:\n continue\n common_data = {\n 'episode': (cache.get(current_key) or list(cache.values())[0])[kind],\n 'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None)\n }\n break\n else:\n raise ExtractorError('Unable to find video data')\n\n if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool):\n self.report_drm(display_id)\n brightcove_id = try_get(\n common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId']\n video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id\n\n title = 
try_get(common_data, lambda x: x['episode']['name'], compat_str)\n season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int)\n episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int)\n timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str))\n release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str))\n thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {}\n thumbnails = [{\n 'id': thumbnail_id,\n 'url': thumbnail_url,\n 'width': int_or_none(thumbnail_id[1:]),\n } for thumbnail_id, thumbnail_url in thumbnails_data.items()]\n\n return {\n '_type': 'url_transparent',\n 'url': smuggle_url(\n self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,\n {'geo_countries': self._GEO_COUNTRIES}),\n 'id': video_id,\n 'title': title,\n 'description': try_get(common_data, lambda x: x['episode']['description'], compat_str),\n 'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000),\n 'thumbnails': thumbnails,\n 'ie_key': 'BrightcoveNew',\n 'season_number': season_number,\n 'episode_number': episode_number,\n 'timestamp': timestamp,\n 'release_date': release_date,\n }\n"},"avg_line_length":{"kind":"number","value":43.0731707317,"string":"43.073171"},"max_line_length":{"kind":"number","value":146,"string":"146"},"alphanum_fraction":{"kind":"number","value":0.5751226878,"string":"0.575123"},"count_classes":{"kind":"number","value":5058,"string":"5,058"},"score_classes":{"kind":"number","value":0.9546998867497168,"string":"0.9547"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1871,"string":"1,871"},"score_documentation":{"kind":"number","value":0.3531521328803322,"string":"0.353152"}}},{"rowIdx":3723,"cells":{"hexsha":{"kind":"string","value":"b97242dec299cf214174fe1ceb1c2d4c7e16b595"},"size":{"kind":"number","value":4783,"string":"4,783"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"apex/fp16_utils/fused_weight_norm.py"},"max_stars_repo_name":{"kind":"string","value":"mcarilli/apex"},"max_stars_repo_head_hexsha":{"kind":"string","value":"766e36c9e10fe4efd847c3f77c3b38974c89eab1"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-05-05T01:37:42.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-05-05T01:37:42.000Z"},"max_issues_repo_path":{"kind":"string","value":"apex/fp16_utils/fused_weight_norm.py"},"max_issues_repo_name":{"kind":"string","value":"mcarilli/apex"},"max_issues_repo_head_hexsha":{"kind":"string","value":"766e36c9e10fe4efd847c3f77c3b38974c89eab1"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-06-24T18:56:56.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-06-24T18:56:56.000Z"},"max_forks_repo_path":{"kind":"string","value":"apex/fp16_utils/fused_weight_norm.py"},"max_forks_repo_name":{"kind":"string","value":"mcarilli/apex"},"max_forks_repo_head_hexsha":{"kind":"string","value":"766e36c9e10fe4efd847c3f77c3b38974c89eab1"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-07-03T00:37:20.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-07-03T00:37:20.000Z"},"content":{"kind":"string","value":"import torch\nfrom torch.autograd import Variable\nfrom torch.autograd.function import Function, once_differentiable\nimport apex_C\n\ndef check_contig_cuda(tensors, names):\n for tensor, name in zip(tensors, names):\n if not tensor.is_contiguous():\n raise RuntimeError(name+\" with size {} is not contiguous\"\n .format(tensor.size()))\n if not tensor.is_cuda:\n raise RuntimeError(name+\".is_cuda = False.\"\n \"Currently, only cuda tensors are supported.\")\n\nclass Fused_Weight_Norm(Function):\n \"\"\"\n Custom autograd function that implements weight norm, as presented in \n ``_,\n along a tensor's slowest or \n fastest dimension using fused kernel launches for the forward and backward passes.\n Accepts fp32 or fp16 input; the output type will match the input type.\n Within the kernels, all calculations are performed in fp32 for numerical stability, regardless\n of input/output precision.\n \"\"\"\n\n @staticmethod\n def forward(ctx, input, g, dim=0):\n \"\"\"\n Args:\n input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous.\n g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``.\n dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported.\n\n Returns:\n Output tensor corresponding to **w** in the paper. 
Output type and precision will match\n type and precision of ``input``.\n \n \"\"\"\n # torch.cuda.nvtx.range_push(\"FusedNorm.forward, input.size() = {}\"\n # .format(input.size()))\n\n check_contig_cuda((input,g),(\"input\",\"g\"))\n\n \"\"\"\n This is ok, new() treats a torch.Size object properly.\n No need to unpack with an asterisk via new(*input.size()).\n \"\"\"\n output = input.new(input.size()).contiguous()\n\n \"\"\"\n For output with size (slow, faster, faster, ...fastest), we want\n norms with size (slow, 1, 1, ...1), so that if you want retrieve norms \n and apply the same normalizing factors to another Tensor \"t\" with the \n same size as output, \"t/norms\" will broadcast each element of norms \n across the corresponding slowest dim of t.\n \"\"\"\n if dim == 0:\n norm_size = (output.size(0),) + (1,)*(output.dim() - 1)\n elif dim == output.dim() - 1:\n norm_size = (1,)*(output.dim() - 1) + (output.size(-1),)\n else:\n raise RuntimeError(\"Currently, Fused_Weight_Norm only supports first or last dimension.\")\n\n norms = torch.cuda.FloatTensor(*norm_size).contiguous()\n \"\"\"\n Beware: If you call the following:\n norms = torch.cuda.FloatTensor(norm_size).contiguous()\n the constructor sees a tuple:\n FloatTensor( (output_size(0),1,1,...) )\n and creates a 1D tensor with values from the tuple:\n [output_size(0),1,1,...].\n \"\"\"\n\n apex_C.weight_norm_fwd(output, norms, input, g, dim)\n ctx.save_for_backward(input, g)\n\n # save_for_backward can only save input or output tensors,\n # use ctx state to save the norms and dimension:\n ctx.norms = norms\n ctx.dim = dim\n\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n \"\"\"\n Args:\n grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance.\n\n Returns:\n Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``.\n \"\"\"\n check_contig_cuda((grad_output), (\"grad_output\"))\n\n savedInput, savedg = ctx.saved_tensors\n savedNorms = ctx.norms\n\n # We expect that these .contiguous() calls will be no-ops. 
They're present for safety.\n grad_output_contig = grad_output.contiguous()\n\n grad_input = grad_output_contig.new(grad_output.size()).contiguous()\n grad_g = savedg.new(savedg.size()).contiguous()\n\n apex_C.weight_norm_bwd(grad_input, \n grad_g,\n grad_output_contig, \n savedInput, \n savedg,\n savedNorms,\n ctx.dim)\n\n return grad_input, grad_g, None\n"},"avg_line_length":{"kind":"number","value":41.9561403509,"string":"41.95614"},"max_line_length":{"kind":"number","value":175,"string":"175"},"alphanum_fraction":{"kind":"number","value":0.6042232908,"string":"0.604223"},"count_classes":{"kind":"number","value":4238,"string":"4,238"},"score_classes":{"kind":"number","value":0.8860547773363997,"string":"0.886055"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":3733,"string":"3,733"},"score_decorators":{"kind":"number","value":0.7804725067948985,"string":"0.780473"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2826,"string":"2,826"},"score_documentation":{"kind":"number","value":0.5908425674263015,"string":"0.590843"}}},{"rowIdx":3724,"cells":{"hexsha":{"kind":"string","value":"b9724b70833f729e47c38eb018294247250b7282"},"size":{"kind":"number","value":23312,"string":"23,312"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"bzt/modules/grinder.py"},"max_stars_repo_name":{"kind":"string","value":"gerardorf/taurus"},"max_stars_repo_head_hexsha":{"kind":"string","value":"610872b4cf70af31d79a346db1aebd3466310d77"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-01-15T17:23:58.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-01-15T17:23:58.000Z"},"max_issues_repo_path":{"kind":"string","value":"bzt/modules/grinder.py"},"max_issues_repo_name":{"kind":"string","value":"gerardorf/taurus"},"max_issues_repo_head_hexsha":{"kind":"string","value":"610872b4cf70af31d79a346db1aebd3466310d77"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"bzt/modules/grinder.py"},"max_forks_repo_name":{"kind":"string","value":"gerardorf/taurus"},"max_forks_repo_head_hexsha":{"kind":"string","value":"610872b4cf70af31d79a346db1aebd3466310d77"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nModule holds all stuff regarding Grinder tool usage\n\nCopyright 2015 BlazeMeter Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the 
License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nimport re\nimport time\n\nfrom bzt import TaurusConfigError, ToolError\nfrom bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable\nfrom bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader\nfrom bzt.modules.console import WidgetProvider, ExecutorWidget\nfrom bzt.modules.java import TaurusJavaHelper\nfrom bzt.requests_model import HTTPRequest\nfrom bzt.six import iteritems\nfrom bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS\nfrom bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n\n\nclass GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):\n \"\"\"\n Grinder executor module\n \"\"\"\n def __init__(self):\n super(GrinderExecutor, self).__init__()\n self.script = None\n self.exec_id = \"grinder-bzt-%s\" % id(self)\n self.properties_file = None\n self.kpi_file = None\n self.cmd_line = None\n self.process = None\n self.end_time = None\n self.retcode = None\n self.java_helper = None\n\n def __write_base_props(self, fds):\n \"\"\"\n write base properties and base properties file contents to fds\n :param fds: fds\n :return:\n \"\"\"\n base_props_file = self.settings.get(\"properties-file\")\n if base_props_file:\n fds.write(\"# Base Properies File Start: %s\\n\" % base_props_file)\n with open(base_props_file) as bpf:\n fds.write(bpf.read())\n fds.write(\"# Base Properies File End: %s\\n\\n\" % base_props_file)\n\n # base props\n base_props = self.settings.get(\"properties\")\n if base_props:\n fds.write(\"# Base Properies Start\\n\")\n for key, val in iteritems(base_props):\n fds.write(\"%s=%s\\n\" % (key, val))\n fds.write(\"# Base Properies End\\n\\n\")\n\n def __write_scenario_props(self, fds, scenario):\n \"\"\"\n Write scenario props and scenario file props to fds\n :param fds:\n :param scenario: dict\n :return:\n \"\"\"\n script_props_file = scenario.get(\"properties-file\")\n if script_props_file:\n fds.write(\"# Script Properies File Start: %s\\n\" % script_props_file)\n with open(script_props_file) as spf:\n fds.write(spf.read())\n fds.write(\"# Script Properies File End: %s\\n\\n\" % script_props_file)\n\n # scenario props\n local_props = scenario.get(\"properties\")\n if local_props:\n fds.write(\"# Scenario Properies Start\\n\")\n for key, val in iteritems(local_props):\n fds.write(\"%s=%s\\n\" % (key, val))\n fds.write(\"# Scenario Properies End\\n\\n\")\n\n def __write_bzt_props(self, fds):\n \"\"\"\n Write bzt properties to fds\n :param fds:\n :return:\n \"\"\"\n fds.write(\"# BZT Properies Start\\n\")\n fds.write(\"grinder.hostID=%s\\n\" % self.exec_id)\n fds.write(\"grinder.script=%s\\n\" % self.script.replace(os.path.sep, \"/\"))\n fds.write(\"grinder.logDirectory=%s\\n\" % self.engine.artifacts_dir.replace(os.path.sep, \"/\"))\n\n load = self.get_load()\n\n if load.iterations or load.concurrency:\n fds.write(\"grinder.runs=%s\\n\" % load.iterations or 0)\n\n if load.concurrency:\n fds.write(\"grinder.threads=%s\\n\" % load.concurrency)\n\n if load.duration:\n fds.write(\"grinder.duration=%s\\n\" % int(load.duration * 1000))\n\n fds.write(\"# taurus load values in case you need them\\n\")\n fds.write(\"taurus.concurrency=%s\\n\" % 
load.concurrency)\n fds.write(\"taurus.throughput=%s\\n\" % load.throughput)\n fds.write(\"taurus.ramp_up=%s\\n\" % load.ramp_up)\n fds.write(\"taurus.steps=%s\\n\" % load.steps)\n fds.write(\"taurus.hold_for=%s\\n\" % load.hold)\n fds.write(\"taurus.iterations=%s\\n\" % load.iterations)\n fds.write(\"# BZT Properies End\\n\")\n\n def prepare(self):\n self.stdout = open(self.engine.create_artifact(\"grinder\", \".out\"), \"w\")\n self.stderr = open(self.engine.create_artifact(\"grinder\", \".err\"), \"w\")\n\n self.install_required_tools()\n scenario = self.get_scenario()\n self.exec_id = self.label\n self.script = self.get_script_path()\n if not self.script:\n if \"requests\" in scenario:\n self.script = self.__scenario_from_requests()\n else:\n msg = \"There must be a script file or requests for its generation \"\n msg += \"to run Grinder tool (%s)\" % self.execution.get('scenario')\n raise TaurusConfigError(msg)\n\n self.properties_file = self.engine.create_artifact(\"grinder\", \".properties\")\n\n with open(self.properties_file, 'w') as fds:\n self.__write_base_props(fds)\n self.__write_scenario_props(fds, scenario)\n self.__write_bzt_props(fds)\n\n self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + \"-kpi.log\")\n\n self.reader = DataLogReader(self.kpi_file, self.log)\n self.reader.report_by_url = self.settings.get(\"report-by-url\", False)\n if isinstance(self.engine.aggregator, ConsolidatingAggregator):\n self.engine.aggregator.add_underling(self.reader)\n\n # add logback configurations used by worker processes (logback-worker.xml)\n self.env.add_path({\"CLASSPATH\": RESOURCES_DIR}, finish=True)\n self.env.add_path({\"CLASSPATH\": self.java_helper.tool_path}, finish=True)\n self.env.add_path({\"CLASSPATH\": self.settings.get(\"path\", None)}, finish=True)\n\n self.cmd_line = [\"java\", \"net.grinder.Grinder\", self.properties_file]\n\n def startup(self):\n \"\"\"\n Should start the tool as fast as possible.\n \"\"\"\n self.env.set({\"T_GRINDER_PREFIX\": self.exec_id})\n self.process = self.execute(self.cmd_line)\n\n def check(self):\n \"\"\"\n Checks if tool is still running. 
Also checks if resulting logs contains\n any data and throws exception otherwise.\n\n :return: bool\n :raise TaurusToolError:\n \"\"\"\n self.retcode = self.process.poll()\n if self.retcode is not None:\n if self.retcode != 0:\n raise ToolError(\"Gatling tool exited with non-zero code: %s\" % self.retcode,\n self.get_error_diagnostics())\n\n return True\n return False\n\n def shutdown(self):\n \"\"\"\n If tool is still running - let's stop it.\n \"\"\"\n shutdown_process(self.process, self.log)\n if self.start_time:\n self.end_time = time.time()\n self.log.debug(\"Grinder worked for %s seconds\", self.end_time - self.start_time)\n\n def post_process(self):\n \"\"\"\n Collect data file artifact\n \"\"\"\n if self.kpi_file:\n self.engine.existing_artifact(self.kpi_file)\n super(GrinderExecutor, self).post_process()\n\n def __scenario_from_requests(self):\n \"\"\"\n Generate grinder scenario from requests\n :return: script\n \"\"\"\n script = self.engine.create_artifact(\"grinder_requests\", \".py\")\n builder = GrinderScriptBuilder(self.get_scenario(), self.log)\n builder.label = self.label\n builder.build_source_code()\n builder.save(script)\n return script\n\n def install_required_tools(self):\n grinder = self._get_tool(Grinder, config=self.settings)\n self.settings[\"path\"] = grinder.tool_path\n\n self.java_helper = self._get_tool(TaurusJavaHelper)\n\n required_tools = [self._get_tool(TclLibrary),\n self._get_tool(JavaVM),\n self.java_helper,\n grinder]\n\n for tool in required_tools:\n if not tool.check_if_installed():\n tool.install()\n\n def get_widget(self):\n if not self.widget:\n if self.script is not None:\n label = \"Grinder: %s\" % os.path.basename(self.script)\n else:\n label = None\n self.widget = ExecutorWidget(self, label)\n if self.get_load().ramp_up:\n self.widget.duration += self.get_load().ramp_up # because we have ramp-down equal to rampup\n return self.widget\n\n def resource_files(self):\n resource_files = []\n script_file_path = self.get_script_path()\n if script_file_path:\n resource_files.append(script_file_path)\n\n prop_file = self.get_scenario().get(\"properties-file\")\n if prop_file:\n resource_files.append(prop_file)\n\n return resource_files\n\n def get_error_diagnostics(self):\n diagnostics = []\n if self.stdout is not None:\n with open(self.stdout.name) as fds:\n contents = fds.read().strip()\n if contents.strip():\n diagnostics.append(\"Grinder STDOUT:\\n\" + contents)\n if self.stderr is not None:\n with open(self.stderr.name) as fds:\n contents = fds.read().strip()\n if contents.strip():\n diagnostics.append(\"Grinder STDOUT:\\n\" + contents)\n return diagnostics\n\n\nclass DataLogReader(ResultsReader):\n \"\"\" Class to read KPI from data log \"\"\"\n DELIMITER = \",\"\n DETAILS_REGEX = re.compile(r\"worker\\.(\\S+) (.+) -> (\\S+) (.+), (\\d+) bytes\")\n\n def __init__(self, filename, parent_logger):\n super(DataLogReader, self).__init__()\n self.report_by_url = False\n self.log = parent_logger.getChild(self.__class__.__name__)\n self.file = FileReader(filename=filename, parent_logger=self.log)\n self.idx = {}\n self.partial_buffer = \"\"\n self.start_time = 0\n self.end_time = 0\n self.concurrency = 0\n self.test_names = {}\n self.known_threads = set()\n\n def _read(self, last_pass=False):\n \"\"\"\n Generator method that returns next portion of data\n\n :param last_pass:\n \"\"\"\n self.log.debug(\"Reading grinder results...\")\n\n self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))\n\n lnum = None\n start = time.time()\n\n 
for lnum, line in enumerate(self.lines):\n if not self.idx:\n if not line.startswith('data.'):\n self.__split(line) # to capture early test name records\n continue\n\n line = line[line.find(' '):]\n\n header_list = line.strip().split(self.DELIMITER)\n for _ix, field in enumerate(header_list):\n self.idx[field.strip()] = _ix\n\n data_fields, worker_id = self.__split(line)\n if not data_fields:\n self.log.debug(\"Skipping line: %s\", line.strip())\n continue\n\n yield self.parse_line(data_fields, worker_id, lnum)\n\n if lnum is not None:\n duration = time.time() - start\n if duration < 0.001:\n duration = 0.001\n\n self.log.debug(\"Log reading speed: %s lines/s\", (lnum + 1) / duration)\n\n def parse_line(self, data_fields, worker_id, lnum):\n worker_id = worker_id.split('.')[1]\n t_stamp = int(int(data_fields[self.idx[\"Start time (ms since Epoch)\"]]) / 1000.0)\n r_time = int(data_fields[self.idx[\"Test time\"]]) / 1000.0\n latency = int(data_fields[self.idx[\"Time to first byte\"]]) / 1000.0\n r_code = data_fields[self.idx[\"HTTP response code\"]].strip()\n con_time = int(data_fields[self.idx[\"Time to resolve host\"]]) / 1000.0\n con_time += int(data_fields[self.idx[\"Time to establish connection\"]]) / 1000.0\n bytes_count = int(data_fields[self.idx[\"HTTP response length\"]].strip())\n test_id = data_fields[self.idx[\"Test\"]].strip()\n thread_id = worker_id + '/' + data_fields[self.idx[\"Thread\"]].strip()\n if thread_id not in self.known_threads:\n self.known_threads.add(thread_id)\n self.concurrency += 1\n\n url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)\n if int(data_fields[self.idx[\"Errors\"]]) or int(data_fields[self.idx['HTTP response errors']]):\n if not error_msg:\n if r_code != '0':\n error_msg = \"HTTP %s\" % r_code\n else:\n error_msg = \"Java exception calling TestRunner\"\n else:\n error_msg = None # suppress errors\n\n if self.report_by_url:\n label = url\n elif test_id in self.test_names:\n label = self.test_names[test_id]\n else:\n label = \"Test #%s\" % test_id\n\n source_id = '' # maybe use worker_id somehow?\n return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count\n\n def __split(self, line):\n if not line.endswith(\"\\n\"):\n self.partial_buffer += line\n return None, None\n\n line = \"%s%s\" % (self.partial_buffer, line)\n self.partial_buffer = \"\"\n\n line = line.strip()\n if not line.startswith('data.'):\n line_parts = line.split(' ')\n if len(line_parts) > 1:\n if line_parts[1] == 'starting,':\n # self.concurrency += 1\n pass\n elif line_parts[1] == 'finished':\n if self.concurrency > 0:\n self.concurrency -= 1\n elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:\n test_id = line_parts[5][:-1]\n test_name = ' '.join(line_parts[6:])\n self.test_names[test_id] = test_name\n self.log.debug(\"Recognized test id %s => %s\", test_id, test_name)\n return None, None\n\n worker_id = line[:line.find(' ')]\n line = line[line.find(' '):]\n data_fields = line.split(self.DELIMITER)\n if not data_fields[1].strip().isdigit():\n return None, None\n\n if len(data_fields) < max(self.idx.values()):\n return None, None\n\n return data_fields, worker_id\n\n def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):\n url = ''\n error_msg = None\n for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. 
TODO: parameterize?\n line = self.lines[lineNo].strip()\n matched = self.DETAILS_REGEX.match(line)\n if not matched:\n continue\n\n if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):\n return matched.group(2), matched.group(4)\n\n return url, error_msg\n\n\nclass Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?)\n VERSION = \"3.11\"\n LOCAL_PATH = \"~/.bzt/grinder-taurus/lib/grinder.jar\"\n\n def __init__(self, config=None, **kwargs):\n settings = config or {}\n grinder_path = settings.get(\"path\", self.LOCAL_PATH)\n grinder_path = get_full_path(grinder_path)\n\n download_link = settings.get(\"download-link\", \"\")\n\n super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs)\n self.version = self.VERSION\n self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version)\n\n def check_if_installed(self):\n self.log.debug(\"Trying %s: %s\", self.tool_name, self.tool_path)\n try:\n out, err = self.call([\"java\", \"-classpath\", self.tool_path, \"net.grinder.Grinder\"])\n if err:\n out += err\n self.log.debug(\"%s stdout: %s\", self.tool_name, out)\n return True\n except CALL_PROBLEMS as exc:\n self.log.warning(\"%s check failed: %s\", self.tool_name, exc)\n return False\n\n def install(self):\n dest = get_full_path(self.tool_path, step_up=2)\n self.log.info(\"Will install %s into %s\", self.tool_name, dest)\n grinder_dist = self._download(use_link=bool(self.download_link))\n self.log.info(\"Unzipping %s\", grinder_dist)\n unzip(grinder_dist, dest, 'grinder-' + self.version)\n os.remove(grinder_dist)\n self.log.info(\"Installed grinder successfully\")\n if not self.check_if_installed():\n raise ToolError(\"Unable to run %s after installation!\" % self.tool_name)\n\n\nclass GrinderMirrorsManager(MirrorsManager):\n MIRRORS_SOURCE = \"https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder\" \\\n \"%203/{version}/grinder-{version}-binary.zip&dialog=true\"\n DOWNLOAD_LINK = \"https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}\" \\\n \"/grinder-{version}-binary.zip?r=&ts=\" + str(int(time.time())) + \"&use_mirror=autoselect\"\n\n def __init__(self, http_client, parent_logger, grinder_version):\n self.grinder_version = grinder_version\n base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version)\n super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger)\n\n def _parse_mirrors(self):\n links = []\n if self.page_source is not None:\n self.log.debug('Parsing mirrors...')\n base_link = \"http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}\" \\\n \"-binary.zip/download?use_mirror={mirror}\"\n li_search_pattern = re.compile(r'
<li id=\".*?\">')\n li_elements = li_search_pattern.findall(self.page_source)\n if li_elements:\n links = [base_link.format(version=self.grinder_version, mirror=link.strip('
<li id=\"').strip('\">')) for\n link in li_elements]\n default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version)\n if default_link not in links:\n links.append(default_link)\n self.log.debug('Total mirrors: %d', len(links))\n return links\n\n\nclass GrinderScriptBuilder(PythonGenerator):\n IMPORTS = \"\"\"\nfrom net.grinder.script import Test\nfrom net.grinder.script.Grinder import grinder\nfrom net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities\nfrom HTTPClient import NVPair\n\"\"\"\n\n def __init__(self, scenario, parent_logger):\n super(GrinderScriptBuilder, self).__init__(scenario, parent_logger)\n self.label = \"BZT Requests\"\n\n def build_source_code(self):\n self.log.debug(\"Generating Python script for Grinder\")\n self.root.append(self.gen_comment(\"This script was generated by Taurus\", indent=0))\n self.root.append(self.add_imports())\n\n self.root.append(self.gen_new_line())\n\n default_address = self.scenario.get(\"default-address\")\n url_arg = \"url=%r\" % default_address if default_address else \"\"\n self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))\n self.root.append(self.gen_statement('test = Test(1, \"%s\")' % self.label, indent=0))\n self.root.append(self.gen_statement('test.record(request)', indent=0))\n\n self.root.append(self.gen_new_line())\n\n self.root.append(self.gen_statement(\"defaults = HTTPPluginControl.getConnectionDefaults()\", indent=0))\n self.root.append(self.gen_statement(\"utilities = HTTPPluginControl.getHTTPUtilities()\", indent=0))\n\n headers = self.scenario.get_headers()\n if not self.scenario.get(\"keepalive\", True):\n headers['Connection'] = 'close'\n\n if headers:\n self.root.append(self.gen_statement(\"defaults.setDefaultHeaders([\", indent=0))\n for header, value in iteritems(headers):\n self.root.append(self.gen_statement(\"NVPair(%r, %r),\" % (header, value), indent=4))\n self.root.append(self.gen_statement(\"])\", indent=0))\n\n global_timeout = dehumanize_time(self.scenario.get(\"timeout\", None))\n if global_timeout:\n self.root.append(self.gen_statement(\"defaults.setTimeout(%s)\" % int(global_timeout * 1000), indent=0))\n\n cookie_flag = int(self.scenario.get(\"store-cookie\", True))\n self.root.append(self.gen_statement(\"defaults.setUseCookies(%s)\" % cookie_flag, indent=0))\n\n self.root.append(self.gen_new_line())\n\n self.root.append(self.gen_runner_class())\n\n @staticmethod\n def __list_to_nvpair_list(items):\n return \"[\" + \",\".join(\"NVPair(%r, %r)\" % (header, value) for header, value in items) + \"]\"\n\n def gen_runner_class(self):\n runner_classdef = self.gen_class_definition(\"TestRunner\", [\"object\"])\n\n sleep_method = self.gen_method_definition(\"rampUpSleeper\", [\"self\"])\n sleep_method.append(self.gen_statement(\"if grinder.runNumber != 0: return\"))\n sleep_method.append(self.gen_statement(\"tprops = grinder.properties.getPropertySubset('taurus.')\"))\n sleep_method.append(self.gen_statement(\"inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)\"))\n sleep_method.append(self.gen_statement(\"sleep_time = int(1000 * grinder.threadNumber * inc)\"))\n sleep_method.append(self.gen_statement(\"grinder.sleep(sleep_time, 0)\"))\n sleep_method.append(self.gen_statement(\"if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)\"))\n sleep_method.append(self.gen_statement(\"else: grinder.logger.info('No sleep needed')\"))\n sleep_method.append(self.gen_new_line())\n runner_classdef.append(sleep_method)\n\n main_method = 
self.gen_method_definition(\"__call__\", [\"self\"])\n main_method.append(self.gen_statement(\"self.rampUpSleeper()\"))\n\n for req in self.scenario.get_requests():\n if not isinstance(req, HTTPRequest):\n msg = \"Grinder script generator doesn't support '%s' blocks, skipping\"\n self.log.warning(msg, req.NAME)\n continue\n\n method = req.method.upper()\n url = req.url\n local_headers = req.headers\n\n params = \"[]\"\n headers = self.__list_to_nvpair_list(iteritems(local_headers))\n\n main_method.append(self.gen_statement(\"request.%s(%r, %s, %s)\" % (method, url, params, headers)))\n\n think_time = dehumanize_time(req.priority_option('think-time'))\n if think_time:\n main_method.append(self.gen_statement(\"grinder.sleep(%s)\" % int(think_time * 1000)))\n\n runner_classdef.append(main_method)\n\n return runner_classdef\n"},"avg_line_length":{"kind":"number","value":40.826619965,"string":"40.82662"},"max_line_length":{"kind":"number","value":119,"string":"119"},"alphanum_fraction":{"kind":"number","value":0.6188229238,"string":"0.618823"},"count_classes":{"kind":"number","value":22046,"string":"22,046"},"score_classes":{"kind":"number","value":0.9456932052161977,"string":"0.945693"},"count_generators":{"kind":"number","value":1254,"string":"1,254"},"score_generators":{"kind":"number","value":0.0537920384351407,"string":"0.053792"},"count_decorators":{"kind":"number","value":150,"string":"150"},"score_decorators":{"kind":"number","value":0.006434454358270419,"string":"0.006434"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":5650,"string":"5,650"},"score_documentation":{"kind":"number","value":0.24236444749485245,"string":"0.242364"}}},{"rowIdx":3725,"cells":{"hexsha":{"kind":"string","value":"b972e358701b6b26d8d3c931dfecc57580620c15"},"size":{"kind":"number","value":467,"string":"467"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"test/Fortran/fixture/myfortran_flags.py"},"max_stars_repo_name":{"kind":"string","value":"moroten/scons"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20927b42ed4f0cb87f51287fa3b4b6cf915afcf8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1403,"string":"1,403"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-11-23T14:24:01.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T20:59:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"test/Fortran/fixture/myfortran_flags.py"},"max_issues_repo_name":{"kind":"string","value":"moroten/scons"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20927b42ed4f0cb87f51287fa3b4b6cf915afcf8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":3708,"string":"3,708"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2017-11-27T13:47:12.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-29T17:21:17.000Z"},"max_forks_repo_path":{"kind":"string","value":"test/Fortran/fixture/myfortran_flags.py"},"max_forks_repo_name":{"kind":"string","value":"moroten/scons"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20927b42ed4f0cb87f51287fa3b4b6cf915afcf8"},"max_forks_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":281,"string":"281"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-12-01T23:48:38.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T15:25:44.000Z"},"content":{"kind":"string","value":"import getopt\nimport sys\ncomment = ('#' + sys.argv[1]).encode()\nopts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')\noptstring = ''\nlength = len(comment)\nfor opt, arg in opts:\n if opt == '-o': out = arg\n elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt\ninfile = open(args[0], 'rb')\noutfile = open(out, 'wb')\noutfile.write((optstring + \"\\n\").encode())\nfor l in infile.readlines():\n if l[:length] != comment:\n outfile.write(l)\nsys.exit(0)\n"},"avg_line_length":{"kind":"number","value":27.4705882353,"string":"27.470588"},"max_line_length":{"kind":"number","value":67,"string":"67"},"alphanum_fraction":{"kind":"number","value":0.6017130621,"string":"0.601713"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":41,"string":"41"},"score_documentation":{"kind":"number","value":0.08779443254817987,"string":"0.087794"}}},{"rowIdx":3726,"cells":{"hexsha":{"kind":"string","value":"b9736fc25869ac44481082e255dc93e0f52aa441"},"size":{"kind":"number","value":9015,"string":"9,015"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"zen_knit/organizer/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"Zen-Reportz/zen_knit"},"max_stars_repo_head_hexsha":{"kind":"string","value":"104c2693d2cc61520657131da769f5d59d2df8e9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":30,"string":"30"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-25T15:39:42.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-25T04:53:44.000Z"},"max_issues_repo_path":{"kind":"string","value":"zen_knit/organizer/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"Zen-Reportz/zen_knit"},"max_issues_repo_head_hexsha":{"kind":"string","value":"104c2693d2cc61520657131da769f5d59d2df8e9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":11,"string":"11"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-01-02T22:10:07.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-02T00:56:33.000Z"},"max_forks_repo_path":{"kind":"string","value":"zen_knit/organizer/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"Zen-Reportz/zen_knit"},"max_forks_repo_head_hexsha":{"kind":"string","value":"104c2693d2cc61520657131da769f5d59d2df8e9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-01-27T13:22:46.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-01-30T05:01:59.000Z"},"content":{"kind":"string","value":"import io\nimport os\nimport base64\nfrom pathlib import Path\n\nfrom nbconvert import filters\nfrom pygments.formatters.latex import LatexFormatter\nfrom zen_knit import formattor\n\nfrom zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData\nfrom zen_knit.formattor.html_formatter import HTMLFormatter\n\n\nmime_extensions = {\"image/png\" : \"png\",\n \"image/jpg\" : \"jpg\"}\n\n\nclass BaseOrganizer:\n def __init__(self, executed_data: ExecutedData):\n self.format_started = False\n self.collected_string = \"\"\n self.fig_folder = None\n self.executed_data = executed_data\n self.formatted_doc = []\n self.organized_data = OrganizedData(\n global_options = self.executed_data.global_options,\n chunks = []\n )\n self._create_output_folder_name()\n self._create_fig_folder()\n self._organize_doc()\n self._create_output_file_name()\n\n def _create_output_file_name(self):\n global_options = self.organized_data.global_options\n global_options.output.file_name = global_options.input.file_name.split(\".\")[0] + \".\"+ global_options.output.format\n\n \n def _create_output_folder_name(self):\n global_options = self.organized_data.global_options\n \n if global_options.output.dir is None:\n global_options.output.dir = global_options.input.dir\n \n\n def _create_fig_folder(self):\n output_folder = self.organized_data.global_options.output.dir\n Path(output_folder).mkdir(parents=True, exist_ok=True)\n\n fig_folder = os.path.join(output_folder, self.organized_data.global_options.output.fig_dir)\n self.fig_folder = fig_folder\n Path(fig_folder).mkdir(parents=True, exist_ok=True)\n \n\n def _parse_raw(self, data, output_type):\n if data.get(\"code_text_raw\") is not None:\n if self._clean_up(data['code_text_raw']) is not None:\n if output_type in (\"code\"):\n t = {\"type\": \"code\", \"str_data\": data['code_text_raw'] }\n elif output_type in (\"sql\"):\n t = {\"type\": \"sql\", \"str_data\": data['code_text_raw'] }\n else:\n t = {\"type\": \"markdown\", \"str_data\": data['code_text_raw'] }\n \n self.organized_data.chunks.append(OrganizedChunk(**t))\n return True\n else:\n return False\n \n def _coder_string(self, data):\n list_ = [\"stream\", \"error\"]\n if data[\"output_type\"] is None:\n return False\n \n if data[\"output_type\"] in list_:\n if data[\"output_type\"] == \"stream\":\n if self._clean_up(data['text']) is not None:\n t = {\"type\": \"se_data\", \"str_data\": data['text'] }\n self.organized_data.chunks.append(OrganizedChunk(**t))\n\n if data[\"output_type\"] == \"error\":\n t = {\"type\": \"se_data\", \"str_data\": data[\"evalue\"] + filters.strip_ansi(\"\".join(data[\"traceback\"])) }\n self.organized_data.chunks.append(OrganizedChunk(**t))\n\n return True\n\n return False\n\n def _raw_string(self, data):\n if data[\"output_type\"] is None:\n return False\n\n if data[\"output_type\"] == \"execute_result\":\n if data.get(\"data\") is not None:\n if 'matplotlib' in data[\"data\"][\"text/plain\"]:\n # Doing nothing here\n return True\n else:\n if ((data[\"data\"][\"text/plain\"][0] == \"'\") or (data[\"data\"][\"text/plain\"][0] == '\"')):\n temp = data[\"data\"][\"text/plain\"][1:-1]\n else:\n temp = data[\"data\"][\"text/plain\"]\n \n if \"\" + temp.encode().decode() + 
\"\" }\n # self.organized_data.chunks.append(OrganizedChunk(**t))\n # return True\n\n if self._clean_up(temp) is not None:\n t = {\"type\": \"e_data\", \"str_data\":temp }\n self.organized_data.chunks.append(OrganizedChunk(**t))\n return True\n \n return True \n\n return False\n \n def _raw_plots(self, data, chunk_option:ChunkOption):\n if data[\"output_type\"] is None:\n return False\n\n if data[\"output_type\"] == \"display_data\":\n plot_infos = self._save_plots(data, chunk_option)\n t = {\"type\": \"plot\", \"complex_data\":{\"plots\": plot_infos, \"options\": chunk_option }}\n self.organized_data.chunks.append(OrganizedChunk(**t))\n return True\n return False\n \n def _save_plots(self, data, chunk_option:ChunkOption):\n figs = []\n i = 1\n for m in mime_extensions:\n if m in data[\"data\"]:\n fig_full_path, fig_relative_path = self._build_file(mime_extensions[m], i, chunk_option.fig_caption, chunk_option.name)\n figs.append(fig_relative_path)\n bfig = base64.b64decode(data[\"data\"][m])\n with open(fig_full_path, \"wb\") as f:\n f.write(bfig)\n i += 1\n \n return figs\n \n def _build_file(self, extension, index, fig_caption= None, name =None):\n \n \n fig_name = \"\"\n if fig_caption is not None:\n fig_name = fig_name + \"_\" + fig_caption\n\n if name is not None:\n fig_name = fig_name + \"_\" + name\n \n fig_name = fig_name + \"_\" + str(index)\n fig_name = fig_name + \".\" + extension\n return os.path.join(self.fig_folder, fig_name), os.path.join(self.fig_folder, fig_name)\n\n def _interactive_plots(self, data):\n \n if data[\"output_type\"] is None:\n return False\n\n if data[\"output_type\"] == \"display_data\":\n if \"text/html\" in data[\"data\"]: \n print(self.executed_data.global_options.output.format)\n if self.executed_data.global_options.output.format != \"html\":\n raise Exception(\"output format is not HTML\")\n else: \n t = {\"type\": \"html_data\", \"str_data\":data[\"data\"][\"text/html\"].encode().decode() }\n self.organized_data.chunks.append(OrganizedChunk(**t))\n return True\n return False\n\n def _organize_doc(self):\n for index, chunk in enumerate(self.executed_data.chunks):\n chunk_option = chunk.chunk.options\n if chunk_option.name:\n print(f\"organizing {chunk_option.name}\")\n else:\n print(f\"organizing index {index}\")\n results = chunk.results\n for result in results:\n data = result.data\n present = self._parse_raw(data, result.output_type)\n if present:\n continue\n present = self._coder_string(data)\n if present:\n continue\n \n present = self._raw_string(data)\n if present:\n continue\n\n present = self._interactive_plots(data)\n if present:\n continue\n \n present = self._raw_plots(data, chunk_option)\n if present:\n continue\n print(\"not supported format\", data)\n \n \n t = []\n c: OrganizedChunk\n for c in self.organized_data.chunks:\n last_chank: OrganizedChunk\n if len(t)> 0:\n last_chank = t[-1]\n else:\n last_chank = None\n \n if last_chank is None:\n t.append(c)\n else:\n if (c.type == last_chank.type) & (c.type != \"plot\"):\n last_chank.str_data = last_chank.str_data + \"\\n\" + c.str_data\n else:\n t.append(c)\n self.organized_data.chunks = t\n \n @staticmethod\n def _clean_up(doc):\n d = doc.replace(\" \", \"\").replace(\"\\n\", \"\")\n if len(d) != 0:\n return doc\n else:\n return None\n \n\n # markdown_file = self.executed_data.global_options.input_file_name.split(\".\")[0] + \".md\"\n # markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file)\n # with open(markdown_file, \"w\") as f:\n # text = 
\"\\n\".join(self.formatted_doc)\n # f.write(text)\n "},"avg_line_length":{"kind":"number","value":37.5625,"string":"37.5625"},"max_line_length":{"kind":"number","value":139,"string":"139"},"alphanum_fraction":{"kind":"number","value":0.5338879645,"string":"0.533888"},"count_classes":{"kind":"number","value":8590,"string":"8,590"},"score_classes":{"kind":"number","value":0.9528563505268997,"string":"0.952856"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":173,"string":"173"},"score_decorators":{"kind":"number","value":0.01919023849140322,"string":"0.01919"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1463,"string":"1,463"},"score_documentation":{"kind":"number","value":0.16228508042151968,"string":"0.162285"}}},{"rowIdx":3727,"cells":{"hexsha":{"kind":"string","value":"b974558759b358f82c2d72d79bab9c7dc3e35a76"},"size":{"kind":"number","value":12467,"string":"12,467"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"qibullet/robot_virtual.py"},"max_stars_repo_name":{"kind":"string","value":"mcaniot/qibullet"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9c5e1b319a18dd289263eb82f9d7303429bcbe21"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"qibullet/robot_virtual.py"},"max_issues_repo_name":{"kind":"string","value":"mcaniot/qibullet"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9c5e1b319a18dd289263eb82f9d7303429bcbe21"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"qibullet/robot_virtual.py"},"max_forks_repo_name":{"kind":"string","value":"mcaniot/qibullet"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9c5e1b319a18dd289263eb82f9d7303429bcbe21"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport pybullet\nfrom qibullet.camera import *\nfrom qibullet.link import Link\nfrom qibullet.joint import Joint\n\nIS_VERSION_PYTHON_3 = sys.version_info[0] >= 3\n\n\nclass RobotVirtual:\n \"\"\"\n Mother class representing a virtual robot\n \"\"\"\n\n def __init__(self, description_file):\n \"\"\"\n Constructor\n\n Parameters:\n description_file - The file giving the description of the virtual\n robot. 
For now, only URDF is handled\n \"\"\"\n self.description_file = description_file\n self.physics_client = 0\n self.active_camera = None\n self.camera_dict = dict()\n self.joint_dict = dict()\n self.link_dict = dict()\n\n def loadRobot(self, translation, quaternion, physicsClientId=0):\n \"\"\"\n Loads the robot into a simulation, loads the joints and the links\n descriptions. The joints are set to 0 rad.\n\n Parameters:\n translation - List containing 3 elements, the translation [x, y, z]\n of the robot in the WORLD frame\n quaternion - List containing 4 elements, the quaternion\n [x, y, z, q] of the robot in the WORLD frame\n physicsClientId - The id of the simulated instance in which the\n robot is supposed to be loaded\n\n Returns:\n boolean - True if the method ran correctly, False otherwise\n \"\"\"\n try:\n self.physics_client = physicsClientId\n self.robot_model = pybullet.loadURDF(\n self.description_file,\n translation,\n quaternion,\n useFixedBase=False,\n globalScaling=1.0,\n physicsClientId=self.physics_client,\n flags=pybullet.URDF_USE_SELF_COLLISION |\n pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL)\n\n except pybullet.error as e:\n raise pybullet.error(\"Cannot load robot model: \" + str(e))\n\n for i in range(pybullet.getNumJoints(\n self.robot_model,\n physicsClientId=self.physics_client)):\n if IS_VERSION_PYTHON_3:\n # PYTHON 3 version needs a conversion bytes to str\n joint_info = pybullet.getJointInfo(\n self.robot_model,\n i,\n physicsClientId=self.physics_client)\n self.link_dict[joint_info[12].decode('utf-8')] =\\\n Link(joint_info)\n\n if joint_info[2] == pybullet.JOINT_PRISMATIC or\\\n joint_info[2] == pybullet.JOINT_REVOLUTE:\n self.joint_dict[joint_info[1].decode('utf-8')] =\\\n Joint(joint_info)\n else:\n # PYTHON 2 Version\n joint_info = pybullet.getJointInfo(\n self.robot_model,\n i,\n physicsClientId=self.physics_client)\n\n self.link_dict[joint_info[12]] = Link(joint_info)\n\n if joint_info[2] == pybullet.JOINT_PRISMATIC or\\\n joint_info[2] == pybullet.JOINT_REVOLUTE:\n self.joint_dict[joint_info[1]] = Joint(joint_info)\n\n def getRobotModel(self):\n \"\"\"\n Returns the pybullet model to which the module is associated.\n\n Returns:\n robot_model - The pybullet model of the robot\n \"\"\"\n return self.robot_model\n\n def getPhysicsClientId(self):\n \"\"\"\n Returns the id of the simulated instance in which the module is loaded.\n\n Returns:\n physics_client - The id of the simulation in which the robot\n (possessing the module) is spawned\n \"\"\"\n return self.physics_client\n\n def setAngles(self, joint_names, joint_values, percentage_speeds):\n \"\"\"\n Set angles on the robot's joints. 
Tests have to be performed by the\n child class to guarantee the validity of the input parameters.\n\n Parameters:\n joint_names - List of string containing the name of the joints\n to be controlled\n joint_values - List of values corresponding to the angles in\n radians to be applied\n percentage_speeds - Percentages of the max speed to be used for\n each joint, has to be strictly superior to 0 and inferior or equal\n to 1\n \"\"\"\n try:\n assert len(joint_names) ==\\\n len(joint_values) ==\\\n len(percentage_speeds)\n\n assert all(\n speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds)\n\n except AssertionError:\n raise pybullet.error(\"Error in the setAngles parameters\")\n\n for joint_name, joint_value, percentage_speed in zip(\n joint_names,\n joint_values,\n percentage_speeds):\n\n joint_speed =\\\n self.joint_dict[joint_name].getMaxVelocity() *\\\n percentage_speed\n\n pybullet.setJointMotorControl2(\n self.robot_model,\n self.joint_dict[joint_name].getIndex(),\n pybullet.POSITION_CONTROL,\n targetPosition=joint_value,\n maxVelocity=joint_speed,\n force=self.joint_dict[joint_name].getMaxEffort(),\n physicsClientId=self.physics_client)\n\n def getAnglesPosition(self, joint_names):\n \"\"\"\n Gets the position of the robot's joints in radians. If one of the joint\n doesn't exist, the method will raise a KeyError.\n\n Parameters:\n joint_names - List of string containing the names of the joints\n\n Returns:\n joint_positions - List of floats containing the joint's positions\n \"\"\"\n joint_positions = list()\n\n for joint_name in joint_names:\n joint_positions.append(pybullet.getJointState(\n self.robot_model,\n self.joint_dict[joint_name].getIndex(),\n physicsClientId=self.physics_client)[0])\n\n return joint_positions\n\n def getAnglesVelocity(self, joint_names):\n \"\"\"\n Gets the velocity of the robot's joints in rad/s. If one of the joint\n doesn't exist, the method will raise a KeyError.\n\n Parameters:\n joint_names - List of string containing the names of the joints\n\n Returns:\n joint_velocities - List of floats containing the joint's velocities\n \"\"\"\n joint_velocities = list()\n\n for joint_name in joint_names:\n joint_velocities.append(pybullet.getJointState(\n self.robot_model,\n self.joint_dict[joint_name].getIndex(),\n physicsClientId=self.physics_client)[1])\n\n return joint_velocities\n\n def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA):\n \"\"\"\n Subscribe to the camera holding the camera id. WARNING: at the moment,\n only one camera can be subscribed.\n\n Parameters:\n camera_id - The id of the camera to be subscribed\n resolution - CameraResolution object, the resolution of the camera\n \"\"\"\n try:\n self.active_camera = self.camera_dict[camera_id]\n self.active_camera.subscribe(resolution=resolution)\n\n except KeyError:\n print(\"This camera does not exist, use a valid camera id\")\n\n def unsubscribeCamera(self, camera_id):\n \"\"\"\n Unsubscribe from a camera, the one holding the camera id.\n\n Parameters:\n camera_id - The id of the camera to be unsubscribed\n \"\"\"\n try:\n # If no active camera is found, nothing is unsubscribed\n assert self.active_camera is not None\n\n if self.active_camera.getCameraId() == camera_id:\n self.active_camera.unsubscribe()\n self.active_camera = None\n\n except KeyError:\n print(\"This camera does not exist, use a valid camera id\")\n except AssertionError:\n pass\n\n def getCameraFrame(self):\n \"\"\"\n Returns a camera frame. 
Be advised that the subscribeCamera method\n needs to be called beforehand, otherwise a pybullet error will be\n raised.\n\n Returns:\n frame - The current camera frame as a formatted numpy array,\n directly exploitable from OpenCV\n \"\"\"\n try:\n assert self.active_camera is not None\n return self.active_camera.getFrame()\n\n except AssertionError:\n raise pybullet.error(\"No active camera, cannot retrieve any frame\")\n\n def getCameraResolution(self):\n \"\"\"\n Returns the resolution of the active camera. Be advised that the\n subscribeCamera method needs to be called beforehand, otherwise a\n pybullet error will be raised.\n\n Returns:\n resolution - a CameraResolution object describing the resolution of\n the active camera\n \"\"\"\n try:\n assert self.active_camera is not None\n return self.active_camera.getResolution()\n\n except KeyError:\n raise pybullet.error(\"No active camera, resolution unavailable\")\n\n def getCameraLink(self):\n \"\"\"\n Returns the link of the active camera. Be advised that the\n subscribeCamera method needs to be called beforehand, otherwise a\n pybullet error will be raised.\n\n Returns:\n resolution - a Link object describing the link to which the active\n camera is attached\n \"\"\"\n try:\n assert self.active_camera is not None\n return self.active_camera.getCameraLink()\n\n except KeyError:\n raise pybullet.error(\"No active camera, cannot retrieve any link\")\n\n def getActiveCamera(self):\n \"\"\"\n Returns the active camera of the robot.\n\n Returns:\n active_camera - Camera (CameraRgb or CameraDepth) object, the\n active camera of the robot. If there is no active camera, a None is\n returned\n \"\"\"\n return self.active_camera\n\n def getPosition(self):\n \"\"\"\n Gets the position of the robot's base in the world frame.\n\n Returns:\n x - The position of the robot's base on the x axis, in meters\n y - The positions of the robot's base on the y axis in meters\n theta - The rotation of the robot's base on the z axis in meters\n \"\"\"\n position, quaternions = pybullet.getBasePositionAndOrientation(\n self.robot_model,\n physicsClientId=self.physics_client)\n\n theta = pybullet.getEulerFromQuaternion(quaternions)[2]\n return position[0], position[1], theta\n\n def isSelfColliding(self, link_names):\n \"\"\"\n Specifies if a link is colliding with the rest of the virtual robot.\n\n Parameters:\n link_names - String or list of string containing the names of the\n links to be checked for self collision. 
WARNING: only the links\n with corresponding meshes should be used, otherwise the link cannot\n self collide\n\n Returns:\n self_colliding - Boolean, if True at least one of the links is self\n colliding\n \"\"\"\n try:\n if type(link_names) is str:\n assert link_names in self.link_dict.keys()\n names = [link_names]\n else:\n assert set(link_names).issubset(self.link_dict.keys())\n names = list(link_names)\n\n for name in names:\n contact_tuple = pybullet.getContactPoints(\n bodyA=self.robot_model,\n bodyB=self.robot_model,\n linkIndexA=self.link_dict[name].getIndex(),\n physicsClientId=self.physics_client)\n contact_tuple += pybullet.getContactPoints(\n bodyA=self.robot_model,\n bodyB=self.robot_model,\n linkIndexB=self.link_dict[name].getIndex(),\n physicsClientId=self.physics_client)\n\n if len(contact_tuple) != 0:\n return True\n\n return False\n\n except AssertionError:\n raise pybullet.error(\n \"Unauthorized link checking for self collisions\")\n"},"avg_line_length":{"kind":"number","value":35.3172804533,"string":"35.31728"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.5930055346,"string":"0.593006"},"count_classes":{"kind":"number","value":12256,"string":"12,256"},"score_classes":{"kind":"number","value":0.9830753188417422,"string":"0.983075"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":5530,"string":"5,530"},"score_documentation":{"kind":"number","value":0.44357102751263333,"string":"0.443571"}}},{"rowIdx":3728,"cells":{"hexsha":{"kind":"string","value":"b974d5d1bd35654f50415a8f7c66f3fb9a0316ab"},"size":{"kind":"number","value":704,"string":"704"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tests/test_formatter.py"},"max_stars_repo_name":{"kind":"string","value":"hbraux/kafkacli"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5f7ed23150932b66b484fb43dd6210b6c0968776"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"tests/test_formatter.py"},"max_issues_repo_name":{"kind":"string","value":"hbraux/kafkacli"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5f7ed23150932b66b484fb43dd6210b6c0968776"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"tests/test_formatter.py"},"max_forks_repo_name":{"kind":"string","value":"hbraux/kafkacli"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5f7ed23150932b66b484fb43dd6210b6c0968776"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport pytest\nimport json\nfrom kafkacli.formatter import Formatter\n\nsampleJson = json.loads('{\"a\":\"s\", \"b\":1}')\n\n\ndef test_print_default(capsys):\n Formatter().print(sampleJson)\n captured = capsys.readouterr()\n assert captured.out == '{\"a\": \"s\", \"b\": 1}\\n'\n\n\ndef test_print_idents(capsys):\n Formatter(indents=True).print(sampleJson)\n captured = capsys.readouterr()\n assert captured.out == '{\\n \"a\": \"s\",\\n \"b\": 1\\n}\\n'\n\n\ndef test_print_colors(capsys):\n Formatter(colors=True).print(sampleJson)\n captured = capsys.readouterr()\n assert captured.out == \\\n '{\"a\": \\x1b[34m\"s\"\\x1b[39m, \"b\": \\x1b[31m1\\x1b[39m}\\n'\n"},"avg_line_length":{"kind":"number","value":24.275862069,"string":"24.275862"},"max_line_length":{"kind":"number","value":62,"string":"62"},"alphanum_fraction":{"kind":"number","value":0.640625,"string":"0.640625"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":173,"string":"173"},"score_documentation":{"kind":"number","value":0.24573863636363635,"string":"0.245739"}}},{"rowIdx":3729,"cells":{"hexsha":{"kind":"string","value":"b9750e636d7a3d49a65558af431533fc2e745edb"},"size":{"kind":"number","value":187,"string":"187"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/jobs/forms.py"},"max_stars_repo_name":{"kind":"string","value":"arc198/DJANGO-JOB-SITE"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d9547c4ee85751677ba6458380b609973c3b4a8d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":20,"string":"20"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-05-04T18:42:35.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-03-18T07:15:12.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/jobs/forms.py"},"max_issues_repo_name":{"kind":"string","value":"fleepgeek/django-jobsite"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d9547c4ee85751677ba6458380b609973c3b4a8d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":5,"string":"5"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-02-11T22:22:33.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-10T20:18:05.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/jobs/forms.py"},"max_forks_repo_name":{"kind":"string","value":"arc198/DJANGO-JOB-SITE"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d9547c4ee85751677ba6458380b609973c3b4a8d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":8,"string":"8"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-05-04T19:03:23.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-09-23T00:24:46.000Z"},"content":{"kind":"string","value":"from django import forms\n\nfrom .models import Application\n\nclass ApplicationForm(forms.ModelForm):\n class Meta:\n model = Application\n fields = ('resume', 'cover_letter',)"},"avg_line_length":{"kind":"number","value":23.375,"string":"23.375"},"max_line_length":{"kind":"number","value":44,"string":"44"},"alphanum_fraction":{"kind":"number","value":0.7005347594,"string":"0.700535"},"count_classes":{"kind":"number","value":128,"string":"128"},"score_classes":{"kind":"number","value":0.6844919786096256,"string":"0.684492"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":22,"string":"22"},"score_documentation":{"kind":"number","value":0.11764705882352941,"string":"0.117647"}}},{"rowIdx":3730,"cells":{"hexsha":{"kind":"string","value":"b975e6fb7fb3fa8849afb4e4ce41618c2ce94c1b"},"size":{"kind":"number","value":451,"string":"451"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/test/tests/unit/protocol.py"},"max_stars_repo_name":{"kind":"string","value":"ylee88/visit"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8e0920996d84fef70a7014b0d770360918d849d5"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-01-27T23:52:04.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-27T23:52:04.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/test/tests/unit/protocol.py"},"max_issues_repo_name":{"kind":"string","value":"ylee88/visit"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8e0920996d84fef70a7014b0d770360918d849d5"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/test/tests/unit/protocol.py"},"max_forks_repo_name":{"kind":"string","value":"ylee88/visit"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8e0920996d84fef70a7014b0d770360918d849d5"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# ----------------------------------------------------------------------------\n# CLASSES: nightly\n#\n# Test Case: protocolo.py\n#\n# Tests: vistprotocol unit test\n#\n# Mark C. 
Miller, Tue Jan 11 10:19:23 PST 2011\n# ----------------------------------------------------------------------------\ntapp = visit_bin_path(\"visitprotocol\")\nres = sexe(tapp,ret_output=True)\nif res[\"return_code\"] == 0:\n excode = 111\nelse:\n excode = 113\nExit(excode)\n"},"avg_line_length":{"kind":"number","value":26.5294117647,"string":"26.529412"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.4323725055,"string":"0.432373"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":317,"string":"317"},"score_documentation":{"kind":"number","value":0.7028824833702882,"string":"0.702882"}}},{"rowIdx":3731,"cells":{"hexsha":{"kind":"string","value":"b97645cb1bc48b7d30c6b37e139952912087b791"},"size":{"kind":"number","value":3348,"string":"3,348"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pyMazeBacktrack.py"},"max_stars_repo_name":{"kind":"string","value":"Dozed12/pyMazeBacktrack"},"max_stars_repo_head_hexsha":{"kind":"string","value":"aaa2a902fdca17dca6e2ee00e672b6bb38da5639"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-02-22T10:35:25.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-08-11T01:25:12.000Z"},"max_issues_repo_path":{"kind":"string","value":"pyMazeBacktrack.py"},"max_issues_repo_name":{"kind":"string","value":"Dozed12/pyMazeBacktrack"},"max_issues_repo_head_hexsha":{"kind":"string","value":"aaa2a902fdca17dca6e2ee00e672b6bb38da5639"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pyMazeBacktrack.py"},"max_forks_repo_name":{"kind":"string","value":"Dozed12/pyMazeBacktrack"},"max_forks_repo_head_hexsha":{"kind":"string","value":"aaa2a902fdca17dca6e2ee00e672b6bb38da5639"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import libtcodpy as libtcod\r\nfrom random import randint\r\n\r\nnSquares = 30\r\nnTiles = nSquares * 2 + 1\r\n\r\nSCREEN_WIDTH = nTiles\r\nSCREEN_HEIGHT = nTiles\r\n\r\nlibtcod.console_set_custom_font(\"cp437_12x12.png\", libtcod.FONT_LAYOUT_ASCII_INROW)\r\nlibtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)\r\n\r\ndef CheckDir(x,y,size,direction,table):\r\n\r\n if direction == 1:\r\n if y - 2 <= 0:\r\n return 0\r\n if table[x][y-2] == white:\r\n return 0\r\n elif direction == 2:\r\n if x + 2 >= size:\r\n return 0\r\n if table[x+2][y] 
== white:\r\n return 0\r\n elif direction == 3:\r\n if y + 2 >= size:\r\n return 0\r\n if table[x][y+2] == white:\r\n return 0\r\n elif direction == 4:\r\n if x - 2 <= 0:\r\n return 0\r\n if table[x-2][y] == white:\r\n return 0\r\n \r\n return 1\r\n\r\ndef Possible(x,y,table,size):\r\n\r\n if x+2 < size:\r\n if table[x+2][y] == black:\r\n return 1\r\n if x-2 > 0:\r\n if table[x-2][y] == black:\r\n return 1\r\n if y+2 < size:\r\n if table[x][y+2] == black:\r\n return 1\r\n if y-2 > 0:\r\n if table[x][y-2] == black:\r\n return 1 \r\n\r\n return 0\r\n\r\nblack = libtcod.black\r\nwhite = libtcod.white\r\n\r\nTable = [[0 for i in range(nTiles)]for i in range(nTiles)]\r\n\r\nfor x in range(nTiles):\r\n for y in range(nTiles):\r\n Table[x][y] = black\r\n libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)\r\n\r\nlibtcod.console_flush()\r\n\r\nMemory = []\r\n\r\nCurrX = 1\r\nCurrY = 1\r\nTable[CurrX][CurrY] = white\r\n\r\nend = 0\r\n\r\nwhile end == 0:\r\n\r\n while Possible(CurrX,CurrY,Table,nTiles):\r\n\r\n Dir = randint(1,4)\r\n while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:\r\n Dir = randint(1,4)\r\n\r\n if Dir == 1:\r\n Table[CurrX][CurrY - 1] = white\r\n CurrY -= 2\r\n Table[CurrX][CurrY] = white\r\n elif Dir == 2:\r\n Table[CurrX + 1][CurrY] = white\r\n CurrX += 2\r\n Table[CurrX][CurrY] = white\r\n elif Dir == 3:\r\n Table[CurrX][CurrY + 1] = white\r\n CurrY += 2\r\n Table[CurrX][CurrY] = white\r\n elif Dir == 4:\r\n Table[CurrX - 1][CurrY] = white\r\n CurrX -= 2\r\n Table[CurrX][CurrY] = white\r\n\r\n Memory.append(Dir)\r\n\r\n #print\r\n for x in range(nTiles):\r\n for y in range(nTiles):\r\n libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)\r\n libtcod.console_flush()\r\n\r\n while Possible(CurrX,CurrY,Table,nTiles) == 0:\r\n\r\n MemorySize = len(Memory)\r\n\r\n Dir = Memory[MemorySize-1]\r\n\r\n if Dir == 1:\r\n CurrY += 2\r\n elif Dir == 2:\r\n CurrX -= 2\r\n elif Dir == 3:\r\n CurrY -= 2\r\n elif Dir == 4:\r\n CurrX += 2\r\n\r\n del Memory[MemorySize-1]\r\n\r\n if CurrX == 1 and CurrY == 1:\r\n end = 1\r\n break\r\n\r\n#print\r\nfor x in range(nTiles):\r\n for y in range(nTiles):\r\n libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)\r\nlibtcod.console_flush()\r\n\r\nlibtcod.console_wait_for_keypress(True)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n 
\r\n"},"avg_line_length":{"kind":"number","value":20.1686746988,"string":"20.168675"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.4964157706,"string":"0.496416"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":48,"string":"48"},"score_documentation":{"kind":"number","value":0.014336917562724014,"string":"0.014337"}}},{"rowIdx":3732,"cells":{"hexsha":{"kind":"string","value":"b978586a0e39802db346feaf3a0aa1c91c336f05"},"size":{"kind":"number","value":3011,"string":"3,011"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"source/tests/test_resources.py"},"max_stars_repo_name":{"kind":"string","value":"aws-solutions/maintaining-personalized-experiences-with-machine-learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3f6f1b0069df4828eae9b0835b717500189e4f71"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-09-23T16:33:24.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:45:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"source/tests/test_resources.py"},"max_issues_repo_name":{"kind":"string","value":"aws-solutions/maintaining-personalized-experiences-with-machine-learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3f6f1b0069df4828eae9b0835b717500189e4f71"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-09-24T21:34:14.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-01-27T22:11:08.000Z"},"max_forks_repo_path":{"kind":"string","value":"source/tests/test_resources.py"},"max_forks_repo_name":{"kind":"string","value":"aws-solutions/maintaining-personalized-experiences-with-machine-learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3f6f1b0069df4828eae9b0835b717500189e4f71"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":9,"string":"9"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-09-23T23:24:46.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-02-12T04:53:16.000Z"},"content":{"kind":"string","value":"# ######################################################################################################################\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance #\n# with the License. 
You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #\n# the specific language governing permissions and limitations under the License. #\n# ######################################################################################################################\n\nimport pytest\n\nfrom shared.resource import (\n DatasetGroup,\n Schema,\n Dataset,\n DatasetImportJob,\n Solution,\n SolutionVersion,\n Campaign,\n EventTracker,\n BatchSegmentJob,\n BatchInferenceJob,\n)\n\n\n@pytest.mark.parametrize(\n \"klass,camel,dash,snake\",\n [\n (DatasetGroup, \"datasetGroup\", \"dataset-group\", \"dataset_group\"),\n (Schema, \"schema\", \"schema\", \"schema\"),\n (Dataset, \"dataset\", \"dataset\", \"dataset\"),\n (\n DatasetImportJob,\n \"datasetImportJob\",\n \"dataset-import-job\",\n \"dataset_import_job\",\n ),\n (Solution, \"solution\", \"solution\", \"solution\"),\n (SolutionVersion, \"solutionVersion\", \"solution-version\", \"solution_version\"),\n (Campaign, \"campaign\", \"campaign\", \"campaign\"),\n (EventTracker, \"eventTracker\", \"event-tracker\", \"event_tracker\"),\n (\n BatchInferenceJob,\n \"batchInferenceJob\",\n \"batch-inference-job\",\n \"batch_inference_job\",\n ),\n (BatchSegmentJob, \"batchSegmentJob\", \"batch-segment-job\", \"batch_segment_job\"),\n ],\n ids=[\n \"DatasetGroup\",\n \"Schema\",\n \"Dataset\",\n \"DatasetImportJob\",\n \"Solution\",\n \"SolutionVersion\",\n \"Campaign\",\n \"EventTracker\",\n \"BatchInferenceJob\",\n \"BatchSegmentJob,\",\n ],\n)\ndef test_resource_naming(klass, camel, dash, snake):\n assert klass().name.camel == camel\n assert klass().name.dash == dash\n assert klass().name.snake == snake\n"},"avg_line_length":{"kind":"number","value":42.4084507042,"string":"42.408451"},"max_line_length":{"kind":"number","value":120,"string":"120"},"alphanum_fraction":{"kind":"number","value":0.4543341083,"string":"0.454334"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":1332,"string":"1,332"},"score_decorators":{"kind":"number","value":0.44237794752573895,"string":"0.442378"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2027,"string":"2,027"},"score_documentation":{"kind":"number","value":0.6731982729990037,"string":"0.673198"}}},{"rowIdx":3733,"cells":{"hexsha":{"kind":"string","value":"b9787b11fbcd5779df09a2f0f27e44e75ad576ac"},"size":{"kind":"number","value":1870,"string":"1,870"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"app_venv/Lib/site-packages/phonenumbers/data/region_AG.py"},"max_stars_repo_name":{"kind":"string","value":"orlandofv/sianna"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f07dd6dbc62a9604f31ab800e482e62f14fba766"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"app_venv/Lib/site-packages/phonenumbers/data/region_AG.py"},"max_issues_repo_name":{"kind":"string","value":"orlandofv/sianna"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f07dd6dbc62a9604f31ab800e482e62f14fba766"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"app_venv/Lib/site-packages/phonenumbers/data/region_AG.py"},"max_forks_repo_name":{"kind":"string","value":"orlandofv/sianna"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f07dd6dbc62a9604f31ab800e482e62f14fba766"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"Auto-generated file, do not edit by hand. AG metadata\"\"\"\nfrom ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata\n\nPHONE_METADATA_AG = PhoneMetadata(id='AG', country_code=1, international_prefix='011',\n general_desc=PhoneNumberDesc(national_number_pattern='(?:268|[58]\\\\d\\\\d|900)\\\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),\n fixed_line=PhoneNumberDesc(national_number_pattern='268(?:4(?:6[0-38]|84)|56[0-2])\\\\d{4}', example_number='2684601234', possible_length=(10,), possible_length_local_only=(7,)),\n mobile=PhoneNumberDesc(national_number_pattern='268(?:464|7(?:1[3-9]|[28]\\\\d|3[0246]|64|7[0-689]))\\\\d{4}', example_number='2684641234', possible_length=(10,), possible_length_local_only=(7,)),\n toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\\\d{6}', example_number='8002123456', possible_length=(10,)),\n premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\\\d{6}', example_number='9002123456', possible_length=(10,)),\n personal_number=PhoneNumberDesc(national_number_pattern='52(?:355[0-46-9]|4(?:5(?:2[024-9]|5[0-46-9])|60[1-9]|9(?:2[0-5]|49)))\\\\d{4}|52(?:3(?:[2-46-9][02-9]|5[02-46-9])|4(?:[2-478][02-9]|5[034]|6[2-9]|9[05-9])|7[2-4]\\\\d)\\\\d{5}|52[34][2-9]1[02-9]\\\\d{4}|5(?:00|2[1256]|33|44|66|77|88)[2-9]\\\\d{6}', example_number='5002345678', possible_length=(10,)),\n voip=PhoneNumberDesc(national_number_pattern='26848[01]\\\\d{4}', example_number='2684801234', possible_length=(10,), possible_length_local_only=(7,)),\n pager=PhoneNumberDesc(national_number_pattern='26840[69]\\\\d{4}', example_number='2684061234', possible_length=(10,), possible_length_local_only=(7,)),\n national_prefix='1',\n national_prefix_for_parsing='1|([457]\\\\d{6})$',\n national_prefix_transform_rule='268\\\\1',\n leading_digits='268',\n 
mobile_number_portable_region=True)\n"},"avg_line_length":{"kind":"number","value":103.8888888889,"string":"103.888889"},"max_line_length":{"kind":"number","value":352,"string":"352"},"alphanum_fraction":{"kind":"number","value":0.7117647059,"string":"0.711765"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":638,"string":"638"},"score_documentation":{"kind":"number","value":0.3411764705882353,"string":"0.341176"}}},{"rowIdx":3734,"cells":{"hexsha":{"kind":"string","value":"b97884a1b2bbd76cce01bb9efe2744d31832af25"},"size":{"kind":"number","value":2182,"string":"2,182"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"gradefiles-send.py"},"max_stars_repo_name":{"kind":"string","value":"lapets/bu-gsubmit-grading"},"max_stars_repo_head_hexsha":{"kind":"string","value":"69c40a763908be1c954dce3e5e5aab854ac379ff"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-10-03T15:29:20.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-06-28T17:33:06.000Z"},"max_issues_repo_path":{"kind":"string","value":"gradefiles-send.py"},"max_issues_repo_name":{"kind":"string","value":"lapets/bu-gsubmit-grading"},"max_issues_repo_head_hexsha":{"kind":"string","value":"69c40a763908be1c954dce3e5e5aab854ac379ff"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"gradefiles-send.py"},"max_forks_repo_name":{"kind":"string","value":"lapets/bu-gsubmit-grading"},"max_forks_repo_head_hexsha":{"kind":"string","value":"69c40a763908be1c954dce3e5e5aab854ac379ff"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#####################################################################\n## \n## gradefiles-send.py\n##\n## Script to send grade files by email to enrolled students; the\n## input grade file names should correspond to the user names of\n## the students.\n##\n##\n\nfrom email.mime.text import MIMEText # For creating a message string.\nfrom subprocess import Popen, PIPE # For sending email on linux.\nimport sys # For command line arguments.\nimport os # For commands and file manipulation (walk, path, system).\n\n#####################################################################\n## Sending a simple email message.\n##\n\ndef send(txt, courseNumber, task, sender, targets):\n msg = MIMEText(txt)\n msg[\"From\"] = sender + \"@bu.edu\"\n msg[\"To\"] = \",\".join([target + \"@bu.edu\" for target in targets])\n 
msg[\"Cc\"] = sender + \"@bu.edu\"\n msg[\"Subject\"] = \"CS \" + courseNumber + \" \" + task + \" grade\"\n p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE)\n p.communicate(bytes(msg.as_string(), 'UTF-8'))\n\n#####################################################################\n## Process the command line parameters.\n##\n\nif len(sys.argv) == 6\\\n and (int(sys.argv[1][0:3]) in range(100,1000))\\\n and sys.argv[2] in ['Fall', 'Spring']\\\n and int(sys.argv[3]) in range(2000,2100):\n courseNumber = sys.argv[1] # Accepts course names like \"591 X1.\"\n season = sys.argv[2]\n year = sys.argv[3]\n task = sys.argv[4]\n sender = sys.argv[5]\nelse:\n print('\\n Usage:\\n\\n % python gradefiles-send.py <###> \\n')\n exit()\n\n#####################################################################\n## Check for list of files.\n##\n\nif not os.path.exists('./data'):\n print('No folder \"data\" containing grade files found. Exiting.')\n exit()\n\n#####################################################################\n## Send the grade files.\n##\n\nfor curdir, dirs, files in os.walk('./data/'):\n for file in files:\n txt = open('./data/'+file, 'r').read()\n targets = file.split('.')[0].split(\"_\")\n send(txt, courseNumber, task, sender, targets) \n print('Sent grade file to ' + str(targets) + '.')\n\n#eof"},"avg_line_length":{"kind":"number","value":33.5692307692,"string":"33.569231"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.5197066911,"string":"0.519707"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1177,"string":"1,177"},"score_documentation":{"kind":"number","value":0.5394133822181485,"string":"0.539413"}}},{"rowIdx":3735,"cells":{"hexsha":{"kind":"string","value":"b9789c0f2981942a54633089abdf3245b58a73a3"},"size":{"kind":"number","value":1227,"string":"1,227"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py"},"max_stars_repo_name":{"kind":"string","value":"GalAster/16"},"max_stars_repo_head_hexsha":{"kind":"string","value":"47560a2132fbe4dda35a35dedfd7d8e6a8acc35a"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-10-03T01:51:38.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-10-04T16:15:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py"},"max_issues_repo_name":{"kind":"string","value":"GalAster/16"},"max_issues_repo_head_hexsha":{"kind":"string","value":"47560a2132fbe4dda35a35dedfd7d8e6a8acc35a"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n 
\"Unlicense\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py"},"max_forks_repo_name":{"kind":"string","value":"GalAster/16"},"max_forks_repo_head_hexsha":{"kind":"string","value":"47560a2132fbe4dda35a35dedfd7d8e6a8acc35a"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-03-17T12:58:52.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-03-17T12:58:52.000Z"},"content":{"kind":"string","value":"import os\nimport pickle\nimport tensorflow as tf\nimport wolframclient.serializers as wxf\n\nname = 'karras2018iclr-celebahq-1024x1024'\nfile = open(name + '.pkl', 'rb')\nsess = tf.InteractiveSession()\nG, D, Gs = pickle.load(file)\nsaver = tf.train.Saver()\nsave_path = \"./target/\" + name + \"/\"\nmodel_name = 'model'\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nsave_path_full = os.path.join(save_path, model_name)\nsaver.save(sess, save_path_full)\n\nckpt = tf.train.get_checkpoint_state(save_path)\nreader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)\nall_variables = list(reader.get_variable_to_shape_map().keys())\nnpy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))\nwxf.export(npy, name + '.wxf', target_format='wxf')\n\n# Save as protobuf\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=sess.graph_def,\n # output_node_names=['G_paper_1/images_out']\n output_node_names=['G_paper_1/ToRGB_lod0/add']\n )\n\n with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as file: # 保存模型\n file.write(output_graph_def.SerializeToString()) # 序列化输出\n"},"avg_line_length":{"kind":"number","value":34.0833333333,"string":"34.083333"},"max_line_length":{"kind":"number","value":74,"string":"74"},"alphanum_fraction":{"kind":"number","value":0.7269763651,"string":"0.726976"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":216,"string":"216"},"score_documentation":{"kind":"number","value":0.17349397590361446,"string":"0.173494"}}},{"rowIdx":3736,"cells":{"hexsha":{"kind":"string","value":"b978dfcb152bc099b2de54896ed9a54dfbc29639"},"size":{"kind":"number","value":6890,"string":"6,890"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/moveGoogle.py"},"max_stars_repo_name":{"kind":"string","value":"Quanta-Robotics/Robot-Blueberry"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":25,"string":"25"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-06-08T07:09:30.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-30T06:28:35.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/moveGoogle.py"},"max_issues_repo_name":{"kind":"string","value":"ICT-CoU/Robot-Blueberry"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d19fd1be037df9d67de64df57a87006d74cd6c43"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-05-23T12:54:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-07T17:47:56.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/moveGoogle.py"},"max_forks_repo_name":{"kind":"string","value":"ICT-CoU/Robot-Blueberry"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d19fd1be037df9d67de64df57a87006d74cd6c43"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":14,"string":"14"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-06-08T13:02:28.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-12-30T20:07:18.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport os\nimport os.path\nimport yaml\nimport time\nimport random\nimport multiprocessing\nimport RPi.GPIO as GPIO\nfrom talk import say\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nfrom adafruit_servokit import ServoKit\n\nMotor1 = {'EN': 27, 'input1': 19, 'input2': 16}\nMotor2 = {'EN': 22, 'input1': 26, 'input2': 20}\n\nfor x in Motor1:\n GPIO.setup(Motor1[x], GPIO.OUT)\n GPIO.setup(Motor2[x], GPIO.OUT)\n\nEN1 = GPIO.PWM(Motor1['EN'], 100) \nEN2 = GPIO.PWM(Motor2['EN'], 100) \n\nEN1.start(0) \nEN2.start(0) \n\n\nhand = ServoKit(channels=16)\n\nROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))\n\ndef readYaml():\n with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:\n servo = yaml.load(conf, Loader=yaml.FullLoader)\n return servo\n\n\ndef writeYaml(s=None):\n with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:\n if s==None:\n yaml.dump(servo,conf)\n else:\n yaml.dump(s,conf)\n\n\nservo = readYaml()\n\nif servo == None:\n with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:\n servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)\n writeYaml(servoBackUp)\n servo = readYaml()\n if servo == None:\n print('close')\n exit()\n\n\nInitial = servo['Initial_Position']['I2C']\nCurrent = servo['Current_Position']['I2C']\n\nInitialGpio = servo['Initial_Position']['Gpio']\nCurrentGpio = servo['Current_Position']['Gpio']\nGpioPin = servo['Pin']['Gpio']\n\n\nfor i in range(0,6):\n GPIO.setup(GpioPin[i], GPIO.OUT)\nServo = []\nfor i in range(0,6):\n Servo.append(GPIO.PWM(GpioPin[i],50))\n Servo[i].start(0)\n\n\ndef changeDegree(pin,newDegree,time1=0.05,update=5):\n maxChange = 0\n pinSize = len(pin)\n for i in range(0,pinSize):\n maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)\n for deg in range(0,maxChange,update):\n for i in range(0,pinSize):\n if Current[pin[i]]newDegree[i]:\n Current[pin[i]] -= update\n\n for i in range(0,pinSize):\n hand.servo[pin[i]].angle = Current[pin[i]]\n 
servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]\n writeYaml()\n time.sleep(time1)\n \n\n \ndef takePosition():\n changeDegree([7,8],[180,0])\n changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])\n\n\ndef changeDegreeGpio(pin,degree,update,duration):\n pinSize = len(pin)\n for i in range(0,pinSize):\n p = pin[i]\n if CurrentGpio[p]>degree[i]:\n update = -update\n\n for deg in range(CurrentGpio[p],degree[i],update):\n duty = deg/18\n duty+=2\n Servo[p].ChangeDutyCycle(duty)\n time.sleep(duration)\n CurrentGpio[p]=degree[i]\n writeYaml()\n\n\ndef Run(a, b, c, d, x):\n GPIO.output(Motor1['input1'], GPIO.LOW)\n GPIO.output(Motor1['input2'], GPIO.LOW)\n GPIO.output(Motor2['input1'], GPIO.LOW)\n GPIO.output(Motor2['input2'], GPIO.LOW)\n\n if a==1:\n GPIO.output(Motor1['input1'], GPIO.HIGH)\n if b==1:\n GPIO.output(Motor1['input2'], GPIO.HIGH)\n if c==1:\n GPIO.output(Motor2['input1'], GPIO.HIGH)\n if d==1:\n GPIO.output(Motor2['input2'], GPIO.HIGH)\n\n EN2.ChangeDutyCycle(x)\n EN1.ChangeDutyCycle(x)\n\n\ndef Stop():\n Run(0,0,0,0,0)\n\n\ndef Start_Slow(a, b, c, d):\n for i in range(0,100,20):\n Run(a,b,c,d,i)\n time.sleep(0.5)\n\n \ndef Stop_Slow(a,b,c,d):\n for i in range(100,0,-20):\n Run(a,b,c,d,i)\n time.sleep(0.5)\n\n\ndef yes(times=3):\n for i in range(0,times):\n changeDegree([0],[30])\n time.sleep(0.08)\n changeDegree([0],[0])\n time.sleep(0.08)\n\ndef no(times=3):\n for i in range(0,times):\n changeDegree([15],[70],5,0.05)\n time.sleep(0.2)\n changeDegree([15],[110],5,0.05)\n time.sleep(0.2)\n changeDegree([15],[90],5,0.05)\n\ndef move_head(times=3):\n for i in range(0,times):\n changeDegree([0],[20])\n changeDegreeGpio([0],[80],5,0.05)\n changeDegree([0],[0])\n changeDegreeGpio([0],[100],5,0.05)\n\n changeDegreeGpio([0],[90],10,0.01)\n\ndef random0():\n r = random.randrange(1,10000000)%3\n if(r==1):\n changeDegree([0],[20])\n changeDegree([0],[0]) \n elif(r==2):\n changeDegreeGpio([0],[120],5,0.05)\n changeDegreeGpio([0],[90],5,0.05)\n else:\n changeDegreeGpio([0],[60],5,0.05)\n changeDegreeGpio([0],[90],5,0.05)\n \ndef random1():\n r = random.randrange(1,3)\n if(r==1):\n changeDegree([0],[20])\n changeDegree([0],[0])\n changeDegree([3],[50])\n changeDegree([9],[100])\n changeDegree([9],[60])\n changeDegree([3],[0]) \n elif(r==2):\n changeDegree([0],[20])\n changeDegree([0],[0])\n changeDegree([4],[120])\n changeDegree([10],[140])\n changeDegree([10],[180])\n changeDegree([4],[170])\n else:\n changeDegree([3,4],[50,120])\n changeDegree([9,10],[100,140])\n changeDegree([9,10],[60,180])\n changeDegree([3,4],[0,180])\n\ndef random2():\n changeDegree([3,4],[20,150])\n pin = [7,8,9,10]\n deg = [[160,0,60,100],[180,20,100,140]]\n ok = [0,0,0,0]\n select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]\n for i in range(0,15):\n r = select[i%len(select)]%4\n print (' move ',r)\n changeDegree([pin[r]],[deg[ok[r]][r]])\n ok[r]^=1\n takePosition()\n\ndef random3():\n changeDegree([3,4],[20,150])\n pin = [7,8,9,10]\n deg = [[160,0,60,100],[180,20,100,140]]\n ok = [0,0,0,0]\n for i in range(0,15):\n r = random.randrange(1,1000000)%4\n print (' move ',r)\n changeDegree([pin[r]],[deg[ok[r]][r]])\n takePosition()\n\n\ndef randomCall(t):\n changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])\n pin = [5,6,7,8]\n deg = [[80,50,100,70],[110,90,110,90]]\n select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]\n ok = [0,0,0,0]\n ln = len(select)\n for i in 
range(0,t*3):\n r = select[i%16]%4\n changeDegree([pin[r]],[deg[ok[r]][r]])\n ok[r]^=1\n takePosition()\n\n\ndef expression(t):\n print (' i got value of t is : ',t)\n if(t==0):\n random0()\n elif(t==1):\n random1()\n elif(t==2):\n random2()\n elif(t==3):\n random3()\n else:\n randomCall(t)\n\ndef speakOnline(t):\n expression(t)\n\ndef speakOffline(speech):\n t = int(len(speech)/15)\n print ('Offline t value is : ',t)\n p1 = multiprocessing.Process(target=expression,args=[t])\n p1.start()\n say(speech)\n\n\n"},"avg_line_length":{"kind":"number","value":25.3308823529,"string":"25.330882"},"max_line_length":{"kind":"number","value":154,"string":"154"},"alphanum_fraction":{"kind":"number","value":0.5603773585,"string":"0.560377"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":456,"string":"456"},"score_documentation":{"kind":"number","value":0.06618287373004354,"string":"0.066183"}}},{"rowIdx":3737,"cells":{"hexsha":{"kind":"string","value":"b978fbbcd4002601ca1e2723cae4385002e671d8"},"size":{"kind":"number","value":2063,"string":"2,063"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/onegov/translator_directory/models/language.py"},"max_stars_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/onegov/translator_directory/models/language.py"},"max_issues_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/onegov/translator_directory/models/language.py"},"max_forks_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from uuid import uuid4\n\nfrom sqlalchemy import Index, Column, Text, Table, ForeignKey\nfrom sqlalchemy.orm import object_session\n\nfrom onegov.core.orm import Base\nfrom onegov.core.orm.types import UUID\n\n\nspoken_association_table = Table(\n 'spoken_lang_association',\n Base.metadata,\n Column(\n 'translator_id',\n 
UUID,\n ForeignKey('translators.id'),\n nullable=False),\n Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)\n)\n\nwritten_association_table = Table(\n 'written_lang_association',\n Base.metadata,\n Column(\n 'translator_id',\n UUID,\n ForeignKey('translators.id'),\n nullable=False),\n Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)\n)\n\nmother_tongue_association_table = Table(\n 'mother_tongue_association',\n Base.metadata,\n Column(\n 'translator_id',\n UUID,\n ForeignKey('translators.id'),\n nullable=False),\n Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)\n)\n\n\nclass Language(Base):\n\n __tablename__ = 'languages'\n\n __table_args__ = (\n Index('unique_name', 'name', unique=True),\n )\n\n id = Column(UUID, primary_key=True, default=uuid4)\n name = Column(Text, nullable=False)\n\n @property\n def speakers_count(self):\n session = object_session(self)\n return session.query(\n spoken_association_table).filter_by(lang_id=self.id).count()\n\n @property\n def writers_count(self):\n session = object_session(self)\n return session.query(\n written_association_table).filter_by(lang_id=self.id).count()\n\n @property\n def native_speakers_count(self):\n \"\"\"Having it as mother tongue...\"\"\"\n session = object_session(self)\n return session.query(\n mother_tongue_association_table).filter_by(lang_id=self.id).count()\n\n @property\n def deletable(self):\n return (\n self.speakers_count\n + self.writers_count\n + self.native_speakers_count\n ) == 0\n"},"avg_line_length":{"kind":"number","value":25.4691358025,"string":"25.469136"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.6509936985,"string":"0.650994"},"count_classes":{"kind":"number","value":1028,"string":"1,028"},"score_classes":{"kind":"number","value":0.4983034415899176,"string":"0.498303"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":773,"string":"773"},"score_decorators":{"kind":"number","value":0.37469704314105673,"string":"0.374697"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":305,"string":"305"},"score_documentation":{"kind":"number","value":0.1478429471643238,"string":"0.147843"}}},{"rowIdx":3738,"cells":{"hexsha":{"kind":"string","value":"b97a0b2a9f0b601569ce8973596517ed7d8790ec"},"size":{"kind":"number","value":3588,"string":"3,588"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py"},"max_stars_repo_name":{"kind":"string","value":"djemeljanovs/tfjs"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ee4430cd7a04283ec09184a3fe9d3fb27496f1dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py"},"max_issues_repo_name":{"kind":"string","value":"djemeljanovs/tfjs"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ee4430cd7a04283ec09184a3fe9d3fb27496f1dc"},"max_issues_repo_licenses":{"kind":"list 
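Editorial note on the language.py record above: its count properties all follow one pattern — query an association table through object_session(self) and count the matching rows. The sketch below reproduces that pattern in plain SQLAlchemy (1.4+ assumed) so it can run stand-alone; the table and property names mirror the record, but the in-memory SQLite engine and the simplified String columns are assumptions, not part of the original onegov code.

from uuid import uuid4
from sqlalchemy import Column, ForeignKey, String, Table, create_engine
from sqlalchemy.orm import Session, declarative_base, object_session

Base = declarative_base()

# Simplified stand-in for spoken_lang_association from the record above.
spoken_association_table = Table(
    'spoken_lang_association', Base.metadata,
    Column('translator_id', String, nullable=False),
    Column('lang_id', String, ForeignKey('languages.id'), nullable=False),
)

class Language(Base):
    __tablename__ = 'languages'
    id = Column(String, primary_key=True, default=lambda: uuid4().hex)
    name = Column(String, nullable=False)

    @property
    def speakers_count(self):
        # Same pattern as the record: count rows in the association table.
        session = object_session(self)
        return session.query(spoken_association_table).filter_by(lang_id=self.id).count()

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    lang = Language(name='French')
    session.add(lang)
    session.flush()  # assigns lang.id via the Python-side default
    session.execute(spoken_association_table.insert().values(translator_id='t-1', lang_id=lang.id))
    print(lang.speakers_count)  # -> 1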
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py"},"max_forks_repo_name":{"kind":"string","value":"djemeljanovs/tfjs"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ee4430cd7a04283ec09184a3fe9d3fb27496f1dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport re\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import node_def_pb2\nfrom tensorflow.python.framework import tensor_util\n\n# Custom op name for fused depthwise conv2d\nFUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'\n# The grappler op name for fused MatMul which starts with '_'\nFUSED_MATMUL = '_FusedMatMul'\n\ndef node_from_map(node_map, name):\n \"\"\"Pulls a node def from a dictionary for a given name.\n\n Args:\n node_map: Dictionary containing an entry indexed by name for every node.\n name: Identifies the node we want to find.\n\n Returns:\n NodeDef of the node with the given name.\n\n Raises:\n ValueError: If the node isn't present in the dictionary.\n \"\"\"\n stripped_name = node_name_from_input(name)\n if stripped_name not in node_map:\n raise ValueError(\"No node named '%s' found in map.\" % name)\n return node_map[stripped_name]\n\n\ndef values_from_const(node_def):\n \"\"\"Extracts the values from a const NodeDef as a numpy ndarray.\n\n Args:\n node_def: Const NodeDef that has the values we want to access.\n\n Returns:\n Numpy ndarray containing the values.\n\n Raises:\n ValueError: If the node isn't a Const.\n \"\"\"\n if node_def.op != \"Const\":\n raise ValueError(\n \"Node named '%s' should be a Const op for values_from_const.\" %\n node_def.name)\n input_tensor = node_def.attr[\"value\"].tensor\n tensor_value = tensor_util.MakeNdarray(input_tensor)\n return tensor_value\n\n# Whether to scale by gamma after normalization.\ndef scale_after_normalization(node):\n if node.op == \"BatchNormWithGlobalNormalization\":\n return node.attr[\"scale_after_normalization\"].b\n return True\n\ndef node_name_from_input(node_name):\n \"\"\"Strips off ports and other decorations to get the underlying node name.\"\"\"\n if node_name.startswith(\"^\"):\n node_name = node_name[1:]\n m = re.search(r\"(.*):\\d+$\", node_name)\n if m:\n node_name = m.group(1)\n return node_name\n\ndef cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):\n \"\"\"Clean up the graph def by removing the skipped nodes 
and clean up the nodes\n with inputs that have been removed.\n\n Args:\n input_graph_def: GraphDef object to be cleaned.\n node_to_skip: Dict with node names to be skipped.\n inputs_to_remove: List of nodes to be removed from inputs of all nodes.\n Returns:\n GraphDef that has been cleaned.\n\n \"\"\"\n result_graph_def = graph_pb2.GraphDef()\n for node in input_graph_def.node:\n if node.name in nodes_to_skip:\n continue\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(node)\n for value in inputs_to_remove:\n for i, input_node in enumerate(new_node.input):\n if input_node == value.name:\n new_node.input[i] = value.input[0]\n result_graph_def.node.extend([new_node])\n result_graph_def.library.CopyFrom(input_graph_def.library)\n result_graph_def.versions.CopyFrom(input_graph_def.versions)\n return result_graph_def\n"},"avg_line_length":{"kind":"number","value":33.8490566038,"string":"33.849057"},"max_line_length":{"kind":"number","value":80,"string":"80"},"alphanum_fraction":{"kind":"number","value":0.7260312152,"string":"0.726031"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2035,"string":"2,035"},"score_documentation":{"kind":"number","value":0.5671683389074693,"string":"0.567168"}}},{"rowIdx":3739,"cells":{"hexsha":{"kind":"string","value":"b97af59ee4283114481f3e83dc8e3cf6244bb61c"},"size":{"kind":"number","value":1014,"string":"1,014"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"loss_fn/classification_loss_fns/binary_cross_entropy.py"},"max_stars_repo_name":{"kind":"string","value":"apple/ml-cvnets"},"max_stars_repo_head_hexsha":{"kind":"string","value":"84d992f413e52c0468f86d23196efd9dad885e6f"},"max_stars_repo_licenses":{"kind":"list like","value":["AML"],"string":"[\n \"AML\"\n]"},"max_stars_count":{"kind":"number","value":209,"string":"209"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-30T08:32:10.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T16:18:03.000Z"},"max_issues_repo_path":{"kind":"string","value":"loss_fn/classification_loss_fns/binary_cross_entropy.py"},"max_issues_repo_name":{"kind":"string","value":"apple/ml-cvnets"},"max_issues_repo_head_hexsha":{"kind":"string","value":"84d992f413e52c0468f86d23196efd9dad885e6f"},"max_issues_repo_licenses":{"kind":"list like","value":["AML"],"string":"[\n \"AML\"\n]"},"max_issues_count":{"kind":"number","value":12,"string":"12"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-12-04T10:47:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T15:39:40.000Z"},"max_forks_repo_path":{"kind":"string","value":"loss_fn/classification_loss_fns/binary_cross_entropy.py"},"max_forks_repo_name":{"kind":"string","value":"apple/ml-cvnets"},"max_forks_repo_head_hexsha":{"kind":"string","value":"84d992f413e52c0468f86d23196efd9dad885e6f"},"max_forks_repo_licenses":{"kind":"list like","value":["AML"],"string":"[\n 
\"AML\"\n]"},"max_forks_count":{"kind":"number","value":50,"string":"50"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-11-01T08:15:02.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-29T08:17:34.000Z"},"content":{"kind":"string","value":"#\n# For licensing see accompanying LICENSE file.\n# Copyright (C) 2022 Apple Inc. All Rights Reserved.\n#\n\nfrom torch.nn import functional as F\nfrom torch import Tensor\nimport argparse\n\nfrom . import register_classification_loss_fn\nfrom .. import BaseCriteria\n\n\n@register_classification_loss_fn(name=\"binary_cross_entropy\")\nclass ClsBinaryCrossEntropy(BaseCriteria):\n \"\"\"Binary CE for classification tasks\"\"\"\n\n def __init__(self, opts, *args, **kwargs) -> None:\n super().__init__()\n\n def forward(\n self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs\n ) -> Tensor:\n if target.dim() != prediction.dim():\n target = F.one_hot(target, num_classes=prediction.shape[-1])\n\n return F.binary_cross_entropy_with_logits(\n input=prediction,\n target=target.to(prediction.dtype),\n weight=None,\n reduction=\"sum\",\n )\n\n def __repr__(self) -> str:\n return \"{}()\".format(self.__class__.__name__)\n"},"avg_line_length":{"kind":"number","value":28.1666666667,"string":"28.166667"},"max_line_length":{"kind":"number","value":87,"string":"87"},"alphanum_fraction":{"kind":"number","value":0.66765286,"string":"0.667653"},"count_classes":{"kind":"number","value":691,"string":"691"},"score_classes":{"kind":"number","value":0.6814595660749507,"string":"0.68146"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":753,"string":"753"},"score_decorators":{"kind":"number","value":0.742603550295858,"string":"0.742604"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":173,"string":"173"},"score_documentation":{"kind":"number","value":0.17061143984220908,"string":"0.170611"}}},{"rowIdx":3740,"cells":{"hexsha":{"kind":"string","value":"b97c7f15dd61f4851cffcb3982337f852b3b8da5"},"size":{"kind":"number","value":576,"string":"576"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Sorting/insertion_sort.py"},"max_stars_repo_name":{"kind":"string","value":"lakshyarawal/pythonPractice"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4b400342198a8270c5ac0c6306afb555f927c6c1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Sorting/insertion_sort.py"},"max_issues_repo_name":{"kind":"string","value":"lakshyarawal/pythonPractice"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4b400342198a8270c5ac0c6306afb555f927c6c1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Sorting/insertion_sort.py"},"max_forks_repo_name":{"kind":"string","value":"lakshyarawal/pythonPractice"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4b400342198a8270c5ac0c6306afb555f927c6c1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\" Insertion Sort Algorithm:\"\"\"\n\n\n\"\"\"Implementation\"\"\"\n\n\ndef insertion_sort(arr) -> list:\n n = len(arr)\n for i in range(1, n):\n swap_index = i\n for j in range(i-1, -1, -1):\n if arr[swap_index] < arr[j]:\n arr[swap_index], arr[j] = arr[j], arr[swap_index]\n swap_index -= 1\n else:\n break\n return arr\n\n\ndef main():\n arr_input = [10, 5, 30, 1, 2, 5, 10, 10]\n a2 = insertion_sort(arr_input)\n print(a2)\n\n\n# Using the special variable\n# __name__\nif __name__ == \"__main__\":\n main()\n"},"avg_line_length":{"kind":"number","value":19.2,"string":"19.2"},"max_line_length":{"kind":"number","value":65,"string":"65"},"alphanum_fraction":{"kind":"number","value":0.5225694444,"string":"0.522569"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":100,"string":"100"},"score_documentation":{"kind":"number","value":0.1736111111111111,"string":"0.173611"}}},{"rowIdx":3741,"cells":{"hexsha":{"kind":"string","value":"b97c828450c34038ee92e089e3f2b951d2113017"},"size":{"kind":"number","value":903,"string":"903"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"nipype/interfaces/spm/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"felixsc1/nipype"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e722d6170593583f16ddfcb95473e5d30b5f1d7c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":8,"string":"8"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-05-29T09:38:30.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-01-20T03:36:59.000Z"},"max_issues_repo_path":{"kind":"string","value":"nipype/interfaces/spm/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"felixsc1/nipype"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e722d6170593583f16ddfcb95473e5d30b5f1d7c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":12,"string":"12"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-09T03:01:16.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-11T23:59:36.000Z"},"max_forks_repo_path":{"kind":"string","value":"nipype/interfaces/spm/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"felixsc1/nipype"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e722d6170593583f16ddfcb95473e5d30b5f1d7c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-07-17T12:49:49.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-07-17T12:49:49.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Top-level namespace for spm.\"\"\"\n\nfrom .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,\n scans_for_fnames)\nfrom .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,\n Coregister, Normalize, Normalize12, Segment,\n Smooth, NewSegment, DARTEL, DARTELNorm2MNI,\n CreateWarped, VBMSegment)\nfrom .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,\n OneSampleTTestDesign, TwoSampleTTestDesign,\n PairedTTestDesign, MultipleRegressionDesign)\nfrom .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,\n ApplyInverseDeformation, ResliceToReference, DicomImport)\n"},"avg_line_length":{"kind":"number","value":53.1176470588,"string":"53.117647"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.6533776301,"string":"0.653378"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":169,"string":"169"},"score_documentation":{"kind":"number","value":0.18715393133997785,"string":"0.187154"}}},{"rowIdx":3742,"cells":{"hexsha":{"kind":"string","value":"b97cd7905f5c596cb6d79b67c2c80e83907421d9"},"size":{"kind":"number","value":8257,"string":"8,257"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"network.py"},"max_stars_repo_name":{"kind":"string","value":"tobloef/neural-network"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bd05a8b9eccc0f5a973782247d39f9b5aa33156c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-01-06T22:27:58.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-08-12T20:29:51.000Z"},"max_issues_repo_path":{"kind":"string","value":"network.py"},"max_issues_repo_name":{"kind":"string","value":"tobloef/neural-network"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bd05a8b9eccc0f5a973782247d39f9b5aa33156c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-03-31T18:49:56.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-04-19T04:52:33.000Z"},"max_forks_repo_path":{"kind":"string","value":"network.py"},"max_forks_repo_name":{"kind":"string","value":"tobloef/neural-network"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bd05a8b9eccc0f5a973782247d39f9b5aa33156c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\nfrom mathUtils import *\n\nclass Network(object):\n \"\"\"\n Model for a feedforward Neural Network that use backpropagation with stochastic gradient decent.\n \"\"\"\n\n def __init__(self, layerSizes, biasVectors, weightMatrices):\n \"\"\"\n Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer.\n \"\"\"\n \n self.layerSizes = layerSizes\n self.biasVectors = biasVectors\n self.weightMatrices = weightMatrices\n\n @staticmethod\n def generateRandomNetwork(layerSizes):\n \"\"\"\n Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0.\n \"\"\"\n \n biasVectors = []\n \"\"\"Generate biases for each neuron in each layer, except the input layer.\"\"\"\n for size in layerSizes[1:]:\n \"\"\"\n np.random.randn generates arrays of arrays of random numbers, based on the paramters.\n np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers.\n \"\"\"\n biasVectors.append(np.random.randn(size, 1))\n \"\"\"Generate weights for connections between layers.\"\"\"\n weightMatrices = []\n for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):\n weightMatrices.append(np.random.randn(prevSize, size))\n return Network(layerSizes, biasVectors, weightMatrices)\n\n def getOutputs(self, inputs):\n \"\"\"Return a vector of the network's outputs based on the given inputs, using feedforward.\"\"\"\n \n activations = inputs\n for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):\n \"\"\"\n For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. 
This is the activation vector for the current layer.\n \"\"\"\n zVector = np.dot(weightMatrix, activations) + biasVector\n activations = sigmoid(zVector)\n return activations\n\n def train(self, data, epochs, batchSize, rate, testData=None):\n \"\"\"\n Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training are used to reduce the training time. The training date is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch.\n \"\"\"\n\n print(\"Training network with shape {}, batch size {} and learning rate {} for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs))\n for e in range(epochs):\n np.random.shuffle(data)\n batches = []\n for i in range(0, len(data), batchSize):\n batches.append(data[i:i+batchSize])\n for batch in batches:\n self._tuneNetwork(batch, rate)\n if (testData):\n result = self._evaluate(testData)\n print(\"Epoch #{} completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result))\n else:\n print(\"Epoch #{} completed.\".format(e))\n\n def _tuneNetwork(self, batch, rate):\n \"\"\"\n Tune the weights and biases of the network by using backpropagation with gradient descend.\n \"\"\"\n \n \"\"\"\n Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training date.\n \"\"\"\n sumBiasVectors = []\n for biasVector in self.biasVectors:\n sumBiasVectors.append(np.zeros(biasVector.shape))\n sumWeightMatrices = []\n for weightMatrix in self.weightMatrices:\n sumWeightMatrices.append(np.zeros(weightMatrix.shape))\n for inputs, expected in batch:\n \"\"\"\n Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data.\n \"\"\" \n deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)\n newSumBiasVectors = []\n for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):\n newSumBiasVectors.append(totalBiasVector + deltaBiasVector)\n sumBiasVectors = newSumBiasVectors\n newSumWeightMatrices = []\n for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):\n newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)\n sumWeightMatrices = newSumWeightMatrices\n \"\"\"\n Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. 
Then use these as the new weights and biases.\n \"\"\"\n newBiasVectors = []\n for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):\n newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)\n newWeightMatrices = []\n for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices):\n newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix)\n self.biasVectors = newBiasVectors\n self.weightMatrices = newWeightMatrices\n\n def _backpropagate(self, inputs, expected):\n \"\"\"\n Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data.\n \"\"\"\n\n deltaBiasVectors = []\n for biasVector in self.biasVectors:\n deltaBiasVectors.append(np.zeros(biasVector.shape))\n deltaWeightMatrices = []\n for weightMatrix in self.weightMatrices:\n deltaWeightMatrices.append(np.zeros(weightMatrix.shape))\n \"\"\"Store all activations for the entire network, starting with the input layer.\"\"\"\n activationVector = inputs\n activationVectors = [inputs]\n \"\"\"Find the z-vector for layer in the network\"\"\"\n zVectors = []\n for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):\n zVector = np.dot(weightMatrix, activationVector) + biasVector\n zVectors.append(zVector)\n activationVector = sigmoid(zVector)\n activationVectors.append(activationVector)\n \"\"\"\n * Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias.\n * Then move onto each hidden layer and the input layer.\n \"\"\"\n deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])\n deltaBiasVectors[-1] = deltaBiasVector\n deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose())\n\n for l in range(-2, -len(self.layerSizes), -1):\n # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead\n weightMatrix = self.weightMatrices[l+1].transpose()\n sigmoidDeriv = sigmoidDerivative(zVectors[l])\n deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv\n deltaBiasVectors[l] = deltaBiasVector\n deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())\n return (deltaBiasVectors, deltaWeightMatrices)\n\n def _evaluate(self, testData):\n \"\"\"Test the network with the specified test data and return the number of correct guesses.\"\"\"\n correctGuesses = 0\n for inputs, expected in testData:\n \"\"\"Increment correct guesses if the most active output is the expected one.\"\"\"\n outputs = self.getOutputs(inputs)\n guess = np.argmax(outputs)\n if (guess == expected):\n correctGuesses += 1\n return 
correctGuesses"},"avg_line_length":{"kind":"number","value":53.2709677419,"string":"53.270968"},"max_line_length":{"kind":"number","value":286,"string":"286"},"alphanum_fraction":{"kind":"number","value":0.6579871624,"string":"0.657987"},"count_classes":{"kind":"number","value":8213,"string":"8,213"},"score_classes":{"kind":"number","value":0.9946711880828388,"string":"0.994671"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":1053,"string":"1,053"},"score_decorators":{"kind":"number","value":0.12752815792660774,"string":"0.127528"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":3275,"string":"3,275"},"score_documentation":{"kind":"number","value":0.39663315974324814,"string":"0.396633"}}},{"rowIdx":3743,"cells":{"hexsha":{"kind":"string","value":"b97d4675d330154e0b12b91fbd601affd888ea29"},"size":{"kind":"number","value":1901,"string":"1,901"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"examples/airflow/dags/etl_orders_7_days.py"},"max_stars_repo_name":{"kind":"string","value":"phixMe/marquez"},"max_stars_repo_head_hexsha":{"kind":"string","value":"06d71635369893b371a8a9c9e7023f11d7cbb1f8"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"examples/airflow/dags/etl_orders_7_days.py"},"max_issues_repo_name":{"kind":"string","value":"phixMe/marquez"},"max_issues_repo_head_hexsha":{"kind":"string","value":"06d71635369893b371a8a9c9e7023f11d7cbb1f8"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"examples/airflow/dags/etl_orders_7_days.py"},"max_forks_repo_name":{"kind":"string","value":"phixMe/marquez"},"max_forks_repo_head_hexsha":{"kind":"string","value":"06d71635369893b371a8a9c9e7023f11d7cbb1f8"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from datetime import datetime\nfrom marquez_airflow import DAG\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.utils.dates import days_ago\n\ndefault_args = {\n 'owner': 'datascience',\n 'depends_on_past': False,\n 'start_date': days_ago(1),\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'email': ['datascience@example.com']\n}\n\ndag = DAG(\n 'etl_orders_7_days',\n schedule_interval='@hourly',\n catchup=False,\n default_args=default_args,\n description='Loads newly placed orders weekly.'\n)\n\nt1 = PostgresOperator(\n task_id='if_not_exists',\n postgres_conn_id='food_delivery_db',\n sql='''\n CREATE TABLE IF NOT EXISTS orders_7_days (\n order_id INTEGER REFERENCES orders(id),\n placed_on TIMESTAMP NOT NULL,\n discount_id INTEGER REFERENCES 
discounts(id),\n menu_id INTEGER REFERENCES menus(id),\n restaurant_id INTEGER REFERENCES restaurants(id),\n menu_item_id INTEGER REFERENCES menu_items(id),\n category_id INTEGER REFERENCES categories(id)\n );''',\n dag=dag\n)\n\nt2 = PostgresOperator(\n task_id='tuncate',\n postgres_conn_id='food_delivery_db',\n sql='TRUNCATE TABLE orders_7_days;',\n dag=dag\n)\n\nt3 = PostgresOperator(\n task_id='insert',\n postgres_conn_id='food_delivery_db',\n sql='''\n INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)\n SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id\n FROM orders AS o\n INNER JOIN menu_items AS mi\n ON mi.id = o.menu_item_id\n INNER JOIN categories AS c\n ON c.id = mi.category_id\n INNER JOIN menus AS m\n ON m.id = c.menu_id\n WHERE o.placed_on >= NOW() - interval '7 days'\n ''',\n dag=dag\n)\n\nt1 >> t2 >> t3\n"},"avg_line_length":{"kind":"number","value":29.2461538462,"string":"29.246154"},"max_line_length":{"kind":"number","value":135,"string":"135"},"alphanum_fraction":{"kind":"number","value":0.6817464492,"string":"0.681746"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1256,"string":"1,256"},"score_documentation":{"kind":"number","value":0.66070489216202,"string":"0.660705"}}},{"rowIdx":3744,"cells":{"hexsha":{"kind":"string","value":"b97deb7d2bd255cd9a3d9f169d969333b63452ec"},"size":{"kind":"number","value":313,"string":"313"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"sample/pizza.py"},"max_stars_repo_name":{"kind":"string","value":"marianarmorgado/python-starter"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"sample/pizza.py"},"max_issues_repo_name":{"kind":"string","value":"marianarmorgado/python-starter"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"sample/pizza.py"},"max_forks_repo_name":{"kind":"string","value":"marianarmorgado/python-starter"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
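The etl_orders_7_days.py record above chains three PostgresOperator tasks with the bitshift pattern t1 >> t2 >> t3. Below is a minimal stand-alone sketch of the same wiring with placeholder operators; the DAG id and dates are hypothetical, and the DummyOperator import path varies between Airflow versions (it is airflow.operators.dummy_operator on 1.10.x).

from datetime import datetime
from airflow import DAG
from airflow.operators.dummy import DummyOperator  # airflow.operators.dummy_operator on 1.10.x

with DAG('etl_orders_7_days_sketch', start_date=datetime(2021, 1, 1),
         schedule_interval='@hourly', catchup=False) as dag:
    t1 = DummyOperator(task_id='if_not_exists')
    t2 = DummyOperator(task_id='truncate')
    t3 = DummyOperator(task_id='insert')
    t1 >> t2 >> t3  # equivalent to t1.set_downstream(t2); t2.set_downstream(t3)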
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# store information about a pizza being ordered\npizza = {\n 'crust': 'thick',\n 'toppings': ['mushrooms', 'extra vegan cheese']\n}\n\n# summarize the order\nprint(\"You ordered a \" + pizza['crust'] + \"-crust pizza\" + \n \"with the following toppings:\")\n\nfor topping in pizza['toppings']:\n print(\"\\t\" + topping)"},"avg_line_length":{"kind":"number","value":26.0833333333,"string":"26.083333"},"max_line_length":{"kind":"number","value":59,"string":"59"},"alphanum_fraction":{"kind":"number","value":0.6453674121,"string":"0.645367"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":204,"string":"204"},"score_documentation":{"kind":"number","value":0.6517571884984026,"string":"0.651757"}}},{"rowIdx":3745,"cells":{"hexsha":{"kind":"string","value":"b97e1419e0e45b84ecc462227c812c10beb92718"},"size":{"kind":"number","value":181,"string":"181"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"YouTube/CursoEmVideo/python/ex012.py"},"max_stars_repo_name":{"kind":"string","value":"Fh-Shadow/Progamando"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f496d83c36e9a079ed06b4e7c34396c57f539de9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"YouTube/CursoEmVideo/python/ex012.py"},"max_issues_repo_name":{"kind":"string","value":"Fh-Shadow/Progamando"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f496d83c36e9a079ed06b4e7c34396c57f539de9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"YouTube/CursoEmVideo/python/ex012.py"},"max_forks_repo_name":{"kind":"string","value":"Fh-Shadow/Progamando"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f496d83c36e9a079ed06b4e7c34396c57f539de9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"a = float(input('Qual é o preço do produto? 
R$'))\nd = a - (a * 23 / 100)\nprint('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}' .format(a, d))\n"},"avg_line_length":{"kind":"number","value":45.25,"string":"45.25"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.6077348066,"string":"0.607735"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":120,"string":"120"},"score_documentation":{"kind":"number","value":0.6486486486486487,"string":"0.648649"}}},{"rowIdx":3746,"cells":{"hexsha":{"kind":"string","value":"b97e5feb1052b87d359d8e3d9f63ba930bff8e66"},"size":{"kind":"number","value":15038,"string":"15,038"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"dnnlib/submission/submit.py"},"max_stars_repo_name":{"kind":"string","value":"gperdrizet/gansformer"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1172,"string":"1,172"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-03-02T02:00:44.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T02:46:45.000Z"},"max_issues_repo_path":{"kind":"string","value":"dnnlib/submission/submit.py"},"max_issues_repo_name":{"kind":"string","value":"gperdrizet/gansformer"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":37,"string":"37"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-03T14:11:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-12T15:40:15.000Z"},"max_forks_repo_path":{"kind":"string","value":"dnnlib/submission/submit.py"},"max_forks_repo_name":{"kind":"string","value":"gperdrizet/gansformer"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":138,"string":"138"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-03-02T06:37:10.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-30T14:59:09.000Z"},"content":{"kind":"string","value":"# Submit a function to be run either locally or in a computing cluster.\n# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,\n# and network recompilation.\nimport copy\nimport inspect\nimport os\nimport pathlib\nimport pickle\nimport platform\nimport pprint\nimport re\nimport shutil\nimport sys\nimport time\nimport traceback\n\nfrom enum import Enum\n\nfrom .. import util\nfrom ..util import EasyDict\n\nfrom . 
import internal\n\nclass SubmitTarget(Enum):\n # The target where the function should be run\n # LOCAL: Run it locally\n LOCAL = 1\n\nclass PathType(Enum):\n # Determines in which format should a path be formatted\n # WINDOWS: Format with Windows style\n # LINUX: Format with Linux/Posix style\n # AUTO: Use current OS type to select either WINDOWS or LINUX\n WINDOWS = 1\n LINUX = 2\n AUTO = 3\n\nclass PlatformExtras:\n # A mixed bag of values used by dnnlib heuristics\n # Attributes:\n # data_reader_buffer_size: Used by DataReader to size internal shared memory buffers\n # data_reader_process_count: Number of worker processes to spawn (zero for single\n # thread operation)\n def __init__(self):\n self.data_reader_buffer_size = 1<<30 # 1 GB\n self.data_reader_process_count = 0 # single threaded default\n\n_user_name_override = None\n\nclass SubmitConfig(util.EasyDict):\n # Strongly typed config dict needed to submit runs\n # Attributes:\n # run_dir_root: Path to the run dir root. Can be optionally templated with tags\n # Needs to always be run through get_path_from_template\n # run_desc: Description of the run. Will be used in the run dir and task name\n # run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir\n # run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will\n # be the src directory inside the run dir\n # submit_target: Submit target enum value. Used to select where the run is actually launched\n # num_gpus: Number of GPUs used/requested for the run\n # print_info: Whether to print debug information when submitting\n # local.do_not_copy_source_files: Do not copy source files from the working directory to the\n # run dir.\n # run_id: Automatically populated value during submit\n # run_name: Automatically populated value during submit\n # run_dir: Automatically populated value during submit\n # run_func_name: Automatically populated value during submit\n # run_func_kwargs: Automatically populated value during submit\n # user_name: Automatically populated value during submit. Can be set by the user which will then\n # override the automatic value\n # task_name: Automatically populated value during submit\n # host_name: Automatically populated value during submit\n # platform_extras: Automatically populated values during submit. 
Used by various dnnlib libraries\n    #     such as the DataReader class\n    def __init__(self):\n        super().__init__()\n\n        # run (set these)\n        self.run_dir_root = \"\" # should always be passed through get_path_from_template\n        self.run_desc = \"\"\n        self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\",\n            \".vscode\", \"_cudacache\"]\n        self.run_dir_extra_files = []\n\n        # submit (set these)\n        self.submit_target = SubmitTarget.LOCAL\n        self.num_gpus = 1\n        self.print_info = False\n        self.nvprof = False\n        self.local = internal.local.TargetOptions()\n        self.datasets = []\n\n        # (automatically populated)\n        self.run_id = None\n        self.run_name = None\n        self.run_dir = None\n        self.run_func_name = None\n        self.run_func_kwargs = None\n        self.user_name = None\n        self.task_name = None\n        self.host_name = \"localhost\"\n        self.platform_extras = PlatformExtras()\n\ndef get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:\n    # Replace tags in the given path template and return either Windows or Linux formatted path\n    # automatically select path type depending on running OS\n    if path_type == PathType.AUTO:\n        if platform.system() == \"Windows\":\n            path_type = PathType.WINDOWS\n        elif platform.system() == \"Linux\":\n            path_type = PathType.LINUX\n        else:\n            raise RuntimeError(\"Unknown platform\")\n\n    path_template = path_template.replace(\"<USERNAME>\", get_user_name())\n\n    # return correctly formatted path\n    if path_type == PathType.WINDOWS:\n        return str(pathlib.PureWindowsPath(path_template))\n    elif path_type == PathType.LINUX:\n        return str(pathlib.PurePosixPath(path_template))\n    else:\n        raise RuntimeError(\"Unknown platform\")\n\ndef get_template_from_path(path: str) -> str:\n    # Convert a normal path back to its template representation\n    path = path.replace(\"\\\\\", \"/\")\n    return path\n\ndef convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:\n    # Convert a normal path to template and the convert it back to a normal path with given path type\n    path_template = get_template_from_path(path)\n    path = get_path_from_template(path_template, path_type)\n    return path\n\ndef set_user_name_override(name: str) -> None:\n    # Set the global username override value\n    global _user_name_override\n    _user_name_override = name\n\ndef get_user_name():\n    # Get the current user name\n    if _user_name_override is not None:\n        return _user_name_override\n    elif platform.system() == \"Windows\":\n        return os.getlogin()\n    elif platform.system() == \"Linux\":\n        try:\n            import pwd\n            return pwd.getpwuid(os.geteuid()).pw_name\n        except:\n            return \"unknown\"\n    else:\n        raise RuntimeError(\"Unknown platform\")\n\ndef make_run_dir_path(*paths):\n    # Make a path/filename that resides under the current submit run_dir\n    # Args:\n    #     *paths: Path components to be passed to os.path.join\n    # Returns:\n    #     A file/dirname rooted at submit_config.run_dir. 
If there's no\n # submit_config or run_dir, the base directory is the current\n # working directory.\n # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))`\n import dnnlib\n if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):\n return os.path.join(os.getcwd(), *paths)\n return os.path.join(dnnlib.submit_config.run_dir, *paths)\n\ndef _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str:\n # Create a new run dir with increasing ID number at the start\n run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)\n\n if not os.path.exists(run_dir_root):\n os.makedirs(run_dir_root)\n\n run_dir = os.path.join(run_dir_root, submit_config.run_name)\n\n if not resume:\n if os.path.exists(run_dir) and create_new:\n raise RuntimeError(\"The run dir already exists! ({0})\".format(run_dir))\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n\n return run_dir\n\ndef _get_next_run_id_local(run_dir_root: str) -> int:\n # Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id\n # Assumes IDs are numbers at the start of the directory names\n dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]\n r = re.compile(\"^\\\\d+\") # match one or more digits at the start of the string\n run_id = 0\n\n for dir_name in dir_names:\n m = r.match(dir_name)\n\n if m is not None:\n i = int(m.group())\n run_id = max(run_id, i + 1)\n\n return run_id\n\ndef _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:\n # Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable\n pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\"))\n with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f:\n pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False)\n\n if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:\n return\n\n files = []\n\n run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)\n assert \".\" in submit_config.run_func_name\n for _idx in range(submit_config.run_func_name.count(\".\") - 1):\n run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)\n files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)\n\n dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\")\n files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)\n\n files += submit_config.run_dir_extra_files\n\n files = [(f[0], os.path.join(run_dir, \"src\", f[1])) for f in files]\n files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))]\n\n util.copy_files_and_create_dirs(files)\n\ndef run_wrapper(submit_config: SubmitConfig) -> None:\n # Wrap the actual run function call for handling logging, exceptions, typing, etc\n is_local = submit_config.submit_target == SubmitTarget.LOCAL\n\n # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing\n if is_local:\n logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True)\n else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log 
writing is handled by run.sh)\n logger = util.Logger(file_name = None, should_flush = True)\n\n import dnnlib\n dnnlib.submit_config = submit_config\n\n exit_with_errcode = False\n try:\n print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name))\n start_time = time.time()\n\n run_func_obj = util.get_obj_by_name(submit_config.run_func_name)\n assert callable(run_func_obj)\n sig = inspect.signature(run_func_obj)\n if \"submit_config\" in sig.parameters:\n run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)\n else:\n run_func_obj(**submit_config.run_func_kwargs)\n\n print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))\n except:\n if is_local:\n raise\n else:\n traceback.print_exc()\n\n log_src = os.path.join(submit_config.run_dir, \"log.txt\")\n log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name))\n shutil.copyfile(log_src, log_dst)\n\n # Defer sys.exit(1) to happen after we close the logs and create a _finished.txt\n exit_with_errcode = True\n finally:\n open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close()\n\n dnnlib.RunContext.get().close()\n dnnlib.submit_config = None\n logger.close()\n\n # If we hit an error, get out of the script now and signal the error\n # to whatever process that started this script.\n if exit_with_errcode:\n sys.exit(1)\n\n return submit_config\n\ndef open_file_or_url(file_or_url):\n if util.is_url(file_or_url):\n return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\")\n return open(file_or_url, \"rb\")\n\ndef load_pkl(file_or_url):\n with open_file_or_url(file_or_url) as file:\n return pickle.load(file, encoding = \"latin1\")\n\ndef submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,\n resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None:\n # Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.\n # create_newdir: enforces the creation of a new run directory\n # resume: resumes a prior experiment using its existing run directory\n # load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters\n submit_config = copy.deepcopy(submit_config)\n\n submit_target = submit_config.submit_target\n farm = None\n if submit_target == SubmitTarget.LOCAL:\n farm = internal.local.Target()\n assert farm is not None # unknown target\n\n # Disallow submitting jobs with zero num_gpus\n if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):\n raise RuntimeError(\"submit_config.num_gpus must be set to a non-zero value\")\n\n if submit_config.user_name is None:\n submit_config.user_name = get_user_name()\n\n submit_config.run_func_name = run_func_name\n submit_config.run_func_kwargs = run_func_kwargs\n\n #--------------------------------------------------------------------\n # Prepare submission by populating the run dir\n #--------------------------------------------------------------------\n host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)\n\n submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)\n docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\"\n if not re.match(docker_valid_name_regex, submit_config.task_name):\n raise 
RuntimeError(\"Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: \" + docker_valid_name_regex + \", got \" + submit_config.task_name)\n\n # Farm specific preparations for a submit\n farm.finalize_submit_config(submit_config, host_run_dir)\n\n # In case of resumption, load_config = True to load the prior submit_config file from the directory\n # (so to maintain the original configuration of the experiment rather than the newly provided\n # command-line arguments.\n if load_config:\n config_file = os.path.join(host_run_dir, \"submit_config.pkl\")\n if os.path.exists(config_file):\n old_submit_config = submit_config\n submit_config = load_pkl(config_file)\n\n submit_config[\"run_id\"] = old_submit_config[\"run_id\"]\n submit_config[\"run_name\"] = old_submit_config[\"run_name\"]\n\n if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]:\n submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"]\n submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"]\n\n _populate_run_dir(submit_config, host_run_dir)\n return farm.submit(submit_config, host_run_dir)\n"},"avg_line_length":{"kind":"number","value":43.3371757925,"string":"43.337176"},"max_line_length":{"kind":"number","value":238,"string":"238"},"alphanum_fraction":{"kind":"number","value":0.6918473201,"string":"0.691847"},"count_classes":{"kind":"number","value":3564,"string":"3,564"},"score_classes":{"kind":"number","value":0.23699960101077272,"string":"0.237"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":5938,"string":"5,938"},"score_documentation":{"kind":"number","value":0.39486633860885756,"string":"0.394866"}}},{"rowIdx":3747,"cells":{"hexsha":{"kind":"string","value":"b97f4f2077af2e6d4198d160e8fea133c49dee89"},"size":{"kind":"number","value":4187,"string":"4,187"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pyecharts/custom/grid.py"},"max_stars_repo_name":{"kind":"string","value":"zilong305/pycharts"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6cf1bb7f17001a36da6a766615a78b1dbef5918f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pyecharts/custom/grid.py"},"max_issues_repo_name":{"kind":"string","value":"zilong305/pycharts"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6cf1bb7f17001a36da6a766615a78b1dbef5918f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pyecharts/custom/grid.py"},"max_forks_repo_name":{"kind":"string","value":"zilong305/pycharts"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6cf1bb7f17001a36da6a766615a78b1dbef5918f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom pyecharts.option import grid\n\n\nclass Grid(object):\n\n def __init__(self):\n self._chart = None\n self._js_dependencies = set()\n\n def add(self, chart,\n grid_width=None,\n grid_height=None,\n grid_top=None,\n grid_bottom=None,\n grid_left=None,\n grid_right=None):\n \"\"\"\n\n :param chart:\n chart instance\n :param grid_width:\n Width of grid component. Adaptive by default.\n :param grid_height:\n Height of grid component. Adaptive by default.\n :param grid_top:\n Distance between grid component and the top side of the container.\n :param grid_bottom:\n Distance between grid component and the bottom side of the container.\n :param grid_left:\n Distance between grid component and the left side of the container.\n :param grid_right:\n Distance between grid component and the right side of the container.\n :return:\n \"\"\"\n if self._chart is None:\n self._chart = chart\n self._chart._option.update(grid=[])\n self._js_dependencies = chart._js_dependencies\n\n _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)\n if _grid:\n for _ in range(len(self._chart._option.get('series'))):\n self._chart._option.get('grid').append(_grid)\n else:\n _series = (\n chart._option.get('series'),\n chart._option.get('xAxis', None),\n chart._option.get('yAxis', None),\n chart._option.get('legend')[0],\n chart._option.get('title')[0]\n )\n _index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series)\n self._chart._option.get('legend').append(_legned)\n self._chart._option.get('title').append(_title)\n if _xaxis and _yaxis is not None:\n try:\n _xaxis[0].update(gridIndex=_index-1)\n _yaxis[0].update(gridIndex=_index-1)\n self._chart._option.get('xAxis').append(_xaxis[0])\n self._chart._option.get('yAxis').append(_yaxis[0])\n except:\n pass\n\n # indexflag is only identify for every series\n _flag = self._chart._option.get('series')[0].get('indexflag')\n _series_index = 0\n for s in self._chart._option.get('series'):\n if _flag == s.get('indexflag'):\n s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)\n else:\n _series_index += 1\n s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)\n _flag = s.get('indexflag')\n _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)\n for _ in range(_index_once):\n self._chart._option.get('grid').append(_grid)\n self._js_dependencies.union(chart._js_dependencies)\n\n def __custom(self, series):\n \"\"\"\n\n :param series:\n series data\n :return:\n \"\"\"\n _series, _xaxis, _yaxis, _legend, _title = series\n for s in _series:\n self._chart._option.get('series').append(s)\n return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title\n\n def render(self, path=\"render.html\"):\n \"\"\"\n\n :param path:\n :return:\n \"\"\"\n 
self._chart.render(path)\n\n def render_embed(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._chart.render_embed()\n\n def show_config(self):\n \"\"\"\n\n :return:\n \"\"\"\n import pprint\n return pprint.pprint(self._chart._option)\n\n @property\n def chart(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._chart\n\n def _repr_html_(self):\n \"\"\"\n\n :return:\n \"\"\"\n return self._chart._repr_html_()\n"},"avg_line_length":{"kind":"number","value":31.9618320611,"string":"31.961832"},"max_line_length":{"kind":"number","value":100,"string":"100"},"alphanum_fraction":{"kind":"number","value":0.5404824457,"string":"0.540482"},"count_classes":{"kind":"number","value":4112,"string":"4,112"},"score_classes":{"kind":"number","value":0.9820874134224982,"string":"0.982087"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":99,"string":"99"},"score_decorators":{"kind":"number","value":0.023644614282302363,"string":"0.023645"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1192,"string":"1,192"},"score_documentation":{"kind":"number","value":0.2846907093384285,"string":"0.284691"}}},{"rowIdx":3748,"cells":{"hexsha":{"kind":"string","value":"b97f78c59a8296809ae879f2d6f8355b0f8c52d0"},"size":{"kind":"number","value":4588,"string":"4,588"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"smooch/conversations.py"},"max_stars_repo_name":{"kind":"string","value":"devinmcgloin/smooch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c9561c3e7f1546efc58daa472b70f738d0d35e13"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-07-04T12:02:03.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2017-03-20T19:39:36.000Z"},"max_issues_repo_path":{"kind":"string","value":"smooch/conversations.py"},"max_issues_repo_name":{"kind":"string","value":"devinmcgloin/smooch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c9561c3e7f1546efc58daa472b70f738d0d35e13"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":41,"string":"41"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-05-28T09:54:04.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-02-20T05:34:19.000Z"},"max_forks_repo_path":{"kind":"string","value":"smooch/conversations.py"},"max_forks_repo_name":{"kind":"string","value":"devinmcgloin/smooch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c9561c3e7f1546efc58daa472b70f738d0d35e13"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-07-20T14:31:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2016-11-18T12:19:38.000Z"},"content":{"kind":"string","value":"import logging\n\nfrom .endpoint import ask\n\n\ndef send_message(user_id, message, sent_by_maker=True):\n if not valid_args(user_id, message):\n logging.warning(\"send message called with 
invalid args user_id={} message={}\".format(user_id, message))\n return\n\n logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker))\n role = \"appMaker\"\n if not sent_by_maker:\n role = \"appUser\"\n\n data = {\"text\": message, \"role\": role}\n return ask('appusers/{0}/conversation/messages'.format(user_id),\n data,\n 'post')\n\n\ndef get_conversation(user_id):\n if not user_id:\n logging.warning(\"get conversation called with invalid arg user_id={}\".format(user_id))\n return\n\n logging.debug(\"Get conversation: user_id={}\".format(user_id))\n return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')\n\n\ndef request_payment(user_id, message, options):\n \"\"\"Note that amount is a integer which specifies the amount of cents in the transaction\n Smooch will default to the currency specified in your account settings.\"\"\"\n\n if not valid_args(user_id, message, options):\n logging.warning(\"request payment called with invalid args user_id={} message={} options={}\"\n .format(user_id, message, options))\n return\n\n role = \"appMaker\"\n\n buttons = []\n\n for short_text, result in options:\n buttons.append({\n \"type\": \"buy\",\n \"text\": short_text,\n \"amount\": result})\n\n data = {\"text\": message,\n \"role\": role,\n \"actions\": buttons}\n return ask('appusers/{0}/conversation/messages'.format(user_id),\n data,\n 'post')\n\n\ndef send_links(user_id, message, options):\n \"\"\"Sends a series of links. The options field is a dictionary in which the keys are\n descriptions and values uris\"\"\"\n if not valid_args(user_id, message, options):\n logging.warning(\"send links called with invalid args user_id={} message={} options={}\"\n .format(user_id, message, options))\n return\n\n role = \"appMaker\"\n\n buttons = []\n\n for short_text, result in options:\n buttons.append({\n \"type\": \"link\",\n \"text\": short_text,\n \"uri\": result})\n\n data = {\"text\": message,\n \"role\": role,\n \"actions\": buttons}\n return ask('appusers/{0}/conversation/messages'.format(user_id),\n data,\n 'post')\n\n\ndef send_postbacks(user_id, message, options):\n \"\"\"Sends a series of options that you can listen for on your webhook. The options field is a dictionary in which the keys are\n descriptions and values the postback payload. 
You need to set up a webhook to listen for the postback.\"\"\"\n\n if not valid_args(user_id, message, options):\n logging.warning(\"send postback called with invalid args user_id={} message={} options={}\"\n .format(user_id, message, options))\n return\n\n role = \"appMaker\"\n\n buttons = []\n\n for short_text, result in options:\n buttons.append({\n \"type\": \"postback\",\n \"text\": short_text,\n \"payload\": result\n })\n\n data = {\"text\": message,\n \"role\": role,\n \"actions\": buttons}\n return ask('appusers/{0}/conversation/messages'.format(user_id),\n data,\n 'post')\n\n\ndef send_buttons(user_id, message, options):\n \"\"\"Options is a list of tuples in which the first element is the type of the button,\n second the short text, and third the result for the specified type.\"\"\"\n\n if not valid_args(user_id, message, options):\n logging.warning(\"send buttons called with invalid args user_id={} message={} options={}\"\n .format(user_id, message, options))\n return\n\n role = \"appMaker\"\n\n buttons = []\n\n for text, kind, result in options:\n buttons.append({\n \"type\": kind,\n \"text\": text,\n \"payload\": result\n })\n\n data = {\"text\": message,\n \"role\": role,\n \"actions\": buttons}\n\n return ask('appusers/{0}/conversation/messages'.format(user_id),\n data,\n 'post')\n\n\ndef valid_args(user_id, message, options=None):\n if options is not None:\n if user_id and message and options and type(options) is list:\n return True\n return False\n else:\n if user_id and message:\n return True\n return False\n"},"avg_line_length":{"kind":"number","value":30.5866666667,"string":"30.586667"},"max_line_length":{"kind":"number","value":129,"string":"129"},"alphanum_fraction":{"kind":"number","value":0.5989537925,"string":"0.598954"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1670,"string":"1,670"},"score_documentation":{"kind":"number","value":0.36399302528334787,"string":"0.363993"}}},{"rowIdx":3749,"cells":{"hexsha":{"kind":"string","value":"b980ab008a2dab6e2778edec1d7d9e24b2315a73"},"size":{"kind":"number","value":1086,"string":"1,086"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"cifar/evalit.py"},"max_stars_repo_name":{"kind":"string","value":"Sharkbyteprojects/IRIS-ML_and_Deep-Learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f0e053cf7a0e69019bbba36e6da3e60d76105fe9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"cifar/evalit.py"},"max_issues_repo_name":{"kind":"string","value":"Sharkbyteprojects/IRIS-ML_and_Deep-Learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f0e053cf7a0e69019bbba36e6da3e60d76105fe9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"cifar/evalit.py"},"max_forks_repo_name":{"kind":"string","value":"Sharkbyteprojects/IRIS-ML_and_Deep-Learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f0e053cf7a0e69019bbba36e6da3e60d76105fe9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import keras\nfrom keras.models import load_model\nfrom PIL import Image\nimport matplotlib.pylab as plt\nimport numpy as np\nimport zipfile\nprint(\"Extract\")\nzip_ref = zipfile.ZipFile(\"./asset.zip\", 'r')\nzip_ref.extractall(\".\")\nzip_ref.close()\nprint(\"Load Model\")\nmodel=load_model(\"cifar-model.h5\")\nCIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"]\ndef calc(imname):\n test_image =Image.open(\"asset/\"+imname)\n test_image=test_image.resize((32,32),Image.ANTIALIAS)\n test_image=np.array(test_image,dtype=\"float32\")\n test_image/=255\n test_image=test_image.reshape(-1,32,32,3)\n predictions=model.predict(test_image)\n index_max_pred=np.argmax(predictions)\n plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred]))\n plt.imshow(test_image[0].reshape(32,32,3))\n print(predictions)\n plt.show()\nprint(\"START TEST\")\ncalc(\"lkw-image.jpg\")\ncalc(\"cat.jpg\")\ncalc(\"frog.jpg\")\ncalc(\"fog.jpg\")\ncalc(\"lfog.jpg\")\ncalc(\"d.jpg\")\ncalc(\"b.jpg\")\ncalc(\"bs.jpg\")\ncalc(\"plapper.jpg\")\ncalc(\"ds.jpg\")\nprint(\"Complete\")\nprint(\"End\")\nquit(0)\n"},"avg_line_length":{"kind":"number","value":27.15,"string":"27.15"},"max_line_length":{"kind":"number","value":88,"string":"88"},"alphanum_fraction":{"kind":"number","value":0.7108655617,"string":"0.710866"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":270,"string":"270"},"score_documentation":{"kind":"number","value":0.24861878453038674,"string":"0.248619"}}},{"rowIdx":3750,"cells":{"hexsha":{"kind":"string","value":"b980be1e0d2b8db749e25a4f49c35cdddbdca9d9"},"size":{"kind":"number","value":1650,"string":"1,650"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tt/urls.py"},"max_stars_repo_name":{"kind":"string","value":"samiksha-patil/Knowledge-Sharing-Platform"},"max_stars_repo_head_hexsha":{"kind":"string","value":"22e61a659d5ad63fe656fa639dc897cbdebad4fe"},"max_stars_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n 
\"bzip2-1.0.6\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-05-09T08:18:49.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-05-09T08:18:49.000Z"},"max_issues_repo_path":{"kind":"string","value":"tt/urls.py"},"max_issues_repo_name":{"kind":"string","value":"samiksha-patil/Knowledge-Sharing-Platform"},"max_issues_repo_head_hexsha":{"kind":"string","value":"22e61a659d5ad63fe656fa639dc897cbdebad4fe"},"max_issues_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n \"bzip2-1.0.6\"\n]"},"max_issues_count":{"kind":"number","value":9,"string":"9"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-19T01:11:35.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-12T00:20:13.000Z"},"max_forks_repo_path":{"kind":"string","value":"tt/urls.py"},"max_forks_repo_name":{"kind":"string","value":"samiksha-patil/Knowledge-Sharing-Platform"},"max_forks_repo_head_hexsha":{"kind":"string","value":"22e61a659d5ad63fe656fa639dc897cbdebad4fe"},"max_forks_repo_licenses":{"kind":"list like","value":["bzip2-1.0.6"],"string":"[\n \"bzip2-1.0.6\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\ntt URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\n\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\n# Uncomment next two lines to enable admin:\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom users import views as user_views\nfrom django.contrib.auth import views as auth_views\nfrom upload import views as upload_views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\n\n\nurlpatterns = [\n # Uncomment the next line to enable the admin:\n path('admin/', admin.site.urls),\n path('', include('blog.urls')),\n path('register/', user_views.register, name='register'),\n path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),\n path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'),\n path('profile/', user_views.profile, name='profile'),\n path('book/',upload_views.book_list,name='book_list'),\n path('book/upload',upload_views.upload_book,name='upload_book'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n \n "},"avg_line_length":{"kind":"number","value":35.8695652174,"string":"35.869565"},"max_line_length":{"kind":"number","value":100,"string":"100"},"alphanum_fraction":{"kind":"number","value":0.7260606061,"string":"0.726061"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":890,"string":"890"},"score_documentation":{"kind":"number","value":0.5393939393939394,"string":"0.539394"}}},{"rowIdx":3751,"cells":{"hexsha":{"kind":"string","value":"b9814171798d1f2ddf5247c67182a7e7e032132e"},"size":{"kind":"number","value":105,"string":"105"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/git/cmd.py"},"max_stars_repo_name":{"kind":"string","value":"danihodovic/dht"},"max_stars_repo_head_hexsha":{"kind":"string","value":"636f54d70f8c6ca60ab48f2815b3e9e1a336d78f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-01-21T15:04:32.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-01-21T16:23:32.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/git/cmd.py"},"max_issues_repo_name":{"kind":"string","value":"danihodovic/dht"},"max_issues_repo_head_hexsha":{"kind":"string","value":"636f54d70f8c6ca60ab48f2815b3e9e1a336d78f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-12-30T20:34:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-01-17T20:02:02.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/git/cmd.py"},"max_forks_repo_name":{"kind":"string","value":"danihodovic/dht"},"max_forks_repo_head_hexsha":{"kind":"string","value":"636f54d70f8c6ca60ab48f2815b3e9e1a336d78f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import os\n\nimport click\n\nos.environ[\"GIT_PYTHON_REFRESH\"] = \"quiet\"\n\n\n@click.group()\ndef git():\n pass\n"},"avg_line_length":{"kind":"number","value":9.5454545455,"string":"9.545455"},"max_line_length":{"kind":"number","value":42,"string":"42"},"alphanum_fraction":{"kind":"number","value":0.6761904762,"string":"0.67619"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":34,"string":"34"},"score_decorators":{"kind":"number","value":0.3238095238095238,"string":"0.32381"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":27,"string":"27"},"score_documentation":{"kind":"number","value":0.2571428571428571,"string":"0.257143"}}},{"rowIdx":3752,"cells":{"hexsha":{"kind":"string","value":"b98238142a5e4442e3c9fdd220f6bde9274299de"},"size":{"kind":"number","value":570,"string":"570"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"TwitterImage2JPG.py"},"max_stars_repo_name":{"kind":"string","value":"Tymec/Playground"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5a4aaa4a88e084d8d31803485b1ec521ad49a3d1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"TwitterImage2JPG.py"},"max_issues_repo_name":{"kind":"string","value":"Tymec/Playground"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5a4aaa4a88e084d8d31803485b1ec521ad49a3d1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"TwitterImage2JPG.py"},"max_forks_repo_name":{"kind":"string","value":"Tymec/Playground"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5a4aaa4a88e084d8d31803485b1ec521ad49a3d1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-02-19T10:32:07.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-02-19T10:32:07.000Z"},"content":{"kind":"string","value":"import glob\nimport os\n\n\ndef main():\n os.chdir(\"F:/Downloads\")\n extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"]\n file_list = list()\n\n for extension in extensions:\n file_list = file_list + glob.glob(extension)\n\n for file in file_list:\n for extension in extensions:\n new_extension = extension.replace('*', '')\n if file.endswith(new_extension):\n new_name = file.replace(new_extension, '') + \".jpg\"\n os.rename(file, new_name)\n\n print(\"Done!\")\n\n\nif __name__ == __name__:\n main()\n"},"avg_line_length":{"kind":"number","value":22.8,"string":"22.8"},"max_line_length":{"kind":"number","value":67,"string":"67"},"alphanum_fraction":{"kind":"number","value":0.5859649123,"string":"0.585965"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":72,"string":"72"},"score_documentation":{"kind":"number","value":0.12631578947368421,"string":"0.126316"}}},{"rowIdx":3753,"cells":{"hexsha":{"kind":"string","value":"b982943f0b8c226209550f8c7f62a0e03d0b5ff5"},"size":{"kind":"number","value":6405,"string":"6,405"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Data Analysis/classification.py"},"max_stars_repo_name":{"kind":"string","value":"Riccardo95Facchini/DIL-2019"},"max_stars_repo_head_hexsha":{"kind":"string","value":"febeda55fd647943a1b8c49b3c5192fcd69fdaf5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Data Analysis/classification.py"},"max_issues_repo_name":{"kind":"string","value":"Riccardo95Facchini/DIL-2019"},"max_issues_repo_head_hexsha":{"kind":"string","value":"febeda55fd647943a1b8c49b3c5192fcd69fdaf5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Data Analysis/classification.py"},"max_forks_repo_name":{"kind":"string","value":"Riccardo95Facchini/DIL-2019"},"max_forks_repo_head_hexsha":{"kind":"string","value":"febeda55fd647943a1b8c49b3c5192fcd69fdaf5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics 
import classification_report\n\n#EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB\n\ninput_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'\ndataset = pd.read_csv(input_file, sep=';', header = 0)\n\ndataset.head()\n\n#DELETE NEXT CALLS DATA\n\ndataset = dataset.drop(\"contact\", axis=1)\ndataset = dataset.drop(\"day\", axis=1)\ndataset = dataset.drop(\"month\", axis=1)\ndataset = dataset.drop(\"duration\", axis=1)\ndataset = dataset.drop(\"campaign\", axis=1)\ndataset = dataset.drop(\"pdays\", axis=1)\ndataset = dataset.drop(\"previous\", axis=1)\ndataset = dataset.drop(\"poutcome\", axis=1)\n\ndataset.head()\n\n#FEATURE ENGINEERING\n\ncleanup_nums = {\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1},\n \"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\": 3},\n \"default\": {\"yes\": 1, \"no\": 0},\n \"housing\": {\"yes\": 1, \"no\": 0},\n \"loan\": {\"yes\": 1, \"no\": 0},\n \"y\": {\"yes\": 1, \"no\": 0}}\n\ndataset.replace(cleanup_nums, inplace=True)\ndataset.head()\n\ndataset.dtypes\n\ndataset = dataset[dataset.job != 'unknown']\ndataset = dataset[dataset.education != 'unknown']\ndataset['education'] = dataset['education'].astype(int)\n\n#COLLERATION MATRIX\n\nplt.figure(figsize=(12,10))\ncor = dataset.corr()\nsns.heatmap(cor, annot=True, cmap=plt.cm.Reds)\nplt.show()\n\n#CLASSIFIFICATION\n\nX = dataset.iloc[:, 0:7]\ny = dataset.iloc[:, 7]\n\nX = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\n\n#DECISION TREE\n\nfrom sklearn import tree\nfrom sklearn.tree import DecisionTreeClassifier\n\nclf_dt = DecisionTreeClassifier()\nclt_dt = clf_dt.fit(X_train,y_train)\n\nesito = clf_dt.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, esito,target_names=target_names))\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, esito)\nprint(cm)\n\nplt.hist(esito)\n\n#RANDOM FOREST\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nclf_dt = RandomForestClassifier()\nclt_dt = clf_dt.fit(X_train,y_train)\n\nesito = clf_dt.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, esito,target_names=target_names))\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, esito)\nprint(cm)\n\nplt.hist(esito)\n\n# K-NEAREST NEIGHBOURS\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# TRAINING - TEST\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# SCALING\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# FITTING\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)\nclassifier.fit(X_train, y_train)\n\n# PREDICTION\ny_pred = classifier.predict(X_test)\n\n# CONFUSION MATRIX\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, y_pred,target_names=target_names))\n\nprint(cm)\n\nplt.hist(y_pred)\n\n#UNDERSAMPLING\n\nfrom sklearn.utils import resample\n\ndataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"])\n\n#SPLIT FEATURE AND 
TARGET\ny = dataset_sample.y\nX = dataset_sample.drop('y', axis=1)\n\n#TRAIN TEST\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\nX = pd.concat([X_train, y_train], axis=1)\n\n#SELECTING TARGET CLASSES\n\nnot_sub = X[X.y==0]\nsub = X[X.y==1]\n\nnot_sub_downsampled = resample(not_sub,\n replace = False,\n n_samples = len(sub),\n random_state = 27)\n\n# COMBINE MINORITY AND DOWNSAMPLED MAJORITY\ndownsampled = pd.concat([not_sub_downsampled, sub])\n\n#DECISION TREE\n\ny_train = downsampled.y\nX_train = downsampled.drop('y', axis=1)\n\nclf_dt = DecisionTreeClassifier()\nclt_dt = clf_dt.fit(X_train,y_train)\n\nesito = clf_dt.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, esito,target_names=target_names))\n\n#RANDOM FOREST\ny_train = downsampled.y\nX_train = downsampled.drop('y', axis=1)\n\nclf_dt = RandomForestClassifier()\nclt_dt = clf_dt.fit(X_train,y_train)\n\nesito = clf_dt.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, esito,target_names=target_names))\n\n#SMOTE - DECISION TREE\n\nfrom imblearn.over_sampling import SMOTE\n\n#SPLIT FEATURE TARGET\ny = dataset_sample.y\nX = dataset_sample.drop('y', axis=1)\n\n#TRAIN TEST\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n#SMOTE\nsm = SMOTE(random_state=27, ratio=1.0)\nX_train, y_train = sm.fit_sample(X_train, y_train)\n\nclf_dt = DecisionTreeClassifier()\n\n#FIT\nsmote = clf_dt.fit(X_train,y_train)\n\n#PREDICITON\nsmote_pred = smote.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, smote_pred,target_names=target_names))\n\n#SMOTE - RANDOM FOREST\n\nfrom imblearn.over_sampling import SMOTE\n\ny = dataset_sample.y\nX = dataset_sample.drop('y', axis=1)\n\n# setting up testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\nsm = SMOTE(random_state=27, ratio=1.0)\nX_train, y_train = sm.fit_sample(X_train, y_train)\n\nclf_dt = RandomForestClassifier()\n\nsmote = clf_dt.fit(X_train,y_train)\n\nsmote_pred = smote.predict(X_test)\n\ntarget_names = ['NOT-sub', 'Subscribed']\nprint(classification_report(y_test, smote_pred,target_names=target_names))\n\n#RECAP on RECALL\n\nx = np.arange(3)\nplt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')\nplt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')\nplt.xticks(x-0.1, ['Normal','Under','Smote'])\nplt.legend(loc='upper right')\n\n#RECAP on F1\n\nx = np.arange(3)\nplt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')\nplt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')\nplt.xticks(x-0.1, ['Normal','Under','Smote'])\nplt.legend(loc='lower 
right')"},"avg_line_length":{"kind":"number","value":25.7228915663,"string":"25.722892"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.7216237315,"string":"0.721624"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1210,"string":"1,210"},"score_documentation":{"kind":"number","value":0.18891491022638562,"string":"0.188915"}}},{"rowIdx":3754,"cells":{"hexsha":{"kind":"string","value":"b982c2b4e976b723dfa3208c1bc1e4ea51b77ac9"},"size":{"kind":"number","value":5562,"string":"5,562"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tools/c7n_azure/tests/test_route_table.py"},"max_stars_repo_name":{"kind":"string","value":"anastasiia-zolochevska/cloud-custodian"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f25315a01bec808c16ab0e2d433d6151cf5769e4"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-01-20T19:46:28.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-08-19T14:20:27.000Z"},"max_issues_repo_path":{"kind":"string","value":"tools/c7n_azure/tests/test_route_table.py"},"max_issues_repo_name":{"kind":"string","value":"anastasiia-zolochevska/cloud-custodian"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f25315a01bec808c16ab0e2d433d6151cf5769e4"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":79,"string":"79"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-03-20T12:27:06.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-08-14T14:07:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"tools/c7n_azure/tests/test_route_table.py"},"max_forks_repo_name":{"kind":"string","value":"anastasiia-zolochevska/cloud-custodian"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f25315a01bec808c16ab0e2d433d6151cf5769e4"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-04-22T15:20:23.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-08-27T12:37:51.000Z"},"content":{"kind":"string","value":"# Copyright 2015-2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# 
See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom azure_common import BaseTest, arm_template\n\n\nclass RouteTableTest(BaseTest):\n\n route_table_name = 'cctestroutetable'\n vnet_name = 'ccroutetablevnet'\n allowed_subnet_name = 'cctestsubnet1'\n disallowed_subnet_name = 'cctestsubnet2'\n\n @staticmethod\n def _subnet_id_suffix(subnet):\n return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)\n\n def test_route_table_schema_validate(self):\n with self.sign_out_patch():\n p = self.load_policy({\n 'name': 'test-azure-route-table',\n 'resource': 'azure.routetable'\n }, validate=True)\n self.assertTrue(p)\n\n @arm_template('route-table-and-vnet.json')\n def test_find_route_table_by_name(self):\n\n p = self.load_policy({\n 'name': 'test-find-route-table-by-name',\n 'resource': 'azure.routetable',\n 'filters': [\n {\n 'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value': RouteTableTest.route_table_name\n }\n ]\n })\n\n resources = p.run()\n self._assert_only_route_table_in_resources(resources)\n\n @arm_template('route-table-and-vnet.json')\n def test_detect_route_table_is_routing_to_correct_subnet(self):\n\n p = self.load_policy({\n 'name': 'test-detect-route-table-is-routing-to-correct-subnet',\n 'resource': 'azure.routetable',\n 'filters': [\n {\n 'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value': RouteTableTest.route_table_name\n },\n {\n 'type': 'value',\n 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format(\n RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)\n ),\n 'value': 'not-null'\n }\n ]\n })\n\n resources = p.run()\n self._assert_only_route_table_in_resources(resources)\n\n @arm_template('route-table-and-vnet.json')\n def test_detect_route_table_not_routing_to_incorrect_subnet(self):\n\n p = self.load_policy({\n 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',\n 'resource': 'azure.routetable',\n 'filters': [\n {\n 'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value': RouteTableTest.route_table_name\n },\n {\n 'type': 'value',\n 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format(\n RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)\n ),\n 'value': 'not-null'\n }\n ]\n })\n\n resources = p.run()\n self.assertEqual(len(resources), 0, \"A route table is routing to a disallowed subnet\")\n\n @arm_template('route-table-and-vnet.json')\n def test_detect_route_only_routes_to_specific_subnets(self):\n\n p = self.load_policy({\n 'name': 'test-detect-route-only-routes-to-specific-subnets',\n 'resource': 'azure.routetable',\n 'filters': [\n {\n 'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value': RouteTableTest.route_table_name\n },\n {\n 'type': 'value',\n 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format(\n RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)\n ),\n 'value': 'not-null'\n },\n {\n 'type': 'value',\n 'key': 'length(properties.subnets)',\n 'op': 'eq',\n 'value': 1\n }\n ]\n })\n\n resources = p.run()\n self._assert_only_route_table_in_resources(resources)\n\n def _assert_only_route_table_in_resources(self, resources):\n\n self.assertEqual(len(resources), 1, \"Only one route table should be found\")\n\n route_table = resources[0]\n self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),\n \"The wrong route table was found\")\n\n properties = route_table.get('properties')\n self.assertIsNotNone(properties, \"Missing properties\")\n\n subnets = 
properties.get('subnets')\n self.assertIsNotNone(subnets, \"Missing subnets\")\n self.assertEqual(1, len(subnets), \"There should only be one subnet\")\n\n subnet = subnets[0]\n self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), \"Incorrect subnet\")\n"},"avg_line_length":{"kind":"number","value":35.4267515924,"string":"35.426752"},"max_line_length":{"kind":"number","value":95,"string":"95"},"alphanum_fraction":{"kind":"number","value":0.53865516,"string":"0.538655"},"count_classes":{"kind":"number","value":4920,"string":"4,920"},"score_classes":{"kind":"number","value":0.8845738942826321,"string":"0.884574"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":3668,"string":"3,668"},"score_decorators":{"kind":"number","value":0.6594750089895721,"string":"0.659475"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1900,"string":"1,900"},"score_documentation":{"kind":"number","value":0.3416037396619921,"string":"0.341604"}}},{"rowIdx":3755,"cells":{"hexsha":{"kind":"string","value":"b98531b0567b9e4719006397ec461d3fa4999e4b"},"size":{"kind":"number","value":11730,"string":"11,730"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py"},"max_stars_repo_name":{"kind":"string","value":"pkthein/sparts_all_fam"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ff162e4ea8c3919a197dc0cc13fde6b32da113c7"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-04-03T18:31:36.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-04-03T18:31:36.000Z"},"max_issues_repo_path":{"kind":"string","value":"proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py"},"max_issues_repo_name":{"kind":"string","value":"pkthein/sparts_all_fam"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ff162e4ea8c3919a197dc0cc13fde6b32da113c7"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py"},"max_forks_repo_name":{"kind":"string","value":"pkthein/sparts_all_fam"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ff162e4ea8c3919a197dc0cc13fde6b32da113c7"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Copyright 2016 Intel Corporation\n# Copyright 2017 Wind River \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed 
to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n################################################################################\n# LIBRARIES & DEPENDENCIES #\n################################################################################\nimport hashlib\nimport logging\nimport json\nfrom collections import OrderedDict\nfrom sawtooth_sdk.processor.exceptions import InvalidTransaction\nfrom sawtooth_sdk.processor.exceptions import InternalError\nfrom sawtooth_sdk.processor.handler import TransactionHandler\n\nLOGGER = logging.getLogger(__name__)\n################################################################################\n# HANDLER OBJ #\n################################################################################\nclass ArtifactTransactionHandler:\n \"\"\"\n Class for handling the Transaction Family : Artifact\n \n Attributes:\n namespace_prefix (str): The namespace prefix of the transaction family\n \n \"\"\"\n \n def __init__(self, namespace_prefix):\n \"\"\"\n Constructs the ArtifactTransactionHandler object.\n \n Args:\n namespace_prefix (str):\n The namepsace prefix of the transaction family\n \n \"\"\"\n self._namespace_prefix = namespace_prefix\n\n @property\n def family_name(self):\n \"\"\"\n type: str\n Returns the family name of the handler object.\n \n \"\"\"\n return \"artifact\"\n\n @property\n def family_versions(self):\n \"\"\"\n type: list of str\n Returns the family version of the handler object.\n \n \"\"\"\n return [\"1.0\"]\n\n @property\n def encodings(self):\n \"\"\"\n type: list of str\n Returns the encoding scheme used for the data for the handler object.\n \n \"\"\"\n return [\"csv-utf8\"]\n\n @property\n def namespaces(self):\n \"\"\"\n type: list of str\n Returns the namespaces associating with the handler object.\n \n \"\"\"\n return [self._namespace_prefix]\n################################################################################\n# FUNCTIONS #\n################################################################################\n def apply(self, transaction, context):\n \"\"\"\n Applys the payload from transaction onto the state storage.\n \n Args:\n transaction (Transaction): The transaction pertaining the payload\n context (State): The current state of the ledger\n \n Returns:\n type: State\n The new state of the ledger, which includes the data from the\n transaction, is returned to be stored on the state storage.\n \n Raises:\n InvalidTransaction:\n * If deserialization for payload from transaction failed\n * If \"create\" was called on non-unique uuid\n * If \"amend\" was called on non-existing uuid\n * If \"Add...\" were called on non-existing uuid\n * If invalid operation was called\n InternalError:\n * If deserialization of State.data failed\n \n \"\"\"\n \n # Parsing required fields from transaction payload\n try:\n \n payload = json.loads(transaction.payload.decode())\n artifact_id = payload[\"uuid\"]\n artifact_alias = payload[\"alias\"]\n artifact_name = payload[\"name\"]\n artifact_type = payload[\"content_type\"]\n artifact_checksum = payload[\"checksum\"]\n artifact_label = payload[\"label\"]\n artifact_openchain = payload[\"openchain\"]\n action = payload[\"action\"]\n prev = payload[\"prev_block\"]\n cur = 
payload[\"cur_block\"]\n timestamp = payload[\"timestamp\"]\n artifact_list = payload[\"artifact_list\"]\n uri_list = payload[\"uri_list\"]\n \n except ValueError:\n raise InvalidTransaction(\"Invalid payload serialization\")\n \n # Soft sanity check and loading required data\n validate_transaction(artifact_id, action)\n data_address = make_artifact_address(self._namespace_prefix, \n artifact_id)\n state_entries = context.get_state([data_address])\n \n # Hard sanity check before creating final payload for the state storage\n if len(state_entries) != 0:\n try:\n \n stored_artifact = json.loads(state_entries[0].data.decode())\n stored_artifact_id = stored_artifact[\"uuid\"]\n \n except ValueError:\n raise InternalError(\"Failed to deserialize data.\")\n \n else:\n stored_artifact_id = stored_artifact = None\n \n if action == \"create\" and stored_artifact_id is not None:\n raise InvalidTransaction(\"Invalid Action-artifact already exists.\")\n \n elif action == \"create\":\n artifact = create_artifact(artifact_id, artifact_alias, \n artifact_name, artifact_type, artifact_checksum, \n artifact_label, artifact_openchain, \n prev, cur, timestamp)\n elif action == \"amend\" and stored_artifact_id is not None:\n artifact = create_artifact(artifact_id, artifact_alias, \n artifact_name, artifact_type, artifact_checksum, \n artifact_label, artifact_openchain,\n prev, cur, timestamp, artifact_list, uri_list)\n elif action == \"AddArtifact\" or action == \"AddURI\":\n if stored_artifact_id is None:\n raise InvalidTransaction(\n \"Invalid Action-requires an existing artifact.\"\n )\n artifact = create_artifact(artifact_id, artifact_alias, \n artifact_name, artifact_type, artifact_checksum, \n artifact_label, artifact_openchain, \n prev, cur, timestamp, \n artifact_list, uri_list)\n \n # Adding the final payload to the state storage \n data = json.dumps(artifact).encode()\n addresses = context.set_state({data_address:data})\n \n return addresses\n################################################################################\n# HELPER FUNCTIONS #\n################################################################################\ndef create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, \n artifact_checksum, artifact_label, artifact_openchain, \n prev, cur, timestamp, artifact_list=[], uri_list=[]):\n \"\"\"\n Constructs the payload to be stored in the state storage.\n \n Args:\n artifact_uuid (str): The uuid of the artifact\n artifact_alias (str): The alias of the artifact\n artifact_name (str): The name of the artifact\n artifact_type (str): The type of the artifact\n artifact_checksum (str): The checksum of the artifact\n artifact_label (str): The label of the artifact\n artifact_openchain (str): The openchain of the artifact\n prev (str): The previous block id of the transaction (default \"0\")\n cur (str): the current block id of the transaction\n timestamp (str): The UTC time for when the transaction was submitted\n artifact_list (list of dict):\n The list of the artifact uuid associated with the artifact\n (default [])\n uri_list (list of dict):\n The list of the uri associated with the artifact (default [])\n \n Returns:\n type: dict\n The dictionary pertaining all the param is created and returned to\n be stored on the state storage.\n \n \"\"\"\n return { \n \"uuid\" : artifact_id,\n \"alias\" : artifact_alias,\n \"name\" : artifact_name,\n \"content_type\" : artifact_type,\n \"checksum\" : artifact_checksum,\n \"label\" : artifact_label,\n \"openchain\" : 
artifact_openchain,\n \"prev_block\" : prev, \n \"cur_block\" : cur,\n \"timestamp\" : timestamp,\n \"artifact_list\" : artifact_list,\n \"uri_list\" : uri_list\n }\n\ndef validate_transaction(artifact_id, action):\n \"\"\"\n Performs soft sanity check in order to improve runtime by eliminating the\n obvious exception errors.\n \n Args:\n artifact_id (str): The uuid of the artifact\n action (str): The command to be performed\n \n Raises:\n InvalidTransaction:\n If the uuid or the action are not passed in or the \n action is not a valid action.\n \n \"\"\"\n if not artifact_id:\n raise InvalidTransaction(\"Artifact ID is required\")\n if not action:\n raise InvalidTransaction(\"Action is required\")\n if action not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"):\n raise InvalidTransaction(\"Invalid action: {}\".format(action))\n\ndef make_artifact_address(namespace_prefix, artifact_id):\n \"\"\"\n Creates an artifact address which will be used to recover the associated\n UUID if the artifact already exists in the state storage; or, used as a key to\n store the new data into the state storage.\n \n Args:\n namespace_prefix (str):\n The prefix associating with the transaction family\n artifact_id (str): The uuid of the artifact\n \n Returns:\n type: str\n The address-to-be, which associates the uuid and the namespace prefix.\n \n \"\"\"\n return namespace_prefix + \\\n hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64]\n\ndef _display(msg):\n \"\"\"\n Logs the message to the debug logger.\n \n Args:\n msg (str): The message that is to be logged into the debug logger\n \n \"\"\"\n n = msg.count(\"\\n\")\n\n if n > 0:\n msg = msg.split(\"\\n\")\n length = max(len(line) for line in msg)\n else:\n length = len(msg)\n msg = [msg]\n\n LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")\n for line in msg:\n LOGGER.debug(\"+ \" + line.center(length) + \" +\")\n LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")\n################################################################################\n# 
#\n################################################################################\n"},"avg_line_length":{"kind":"number","value":39.8979591837,"string":"39.897959"},"max_line_length":{"kind":"number","value":82,"string":"82"},"alphanum_fraction":{"kind":"number","value":0.521312873,"string":"0.521313"},"count_classes":{"kind":"number","value":5899,"string":"5,899"},"score_classes":{"kind":"number","value":0.5028985507246376,"string":"0.502899"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":749,"string":"749"},"score_decorators":{"kind":"number","value":0.0638533674339301,"string":"0.063853"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":6596,"string":"6,596"},"score_documentation":{"kind":"number","value":0.5623188405797102,"string":"0.562319"}}},{"rowIdx":3756,"cells":{"hexsha":{"kind":"string","value":"b9877d896f97460bc5a35787da6277925368bc9f"},"size":{"kind":"number","value":764,"string":"764"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"ReviewsCollector.py"},"max_stars_repo_name":{"kind":"string","value":"fsandx/moodybooks"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5c13fe43849e4fa861a163c74411e9f796518bc9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"ReviewsCollector.py"},"max_issues_repo_name":{"kind":"string","value":"fsandx/moodybooks"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5c13fe43849e4fa861a163c74411e9f796518bc9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ReviewsCollector.py"},"max_forks_repo_name":{"kind":"string","value":"fsandx/moodybooks"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5c13fe43849e4fa861a163c74411e9f796518bc9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSTEP 2\nTakes the list of urls in the json files and downloads the html files to local drive\nStart with: scrapy runspider ReviewsCollector.py\n\n\"\"\"\n\nimport scrapy\nimport json\n\nclass ReviewsCollector(scrapy.Spider):\n\n def start_requests(self):\n with open(\"data/books.json\") as f:\n self.data = json.load(f)\n for item in self.data:\n if (item['url'] is not None):\n yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse)\n\n\n def parse(self, response):\n filename = response.url.split(\"/\")[-1] + '.html'\n with open('data/reviews/' + filename, 'wb+') as f:\n 
f.write(response.body)"},"avg_line_length":{"kind":"number","value":29.3846153846,"string":"29.384615"},"max_line_length":{"kind":"number","value":124,"string":"124"},"alphanum_fraction":{"kind":"number","value":0.6112565445,"string":"0.611257"},"count_classes":{"kind":"number","value":539,"string":"539"},"score_classes":{"kind":"number","value":0.7054973821989529,"string":"0.705497"},"count_generators":{"kind":"number","value":311,"string":"311"},"score_generators":{"kind":"number","value":0.40706806282722513,"string":"0.407068"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":284,"string":"284"},"score_documentation":{"kind":"number","value":0.3717277486910995,"string":"0.371728"}}},{"rowIdx":3757,"cells":{"hexsha":{"kind":"string","value":"b9887b38cf06939bc8dd710e9861e2366862482a"},"size":{"kind":"number","value":3120,"string":"3,120"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"firelight/interfaces/light.py"},"max_stars_repo_name":{"kind":"string","value":"roshie548/firelight"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3a5af5e2a1e5784127baebcf1517ffddcaff4062"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":16,"string":"16"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-11-29T03:05:31.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-19T05:32:45.000Z"},"max_issues_repo_path":{"kind":"string","value":"firelight/interfaces/light.py"},"max_issues_repo_name":{"kind":"string","value":"roshie548/firelight"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3a5af5e2a1e5784127baebcf1517ffddcaff4062"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"firelight/interfaces/light.py"},"max_forks_repo_name":{"kind":"string","value":"roshie548/firelight"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3a5af5e2a1e5784127baebcf1517ffddcaff4062"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from abc import ABC, abstractmethod\nfrom .color import Color\n\n\nclass LightSystem(ABC):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (hasattr(subclass, 'set_transition_time')\n and callable(subclass.set_transition_time)\n and hasattr(subclass, 'discover_lights')\n and callable(subclass.discover_lights)\n and hasattr(subclass, 'set_color_all_lights')\n and callable(subclass.set_color_all_lights))\n\n @abstractmethod\n def discover_lights(self):\n \"\"\"Discover the lights and groups in this LightSystem.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_transition_time(self, transition_time: int):\n \"\"\"Set how long it takes in milliseconds for colors to transition.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n 
def set_color(self, color: Color):\n \"\"\"Set the color of all the lights in the LightSystem.\"\"\"\n raise NotImplementedError\n\n\nclass LightGroup(ABC):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (hasattr(subclass, 'turn_on')\n and callable(subclass.turn_on)\n and hasattr(subclass, 'turn_off')\n and callable(subclass.turn_off)\n and hasattr(subclass, 'set_transition_time')\n and callable(subclass.set_transition_time)\n and hasattr(subclass, 'set_color')\n and callable(subclass.set_color))\n\n @abstractmethod\n def turn_on(self):\n \"\"\"Turn on the lights in this group.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def turn_off(self):\n \"\"\"Turn off the lights in this group.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_transition_time(self, transition_time: int):\n \"\"\"Set how long it takes in milliseconds for colors to transition.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_color(self, color: Color):\n \"\"\"Set the color of this light.\"\"\"\n raise NotImplementedError\n\n\nclass LightDevice(ABC):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (hasattr(subclass, 'turn_on')\n and callable(subclass.turn_on)\n and hasattr(subclass, 'turn_off')\n and callable(subclass.turn_off)\n and hasattr(subclass, 'set_transition_time')\n and callable(subclass.set_transition_time)\n and hasattr(subclass, 'set_color')\n and callable(subclass.set_color))\n\n @abstractmethod\n def turn_on(self):\n \"\"\"Turn on this light.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def turn_off(self):\n \"\"\"Turn off the light.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_transition_time(self, transition_time: int):\n \"\"\"Set how long it takes in milliseconds for colors to transition.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def set_color(self, color: Color):\n \"\"\"Set the color of this light.\"\"\"\n raise NotImplementedError\n"},"avg_line_length":{"kind":"number","value":32.8421052632,"string":"32.842105"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.641025641,"string":"0.641026"},"count_classes":{"kind":"number","value":3050,"string":"3,050"},"score_classes":{"kind":"number","value":0.9775641025641025,"string":"0.977564"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":2901,"string":"2,901"},"score_decorators":{"kind":"number","value":0.9298076923076923,"string":"0.929808"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":680,"string":"680"},"score_documentation":{"kind":"number","value":0.21794871794871795,"string":"0.217949"}}},{"rowIdx":3758,"cells":{"hexsha":{"kind":"string","value":"b98b6f0b6e5f35ef44fd272ec1f3a99b4d72acf0"},"size":{"kind":"number","value":1293,"string":"1,293"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"PolymorphismPYTHON/Polypy.py"},"max_stars_repo_name":{"kind":"string","value":"cadeng23/oop-cjgustafson"},"max_stars_repo_head_hexsha":{"kind":"string","value":"cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"PolymorphismPYTHON/Polypy.py"},"max_issues_repo_name":{"kind":"string","value":"cadeng23/oop-cjgustafson"},"max_issues_repo_head_hexsha":{"kind":"string","value":"cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"PolymorphismPYTHON/Polypy.py"},"max_forks_repo_name":{"kind":"string","value":"cadeng23/oop-cjgustafson"},"max_forks_repo_head_hexsha":{"kind":"string","value":"cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import random\n\nclass Family:\n\n def __init__(self,first, last, hair):\n self.first = first\n self.last = last\n self.hair = hair\n\n\n def fullname(self):\n return '{} {}'.format(self.first,self.last)\n\n\n def eyefind(self):\n temp = random.choice([1,2])\n #using the punnet square in genetics we know thatt a donor\n #with blue eyes and one with brown makes it 50/50 odds\n #that the childs eyes will be brown or blue\n if (temp == 1):\n self.EYES = (\"Brown\")\n else:\n self.EYES = (\"Blue\")\n return self.EYES\n\n def Apply_eyes(self):\n self.eyes = self.EYES\n\nDaughter = Family('Ashley', 'Smith', 'Brown')\nSon = Family('Kevin', 'Smith', 'Brown')\n\nprint(Daughter.eyes)\nprint(Son.eyes)\n\n\n#with the kids being born it will define what color hair and eyes \n# they may randomly get through inheritance\n\nclass Kids(Family):\n pass\n#Eyes are marked as Grey because they are unknown for now\n# hair colors are brown because brown is the dominant hair color\n Daughter = Kids('Danielle', 'Smith', 'Brown' )\n Son = Kids('Kevin','Smith','Brown')\n\n print(Daughter.eyes)\n print(Son.eyes)\n\n Daughter.Apply_eyes()\n Son.Apply_eyes()\n \n print(Daughter.eyes)\n print(Son.eyes)\n \n\n 
"},"avg_line_length":{"kind":"number","value":23.0892857143,"string":"23.089286"},"max_line_length":{"kind":"number","value":66,"string":"66"},"alphanum_fraction":{"kind":"number","value":0.618716164,"string":"0.618716"},"count_classes":{"kind":"number","value":1027,"string":"1,027"},"score_classes":{"kind":"number","value":0.794276875483372,"string":"0.794277"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":493,"string":"493"},"score_documentation":{"kind":"number","value":0.38128383604021654,"string":"0.381284"}}},{"rowIdx":3759,"cells":{"hexsha":{"kind":"string","value":"b98c3a1636cff18e5244db1f52b8e6e89e2c99b5"},"size":{"kind":"number","value":1494,"string":"1,494"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"homeassistant/components/device_tracker/owntracks.py"},"max_stars_repo_name":{"kind":"string","value":"evancohen/home-assistant"},"max_stars_repo_head_hexsha":{"kind":"string","value":"dafc0ced6b07025c03417d8e7a2c0133b4c622fc"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":14,"string":"14"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-11-10T07:57:43.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-29T13:45:26.000Z"},"max_issues_repo_path":{"kind":"string","value":"homeassistant/components/device_tracker/owntracks.py"},"max_issues_repo_name":{"kind":"string","value":"evancohen/home-assistant"},"max_issues_repo_head_hexsha":{"kind":"string","value":"dafc0ced6b07025c03417d8e7a2c0133b4c622fc"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"homeassistant/components/device_tracker/owntracks.py"},"max_forks_repo_name":{"kind":"string","value":"evancohen/home-assistant"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dafc0ced6b07025c03417d8e7a2c0133b4c622fc"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":8,"string":"8"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2015-11-14T16:40:41.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-02-17T19:48:08.000Z"},"content":{"kind":"string","value":"\"\"\"\nhomeassistant.components.device_tracker.owntracks\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nOwnTracks platform for the device tracker.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/device_tracker.owntracks/\n\"\"\"\nimport json\nimport logging\n\nimport homeassistant.components.mqtt as mqtt\n\nDEPENDENCIES = ['mqtt']\n\nLOCATION_TOPIC = 'owntracks/+/+'\n\n\ndef setup_scanner(hass, config, see):\n \"\"\" Set up a OwnTracksks tracker. \"\"\"\n\n def owntracks_location_update(topic, payload, qos):\n \"\"\" MQTT message received. 
\"\"\"\n\n # Docs on available data:\n # http://owntracks.org/booklet/tech/json/#_typelocation\n try:\n data = json.loads(payload)\n except ValueError:\n # If invalid JSON\n logging.getLogger(__name__).error(\n 'Unable to parse payload as JSON: %s', payload)\n return\n\n if not isinstance(data, dict) or data.get('_type') != 'location':\n return\n\n parts = topic.split('/')\n kwargs = {\n 'dev_id': '{}_{}'.format(parts[1], parts[2]),\n 'host_name': parts[1],\n 'gps': (data['lat'], data['lon']),\n }\n if 'acc' in data:\n kwargs['gps_accuracy'] = data['acc']\n if 'batt' in data:\n kwargs['battery'] = data['batt']\n\n see(**kwargs)\n\n mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)\n\n return True\n"},"avg_line_length":{"kind":"number","value":27.6666666667,"string":"27.666667"},"max_line_length":{"kind":"number","value":74,"string":"74"},"alphanum_fraction":{"kind":"number","value":0.5829986613,"string":"0.582999"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":617,"string":"617"},"score_documentation":{"kind":"number","value":0.41298527443105754,"string":"0.412985"}}},{"rowIdx":3760,"cells":{"hexsha":{"kind":"string","value":"b98c6a6e2a07073f4614093d6ae5d6469afd6835"},"size":{"kind":"number","value":48027,"string":"48,027"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/models/end_to_end_event_coreference.py"},"max_stars_repo_name":{"kind":"string","value":"luyaojie/E3C"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4b2f33da4629211fd6a3738077794f821c7f7c8b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-02-20T15:13:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-22T03:47:21.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/models/end_to_end_event_coreference.py"},"max_issues_repo_name":{"kind":"string","value":"luyaojie/E3C"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4b2f33da4629211fd6a3738077794f821c7f7c8b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/models/end_to_end_event_coreference.py"},"max_forks_repo_name":{"kind":"string","value":"luyaojie/E3C"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4b2f33da4629211fd6a3738077794f821c7f7c8b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n# Created by Roger on 2019-09-10\n# Mostly by 
AllenNLP\n\nimport logging\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import FeedForward, Pruner\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\nfrom allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder\nfrom allennlp.modules.similarity_functions import DotProductSimilarity\nfrom allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.nn import util, InitializerApplicator, RegularizerApplicator\nfrom allennlp.training.metrics import Average\nfrom overrides import overrides\nfrom torch.nn import BCEWithLogitsLoss\n\nfrom src.metrics.event_coref_scores import EventCorefScores\nfrom src.metrics.mention_f1 import TopSpanMentionTypeF1\nfrom src.utils.cluster_decoding_utils import node_decode\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\n@Model.register(\"end-to-end-event-coreference\")\nclass End2EndEventCoreferenceResolver(Model):\n \"\"\"\n This ``Model`` implements the coreference resolution model described \"End-to-end Neural\n Coreference Resolution\"\n \n by Lee et al., 2017.\n The basic outline of this model is to get an embedded representation of each span in the\n document. These span representations are scored and used to prune away spans that are unlikely\n to occur in a coreference cluster. For the remaining spans, the model decides which antecedent\n span (if any) they are coreferent with. The resulting coreference links, after applying\n transitivity, imply a clustering of the spans in the document.\n\n Parameters\n ----------\n vocab : ``Vocabulary``\n text_field_embedder : ``TextFieldEmbedder``\n Used to embed the ``text`` ``TextField`` we get as input to the model.\n context_layer : ``Seq2SeqEncoder``\n This layer incorporates contextual information for each word in the document.\n mention_feedforward : ``FeedForward``\n This feedforward network is applied to the span representations which is then scored\n by a linear layer.\n antecedent_feedforward: ``FeedForward``\n This feedforward network is applied to pairs of span representation, along with any\n pairwise features, which is then scored by a linear layer.\n feature_size: ``int``\n The embedding size for all the embedded features, such as distances or span widths.\n max_span_width: ``int``\n The maximum width of candidate spans.\n spans_per_word: float, required.\n A multiplier between zero and one which controls what percentage of candidate mention\n spans we retain with respect to the number of words in the document.\n max_antecedents: int, required.\n For each mention which survives the pruning stage, we consider this many antecedents.\n lexical_dropout: ``int``\n The probability of dropping out dimensions of the embedded text.\n initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n\n def __init__(self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n mention_feedforward: FeedForward,\n antecedent_feedforward: FeedForward,\n feature_size: int,\n context_layer: Seq2SeqEncoder = None,\n max_span_width: int = 1,\n 
spans_per_word: float = 0.1,\n max_antecedents: int = 50,\n lexical_dropout: float = 0.2,\n pretrain_ed: bool = False,\n pretrain_coref: bool = False,\n coref_loss_weight: float = 1.0,\n bce_loss_weight: float = 1.0,\n bce_pos_weight: float = None,\n local_window_size: int = 10,\n attention_type: str = 'dot',\n decoding: str = 'type-guided',\n type_threshold: float = -1.,\n type_refine: bool = True,\n type_match_in_eval: bool = True,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)\n logger.info(vocab)\n self._text_field_embedder = text_field_embedder\n self._context_layer = context_layer\n self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)\n self._event_scorer = torch.nn.Sequential(\n TimeDistributed(mention_feedforward),\n TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))\n )\n self._pretrain_ed = pretrain_ed\n self._pretrain_coref = pretrain_coref\n\n self._mention_pruner = Pruner(self._event_scorer)\n self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))\n\n self._local_window_size = local_window_size\n self._attention_type = attention_type\n self._decoding = decoding\n self._type_threshold = type_threshold\n logger.info(vocab.get_token_from_index(0, \"labels\"))\n\n if context_layer is not None:\n endpoint_span_extractor_dim = context_layer.get_output_dim()\n attentive_span_extractor_dim = text_field_embedder.get_output_dim()\n self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,\n combination=\"x,y\",\n num_width_embeddings=max_span_width,\n span_width_embedding_dim=feature_size)\n self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)\n span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()\n\n if self._local_window_size <= 0:\n self._attention_layer = None\n else:\n if self._attention_type == 'dot':\n similarity_function = DotProductSimilarity(scale_output=True)\n num_head = 1\n else:\n raise NotImplementedError('Attention Type: %s' % self._attention_type)\n self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,\n similarity_function=similarity_function,\n combination='2',\n num_attention_heads=num_head\n )\n else:\n attentive_span_extractor_dim = text_field_embedder.get_output_dim()\n\n if max_span_width > 1:\n endpoint_span_extractor_dim = text_field_embedder.get_output_dim()\n self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,\n combination=\"x,y\",\n num_width_embeddings=max_span_width,\n span_width_embedding_dim=feature_size)\n else:\n self._endpoint_span_extractor = None\n\n self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)\n\n if self._local_window_size <= 0:\n self._attention_layer = None\n else:\n if self._attention_type == 'dot':\n similarity_function = DotProductSimilarity(scale_output=True)\n num_head = 1\n else:\n raise NotImplementedError('Attention Type: %s' % self._attention_type)\n self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,\n similarity_function=similarity_function,\n combination='2',\n num_attention_heads=num_head\n )\n\n if self._endpoint_span_extractor is not None:\n span_embedding_size = 
self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()\n else:\n span_embedding_size = self._attentive_span_extractor.get_output_dim()\n\n if type_refine:\n self._type_refine_gate = torch.nn.Sequential(\n TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),\n torch.nn.Sigmoid()\n )\n else:\n self._type_refine_gate = None\n\n # NIL for Unified Event\n self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),\n embedding_dim=span_embedding_size)\n self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,\n self._event_embedding.get_output_dim())\n\n self._positive_label_size = vocab.get_vocab_size('labels') - 1\n\n # 10 possible distance buckets.\n self._num_distance_buckets = 10\n self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)\n self._coref_loss_weight = coref_loss_weight\n self._bce_loss_weight = bce_loss_weight\n self._bce_pos_weight = bce_pos_weight\n\n self._max_span_width = max_span_width\n self._spans_per_word = spans_per_word\n self._max_antecedents = max_antecedents\n\n self._mention_f1_score = TopSpanMentionTypeF1()\n self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)\n self._type_loss_metric = Average()\n self._realis_loss_metric = Average()\n self._coref_loss_metric = Average()\n self._coref_label_metric = Average()\n self._type_label_metric = Average()\n self._nil_label_metric = Average()\n\n if self._bce_pos_weight:\n self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))\n else:\n self._bce_loss = BCEWithLogitsLoss(reduction='none')\n\n if lexical_dropout > 0:\n self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)\n else:\n self._lexical_dropout = lambda x: x\n\n initializer(self)\n\n def _get_event_embedding(self, span_mask):\n \"\"\"\n :param span_mask:\n (batch, top_span_size, 1)\n :return:\n (batch, top_span_size, positive_label_size)\n \"\"\"\n event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1\n event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)\n event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])\n\n event_embeddings = self._event_embedding(event_indices)\n event_embeddings = event_embeddings.reshape(event_embeddings.size(0),\n event_embeddings.size(1) * event_embeddings.size(2))\n\n event_embeddings = self._event_embedding_map.forward(event_embeddings)\n event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),\n event_embeddings.size(0),\n event_embeddings.size(1),\n )\n return event_embeddings\n\n def _get_type_antecedent_labels(self, top_event_type_labels):\n \"\"\"\n :param top_event_type_labels:\n (batch, top_span_size, 1)\n :return:\n (batch, top_span_size, positive_label_size)\n \"\"\"\n event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),\n device=util.get_device_of(top_event_type_labels))\n\n top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),\n top_event_type_labels.size(1),\n event_indices.size(0)])\n\n type_antecedent_labels = (top_event_type_labels == event_indices).float()\n return type_antecedent_labels\n\n def _type_refine_embedding(self, top_embeddings, event_embeddings):\n # (batch, top_span_size, emb_size) bmm\n event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))\n shape = 
[event_prob.size(0), event_prob.size(1), 1]\n dummy_scores = event_prob.new_zeros(*shape)\n\n event_prob = torch.cat([dummy_scores, event_prob], -1)\n event_prob = torch.softmax(event_prob, -1)\n\n event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings\n\n refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))\n\n top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep\n return top_embeddings\n\n def _local_attention(self, raw_contextualized_embeddings, text_mask):\n device = util.get_device_of(raw_contextualized_embeddings)\n if device < 0:\n device = 'cpu'\n attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)\n # attention_mask = attention_mask - torch.eye(text_mask.size(1),\n # device=util.get_device_of(contextualized_embeddings))\n new_attention_mask = text_mask[:, :, None] * attention_mask\n new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),\n -self._local_window_size)\n new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,\n new_attention_mask)\n return new_contextualized_embeddings\n\n @overrides\n def forward(self, # type: ignore\n text: Dict[str, torch.LongTensor],\n spans: torch.IntTensor,\n coref_labels: torch.IntTensor = None,\n event_type_labels: torch.IntTensor = None,\n realis_labels: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Parameters\n ----------\n text : ``Dict[str, torch.LongTensor]``, required.\n The output of a ``TextField`` representing the text of\n the document.\n spans : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end\n indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of\n indices into the text of the document.\n coref_labels : ``torch.IntTensor``, optional (default = None).\n A tensor of shape (batch_size, num_spans), representing the cluster ids\n of each span, or -1 for those which do not appear in any clusters.\n event_type_labels : ``torch.IntTensor``, optional (default = None).\n A tensor of shape (batch_size, num_spans), representing the event label of the specific span.\n realis_labels : ``torch.IntTensor``, optional (default = None).\n A tensor of shape (batch_size, num_spans), representing the realis label of the specific span.\n metadata : ``List[Dict[str, Any]]``, optional (default = None).\n A metadata dictionary for each instance in the batch. We use the \"original_text\" and \"clusters\" keys\n from this dictionary, which respectively have the original text and the annotated gold coreference\n clusters for that instance.\n\n Returns\n -------\n An output dictionary consisting of:\n top_spans : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing\n the start and end word indices of the top spans that survived the pruning stage.\n antecedent_indices : ``torch.IntTensor``\n A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span\n the index (with respect to top_spans) of the possible antecedents the model considered.\n predicted_antecedents : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the\n index (with respect to antecedent_indices) of the most likely antecedent. 
-1 means there\n was no predicted link.\n loss : ``torch.FloatTensor``, optional\n A scalar loss to be optimised.\n \"\"\"\n # Shape: (batch_size, document_length, embedding_size)\n text_embeddings = self._lexical_dropout(self._text_field_embedder(text))\n\n document_length = text_embeddings.size(1)\n num_spans = spans.size(1)\n\n # Shape: (batch_size, document_length)\n text_mask = util.get_text_field_mask(text).float()\n # Shape: (batch_size, num_spans)\n span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()\n # SpanFields return -1 when they are used as padding. As we do\n # some comparisons based on span widths when we attend over the\n # span representations that we generate from these indices, we\n # need them to be <= 0. This is only relevant in edge cases where\n # the number of spans we consider after the pruning stage is >= the\n # total number of spans, because in this case, it is possible we might\n # consider a masked span.\n # Shape: (batch_size, num_spans, 2)\n spans = F.relu(spans.float()).long()\n\n if self._context_layer:\n # Shape: (batch_size, document_length, encoding_dim)\n raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)\n\n if self._attention_layer is not None:\n new_contextualized_embeddings = self._local_attention(\n raw_contextualized_embeddings=raw_contextualized_embeddings,\n text_mask=text_mask\n )\n else:\n new_contextualized_embeddings = raw_contextualized_embeddings\n\n # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)\n endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)\n # Shape: (batch_size, num_spans, embedding_size)\n attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)\n\n # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)\n # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)\n span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)\n else:\n raw_contextualized_embeddings = text_embeddings\n\n if self._attention_layer is not None:\n new_contextualized_embeddings = self._local_attention(\n raw_contextualized_embeddings=raw_contextualized_embeddings,\n text_mask=text_mask\n )\n else:\n new_contextualized_embeddings = raw_contextualized_embeddings\n\n span_embeddings_list = list()\n attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)\n span_embeddings_list += [attended_span_embeddings]\n if self._endpoint_span_extractor is not None:\n # Shape: (batch_size, num_spans, embedding_size)\n endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)\n span_embeddings_list += [endpoint_span_embeddings]\n span_embeddings = torch.cat(span_embeddings_list, -1)\n\n # event_scores = self._event_classifier.forward(span_embeddings)\n # Shape: (batch_size, num_spans, num_event_realis_label)\n # Shape: (batch_size, num_spans, num_event_realis_label)\n # event_realis_scores = self._event_realis_classifier.forward(span_embeddings)\n\n # Prune based on mention scores.\n num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))\n\n (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,\n span_mask,\n num_spans_to_keep_according_doc_len,\n )\n\n event_embeddings = self._get_event_embedding(span_mask)\n top_mask = top_mask.unsqueeze(-1)\n # Shape: (batch_size * num_spans_to_keep)\n # torch.index_select only accepts 1D indices, 
but here\n # we need to select spans for each element in the batch.\n # This reformats the indices to take into account their\n # index into the batch. We precompute this here to make\n # the multiple calls to util.batched_index_select below more efficient.\n flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)\n # Compute final predictions for which spans to consider as mentions.\n # Shape: (batch_size, num_spans_to_keep, 2)\n top_spans = util.batched_index_select(spans,\n top_indices,\n flat_top_span_indices)\n\n # Compute indices for antecedent spans to consider.\n max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)\n\n # top_span_embeddings = top_span_embeddings.detach()\n # top_span_mention_scores = top_span_mention_scores.detach()\n\n # Now that we have our variables in terms of num_spans_to_keep, we need to\n # compare span pairs to decide each span's antecedent. Each span can only\n # have prior spans as antecedents, and we only consider up to max_antecedents\n # prior spans. So the first thing we do is construct a matrix mapping a span's\n # index to the indices of its allowed antecedents. Note that this is independent\n # of the batch dimension - it's just a function of the span's position in\n # top_spans. The spans are in document order, so we can just use the relative\n # index of the spans to know which other spans are allowed antecedents.\n\n # Once we have this matrix, we reformat our variables again to get embeddings\n # for all valid antecedents for each span. This gives us variables with shapes\n # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which\n # we can use to make coreference decisions between valid span pairs.\n\n # Shapes:\n # (num_spans_to_keep, max_antecedents),\n # (1, max_antecedents),\n # (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\\n _generate_valid_antecedents(num_spans_to_keep_according_doc_len,\n max_antecedents,\n util.get_device_of(text_mask))\n\n if self._type_refine_gate is not None:\n top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)\n\n # Select tensors relating to the antecedent spans.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,\n valid_antecedent_indices)\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,\n valid_antecedent_indices).squeeze(-1)\n\n # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)\n candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(\n event_embeddings,\n candidate_antecedent_embeddings)\n\n # Compute antecedent scores.\n # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)\n span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,\n candidate_antecedent_embeddings,\n valid_antecedent_offsets)\n # (batch_size, event_type_size, 1)\n event_type_prior_scores = self._event_scorer(event_embeddings)\n # (batch_size, num_spans_to_keep, event_type_size)\n event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(\n candidate_antecedent_mention_scores.size(0),\n candidate_antecedent_mention_scores.size(1),\n -1)\n\n # (batch_size, num_spans_to_keep, event_type_size + max_antecedents)\n 
candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,\n candidate_antecedent_mention_scores],\n -1)\n\n # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)\n coreference_scores = self._compute_coreference_scores(span_pair_embeddings,\n top_scores,\n candidate_antecedent_mention_scores,\n valid_antecedent_log_mask)\n\n # We now have, for each span which survived the pruning stage,\n # a predicted antecedent. This implies a clustering if we group\n # mentions which refer to each other in a chain.\n # Shape: (batch_size, num_spans_to_keep)\n _, predicted_antecedents = coreference_scores.max(2)\n # Subtract one here because index 0 is the \"no antecedent\" class,\n # so this makes the indices line up with actual spans if the prediction\n # is greater than -1.\n predicted_antecedents -= 1\n\n output_dict = {\"top_spans\": top_spans,\n \"antecedent_indices\": valid_antecedent_indices,\n \"predicted_antecedents\": predicted_antecedents,\n \"coreference_scores\": coreference_scores,\n }\n\n if coref_labels is not None and event_type_labels is not None:\n\n pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)\n type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)\n\n # Find the gold labels for the spans which we kept.\n pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),\n top_indices,\n flat_top_span_indices)\n\n antecedent_labels = util.flattened_index_select(pruned_gold_labels,\n valid_antecedent_indices).squeeze(-1)\n\n antecedent_labels += valid_antecedent_log_mask.long()\n\n # Compute labels.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,\n type_antecedent_labels,\n antecedent_labels)\n\n bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),\n (event_type_labels > 0).float()) * span_mask\n bce_loss = bce_loss.sum() * self._bce_loss_weight\n\n # Now, compute the loss using the negative marginal log-likelihood.\n # This is equal to the log of the sum of the probabilities of all antecedent predictions\n # that would be consistent with the data, in the sense that we are minimising, for a\n # given span, the negative marginal log likelihood of all antecedents which are in the\n # same gold cluster as the span we are currently considering. Each span i predicts a\n # single antecedent j, but there might be several prior mentions k in the same\n # coreference cluster that would be valid antecedents. Our loss is the sum of the\n # probability assigned to all valid antecedents. 
This is a valid objective for\n # clustering as we don't mind which antecedent is predicted, so long as they are in\n # the same coreference cluster.\n\n if self._pretrain_ed:\n # All antecedent mask is 0\n top_mask = top_mask.expand_as(coreference_scores).clone()\n top_mask[:, :, self._positive_label_size + 2:] = 0\n\n coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)\n correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()\n negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()\n coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight\n\n output_dict[\"loss\"] = coref_loss + bce_loss\n\n decoded_result = self.decode(output_dict)\n\n pred_label_spans_list = decoded_result['pred_label_spans']\n gold_label_spans_list = [m['gold_label_spans'] for m in metadata]\n\n self._mention_f1_score(pred_label_spans_list,\n gold_label_spans_list,\n )\n self._conll_coref_scores(decoded_result['clusters'],\n metadata,\n pred_label_spans_list,\n gold_label_spans_list)\n\n self._type_loss_metric(bce_loss.item())\n self._coref_loss_metric(negative_marginal_log_likelihood.item())\n else:\n self._coref_loss_metric(0.)\n\n if metadata is not None:\n output_dict[\"document\"] = [x[\"original_text\"] for x in metadata]\n output_dict[\"offset\"] = [x[\"token_offset\"] for x in metadata]\n output_dict['doc_id'] = [x.get(\"doc_id\", None) for x in metadata]\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]):\n \"\"\"\n Converts the list of spans and predicted antecedent indices into clusters\n of spans for each element in the batch.\n\n Parameters\n ----------\n output_dict : ``Dict[str, torch.Tensor]``, required.\n The result of calling :func:`forward` on an instance or batch of instances.\n\n Returns\n -------\n The same output dictionary, but with an additional ``clusters`` key:\n\n clusters : ``List[List[List[Tuple[int, int]]]]``\n A nested list, representing, for each instance in the batch, the list of clusters,\n which are in turn comprised of a list of (start, end) inclusive spans into the\n original document.\n \"\"\"\n return node_decode(output_dict,\n self.vocab, decoding_algorithm=self._decoding,\n positive_label_size=self._positive_label_size,\n type_threshold=self._type_threshold)\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n mention_result = self._mention_f1_score.get_metric(reset)\n coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)\n\n return {\"c_p\": coref_precision,\n \"c_r\": coref_recall,\n \"c_f1\": coref_f1,\n \"m_p\": mention_result['precision'],\n \"m_r\": mention_result['recall'],\n \"m_f1\": mention_result['f1-score'],\n \"nil\": self._nil_label_metric.get_metric(reset),\n \"type\": self._type_label_metric.get_metric(reset),\n \"coref\": self._coref_label_metric.get_metric(reset),\n \"t_l\": self._type_loss_metric.get_metric(reset),\n \"c_l\": self._coref_loss_metric.get_metric(reset),\n \"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.}\n\n @staticmethod\n def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor):\n \"\"\"\n event_embeddings: ``torch.FloatTensor``, required.\n Embedding representations of the event types. 
Has shape\n (batch_size, event_type_size, embedding_size).\n antecedent_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the antecedent spans we are considering\n for each top span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size).\n return:\n (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)\n \"\"\"\n event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),\n antecedent_embeddings.size(1),\n event_embeddings.size(1),\n antecedent_embeddings.size(3),))\n return torch.cat([event_embeddings, antecedent_embeddings], 2)\n\n def _compute_span_pair_embeddings(self,\n top_span_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor,\n antecedent_offsets: torch.FloatTensor):\n \"\"\"\n Computes an embedding representation of pairs of spans for the pairwise scoring function\n to consider. This includes both the original span representations, the element-wise\n similarity of the span representations, and an embedding representation of the distance\n between the two spans.\n\n Parameters\n ---------- shape\n (batch_size, event_type_size, embedding_size).\n top_span_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the top spans. Has shape\n (batch_size, num_spans_to_keep, embedding_size).\n antecedent_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the antecedent spans we are considering\n for each top span. Has shape\n (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).\n antecedent_offsets : ``torch.IntTensor``, required.\n The offsets between each top span and its antecedent spans in terms\n of spans we are considering. Has shape (1, max_antecedents).\n\n Returns\n -------\n span_pair_embeddings : ``torch.FloatTensor``\n Embedding representation of the pair of spans to consider. 
Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)\n\n # Shape: (1, max_antecedents)\n bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)\n # (1, event_type)\n label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))\n # Shape: (1, max_antecedents + event_type_size, embedding_size)\n antecedent_distance_embeddings = self._distance_embedding(\n torch.cat([bucket_values, label_bucket_values], 1)\n )\n\n # Shape: (1, 1, max_antecedents + event_type_size, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)\n\n expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),\n antecedent_embeddings.size(1),\n antecedent_embeddings.size(2),\n antecedent_distance_embeddings.size(-1))\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)\n span_pair_embeddings = torch.cat([target_embeddings,\n antecedent_embeddings,\n antecedent_embeddings * target_embeddings,\n antecedent_distance_embeddings], -1)\n return span_pair_embeddings\n\n def _compute_antecedent_gold_labels(self,\n top_span_labels: torch.IntTensor,\n type_antecedent_labels: torch.IntTensor,\n antecedent_labels: torch.IntTensor):\n \"\"\"\n Generates a binary indicator for every pair of spans. This label is one if and\n only if the pair of spans belong to the same cluster. The labels are augmented\n with a dummy antecedent at the zeroth position, which represents the prediction\n that a span does not have any antecedent.\n\n Parameters\n ----------\n top_span_labels : ``torch.IntTensor``, required.\n The cluster id label for every span. The id is arbitrary,\n as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).\n antecedent_labels : ``torch.IntTensor``, required.\n The cluster id label for every antecedent span. The id is arbitrary,\n as we just care about the clustering. 
Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n\n Returns\n -------\n pairwise_labels_with_dummy_label : ``torch.FloatTensor``\n A binary tensor representing whether a given pair of spans belong to\n the same cluster in the gold clustering.\n Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).\n\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n # print(top_span_labels)\n # print(antecedent_labels)\n\n target_labels = top_span_labels.expand_as(antecedent_labels)\n same_cluster_indicator = (target_labels == antecedent_labels).float()\n non_dummy_indicator = (target_labels >= 0).float()\n pairwise_labels = same_cluster_indicator * non_dummy_indicator\n\n if self._pretrain_ed:\n pairwise_labels = pairwise_labels * 0\n else:\n # for pairwise_labels without type_antecedent_labels\n pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()\n type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)\n\n self._coref_label_metric(torch.sum(pairwise_labels).item())\n self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())\n self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())\n\n # print(pairwise_labels)\n #\n # # Shape: (batch_size, num_spans_to_keep, 1)\n # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)\n\n # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)\n pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)\n return pairwise_labels_with_dummy_label\n\n def _compute_coreference_scores(self,\n pairwise_embeddings: torch.FloatTensor,\n top_span_mention_scores: torch.FloatTensor,\n antecedent_mention_scores: torch.FloatTensor,\n antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Computes scores for every pair of spans. Additionally, a dummy label is included,\n representing the decision that the span is not coreferent with anything. For the dummy\n label, the score is always zero. For the true antecedent spans, the score consists of\n the pairwise antecedent score and the unary mention scores for the span and its\n antecedent. The factoring allows the model to blame many of the absent links on bad\n spans, enabling the pruning strategy used in the forward pass.\n\n Parameters\n ----------\n pairwise_embeddings: ``torch.FloatTensor``, required.\n Embedding representations of pairs of spans. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, encoding_dim)\n top_span_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every antecedent. 
Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_log_mask: ``torch.FloatTensor``, required.\n The log of the mask for valid antecedents.\n\n Returns\n -------\n coreference_scores: ``torch.FloatTensor``\n A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),\n representing the unormalised score for each (span, antecedent) pair\n we considered.\n\n \"\"\"\n antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),\n antecedent_log_mask.size(1),\n self._positive_label_size)),\n antecedent_log_mask],\n -1)\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n antecedent_scores = self._antecedent_scorer(\n self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)\n antecedent_scores += top_span_mention_scores + antecedent_mention_scores\n antecedent_scores += antecedent_log_mask\n\n # Shape: (batch_size, num_spans_to_keep, 1)\n shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]\n dummy_scores = antecedent_scores.new_zeros(*shape)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)\n return coreference_scores\n\n\ndef _generate_valid_antecedents(num_spans_to_keep: int,\n max_antecedents: int,\n device: int) -> Tuple[torch.IntTensor,\n torch.IntTensor,\n torch.FloatTensor]:\n \"\"\"\n This method generates possible antecedents per span which survived the pruning\n stage. This procedure is `generic across the batch`. The reason this is the case is\n that each span in a batch can be coreferent with any previous span, but here we\n are computing the possible `indices` of these spans. So, regardless of the batch,\n the 1st span _cannot_ have any antecedents, because there are none to select from.\n Similarly, each element can only predict previous spans, so this returns a matrix\n of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to\n (i - 1) - j if j <= i, or zero otherwise.\n\n Parameters\n ----------\n num_spans_to_keep : ``int``, required.\n The number of spans that were kept while pruning.\n max_antecedents : ``int``, required.\n The maximum number of antecedent spans to consider for every span.\n device: ``int``, required.\n The CUDA device to use.\n\n Returns\n -------\n valid_antecedent_indices : ``torch.IntTensor``\n The indices of every antecedent to consider with respect to the top k spans.\n Has shape ``(num_spans_to_keep, max_antecedents)``.\n valid_antecedent_offsets : ``torch.IntTensor``\n The distance between the span and each of its antecedents in terms of the number\n of considered spans (i.e not the word distance between the spans).\n Has shape ``(1, max_antecedents)``.\n valid_antecedent_log_mask : ``torch.FloatTensor``\n The logged mask representing whether each antecedent span is valid. Required since\n different spans have different numbers of valid antecedents. 
For example, the first\n span in the document should have no valid antecedents.\n Has shape ``(1, num_spans_to_keep, max_antecedents)``.\n \"\"\"\n # Shape: (num_spans_to_keep, 1)\n target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)\n\n # Shape: (1, max_antecedents)\n valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)\n\n # This is a broadcasted subtraction.\n # Shape: (num_spans_to_keep, max_antecedents)\n raw_antecedent_indices = target_indices - valid_antecedent_offsets\n\n # In our matrix of indices, the upper triangular part will be negative\n # because the offsets will be > the target indices. We want to mask these,\n # because these are exactly the indices which we don't want to predict, per span.\n # We're generating a logspace mask here because we will eventually create a\n # distribution over these indices, so we need the 0 elements of the mask to be -inf\n # in order to not mess up the normalisation of the distribution.\n # Shape: (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()\n\n # Shape: (num_spans_to_keep, max_antecedents)\n valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()\n return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask\n"},"avg_line_length":{"kind":"number","value":54.5141884222,"string":"54.514188"},"max_line_length":{"kind":"number","value":134,"string":"134"},"alphanum_fraction":{"kind":"number","value":0.629271035,"string":"0.629271"},"count_classes":{"kind":"number","value":43414,"string":"43,414"},"score_classes":{"kind":"number","value":0.9039498615362193,"string":"0.90395"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":43462,"string":"43,462"},"score_decorators":{"kind":"number","value":0.9049492993524476,"string":"0.904949"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":19498,"string":"19,498"},"score_documentation":{"kind":"number","value":0.4059799696004331,"string":"0.40598"}}},{"rowIdx":3761,"cells":{"hexsha":{"kind":"string","value":"b98ccbb0c859fdccad6b30924e5845122d497aa5"},"size":{"kind":"number","value":1964,"string":"1,964"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"week2/7litersProblem.py"},"max_stars_repo_name":{"kind":"string","value":"vietanhtran2710/ArtificialIntelligenceHomework"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f4da761016d67477b50856cadf1e2560230d3f79"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-09-20T08:32:23.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-09-25T08:11:48.000Z"},"max_issues_repo_path":{"kind":"string","value":"week2/7litersProblem.py"},"max_issues_repo_name":{"kind":"string","value":"vietanhtran2710/ArtificialIntelligenceHomework"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f4da761016d67477b50856cadf1e2560230d3f79"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"week2/7litersProblem.py"},"max_forks_repo_name":{"kind":"string","value":"vietanhtran2710/ArtificialIntelligenceHomework"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f4da761016d67477b50856cadf1e2560230d3f79"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\n Given 3 bottles of capacities 3, 5, and 9 liters, \n count number of all possible solutions to get 7 liters\n\"\"\"\n\ncurrent_path = [[0, 0, 0]]\nCAPACITIES = (3, 5, 9)\nsolutions_count = 0\n\ndef move_to_new_state(current_state):\n global solutions_count, current_path\n if 7 in current_state:\n solutions_count += 1\n else:\n # Empty bottle\n for i in range(3):\n if current_state[i] != 0:\n new_state = list(current_state)\n new_state[i] = 0\n if new_state not in current_path:\n current_path.append(new_state)\n move_to_new_state(new_state)\n current_path.pop()\n # Fill bottle\n for i in range(3):\n if current_state[i] != CAPACITIES[i]:\n new_state = list(current_state)\n new_state[i] = CAPACITIES[i]\n if new_state not in current_path:\n current_path.append(new_state)\n move_to_new_state(new_state)\n current_path.pop()\n # Pour from one bottle to another\n for i in range(3):\n for j in range(3):\n if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:\n new_state = list(current_state)\n liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])\n new_state[j] += liters_change\n new_state[i] -= liters_change\n if new_state not in current_path:\n current_path.append(new_state)\n move_to_new_state(new_state)\n current_path.pop()\n\nif __name__ == \"__main__\":\n try:\n current_state = [0, 0, 0]\n move_to_new_state(current_state)\n print(solutions_count)\n except KeyboardInterrupt:\n print(solutions_count)\n \n# Result: at least 44900799 
solution\n"},"avg_line_length":{"kind":"number","value":35.7090909091,"string":"35.709091"},"max_line_length":{"kind":"number","value":91,"string":"91"},"alphanum_fraction":{"kind":"number","value":0.5483706721,"string":"0.548371"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":227,"string":"227"},"score_documentation":{"kind":"number","value":0.11558044806517312,"string":"0.11558"}}},{"rowIdx":3762,"cells":{"hexsha":{"kind":"string","value":"b98d02f62eca1818cb1fb297d1c8644dd35ff288"},"size":{"kind":"number","value":8263,"string":"8,263"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"st2common/st2common/bootstrap/rulesregistrar.py"},"max_stars_repo_name":{"kind":"string","value":"avezraj/st2"},"max_stars_repo_head_hexsha":{"kind":"string","value":"519c7f6819e52fb289c440bb7d1df7b558bb9ed7"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"st2common/st2common/bootstrap/rulesregistrar.py"},"max_issues_repo_name":{"kind":"string","value":"avezraj/st2"},"max_issues_repo_head_hexsha":{"kind":"string","value":"519c7f6819e52fb289c440bb7d1df7b558bb9ed7"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"st2common/st2common/bootstrap/rulesregistrar.py"},"max_forks_repo_name":{"kind":"string","value":"avezraj/st2"},"max_forks_repo_head_hexsha":{"kind":"string","value":"519c7f6819e52fb289c440bb7d1df7b558bb9ed7"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os\n\nimport six\n\nfrom st2common import log as logging\nfrom st2common.constants.meta import ALLOWED_EXTS\nfrom st2common.constants.pack import DEFAULT_PACK_NAME\nfrom st2common.bootstrap.base import 
ResourceRegistrar\nfrom st2common.models.api.rule import RuleAPI\nfrom st2common.models.system.common import ResourceReference\nfrom st2common.persistence.rule import Rule\nfrom st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count\nfrom st2common.exceptions.db import coditationDBObjectNotFoundError\nimport st2common.content.utils as content_utils\n\n__all__ = [\n 'RulesRegistrar',\n 'register_rules'\n]\n\nLOG = logging.getLogger(__name__)\n\n\nclass RulesRegistrar(ResourceRegistrar):\n ALLOWED_EXTENSIONS = ALLOWED_EXTS\n\n def register_from_packs(self, base_dirs):\n \"\"\"\n :return: Number of rules registered.\n :rtype: ``int``\n \"\"\"\n # Register packs first\n self.register_packs(base_dirs=base_dirs)\n\n registered_count = 0\n content = self._pack_loader.get_content(base_dirs=base_dirs,\n content_type='rules')\n for pack, rules_dir in six.iteritems(content):\n if not rules_dir:\n LOG.debug('Pack %s does not contain rules.', pack)\n continue\n try:\n LOG.debug('Registering rules from pack: %s', pack)\n rules = self._get_rules_from_pack(rules_dir)\n count = self._register_rules_from_pack(pack, rules)\n registered_count += count\n except Exception as e:\n if self._fail_on_failure:\n raise e\n\n LOG.exception('Failed registering all rules from pack: %s', rules_dir)\n\n return registered_count\n\n def register_from_pack(self, pack_dir):\n \"\"\"\n Register all the rules from the provided pack.\n\n :return: Number of rules registered.\n :rtype: ``int``\n \"\"\"\n pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir\n _, pack = os.path.split(pack_dir)\n rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,\n content_type='rules')\n\n # Register pack first\n self.register_pack(pack_name=pack, pack_dir=pack_dir)\n\n registered_count = 0\n if not rules_dir:\n return registered_count\n\n LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)\n\n try:\n rules = self._get_rules_from_pack(rules_dir=rules_dir)\n registered_count = self._register_rules_from_pack(pack=pack, rules=rules)\n except Exception as e:\n if self._fail_on_failure:\n raise e\n\n LOG.exception('Failed registering all rules from pack: %s', rules_dir)\n\n return registered_count\n\n def _get_rules_from_pack(self, rules_dir):\n return self.get_resources_from_pack(resources_dir=rules_dir)\n\n def _register_rules_from_pack(self, pack, rules):\n registered_count = 0\n\n # TODO: Refactor this monstrosity\n for rule in rules:\n LOG.debug('Loading rule from %s.', rule)\n try:\n content = self._meta_loader.load(rule)\n pack_field = content.get('pack', None)\n if not pack_field:\n content['pack'] = pack\n pack_field = pack\n if pack_field != pack:\n raise Exception('Model is in pack \"%s\" but field \"pack\" is different: %s' %\n (pack, pack_field))\n\n metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,\n file_path=rule,\n use_pack_cache=True)\n content['metadata_file'] = metadata_file\n\n rule_api = RuleAPI(**content)\n rule_api.validate()\n rule_db = RuleAPI.to_model(rule_api)\n\n # Migration from rule without pack to rule with pack.\n # There might be a rule with same name but in pack `default`\n # generated in migration script. 
In this case, we want to\n # delete so we don't have duplicates.\n if pack_field != DEFAULT_PACK_NAME:\n try:\n rule_ref = ResourceReference.to_string_reference(name=content['name'],\n pack=DEFAULT_PACK_NAME)\n LOG.debug('Looking for rule %s in pack %s', content['name'],\n DEFAULT_PACK_NAME)\n existing = Rule.get_by_ref(rule_ref)\n LOG.debug('Existing = %s', existing)\n if existing:\n LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)\n Rule.delete(existing)\n except:\n LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)\n\n try:\n rule_ref = ResourceReference.to_string_reference(name=content['name'],\n pack=content['pack'])\n existing = Rule.get_by_ref(rule_ref)\n if existing:\n rule_db.id = existing.id\n LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)\n except coditationDBObjectNotFoundError:\n LOG.debug('Rule %s not found. Creating new one.', rule)\n\n try:\n rule_db = Rule.add_or_update(rule_db)\n increment_trigger_ref_count(rule_api=rule_api)\n extra = {'rule_db': rule_db}\n LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)\n except Exception:\n LOG.exception('Failed to create rule %s.', rule_api.name)\n\n # If there was an existing rule then the ref count was updated in\n # to_model so it needs to be adjusted down here. Also, update could\n # lead to removal of a Trigger so now is a good time for book-keeping.\n if existing:\n cleanup_trigger_db_for_rule(existing)\n except Exception as e:\n if self._fail_on_failure:\n msg = ('Failed to register rule \"%s\" from pack \"%s\": %s' % (rule, pack,\n six.text_type(e)))\n raise ValueError(msg)\n\n LOG.exception('Failed registering rule from %s.', rule)\n else:\n registered_count += 1\n\n return registered_count\n\n\ndef register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,\n fail_on_failure=False):\n if packs_base_paths:\n assert isinstance(packs_base_paths, list)\n\n if not packs_base_paths:\n packs_base_paths = content_utils.get_packs_base_paths()\n\n registrar = RulesRegistrar(use_pack_cache=use_pack_cache,\n fail_on_failure=fail_on_failure)\n\n if pack_dir:\n result = registrar.register_from_pack(pack_dir=pack_dir)\n else:\n result = registrar.register_from_packs(base_dirs=packs_base_paths)\n\n return 
result\n"},"avg_line_length":{"kind":"number","value":41.1094527363,"string":"41.109453"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.5782403485,"string":"0.57824"},"count_classes":{"kind":"number","value":6351,"string":"6,351"},"score_classes":{"kind":"number","value":0.7686070434466901,"string":"0.768607"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2013,"string":"2,013"},"score_documentation":{"kind":"number","value":0.24361612005324942,"string":"0.243616"}}},{"rowIdx":3763,"cells":{"hexsha":{"kind":"string","value":"b9912797a8155d6800745fe804b93206d95de8ac"},"size":{"kind":"number","value":91819,"string":"91,819"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py"},"max_stars_repo_name":{"kind":"string","value":"aiven/azure-sdk-for-python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8764dc07423beca46ed0b51212d81289d9e52c60"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-09-07T18:43:20.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-09-07T18:43:20.000Z"},"max_issues_repo_path":{"kind":"string","value":"sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py"},"max_issues_repo_name":{"kind":"string","value":"aiven/azure-sdk-for-python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8764dc07423beca46ed0b51212d81289d9e52c60"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-11-03T06:10:36.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-12-01T06:29:39.000Z"},"max_forks_repo_path":{"kind":"string","value":"sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py"},"max_forks_repo_name":{"kind":"string","value":"msyyc/azure-sdk-for-python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e2dba75181f8b4336ae57e75aa391322c12c3123"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-05-19T02:55:10.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-05-19T02:55:10.000Z"},"content":{"kind":"string","value":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nimport datetime\nfrom typing import Dict, List, Optional, Union\n\nfrom azure.core.exceptions import HttpResponseError\nimport msrest.serialization\n\nfrom ._cost_management_client_enums import *\n\n\nclass Resource(msrest.serialization.Model):\n \"\"\"The Resource model definition.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :ivar tags: A set of tags. Resource tags.\n :vartype tags: dict[str, str]\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'tags': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(Resource, self).__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.tags = None\n\n\nclass Alert(Resource):\n \"\"\"An individual alert.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :ivar tags: A set of tags. Resource tags.\n :vartype tags: dict[str, str]\n :param definition: defines the type of alert.\n :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition\n :param description: Alert description.\n :type description: str\n :param source: Source of alert. Possible values include: \"Preset\", \"User\".\n :type source: str or ~azure.mgmt.costmanagement.models.AlertSource\n :param details: Alert details.\n :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails\n :param cost_entity_id: related budget.\n :type cost_entity_id: str\n :param status: alert status. 
Possible values include: \"None\", \"Active\", \"Overridden\",\n \"Resolved\", \"Dismissed\".\n :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus\n :param creation_time: dateTime in which alert was created.\n :type creation_time: str\n :param close_time: dateTime in which alert was closed.\n :type close_time: str\n :param modification_time: dateTime in which alert was last modified.\n :type modification_time: str\n :param status_modification_user_name:\n :type status_modification_user_name: str\n :param status_modification_time: dateTime in which the alert status was last modified.\n :type status_modification_time: str\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'tags': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},\n 'description': {'key': 'properties.description', 'type': 'str'},\n 'source': {'key': 'properties.source', 'type': 'str'},\n 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},\n 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},\n 'status': {'key': 'properties.status', 'type': 'str'},\n 'creation_time': {'key': 'properties.creationTime', 'type': 'str'},\n 'close_time': {'key': 'properties.closeTime', 'type': 'str'},\n 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},\n 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},\n 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n definition: Optional[\"AlertPropertiesDefinition\"] = None,\n description: Optional[str] = None,\n source: Optional[Union[str, \"AlertSource\"]] = None,\n details: Optional[\"AlertPropertiesDetails\"] = None,\n cost_entity_id: Optional[str] = None,\n status: Optional[Union[str, \"AlertStatus\"]] = None,\n creation_time: Optional[str] = None,\n close_time: Optional[str] = None,\n modification_time: Optional[str] = None,\n status_modification_user_name: Optional[str] = None,\n status_modification_time: Optional[str] = None,\n **kwargs\n ):\n super(Alert, self).__init__(**kwargs)\n self.definition = definition\n self.description = description\n self.source = source\n self.details = details\n self.cost_entity_id = cost_entity_id\n self.status = status\n self.creation_time = creation_time\n self.close_time = close_time\n self.modification_time = modification_time\n self.status_modification_user_name = status_modification_user_name\n self.status_modification_time = status_modification_time\n\n\nclass AlertPropertiesDefinition(msrest.serialization.Model):\n \"\"\"defines the type of alert.\n\n :param type: type of alert. Possible values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\",\n \"General\", \"xCloud\", \"BudgetForecast\".\n :type type: str or ~azure.mgmt.costmanagement.models.AlertType\n :param category: Alert category. Possible values include: \"Cost\", \"Usage\", \"Billing\", \"System\".\n :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory\n :param criteria: Criteria that triggered alert. 
Possible values include:\n \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\",\n \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\",\n \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\",\n \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\",\n \"CrossCloudCollectionError\", \"GeneralThresholdError\".\n :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria\n \"\"\"\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'category': {'key': 'category', 'type': 'str'},\n 'criteria': {'key': 'criteria', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n type: Optional[Union[str, \"AlertType\"]] = None,\n category: Optional[Union[str, \"AlertCategory\"]] = None,\n criteria: Optional[Union[str, \"AlertCriteria\"]] = None,\n **kwargs\n ):\n super(AlertPropertiesDefinition, self).__init__(**kwargs)\n self.type = type\n self.category = category\n self.criteria = criteria\n\n\nclass AlertPropertiesDetails(msrest.serialization.Model):\n \"\"\"Alert details.\n\n :param time_grain_type: Type of timegrain cadence. Possible values include: \"None\", \"Monthly\",\n \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\".\n :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType\n :param period_start_date: datetime of periodStartDate.\n :type period_start_date: str\n :param triggered_by: notificationId that triggered this alert.\n :type triggered_by: str\n :param resource_group_filter: array of resourceGroups to filter by.\n :type resource_group_filter: list[object]\n :param resource_filter: array of resources to filter by.\n :type resource_filter: list[object]\n :param meter_filter: array of meters to filter by.\n :type meter_filter: list[object]\n :param tag_filter: tags to filter by.\n :type tag_filter: object\n :param threshold: notification threshold percentage as a decimal which activated this alert.\n :type threshold: float\n :param operator: operator used to compare currentSpend with amount. 
Possible values include:\n \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\".\n :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator\n :param amount: budget threshold amount.\n :type amount: float\n :param unit: unit of currency being used.\n :type unit: str\n :param current_spend: current spend.\n :type current_spend: float\n :param contact_emails: list of emails to contact.\n :type contact_emails: list[str]\n :param contact_groups: list of action groups to broadcast to.\n :type contact_groups: list[str]\n :param contact_roles: list of contact roles.\n :type contact_roles: list[str]\n :param overriding_alert: overriding alert.\n :type overriding_alert: str\n \"\"\"\n\n _attribute_map = {\n 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'},\n 'period_start_date': {'key': 'periodStartDate', 'type': 'str'},\n 'triggered_by': {'key': 'triggeredBy', 'type': 'str'},\n 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'},\n 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'},\n 'meter_filter': {'key': 'meterFilter', 'type': '[object]'},\n 'tag_filter': {'key': 'tagFilter', 'type': 'object'},\n 'threshold': {'key': 'threshold', 'type': 'float'},\n 'operator': {'key': 'operator', 'type': 'str'},\n 'amount': {'key': 'amount', 'type': 'float'},\n 'unit': {'key': 'unit', 'type': 'str'},\n 'current_spend': {'key': 'currentSpend', 'type': 'float'},\n 'contact_emails': {'key': 'contactEmails', 'type': '[str]'},\n 'contact_groups': {'key': 'contactGroups', 'type': '[str]'},\n 'contact_roles': {'key': 'contactRoles', 'type': '[str]'},\n 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None,\n period_start_date: Optional[str] = None,\n triggered_by: Optional[str] = None,\n resource_group_filter: Optional[List[object]] = None,\n resource_filter: Optional[List[object]] = None,\n meter_filter: Optional[List[object]] = None,\n tag_filter: Optional[object] = None,\n threshold: Optional[float] = None,\n operator: Optional[Union[str, \"AlertOperator\"]] = None,\n amount: Optional[float] = None,\n unit: Optional[str] = None,\n current_spend: Optional[float] = None,\n contact_emails: Optional[List[str]] = None,\n contact_groups: Optional[List[str]] = None,\n contact_roles: Optional[List[str]] = None,\n overriding_alert: Optional[str] = None,\n **kwargs\n ):\n super(AlertPropertiesDetails, self).__init__(**kwargs)\n self.time_grain_type = time_grain_type\n self.period_start_date = period_start_date\n self.triggered_by = triggered_by\n self.resource_group_filter = resource_group_filter\n self.resource_filter = resource_filter\n self.meter_filter = meter_filter\n self.tag_filter = tag_filter\n self.threshold = threshold\n self.operator = operator\n self.amount = amount\n self.unit = unit\n self.current_spend = current_spend\n self.contact_emails = contact_emails\n self.contact_groups = contact_groups\n self.contact_roles = contact_roles\n self.overriding_alert = overriding_alert\n\n\nclass AlertsResult(msrest.serialization.Model):\n \"\"\"Result of alerts.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: List of alerts.\n :vartype value: list[~azure.mgmt.costmanagement.models.Alert]\n :ivar next_link: URL to get the next set of alerts results if there are any.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n 'value': 
{'readonly': True},\n 'next_link': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Alert]'},\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(AlertsResult, self).__init__(**kwargs)\n self.value = None\n self.next_link = None\n\n\nclass CommonExportProperties(msrest.serialization.Model):\n \"\"\"The common properties of the export.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param format: The format of the export being delivered. Currently only 'Csv' is supported.\n Possible values include: \"Csv\".\n :type format: str or ~azure.mgmt.costmanagement.models.FormatType\n :param delivery_info: Required. Has delivery information for the export.\n :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo\n :param definition: Required. Has the definition for the export.\n :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition\n :param run_history: If requested, has the most recent execution history for the export.\n :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult\n :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the\n next execution time.\n :vartype next_run_time_estimate: ~datetime.datetime\n \"\"\"\n\n _validation = {\n 'delivery_info': {'required': True},\n 'definition': {'required': True},\n 'next_run_time_estimate': {'readonly': True},\n }\n\n _attribute_map = {\n 'format': {'key': 'format', 'type': 'str'},\n 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},\n 'definition': {'key': 'definition', 'type': 'ExportDefinition'},\n 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},\n 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},\n }\n\n def __init__(\n self,\n *,\n delivery_info: \"ExportDeliveryInfo\",\n definition: \"ExportDefinition\",\n format: Optional[Union[str, \"FormatType\"]] = None,\n run_history: Optional[\"ExportExecutionListResult\"] = None,\n **kwargs\n ):\n super(CommonExportProperties, self).__init__(**kwargs)\n self.format = format\n self.delivery_info = delivery_info\n self.definition = definition\n self.run_history = run_history\n self.next_run_time_estimate = None\n\n\nclass Dimension(Resource):\n \"\"\"Dimension.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :ivar tags: A set of tags. 
Resource tags.\n :vartype tags: dict[str, str]\n :ivar description: Dimension description.\n :vartype description: str\n :ivar filter_enabled: Filter enabled.\n :vartype filter_enabled: bool\n :ivar grouping_enabled: Grouping enabled.\n :vartype grouping_enabled: bool\n :param data:\n :type data: list[str]\n :ivar total: Total number of data for the dimension.\n :vartype total: int\n :ivar category: Dimension category.\n :vartype category: str\n :ivar usage_start: Usage start.\n :vartype usage_start: ~datetime.datetime\n :ivar usage_end: Usage end.\n :vartype usage_end: ~datetime.datetime\n :ivar next_link: The link (url) to the next page of results.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'tags': {'readonly': True},\n 'description': {'readonly': True},\n 'filter_enabled': {'readonly': True},\n 'grouping_enabled': {'readonly': True},\n 'total': {'readonly': True},\n 'category': {'readonly': True},\n 'usage_start': {'readonly': True},\n 'usage_end': {'readonly': True},\n 'next_link': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'description': {'key': 'properties.description', 'type': 'str'},\n 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'},\n 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'},\n 'data': {'key': 'properties.data', 'type': '[str]'},\n 'total': {'key': 'properties.total', 'type': 'int'},\n 'category': {'key': 'properties.category', 'type': 'str'},\n 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'},\n 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'},\n 'next_link': {'key': 'properties.nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n data: Optional[List[str]] = None,\n **kwargs\n ):\n super(Dimension, self).__init__(**kwargs)\n self.description = None\n self.filter_enabled = None\n self.grouping_enabled = None\n self.data = data\n self.total = None\n self.category = None\n self.usage_start = None\n self.usage_end = None\n self.next_link = None\n\n\nclass DimensionsListResult(msrest.serialization.Model):\n \"\"\"Result of listing dimensions. It contains a list of available dimensions.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: The list of dimensions.\n :vartype value: list[~azure.mgmt.costmanagement.models.Dimension]\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Dimension]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(DimensionsListResult, self).__init__(**kwargs)\n self.value = None\n\n\nclass DismissAlertPayload(msrest.serialization.Model):\n \"\"\"The request payload to update an alert.\n\n :param definition: defines the type of alert.\n :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition\n :param description: Alert description.\n :type description: str\n :param source: Source of alert. Possible values include: \"Preset\", \"User\".\n :type source: str or ~azure.mgmt.costmanagement.models.AlertSource\n :param details: Alert details.\n :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails\n :param cost_entity_id: related budget.\n :type cost_entity_id: str\n :param status: alert status. 
Possible values include: \"None\", \"Active\", \"Overridden\",\n \"Resolved\", \"Dismissed\".\n :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus\n :param creation_time: dateTime in which alert was created.\n :type creation_time: str\n :param close_time: dateTime in which alert was closed.\n :type close_time: str\n :param modification_time: dateTime in which alert was last modified.\n :type modification_time: str\n :param status_modification_user_name:\n :type status_modification_user_name: str\n :param status_modification_time: dateTime in which the alert status was last modified.\n :type status_modification_time: str\n \"\"\"\n\n _attribute_map = {\n 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},\n 'description': {'key': 'properties.description', 'type': 'str'},\n 'source': {'key': 'properties.source', 'type': 'str'},\n 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},\n 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},\n 'status': {'key': 'properties.status', 'type': 'str'},\n 'creation_time': {'key': 'properties.creationTime', 'type': 'str'},\n 'close_time': {'key': 'properties.closeTime', 'type': 'str'},\n 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},\n 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},\n 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n definition: Optional[\"AlertPropertiesDefinition\"] = None,\n description: Optional[str] = None,\n source: Optional[Union[str, \"AlertSource\"]] = None,\n details: Optional[\"AlertPropertiesDetails\"] = None,\n cost_entity_id: Optional[str] = None,\n status: Optional[Union[str, \"AlertStatus\"]] = None,\n creation_time: Optional[str] = None,\n close_time: Optional[str] = None,\n modification_time: Optional[str] = None,\n status_modification_user_name: Optional[str] = None,\n status_modification_time: Optional[str] = None,\n **kwargs\n ):\n super(DismissAlertPayload, self).__init__(**kwargs)\n self.definition = definition\n self.description = description\n self.source = source\n self.details = details\n self.cost_entity_id = cost_entity_id\n self.status = status\n self.creation_time = creation_time\n self.close_time = close_time\n self.modification_time = modification_time\n self.status_modification_user_name = status_modification_user_name\n self.status_modification_time = status_modification_time\n\n\nclass ErrorDetails(msrest.serialization.Model):\n \"\"\"The details of the error.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar code: Error code.\n :vartype code: str\n :ivar message: Error message indicating why the operation failed.\n :vartype message: str\n \"\"\"\n\n _validation = {\n 'code': {'readonly': True},\n 'message': {'readonly': True},\n }\n\n _attribute_map = {\n 'code': {'key': 'code', 'type': 'str'},\n 'message': {'key': 'message', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ErrorDetails, self).__init__(**kwargs)\n self.code = None\n self.message = None\n\n\nclass ErrorResponse(msrest.serialization.Model):\n \"\"\"Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message. \n\nSome Error responses: \n\n\n* \n 429 TooManyRequests - Request is throttled. 
Retry after waiting for the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. \n\n* \n 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the \"Retry-After\" header.\n\n :param error: The details of the error.\n :type error: ~azure.mgmt.costmanagement.models.ErrorDetails\n \"\"\"\n\n _attribute_map = {\n 'error': {'key': 'error', 'type': 'ErrorDetails'},\n }\n\n def __init__(\n self,\n *,\n error: Optional[\"ErrorDetails\"] = None,\n **kwargs\n ):\n super(ErrorResponse, self).__init__(**kwargs)\n self.error = error\n\n\nclass ProxyResource(msrest.serialization.Model):\n \"\"\"The Resource model definition.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be\n used to determine whether the user is updating the latest version or not.\n :type e_tag: str\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'e_tag': {'key': 'eTag', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n e_tag: Optional[str] = None,\n **kwargs\n ):\n super(ProxyResource, self).__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.e_tag = e_tag\n\n\nclass Export(ProxyResource):\n \"\"\"An export resource.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be\n used to determine whether the user is updating the latest version or not.\n :type e_tag: str\n :param format: The format of the export being delivered. 
Currently only 'Csv' is supported.\n Possible values include: \"Csv\".\n :type format: str or ~azure.mgmt.costmanagement.models.FormatType\n :param delivery_info: Has delivery information for the export.\n :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo\n :param definition: Has the definition for the export.\n :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition\n :param run_history: If requested, has the most recent execution history for the export.\n :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult\n :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the\n next execution time.\n :vartype next_run_time_estimate: ~datetime.datetime\n :param schedule: Has schedule information for the export.\n :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'next_run_time_estimate': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'e_tag': {'key': 'eTag', 'type': 'str'},\n 'format': {'key': 'properties.format', 'type': 'str'},\n 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'},\n 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'},\n 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'},\n 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'},\n 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'},\n }\n\n def __init__(\n self,\n *,\n e_tag: Optional[str] = None,\n format: Optional[Union[str, \"FormatType\"]] = None,\n delivery_info: Optional[\"ExportDeliveryInfo\"] = None,\n definition: Optional[\"ExportDefinition\"] = None,\n run_history: Optional[\"ExportExecutionListResult\"] = None,\n schedule: Optional[\"ExportSchedule\"] = None,\n **kwargs\n ):\n super(Export, self).__init__(e_tag=e_tag, **kwargs)\n self.format = format\n self.delivery_info = delivery_info\n self.definition = definition\n self.run_history = run_history\n self.next_run_time_estimate = None\n self.schedule = schedule\n\n\nclass ExportDataset(msrest.serialization.Model):\n \"\"\"The definition for data in the export.\n\n :param granularity: The granularity of rows in the export. Currently only 'Daily' is supported.\n Possible values include: \"Daily\".\n :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType\n :param configuration: The export dataset configuration.\n :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration\n \"\"\"\n\n _attribute_map = {\n 'granularity': {'key': 'granularity', 'type': 'str'},\n 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'},\n }\n\n def __init__(\n self,\n *,\n granularity: Optional[Union[str, \"GranularityType\"]] = None,\n configuration: Optional[\"ExportDatasetConfiguration\"] = None,\n **kwargs\n ):\n super(ExportDataset, self).__init__(**kwargs)\n self.granularity = granularity\n self.configuration = configuration\n\n\nclass ExportDatasetConfiguration(msrest.serialization.Model):\n \"\"\"The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns.\n\n :param columns: Array of column names to be included in the export. 
If not provided then the\n export will include all available columns. The available columns can vary by customer channel\n (see examples).\n :type columns: list[str]\n \"\"\"\n\n _attribute_map = {\n 'columns': {'key': 'columns', 'type': '[str]'},\n }\n\n def __init__(\n self,\n *,\n columns: Optional[List[str]] = None,\n **kwargs\n ):\n super(ExportDatasetConfiguration, self).__init__(**kwargs)\n self.columns = columns\n\n\nclass ExportDefinition(msrest.serialization.Model):\n \"\"\"The definition of an export.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost'\n and is applicable to exports that do not yet provide data for charges or amortization for\n service reservations. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\".\n :type type: str or ~azure.mgmt.costmanagement.models.ExportType\n :param timeframe: Required. The time frame for pulling data for the export. If custom, then a\n specific time period must be provided. Possible values include: \"MonthToDate\",\n \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\".\n :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType\n :param time_period: Has time period for pulling data for the export.\n :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod\n :param data_set: The definition for data in the export.\n :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'timeframe': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'timeframe': {'key': 'timeframe', 'type': 'str'},\n 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'},\n 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"ExportType\"],\n timeframe: Union[str, \"TimeframeType\"],\n time_period: Optional[\"ExportTimePeriod\"] = None,\n data_set: Optional[\"ExportDataset\"] = None,\n **kwargs\n ):\n super(ExportDefinition, self).__init__(**kwargs)\n self.type = type\n self.timeframe = timeframe\n self.time_period = time_period\n self.data_set = data_set\n\n\nclass ExportDeliveryDestination(msrest.serialization.Model):\n \"\"\"The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .\n\n All required parameters must be populated in order to send to Azure.\n\n :param resource_id: Required. The resource id of the storage account where exports will be\n delivered.\n :type resource_id: str\n :param container: Required. 
The name of the container where exports will be uploaded.\n :type container: str\n :param root_folder_path: The name of the directory where exports will be uploaded.\n :type root_folder_path: str\n \"\"\"\n\n _validation = {\n 'resource_id': {'required': True},\n 'container': {'required': True},\n }\n\n _attribute_map = {\n 'resource_id': {'key': 'resourceId', 'type': 'str'},\n 'container': {'key': 'container', 'type': 'str'},\n 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n resource_id: str,\n container: str,\n root_folder_path: Optional[str] = None,\n **kwargs\n ):\n super(ExportDeliveryDestination, self).__init__(**kwargs)\n self.resource_id = resource_id\n self.container = container\n self.root_folder_path = root_folder_path\n\n\nclass ExportDeliveryInfo(msrest.serialization.Model):\n \"\"\"The delivery information associated with a export.\n\n All required parameters must be populated in order to send to Azure.\n\n :param destination: Required. Has destination for the export being delivered.\n :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination\n \"\"\"\n\n _validation = {\n 'destination': {'required': True},\n }\n\n _attribute_map = {\n 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'},\n }\n\n def __init__(\n self,\n *,\n destination: \"ExportDeliveryDestination\",\n **kwargs\n ):\n super(ExportDeliveryInfo, self).__init__(**kwargs)\n self.destination = destination\n\n\nclass ExportExecution(Resource):\n \"\"\"An export execution.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :ivar tags: A set of tags. Resource tags.\n :vartype tags: dict[str, str]\n :param execution_type: The type of the export execution. Possible values include: \"OnDemand\",\n \"Scheduled\".\n :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType\n :param status: The last known status of the export execution. Possible values include:\n \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\",\n \"DataNotAvailable\".\n :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus\n :param submitted_by: The identifier for the entity that executed the export. For OnDemand\n executions it is the user email. 
For scheduled executions it is 'System'.\n :type submitted_by: str\n :param submitted_time: The time when export was queued to be executed.\n :type submitted_time: ~datetime.datetime\n :param processing_start_time: The time when export was picked up to be executed.\n :type processing_start_time: ~datetime.datetime\n :param processing_end_time: The time when the export execution finished.\n :type processing_end_time: ~datetime.datetime\n :param file_name: The name of the exported file.\n :type file_name: str\n :param run_settings: The export settings that were in effect for this execution.\n :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties\n :param error: The details of any error.\n :type error: ~azure.mgmt.costmanagement.models.ErrorDetails\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'tags': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'execution_type': {'key': 'properties.executionType', 'type': 'str'},\n 'status': {'key': 'properties.status', 'type': 'str'},\n 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'},\n 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'},\n 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'},\n 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'},\n 'file_name': {'key': 'properties.fileName', 'type': 'str'},\n 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'},\n 'error': {'key': 'properties.error', 'type': 'ErrorDetails'},\n }\n\n def __init__(\n self,\n *,\n execution_type: Optional[Union[str, \"ExecutionType\"]] = None,\n status: Optional[Union[str, \"ExecutionStatus\"]] = None,\n submitted_by: Optional[str] = None,\n submitted_time: Optional[datetime.datetime] = None,\n processing_start_time: Optional[datetime.datetime] = None,\n processing_end_time: Optional[datetime.datetime] = None,\n file_name: Optional[str] = None,\n run_settings: Optional[\"CommonExportProperties\"] = None,\n error: Optional[\"ErrorDetails\"] = None,\n **kwargs\n ):\n super(ExportExecution, self).__init__(**kwargs)\n self.execution_type = execution_type\n self.status = status\n self.submitted_by = submitted_by\n self.submitted_time = submitted_time\n self.processing_start_time = processing_start_time\n self.processing_end_time = processing_end_time\n self.file_name = file_name\n self.run_settings = run_settings\n self.error = error\n\n\nclass ExportExecutionListResult(msrest.serialization.Model):\n \"\"\"Result of listing the execution history of an export.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: A list of export executions.\n :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution]\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[ExportExecution]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ExportExecutionListResult, self).__init__(**kwargs)\n self.value = None\n\n\nclass ExportListResult(msrest.serialization.Model):\n \"\"\"Result of listing exports. 
It contains a list of available exports in the scope provided.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: The list of exports.\n :vartype value: list[~azure.mgmt.costmanagement.models.Export]\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Export]'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ExportListResult, self).__init__(**kwargs)\n self.value = None\n\n\nclass ExportProperties(CommonExportProperties):\n \"\"\"The properties of the export.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :param format: The format of the export being delivered. Currently only 'Csv' is supported.\n Possible values include: \"Csv\".\n :type format: str or ~azure.mgmt.costmanagement.models.FormatType\n :param delivery_info: Required. Has delivery information for the export.\n :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo\n :param definition: Required. Has the definition for the export.\n :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition\n :param run_history: If requested, has the most recent execution history for the export.\n :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult\n :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the\n next execution time.\n :vartype next_run_time_estimate: ~datetime.datetime\n :param schedule: Has schedule information for the export.\n :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule\n \"\"\"\n\n _validation = {\n 'delivery_info': {'required': True},\n 'definition': {'required': True},\n 'next_run_time_estimate': {'readonly': True},\n }\n\n _attribute_map = {\n 'format': {'key': 'format', 'type': 'str'},\n 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},\n 'definition': {'key': 'definition', 'type': 'ExportDefinition'},\n 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},\n 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},\n 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'},\n }\n\n def __init__(\n self,\n *,\n delivery_info: \"ExportDeliveryInfo\",\n definition: \"ExportDefinition\",\n format: Optional[Union[str, \"FormatType\"]] = None,\n run_history: Optional[\"ExportExecutionListResult\"] = None,\n schedule: Optional[\"ExportSchedule\"] = None,\n **kwargs\n ):\n super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs)\n self.schedule = schedule\n\n\nclass ExportRecurrencePeriod(msrest.serialization.Model):\n \"\"\"The start and end date for recurrence schedule.\n\n All required parameters must be populated in order to send to Azure.\n\n :param from_property: Required. 
The start date of recurrence.\n :type from_property: ~datetime.datetime\n :param to: The end date of recurrence.\n :type to: ~datetime.datetime\n \"\"\"\n\n _validation = {\n 'from_property': {'required': True},\n }\n\n _attribute_map = {\n 'from_property': {'key': 'from', 'type': 'iso-8601'},\n 'to': {'key': 'to', 'type': 'iso-8601'},\n }\n\n def __init__(\n self,\n *,\n from_property: datetime.datetime,\n to: Optional[datetime.datetime] = None,\n **kwargs\n ):\n super(ExportRecurrencePeriod, self).__init__(**kwargs)\n self.from_property = from_property\n self.to = to\n\n\nclass ExportSchedule(msrest.serialization.Model):\n \"\"\"The schedule associated with the export.\n\n All required parameters must be populated in order to send to Azure.\n\n :param status: The status of the export's schedule. If 'Inactive', the export's schedule is\n paused. Possible values include: \"Active\", \"Inactive\".\n :type status: str or ~azure.mgmt.costmanagement.models.StatusType\n :param recurrence: Required. The schedule recurrence. Possible values include: \"Daily\",\n \"Weekly\", \"Monthly\", \"Annually\".\n :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType\n :param recurrence_period: Has start and end date of the recurrence. The start date must be in\n future. If present, the end date must be greater than start date.\n :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod\n \"\"\"\n\n _validation = {\n 'recurrence': {'required': True},\n }\n\n _attribute_map = {\n 'status': {'key': 'status', 'type': 'str'},\n 'recurrence': {'key': 'recurrence', 'type': 'str'},\n 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'},\n }\n\n def __init__(\n self,\n *,\n recurrence: Union[str, \"RecurrenceType\"],\n status: Optional[Union[str, \"StatusType\"]] = None,\n recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None,\n **kwargs\n ):\n super(ExportSchedule, self).__init__(**kwargs)\n self.status = status\n self.recurrence = recurrence\n self.recurrence_period = recurrence_period\n\n\nclass ExportTimePeriod(msrest.serialization.Model):\n \"\"\"The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months.\n\n All required parameters must be populated in order to send to Azure.\n\n :param from_property: Required. The start date for export data.\n :type from_property: ~datetime.datetime\n :param to: Required. The end date for export data.\n :type to: ~datetime.datetime\n \"\"\"\n\n _validation = {\n 'from_property': {'required': True},\n 'to': {'required': True},\n }\n\n _attribute_map = {\n 'from_property': {'key': 'from', 'type': 'iso-8601'},\n 'to': {'key': 'to', 'type': 'iso-8601'},\n }\n\n def __init__(\n self,\n *,\n from_property: datetime.datetime,\n to: datetime.datetime,\n **kwargs\n ):\n super(ExportTimePeriod, self).__init__(**kwargs)\n self.from_property = from_property\n self.to = to\n\n\nclass ForecastDataset(msrest.serialization.Model):\n \"\"\"The definition of data present in the forecast.\n\n :param granularity: The granularity of rows in the forecast. Possible values include: \"Daily\".\n :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType\n :param configuration: Has configuration information for the data in the export. 
The\n configuration will be ignored if aggregation and grouping are provided.\n :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration\n :param aggregation: Dictionary of aggregation expression to use in the forecast. The key of\n each item in the dictionary is the alias for the aggregated column. forecast can have up to 2\n aggregation clauses.\n :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]\n :param filter: Has filter expression to use in the forecast.\n :type filter: ~azure.mgmt.costmanagement.models.QueryFilter\n \"\"\"\n\n _attribute_map = {\n 'granularity': {'key': 'granularity', 'type': 'str'},\n 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},\n 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},\n 'filter': {'key': 'filter', 'type': 'QueryFilter'},\n }\n\n def __init__(\n self,\n *,\n granularity: Optional[Union[str, \"GranularityType\"]] = None,\n configuration: Optional[\"QueryDatasetConfiguration\"] = None,\n aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None,\n filter: Optional[\"QueryFilter\"] = None,\n **kwargs\n ):\n super(ForecastDataset, self).__init__(**kwargs)\n self.granularity = granularity\n self.configuration = configuration\n self.aggregation = aggregation\n self.filter = filter\n\n\nclass ForecastDefinition(msrest.serialization.Model):\n \"\"\"The definition of a forecast.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. The type of the forecast. Possible values include: \"Usage\",\n \"ActualCost\", \"AmortizedCost\".\n :type type: str or ~azure.mgmt.costmanagement.models.ForecastType\n :param timeframe: Required. The time frame for pulling data for the forecast. If custom, then a\n specific time period must be provided. 
Possible values include: \"MonthToDate\",\n \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\".\n :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType\n :param time_period: Has time period for pulling data for the forecast.\n :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod\n :param dataset: Has definition for data in this forecast.\n :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset\n :param include_actual_cost: a boolean determining if actualCost will be included.\n :type include_actual_cost: bool\n :param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included.\n :type include_fresh_partial_cost: bool\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'timeframe': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'timeframe': {'key': 'timeframe', 'type': 'str'},\n 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},\n 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'},\n 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'},\n 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"ForecastType\"],\n timeframe: Union[str, \"ForecastTimeframeType\"],\n time_period: Optional[\"QueryTimePeriod\"] = None,\n dataset: Optional[\"ForecastDataset\"] = None,\n include_actual_cost: Optional[bool] = None,\n include_fresh_partial_cost: Optional[bool] = None,\n **kwargs\n ):\n super(ForecastDefinition, self).__init__(**kwargs)\n self.type = type\n self.timeframe = timeframe\n self.time_period = time_period\n self.dataset = dataset\n self.include_actual_cost = include_actual_cost\n self.include_fresh_partial_cost = include_fresh_partial_cost\n\n\nclass KpiProperties(msrest.serialization.Model):\n \"\"\"Each KPI must contain a 'type' and 'enabled' key.\n\n :param type: KPI type (Forecast, Budget). 
Possible values include: \"Forecast\", \"Budget\".\n :type type: str or ~azure.mgmt.costmanagement.models.KpiType\n :param id: ID of resource related to metric (budget).\n :type id: str\n :param enabled: show the KPI in the UI?.\n :type enabled: bool\n \"\"\"\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'id': {'key': 'id', 'type': 'str'},\n 'enabled': {'key': 'enabled', 'type': 'bool'},\n }\n\n def __init__(\n self,\n *,\n type: Optional[Union[str, \"KpiType\"]] = None,\n id: Optional[str] = None,\n enabled: Optional[bool] = None,\n **kwargs\n ):\n super(KpiProperties, self).__init__(**kwargs)\n self.type = type\n self.id = id\n self.enabled = enabled\n\n\nclass Operation(msrest.serialization.Model):\n \"\"\"A Cost management REST API operation.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar name: Operation name: {provider}/{resource}/{operation}.\n :vartype name: str\n :param display: The object that represents the operation.\n :type display: ~azure.mgmt.costmanagement.models.OperationDisplay\n \"\"\"\n\n _validation = {\n 'name': {'readonly': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'display': {'key': 'display', 'type': 'OperationDisplay'},\n }\n\n def __init__(\n self,\n *,\n display: Optional[\"OperationDisplay\"] = None,\n **kwargs\n ):\n super(Operation, self).__init__(**kwargs)\n self.name = None\n self.display = display\n\n\nclass OperationDisplay(msrest.serialization.Model):\n \"\"\"The object that represents the operation.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar provider: Service provider: Microsoft.CostManagement.\n :vartype provider: str\n :ivar resource: Resource on which the operation is performed: Dimensions, Query.\n :vartype resource: str\n :ivar operation: Operation type: Read, write, delete, etc.\n :vartype operation: str\n \"\"\"\n\n _validation = {\n 'provider': {'readonly': True},\n 'resource': {'readonly': True},\n 'operation': {'readonly': True},\n }\n\n _attribute_map = {\n 'provider': {'key': 'provider', 'type': 'str'},\n 'resource': {'key': 'resource', 'type': 'str'},\n 'operation': {'key': 'operation', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(OperationDisplay, self).__init__(**kwargs)\n self.provider = None\n self.resource = None\n self.operation = None\n\n\nclass OperationListResult(msrest.serialization.Model):\n \"\"\"Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: List of cost management operations supported by the Microsoft.CostManagement\n resource provider.\n :vartype value: list[~azure.mgmt.costmanagement.models.Operation]\n :ivar next_link: URL to get the next set of operation list results if there are any.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n 'next_link': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[Operation]'},\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(OperationListResult, self).__init__(**kwargs)\n self.value = None\n self.next_link = None\n\n\nclass PivotProperties(msrest.serialization.Model):\n \"\"\"Each pivot must contain a 'type' and 'name'.\n\n :param type: Data type to show in view. 
Possible values include: \"Dimension\", \"TagKey\".\n :type type: str or ~azure.mgmt.costmanagement.models.PivotType\n :param name: Data field to show in view.\n :type name: str\n \"\"\"\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n type: Optional[Union[str, \"PivotType\"]] = None,\n name: Optional[str] = None,\n **kwargs\n ):\n super(PivotProperties, self).__init__(**kwargs)\n self.type = type\n self.name = name\n\n\nclass QueryAggregation(msrest.serialization.Model):\n \"\"\"The aggregation expression to be used in the query.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the column to aggregate.\n :type name: str\n :param function: Required. The name of the aggregation function to use. Possible values\n include: \"Sum\".\n :type function: str or ~azure.mgmt.costmanagement.models.FunctionType\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'function': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'function': {'key': 'function', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n name: str,\n function: Union[str, \"FunctionType\"],\n **kwargs\n ):\n super(QueryAggregation, self).__init__(**kwargs)\n self.name = name\n self.function = function\n\n\nclass QueryColumn(msrest.serialization.Model):\n \"\"\"QueryColumn.\n\n :param name: The name of column.\n :type name: str\n :param type: The type of column.\n :type type: str\n \"\"\"\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n type: Optional[str] = None,\n **kwargs\n ):\n super(QueryColumn, self).__init__(**kwargs)\n self.name = name\n self.type = type\n\n\nclass QueryComparisonExpression(msrest.serialization.Model):\n \"\"\"The comparison expression to be used in the query.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the column to use in comparison.\n :type name: str\n :param operator: Required. The operator to use for comparison. Possible values include: \"In\",\n \"Contains\".\n :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType\n :param values: Required. Array of values to use for comparison.\n :type values: list[str]\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'operator': {'required': True},\n 'values': {'required': True, 'min_items': 1},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'operator': {'key': 'operator', 'type': 'str'},\n 'values': {'key': 'values', 'type': '[str]'},\n }\n\n def __init__(\n self,\n *,\n name: str,\n operator: Union[str, \"OperatorType\"],\n values: List[str],\n **kwargs\n ):\n super(QueryComparisonExpression, self).__init__(**kwargs)\n self.name = name\n self.operator = operator\n self.values = values\n\n\nclass QueryDataset(msrest.serialization.Model):\n \"\"\"The definition of data present in the query.\n\n :param granularity: The granularity of rows in the query. Possible values include: \"Daily\".\n :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType\n :param configuration: Has configuration information for the data in the export. 
The\n configuration will be ignored if aggregation and grouping are provided.\n :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration\n :param aggregation: Dictionary of aggregation expression to use in the query. The key of each\n item in the dictionary is the alias for the aggregated column. Query can have up to 2\n aggregation clauses.\n :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]\n :param grouping: Array of group by expression to use in the query. Query can have up to 2 group\n by clauses.\n :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping]\n :param filter: Has filter expression to use in the query.\n :type filter: ~azure.mgmt.costmanagement.models.QueryFilter\n \"\"\"\n\n _validation = {\n 'grouping': {'max_items': 2, 'min_items': 0},\n }\n\n _attribute_map = {\n 'granularity': {'key': 'granularity', 'type': 'str'},\n 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},\n 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},\n 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'},\n 'filter': {'key': 'filter', 'type': 'QueryFilter'},\n }\n\n def __init__(\n self,\n *,\n granularity: Optional[Union[str, \"GranularityType\"]] = None,\n configuration: Optional[\"QueryDatasetConfiguration\"] = None,\n aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None,\n grouping: Optional[List[\"QueryGrouping\"]] = None,\n filter: Optional[\"QueryFilter\"] = None,\n **kwargs\n ):\n super(QueryDataset, self).__init__(**kwargs)\n self.granularity = granularity\n self.configuration = configuration\n self.aggregation = aggregation\n self.grouping = grouping\n self.filter = filter\n\n\nclass QueryDatasetConfiguration(msrest.serialization.Model):\n \"\"\"The configuration of dataset in the query.\n\n :param columns: Array of column names to be included in the query. Any valid query column name\n is allowed. If not provided, then query includes all columns.\n :type columns: list[str]\n \"\"\"\n\n _attribute_map = {\n 'columns': {'key': 'columns', 'type': '[str]'},\n }\n\n def __init__(\n self,\n *,\n columns: Optional[List[str]] = None,\n **kwargs\n ):\n super(QueryDatasetConfiguration, self).__init__(**kwargs)\n self.columns = columns\n\n\nclass QueryDefinition(msrest.serialization.Model):\n \"\"\"The definition of a query.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. The type of the query. Possible values include: \"Usage\", \"ActualCost\",\n \"AmortizedCost\".\n :type type: str or ~azure.mgmt.costmanagement.models.ExportType\n :param timeframe: Required. The time frame for pulling data for the query. If custom, then a\n specific time period must be provided. 
Possible values include: \"MonthToDate\",\n \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\".\n :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType\n :param time_period: Has time period for pulling data for the query.\n :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod\n :param dataset: Has definition for data in this query.\n :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'timeframe': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'timeframe': {'key': 'timeframe', 'type': 'str'},\n 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},\n 'dataset': {'key': 'dataset', 'type': 'QueryDataset'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"ExportType\"],\n timeframe: Union[str, \"TimeframeType\"],\n time_period: Optional[\"QueryTimePeriod\"] = None,\n dataset: Optional[\"QueryDataset\"] = None,\n **kwargs\n ):\n super(QueryDefinition, self).__init__(**kwargs)\n self.type = type\n self.timeframe = timeframe\n self.time_period = time_period\n self.dataset = dataset\n\n\nclass QueryFilter(msrest.serialization.Model):\n \"\"\"The filter expression to be used in the export.\n\n :param and_property: The logical \"AND\" expression. Must have at least 2 items.\n :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter]\n :param or_property: The logical \"OR\" expression. Must have at least 2 items.\n :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter]\n :param not_property: The logical \"NOT\" expression.\n :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter\n :param dimension: Has comparison expression for a dimension.\n :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression\n :param tag: Has comparison expression for a tag.\n :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression\n \"\"\"\n\n _validation = {\n 'and_property': {'min_items': 2},\n 'or_property': {'min_items': 2},\n }\n\n _attribute_map = {\n 'and_property': {'key': 'and', 'type': '[QueryFilter]'},\n 'or_property': {'key': 'or', 'type': '[QueryFilter]'},\n 'not_property': {'key': 'not', 'type': 'QueryFilter'},\n 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'},\n 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'},\n }\n\n def __init__(\n self,\n *,\n and_property: Optional[List[\"QueryFilter\"]] = None,\n or_property: Optional[List[\"QueryFilter\"]] = None,\n not_property: Optional[\"QueryFilter\"] = None,\n dimension: Optional[\"QueryComparisonExpression\"] = None,\n tag: Optional[\"QueryComparisonExpression\"] = None,\n **kwargs\n ):\n super(QueryFilter, self).__init__(**kwargs)\n self.and_property = and_property\n self.or_property = or_property\n self.not_property = not_property\n self.dimension = dimension\n self.tag = tag\n\n\nclass QueryGrouping(msrest.serialization.Model):\n \"\"\"The group by expression to be used in the query.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. Has type of the column to group. Possible values include: \"Tag\",\n \"Dimension\".\n :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType\n :param name: Required. 
The name of the column to group.\n :type name: str\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'name': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"QueryColumnType\"],\n name: str,\n **kwargs\n ):\n super(QueryGrouping, self).__init__(**kwargs)\n self.type = type\n self.name = name\n\n\nclass QueryResult(Resource):\n \"\"\"Result of query. It contains all columns listed under groupings and aggregation.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :ivar tags: A set of tags. Resource tags.\n :vartype tags: dict[str, str]\n :param next_link: The link (url) to the next page of results.\n :type next_link: str\n :param columns: Array of columns.\n :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn]\n :param rows: Array of rows.\n :type rows: list[list[object]]\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'tags': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'next_link': {'key': 'properties.nextLink', 'type': 'str'},\n 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'},\n 'rows': {'key': 'properties.rows', 'type': '[[object]]'},\n }\n\n def __init__(\n self,\n *,\n next_link: Optional[str] = None,\n columns: Optional[List[\"QueryColumn\"]] = None,\n rows: Optional[List[List[object]]] = None,\n **kwargs\n ):\n super(QueryResult, self).__init__(**kwargs)\n self.next_link = next_link\n self.columns = columns\n self.rows = rows\n\n\nclass QueryTimePeriod(msrest.serialization.Model):\n \"\"\"The start and end date for pulling data for the query.\n\n All required parameters must be populated in order to send to Azure.\n\n :param from_property: Required. The start date to pull data from.\n :type from_property: ~datetime.datetime\n :param to: Required. The end date to pull data to.\n :type to: ~datetime.datetime\n \"\"\"\n\n _validation = {\n 'from_property': {'required': True},\n 'to': {'required': True},\n }\n\n _attribute_map = {\n 'from_property': {'key': 'from', 'type': 'iso-8601'},\n 'to': {'key': 'to', 'type': 'iso-8601'},\n }\n\n def __init__(\n self,\n *,\n from_property: datetime.datetime,\n to: datetime.datetime,\n **kwargs\n ):\n super(QueryTimePeriod, self).__init__(**kwargs)\n self.from_property = from_property\n self.to = to\n\n\nclass ReportConfigAggregation(msrest.serialization.Model):\n \"\"\"The aggregation expression to be used in the report.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the column to aggregate.\n :type name: str\n :param function: Required. The name of the aggregation function to use. 
Possible values\n include: \"Sum\".\n :type function: str or ~azure.mgmt.costmanagement.models.FunctionType\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'function': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'function': {'key': 'function', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n name: str,\n function: Union[str, \"FunctionType\"],\n **kwargs\n ):\n super(ReportConfigAggregation, self).__init__(**kwargs)\n self.name = name\n self.function = function\n\n\nclass ReportConfigComparisonExpression(msrest.serialization.Model):\n \"\"\"The comparison expression to be used in the report.\n\n All required parameters must be populated in order to send to Azure.\n\n :param name: Required. The name of the column to use in comparison.\n :type name: str\n :param operator: Required. The operator to use for comparison. Possible values include: \"In\",\n \"Contains\".\n :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType\n :param values: Required. Array of values to use for comparison.\n :type values: list[str]\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n 'operator': {'required': True},\n 'values': {'required': True, 'min_items': 1},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'operator': {'key': 'operator', 'type': 'str'},\n 'values': {'key': 'values', 'type': '[str]'},\n }\n\n def __init__(\n self,\n *,\n name: str,\n operator: Union[str, \"OperatorType\"],\n values: List[str],\n **kwargs\n ):\n super(ReportConfigComparisonExpression, self).__init__(**kwargs)\n self.name = name\n self.operator = operator\n self.values = values\n\n\nclass ReportConfigDataset(msrest.serialization.Model):\n \"\"\"The definition of data present in the report.\n\n :param granularity: The granularity of rows in the report. Possible values include: \"Daily\",\n \"Monthly\".\n :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType\n :param configuration: Has configuration information for the data in the report. The\n configuration will be ignored if aggregation and grouping are provided.\n :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration\n :param aggregation: Dictionary of aggregation expression to use in the report. The key of each\n item in the dictionary is the alias for the aggregated column. Report can have up to 2\n aggregation clauses.\n :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]\n :param grouping: Array of group by expression to use in the report. 
Report can have up to 2\n group by clauses.\n :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]\n :param sorting: Array of order by expression to use in the report.\n :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]\n :param filter: Has filter expression to use in the report.\n :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter\n \"\"\"\n\n _validation = {\n 'grouping': {'max_items': 2, 'min_items': 0},\n }\n\n _attribute_map = {\n 'granularity': {'key': 'granularity', 'type': 'str'},\n 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},\n 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},\n 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},\n 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},\n 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'},\n }\n\n def __init__(\n self,\n *,\n granularity: Optional[Union[str, \"ReportGranularityType\"]] = None,\n configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None,\n aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None,\n grouping: Optional[List[\"ReportConfigGrouping\"]] = None,\n sorting: Optional[List[\"ReportConfigSorting\"]] = None,\n filter: Optional[\"ReportConfigFilter\"] = None,\n **kwargs\n ):\n super(ReportConfigDataset, self).__init__(**kwargs)\n self.granularity = granularity\n self.configuration = configuration\n self.aggregation = aggregation\n self.grouping = grouping\n self.sorting = sorting\n self.filter = filter\n\n\nclass ReportConfigDatasetAutoGenerated(msrest.serialization.Model):\n \"\"\"The definition of data present in the report.\n\n :param granularity: The granularity of rows in the report. Possible values include: \"Daily\",\n \"Monthly\".\n :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType\n :param configuration: Has configuration information for the data in the report. The\n configuration will be ignored if aggregation and grouping are provided.\n :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration\n :param aggregation: Dictionary of aggregation expression to use in the report. The key of each\n item in the dictionary is the alias for the aggregated column. Report can have up to 2\n aggregation clauses.\n :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]\n :param grouping: Array of group by expression to use in the report. 
Report can have up to 2\n group by clauses.\n :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]\n :param sorting: Array of order by expression to use in the report.\n :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]\n :param filter: Has filter expression to use in the report.\n :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated\n \"\"\"\n\n _validation = {\n 'grouping': {'max_items': 2, 'min_items': 0},\n }\n\n _attribute_map = {\n 'granularity': {'key': 'granularity', 'type': 'str'},\n 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},\n 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},\n 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},\n 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},\n 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'},\n }\n\n def __init__(\n self,\n *,\n granularity: Optional[Union[str, \"ReportGranularityType\"]] = None,\n configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None,\n aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None,\n grouping: Optional[List[\"ReportConfigGrouping\"]] = None,\n sorting: Optional[List[\"ReportConfigSorting\"]] = None,\n filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None,\n **kwargs\n ):\n super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs)\n self.granularity = granularity\n self.configuration = configuration\n self.aggregation = aggregation\n self.grouping = grouping\n self.sorting = sorting\n self.filter = filter\n\n\nclass ReportConfigDatasetConfiguration(msrest.serialization.Model):\n \"\"\"The configuration of dataset in the report.\n\n :param columns: Array of column names to be included in the report. Any valid report column\n name is allowed. If not provided, then report includes all columns.\n :type columns: list[str]\n \"\"\"\n\n _attribute_map = {\n 'columns': {'key': 'columns', 'type': '[str]'},\n }\n\n def __init__(\n self,\n *,\n columns: Optional[List[str]] = None,\n **kwargs\n ):\n super(ReportConfigDatasetConfiguration, self).__init__(**kwargs)\n self.columns = columns\n\n\nclass ReportConfigDefinition(msrest.serialization.Model):\n \"\"\"The definition of a report config.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. The type of the report. Usage represents actual usage, forecast\n represents forecasted data and UsageAndForecast represents both usage and forecasted data.\n Actual usage and forecasted data can be differentiated based on dates. Possible values include:\n \"Usage\".\n :type type: str or ~azure.mgmt.costmanagement.models.ReportType\n :param timeframe: Required. The time frame for pulling data for the report. If custom, then a\n specific time period must be provided. 
Possible values include: \"WeekToDate\", \"MonthToDate\",\n \"YearToDate\", \"Custom\".\n :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType\n :param time_period: Has time period for pulling data for the report.\n :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod\n :param dataset: Has definition for data in this report config.\n :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'timeframe': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'timeframe': {'key': 'timeframe', 'type': 'str'},\n 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'},\n 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"ReportType\"],\n timeframe: Union[str, \"ReportTimeframeType\"],\n time_period: Optional[\"ReportConfigTimePeriod\"] = None,\n dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None,\n **kwargs\n ):\n super(ReportConfigDefinition, self).__init__(**kwargs)\n self.type = type\n self.timeframe = timeframe\n self.time_period = time_period\n self.dataset = dataset\n\n\nclass ReportConfigFilter(msrest.serialization.Model):\n \"\"\"The filter expression to be used in the report.\n\n :param and_property: The logical \"AND\" expression. Must have at least 2 items.\n :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]\n :param or_property: The logical \"OR\" expression. Must have at least 2 items.\n :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]\n :param not_property: The logical \"NOT\" expression.\n :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter\n :param dimension: Has comparison expression for a dimension.\n :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression\n :param tag: Has comparison expression for a tag.\n :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression\n \"\"\"\n\n _validation = {\n 'and_property': {'min_items': 2},\n 'or_property': {'min_items': 2},\n }\n\n _attribute_map = {\n 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'},\n 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'},\n 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'},\n 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},\n 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},\n }\n\n def __init__(\n self,\n *,\n and_property: Optional[List[\"ReportConfigFilter\"]] = None,\n or_property: Optional[List[\"ReportConfigFilter\"]] = None,\n not_property: Optional[\"ReportConfigFilter\"] = None,\n dimension: Optional[\"ReportConfigComparisonExpression\"] = None,\n tag: Optional[\"ReportConfigComparisonExpression\"] = None,\n **kwargs\n ):\n super(ReportConfigFilter, self).__init__(**kwargs)\n self.and_property = and_property\n self.or_property = or_property\n self.not_property = not_property\n self.dimension = dimension\n self.tag = tag\n\n\nclass ReportConfigFilterAutoGenerated(msrest.serialization.Model):\n \"\"\"The filter expression to be used in the report.\n\n :param and_property: The logical \"AND\" expression. Must have at least 2 items.\n :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]\n :param or_property: The logical \"OR\" expression. 
Must have at least 2 items.\n :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]\n :param not_property: The logical \"NOT\" expression.\n :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated\n :param dimension: Has comparison expression for a dimension.\n :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression\n :param tag: Has comparison expression for a tag.\n :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression\n \"\"\"\n\n _validation = {\n 'and_property': {'min_items': 2},\n 'or_property': {'min_items': 2},\n }\n\n _attribute_map = {\n 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'},\n 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'},\n 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'},\n 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},\n 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},\n }\n\n def __init__(\n self,\n *,\n and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None,\n or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None,\n not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None,\n dimension: Optional[\"ReportConfigComparisonExpression\"] = None,\n tag: Optional[\"ReportConfigComparisonExpression\"] = None,\n **kwargs\n ):\n super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs)\n self.and_property = and_property\n self.or_property = or_property\n self.not_property = not_property\n self.dimension = dimension\n self.tag = tag\n\n\nclass ReportConfigGrouping(msrest.serialization.Model):\n \"\"\"The group by expression to be used in the report.\n\n All required parameters must be populated in order to send to Azure.\n\n :param type: Required. Has type of the column to group. Possible values include: \"Tag\",\n \"Dimension\".\n :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType\n :param name: Required. The name of the column to group. This version supports subscription\n lowest possible grain.\n :type name: str\n \"\"\"\n\n _validation = {\n 'type': {'required': True},\n 'name': {'required': True},\n }\n\n _attribute_map = {\n 'type': {'key': 'type', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n type: Union[str, \"ReportConfigColumnType\"],\n name: str,\n **kwargs\n ):\n super(ReportConfigGrouping, self).__init__(**kwargs)\n self.type = type\n self.name = name\n\n\nclass ReportConfigSorting(msrest.serialization.Model):\n \"\"\"The order by expression to be used in the report.\n\n All required parameters must be populated in order to send to Azure.\n\n :param direction: Direction of sort. Possible values include: \"Ascending\", \"Descending\".\n :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection\n :param name: Required. 
The name of the column to sort.\n :type name: str\n \"\"\"\n\n _validation = {\n 'name': {'required': True},\n }\n\n _attribute_map = {\n 'direction': {'key': 'direction', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n }\n\n def __init__(\n self,\n *,\n name: str,\n direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None,\n **kwargs\n ):\n super(ReportConfigSorting, self).__init__(**kwargs)\n self.direction = direction\n self.name = name\n\n\nclass ReportConfigTimePeriod(msrest.serialization.Model):\n \"\"\"The start and end date for pulling data for the report.\n\n All required parameters must be populated in order to send to Azure.\n\n :param from_property: Required. The start date to pull data from.\n :type from_property: ~datetime.datetime\n :param to: Required. The end date to pull data to.\n :type to: ~datetime.datetime\n \"\"\"\n\n _validation = {\n 'from_property': {'required': True},\n 'to': {'required': True},\n }\n\n _attribute_map = {\n 'from_property': {'key': 'from', 'type': 'iso-8601'},\n 'to': {'key': 'to', 'type': 'iso-8601'},\n }\n\n def __init__(\n self,\n *,\n from_property: datetime.datetime,\n to: datetime.datetime,\n **kwargs\n ):\n super(ReportConfigTimePeriod, self).__init__(**kwargs)\n self.from_property = from_property\n self.to = to\n\n\nclass View(ProxyResource):\n \"\"\"States and configurations of Cost Analysis.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Resource Id.\n :vartype id: str\n :ivar name: Resource name.\n :vartype name: str\n :ivar type: Resource type.\n :vartype type: str\n :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be\n used to determine whether the user is updating the latest version or not.\n :type e_tag: str\n :param display_name: User input name of the view. Required.\n :type display_name: str\n :param scope: Cost Management scope to save the view on. This includes\n 'subscriptions/{subscriptionId}' for subscription scope,\n 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,\n 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,\n 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for\n Department scope,\n 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'\n for EnrollmentAccount scope,\n 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'\n for BillingProfile scope,\n 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'\n for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'\n for Management Group scope,\n '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for\n ExternalBillingAccount scope, and\n '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for\n ExternalSubscription scope.\n :type scope: str\n :ivar created_on: Date the user created this view.\n :vartype created_on: ~datetime.datetime\n :ivar modified_on: Date when the user last modified this view.\n :vartype modified_on: ~datetime.datetime\n :param chart: Chart type of the main view in Cost Analysis. Required. 
Possible values include:\n \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\".\n :type chart: str or ~azure.mgmt.costmanagement.models.ChartType\n :param accumulated: Show costs accumulated over time. Possible values include: \"true\", \"false\".\n :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType\n :param metric: Metric to use when displaying costs. Possible values include: \"ActualCost\",\n \"AmortizedCost\", \"AHUB\".\n :type metric: str or ~azure.mgmt.costmanagement.models.MetricType\n :param kpis: List of KPIs to show in Cost Analysis UI.\n :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]\n :param pivots: Configuration of 3 sub-views in the Cost Analysis UI.\n :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]\n :param type_properties_query_type: The type of the report. Usage represents actual usage,\n forecast represents forecasted data and UsageAndForecast represents both usage and forecasted\n data. Actual usage and forecasted data can be differentiated based on dates. Possible values\n include: \"Usage\".\n :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType\n :param timeframe: The time frame for pulling data for the report. If custom, then a specific\n time period must be provided. Possible values include: \"WeekToDate\", \"MonthToDate\",\n \"YearToDate\", \"Custom\".\n :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType\n :param time_period: Has time period for pulling data for the report.\n :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod\n :param dataset: Has definition for data in this report config.\n :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'name': {'readonly': True},\n 'type': {'readonly': True},\n 'created_on': {'readonly': True},\n 'modified_on': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'e_tag': {'key': 'eTag', 'type': 'str'},\n 'display_name': {'key': 'properties.displayName', 'type': 'str'},\n 'scope': {'key': 'properties.scope', 'type': 'str'},\n 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},\n 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'},\n 'chart': {'key': 'properties.chart', 'type': 'str'},\n 'accumulated': {'key': 'properties.accumulated', 'type': 'str'},\n 'metric': {'key': 'properties.metric', 'type': 'str'},\n 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'},\n 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'},\n 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'},\n 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'},\n 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'},\n 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'},\n }\n\n def __init__(\n self,\n *,\n e_tag: Optional[str] = None,\n display_name: Optional[str] = None,\n scope: Optional[str] = None,\n chart: Optional[Union[str, \"ChartType\"]] = None,\n accumulated: Optional[Union[str, \"AccumulatedType\"]] = None,\n metric: Optional[Union[str, \"MetricType\"]] = None,\n kpis: Optional[List[\"KpiProperties\"]] = None,\n pivots: Optional[List[\"PivotProperties\"]] = None,\n type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None,\n 
timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None,\n time_period: Optional[\"ReportConfigTimePeriod\"] = None,\n dataset: Optional[\"ReportConfigDataset\"] = None,\n **kwargs\n ):\n super(View, self).__init__(e_tag=e_tag, **kwargs)\n self.display_name = display_name\n self.scope = scope\n self.created_on = None\n self.modified_on = None\n self.chart = chart\n self.accumulated = accumulated\n self.metric = metric\n self.kpis = kpis\n self.pivots = pivots\n self.type_properties_query_type = type_properties_query_type\n self.timeframe = timeframe\n self.time_period = time_period\n self.dataset = dataset\n\n\nclass ViewListResult(msrest.serialization.Model):\n \"\"\"Result of listing views. It contains a list of available views.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: The list of views.\n :vartype value: list[~azure.mgmt.costmanagement.models.View]\n :ivar next_link: The link (url) to the next page of results.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n 'value': {'readonly': True},\n 'next_link': {'readonly': True},\n }\n\n _attribute_map = {\n 'value': {'key': 'value', 'type': '[View]'},\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n }\n\n def __init__(\n self,\n **kwargs\n ):\n super(ViewListResult, self).__init__(**kwargs)\n self.value = None\n self.next_link = None\n"},"avg_line_length":{"kind":"number","value":38.1625103907,"string":"38.16251"},"max_line_length":{"kind":"number","value":498,"string":"498"},"alphanum_fraction":{"kind":"number","value":0.6490051079,"string":"0.649005"},"count_classes":{"kind":"number","value":90996,"string":"90,996"},"score_classes":{"kind":"number","value":0.9910367135342358,"string":"0.991037"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":57015,"string":"57,015"},"score_documentation":{"kind":"number","value":0.620949912327514,"string":"0.62095"}}},{"rowIdx":3764,"cells":{"hexsha":{"kind":"string","value":"b9921ebf7fdd9b5fb1dd763092a97ae1888e730f"},"size":{"kind":"number","value":3860,"string":"3,860"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"test/test_simple_compression.py"},"max_stars_repo_name":{"kind":"string","value":"jayvdb/brotlipy"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ffddf2ea5adc584c8c353d246bb1077b7e781b63"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"test/test_simple_compression.py"},"max_issues_repo_name":{"kind":"string","value":"jayvdb/brotlipy"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ffddf2ea5adc584c8c353d246bb1077b7e781b63"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"test/test_simple_compression.py"},"max_forks_repo_name":{"kind":"string","value":"jayvdb/brotlipy"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ffddf2ea5adc584c8c353d246bb1077b7e781b63"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\ntest_simple_compression\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTests for compression of single chunks.\n\"\"\"\nimport brotli\n\nimport pytest\n\nfrom hypothesis import given\nfrom hypothesis.strategies import binary, integers, sampled_from, one_of\n\n\ndef test_roundtrip_compression_with_files(simple_compressed_file):\n \"\"\"\n Roundtripping data through the compressor works correctly.\n \"\"\"\n with open(simple_compressed_file[0], 'rb') as f:\n uncompressed_data = f.read()\n\n assert brotli.decompress(\n brotli.compress(uncompressed_data)\n ) == uncompressed_data\n\n\n@given(\n chunk_size=integers(min_value=1, max_value=2**12),\n mode=sampled_from(list(brotli.BrotliEncoderMode)),\n quality=integers(min_value=0, max_value=11),\n lgwin=integers(min_value=10, max_value=24),\n lgblock=one_of(\n integers(min_value=0, max_value=0),\n integers(min_value=16, max_value=24)\n ),\n)\ndef test_streaming_compression(one_compressed_file,\n chunk_size,\n mode,\n quality,\n lgwin,\n lgblock):\n \"\"\"\n Confirm that the streaming compressor works as expected.\n \"\"\"\n compressed_chunks = []\n c = brotli.Compressor(\n mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock\n )\n with open(one_compressed_file, 'rb') as f:\n while True:\n next_data = f.read(chunk_size)\n if not next_data:\n break\n\n compressed_chunks.append(c.compress(next_data))\n\n compressed_chunks.append(c.finish())\n decompressed = brotli.decompress(b''.join(compressed_chunks))\n with open(one_compressed_file, 'rb') as f:\n assert decompressed == f.read()\n\n\n@given(\n chunk_size=integers(min_value=1, max_value=2**12),\n mode=sampled_from(list(brotli.BrotliEncoderMode)),\n quality=integers(min_value=0, max_value=11),\n lgwin=integers(min_value=10, max_value=24),\n lgblock=one_of(\n integers(min_value=0, max_value=0),\n integers(min_value=16, max_value=24)\n ),\n)\ndef test_streaming_compression_flush(one_compressed_file,\n chunk_size,\n mode,\n quality,\n lgwin,\n lgblock):\n \"\"\"\n Confirm that the streaming compressor works as expected, including flushes\n after each chunk.\n \"\"\"\n compressed_chunks = []\n c = brotli.Compressor(\n mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock\n )\n with open(one_compressed_file, 'rb') as f:\n while True:\n next_data = f.read(chunk_size)\n if not next_data:\n break\n\n compressed_chunks.append(c.compress(next_data))\n compressed_chunks.append(c.flush())\n\n compressed_chunks.append(c.finish())\n decompressed = brotli.decompress(b''.join(compressed_chunks))\n with open(one_compressed_file, 'rb') as f:\n assert decompressed == f.read()\n\n\n@given(binary())\ndef test_compressed_data_roundtrips(s):\n assert brotli.decompress(brotli.compress(s)) == s\n\n\n@given(binary(), binary())\ndef test_compressed_data_with_dictionaries(s, dictionary):\n d = brotli.Decompressor(dictionary)\n 
compressed = brotli.compress(s, dictionary=dictionary)\n uncompressed = d.decompress(compressed)\n assert uncompressed == s\n\n\n@pytest.mark.parametrize(\n \"params\",\n [\n {\"mode\": 52},\n {\"quality\": 52},\n {\"lgwin\": 52},\n {\"lgblock\": 52},\n ]\n)\n@pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error])\ndef test_bad_compressor_parameters(params, exception_cls):\n with pytest.raises(exception_cls):\n brotli.Compressor(**params)\n"},"avg_line_length":{"kind":"number","value":29.6923076923,"string":"29.692308"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.6158031088,"string":"0.615803"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":3251,"string":"3,251"},"score_decorators":{"kind":"number","value":0.8422279792746113,"string":"0.842228"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":459,"string":"459"},"score_documentation":{"kind":"number","value":0.11891191709844559,"string":"0.118912"}}},{"rowIdx":3765,"cells":{"hexsha":{"kind":"string","value":"b992a4ec960bcf3e39ba5a1bb6a8cd2e68be293e"},"size":{"kind":"number","value":1987,"string":"1,987"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"wexapi/models/ticker.py"},"max_stars_repo_name":{"kind":"string","value":"madmis/wexapi"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f5b1b9b566f767bca7d8fad1f08c3d1bca42355a"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-06-08T12:45:04.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-08-02T11:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"wexapi/models/ticker.py"},"max_issues_repo_name":{"kind":"string","value":"madmis/wexapi"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f5b1b9b566f767bca7d8fad1f08c3d1bca42355a"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"wexapi/models/ticker.py"},"max_forks_repo_name":{"kind":"string","value":"madmis/wexapi"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f5b1b9b566f767bca7d8fad1f08c3d1bca42355a"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from decimal import Decimal\n\n\nclass Ticker(object):\n def __init__(\n self,\n high: float,\n low: float,\n avg: float,\n vol: float,\n vol_cur: int,\n last: float,\n buy: float,\n sell: float,\n updated: int,\n ):\n self.high = high\n self.low = low\n self.avg = avg\n self.vol = vol\n self.vol_cur = vol_cur\n self.last = last\n self.buy = buy\n self.sell = sell\n self.updated = 
updated\n\n @property\n def high(self) -> Decimal:\n return self._high\n\n @high.setter\n def high(self, value: float):\n self._high = Decimal(value)\n\n @property\n def low(self) -> Decimal:\n return self._low\n\n @low.setter\n def low(self, value: float):\n self._low = Decimal(value)\n\n @property\n def avg(self) -> Decimal:\n return self._avg\n\n @avg.setter\n def avg(self, value: float):\n self._avg = Decimal(value)\n\n @property\n def vol(self) -> Decimal:\n return self._vol\n\n @vol.setter\n def vol(self, value: float):\n self._vol = Decimal(value)\n\n @property\n def vol_cur(self) -> Decimal:\n return self._vol_cur\n\n @vol_cur.setter\n def vol_cur(self, value: float):\n self._vol_cur = Decimal(value)\n\n @property\n def last(self) -> Decimal:\n return self._last\n\n @last.setter\n def last(self, value: float):\n self._last = Decimal(value)\n\n @property\n def buy(self) -> Decimal:\n return self._buy\n\n @buy.setter\n def buy(self, value: float):\n self._buy = Decimal(value)\n\n @property\n def sell(self) -> Decimal:\n return self._sell\n\n @sell.setter\n def sell(self, value: float):\n self._sell = Decimal(value)\n\n @property\n def updated(self) -> int:\n return self._updated\n\n @updated.setter\n def updated(self, value: int):\n self._updated = int(value)\n"},"avg_line_length":{"kind":"number","value":20.2755102041,"string":"20.27551"},"max_line_length":{"kind":"number","value":38,"string":"38"},"alphanum_fraction":{"kind":"number","value":0.5455460493,"string":"0.545546"},"count_classes":{"kind":"number","value":1956,"string":"1,956"},"score_classes":{"kind":"number","value":0.984398590840463,"string":"0.984399"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":1332,"string":"1,332"},"score_decorators":{"kind":"number","value":0.6703573225968797,"string":"0.670357"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":0,"string":"0"},"score_documentation":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":3766,"cells":{"hexsha":{"kind":"string","value":"b99506d26f9716e398b3a3724d393185a9900942"},"size":{"kind":"number","value":1216,"string":"1,216"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"hard-gists/98bb452dc14e8c40e403/snippet.py"},"max_stars_repo_name":{"kind":"string","value":"jjhenkel/dockerizeme"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eaa4fe5366f6b9adf74399eab01c712cacaeb279"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":21,"string":"21"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-07-08T08:26:45.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-24T23:53:25.000Z"},"max_issues_repo_path":{"kind":"string","value":"hard-gists/98bb452dc14e8c40e403/snippet.py"},"max_issues_repo_name":{"kind":"string","value":"jjhenkel/dockerizeme"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eaa4fe5366f6b9adf74399eab01c712cacaeb279"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":5,"string":"5"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-06-15T14:47:47.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-26T05:02:56.000Z"},"max_forks_repo_path":{"kind":"string","value":"hard-gists/98bb452dc14e8c40e403/snippet.py"},"max_forks_repo_name":{"kind":"string","value":"jjhenkel/dockerizeme"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eaa4fe5366f6b9adf74399eab01c712cacaeb279"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":17,"string":"17"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-05-16T03:50:34.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-01-14T14:35:12.000Z"},"content":{"kind":"string","value":"from scryptos import *\n\np1 = 32581479300404876772405716877547\np2 = 27038194053540661979045656526063\np3 = 26440615366395242196516853423447\nn = p1*p2*p3\ne = 3\n\nc = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16)\n\n# from User's Guide to PARI/GP, nth_root function\nsqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}'\n\nc1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)]))\nc2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)]))\nc3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)]))\n\n\"\"\"\nc1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629]\nc2 = [19616973567618515464515107624812]\nc3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946]\n\"\"\"\n\nfor x in c1:\n for y in c2:\n for z in c3:\n crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)])\n d = hex(crt, 2)[2:].decode(\"hex\")\n if \"0ctf\" in d:\n print 
d[d.find(\"0ctf\"):].strip()\n"},"avg_line_length":{"kind":"number","value":39.2258064516,"string":"39.225806"},"max_line_length":{"kind":"number","value":224,"string":"224"},"alphanum_fraction":{"kind":"number","value":0.6636513158,"string":"0.663651"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":678,"string":"678"},"score_documentation":{"kind":"number","value":0.5575657894736842,"string":"0.557566"}}},{"rowIdx":3767,"cells":{"hexsha":{"kind":"string","value":"b9954284c404c9a5aed225965d5006c8735af349"},"size":{"kind":"number","value":1717,"string":"1,717"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"musa/migrations/0001_initial.py"},"max_stars_repo_name":{"kind":"string","value":"ccsreenidhin/Music-Web-Django"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9b8286914f9099b9ed56c712c7ca384846f189d1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"musa/migrations/0001_initial.py"},"max_issues_repo_name":{"kind":"string","value":"ccsreenidhin/Music-Web-Django"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9b8286914f9099b9ed56c712c7ca384846f189d1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"musa/migrations/0001_initial.py"},"max_forks_repo_name":{"kind":"string","value":"ccsreenidhin/Music-Web-Django"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9b8286914f9099b9ed56c712c7ca384846f189d1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-03-29 06:43\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport musa.models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MusicCollection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, max_length=70, null=True)),\n ('document', models.FileField(upload_to=musa.models.get_upload_path)),\n ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('fullname', models.CharField(blank=True, max_length=70)),\n ('favourite_music', models.CharField(blank=True, max_length=70)),\n ('about', models.TextField(blank=True, max_length=300)),\n ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n"},"avg_line_length":{"kind":"number","value":40.880952381,"string":"40.880952"},"max_line_length":{"kind":"number","value":121,"string":"121"},"alphanum_fraction":{"kind":"number","value":0.6336633663,"string":"0.633663"},"count_classes":{"kind":"number","value":1476,"string":"1,476"},"score_classes":{"kind":"number","value":0.8596389050669773,"string":"0.859639"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":245,"string":"245"},"score_documentation":{"kind":"number","value":0.14269073966220153,"string":"0.142691"}}},{"rowIdx":3768,"cells":{"hexsha":{"kind":"string","value":"b9957182927ee0480e35dd837a4d9ee2d8587462"},"size":{"kind":"number","value":3207,"string":"3,207"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"nuitka/codegen/LoopCodes.py"},"max_stars_repo_name":{"kind":"string","value":"RESP3CT88/Nuitka"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0fcc25d9f00c4fc78c79a863c4b7987f573962e1"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-05-25T12:48:28.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-05-25T12:48:28.000Z"},"max_issues_repo_path":{"kind":"string","value":"venv/Lib/site-packages/nuitka/codegen/LoopCodes.py"},"max_issues_repo_name":{"kind":"string","value":"matthijsvanvliet/raytracing-python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"73d692b47330ab94eedde579a51063e3a907e92b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"venv/Lib/site-packages/nuitka/codegen/LoopCodes.py"},"max_forks_repo_name":{"kind":"string","value":"matthijsvanvliet/raytracing-python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"73d692b47330ab94eedde579a51063e3a907e92b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Part of \"Nuitka\", an optimizing Python compiler that is 
compatible and\n# integrates with CPython, but also works on its own.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\" Loop codes.\n\nCode generation for loops, breaking them, or continuing them. In Nuitka, there\nare no for-loops or while-loops at this point. They have been re-formulated in\na simpler loop without a condition, and statements there-in that break under\ncertain conditions.\n\nSee Developer Manual for how the CPython loops are mapped to these nodes.\n\"\"\"\n\nfrom .CodeHelpers import generateStatementSequenceCode\nfrom .ErrorCodes import getErrorExitBoolCode\nfrom .ExceptionCodes import getExceptionUnpublishedReleaseCode\nfrom .LabelCodes import getGotoCode, getLabelCode\n\n\ndef generateLoopBreakCode(statement, emit, context):\n # Functions used for generation all accept statement, but this one does\n # not use it. pylint: disable=unused-argument\n\n getExceptionUnpublishedReleaseCode(emit, context)\n\n break_target = context.getLoopBreakTarget()\n getGotoCode(break_target, emit)\n\n\ndef generateLoopContinueCode(statement, emit, context):\n # Functions used for generation all accept statement, but this one does\n # not use it. pylint: disable=unused-argument\n\n getExceptionUnpublishedReleaseCode(emit, context)\n\n continue_target = context.getLoopContinueTarget()\n getGotoCode(continue_target, emit)\n\n\ndef generateLoopCode(statement, emit, context):\n loop_start_label = context.allocateLabel(\"loop_start\")\n\n if not statement.isStatementAborting():\n loop_end_label = context.allocateLabel(\"loop_end\")\n else:\n loop_end_label = None\n\n getLabelCode(loop_start_label, emit)\n\n old_loop_break = context.setLoopBreakTarget(loop_end_label)\n old_loop_continue = context.setLoopContinueTarget(loop_start_label)\n\n generateStatementSequenceCode(\n statement_sequence=statement.subnode_loop_body,\n allow_none=True,\n emit=emit,\n context=context,\n )\n\n context.setLoopBreakTarget(old_loop_break)\n context.setLoopContinueTarget(old_loop_continue)\n\n # Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway.\n old_source_ref = context.setCurrentSourceCodeReference(\n statement.getSourceReference()\n )\n\n getErrorExitBoolCode(\n condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context\n )\n\n context.setCurrentSourceCodeReference(old_source_ref)\n\n getGotoCode(loop_start_label, emit)\n\n if loop_end_label is not None:\n getLabelCode(loop_end_label, 
emit)\n"},"avg_line_length":{"kind":"number","value":34.8586956522,"string":"34.858696"},"max_line_length":{"kind":"number","value":111,"string":"111"},"alphanum_fraction":{"kind":"number","value":0.7527284066,"string":"0.752728"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1505,"string":"1,505"},"score_documentation":{"kind":"number","value":0.46928593701278454,"string":"0.469286"}}},{"rowIdx":3769,"cells":{"hexsha":{"kind":"string","value":"b995831c9a98c5b05882c5bbcc4b241cd51503bd"},"size":{"kind":"number","value":4837,"string":"4,837"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"3_module/C_BloomFilter.py"},"max_stars_repo_name":{"kind":"string","value":"L4mborg1n1-D14610/Algoritms_and_DataStructure"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f61b7434dbc600da02e8ec38648fa84beb160f17"},"max_stars_repo_licenses":{"kind":"list like","value":["Xnet","X11","CECILL-B"],"string":"[\n \"Xnet\",\n \"X11\",\n \"CECILL-B\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"3_module/C_BloomFilter.py"},"max_issues_repo_name":{"kind":"string","value":"L4mborg1n1-D14610/Algoritms_and_DataStructure"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f61b7434dbc600da02e8ec38648fa84beb160f17"},"max_issues_repo_licenses":{"kind":"list like","value":["Xnet","X11","CECILL-B"],"string":"[\n \"Xnet\",\n \"X11\",\n \"CECILL-B\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"3_module/C_BloomFilter.py"},"max_forks_repo_name":{"kind":"string","value":"L4mborg1n1-D14610/Algoritms_and_DataStructure"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f61b7434dbc600da02e8ec38648fa84beb160f17"},"max_forks_repo_licenses":{"kind":"list like","value":["Xnet","X11","CECILL-B"],"string":"[\n \"Xnet\",\n \"X11\",\n \"CECILL-B\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import math\nfrom sys import exit\n\n# итак, n - приблизительное число элементов в массиве, P - вероятность ложноположительного ответа, тогда размер\n# структуры m = -(nlog2P) / ln2 (2 - основание), количество хеш-функций будет равно -log2P\n# хеш-функции используются вида: (((i + 1)*x + p(i+1)) mod M) mod m,где - x - ключ, i - номер хэш-функции,\n# pi - i-тое по счету простое число, а M - 31ое число Мерсенна, M = 2^31 - 1, M = 2 147 483 647, M - простое число.\n\n# При подсчёте хеш-функций необходимо знать первые k простых чисел. 
Посчитаем их один раз в конструкторе BloomFilter\n# и будем хранить в структуре данных.\n# Также нам необходимо создать битовый массив размера m, однако по умолчанию в питоне битовый массив отсутствует,\n# поэтому будем использовать байтовый массив. Реализуем для удобства отдельную СД, из методов необходимо: изменить\n# указанный бит на 1, проверить является ли указанный бит 1 и напечатать (вернуть) сам массив\n\nMersen_31 = 2147483647\n\n\nclass BitArray:\n def __init__(self, size):\n self.__array = bytearray(int(math.ceil(size / 8)))\n self.__size = size\n\n def add_bit(self, i):\n # i-тый бит содержится в i//8 байте на i % 8 месте\n self.__array[i // 8] |= 2 ** (7 - (i % 8))\n\n def check_bit(self, i):\n if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:\n return False\n else:\n return True\n\n def print(self):\n array_str = \"\"\n for byte in self.__array:\n _line = str(bin(byte))[2:]\n if len(_line) != 8:\n _line = '0' * (8 - len(_line)) + _line\n array_str += _line\n return array_str[:self.__size]\n\n\nclass BloomFilter:\n def __init__(self, n: int, p: float):\n self.size = int(-round(n * math.log2(p) / math.log(2)))\n self.hash_numbers = int(-round(math.log2(p)))\n self.__prime_numbers = list()\n self.__get_prime(self.hash_numbers + 1)\n self.__bitarray = BitArray(self.size)\n\n def __get_prime(self, prime_size):\n # обычный проход по всем числам и их проверка на простоту - сложно по времени\n # немного упростим: во-первых будем идти с интервалом 2, начиная от 3, а после новое число проверять на\n # делимость на уже найденные простые числа (кроме двойки, мы же рассматриваем нечётные)\n if prime_size == 1:\n self.__prime_numbers.append(2)\n return\n self.__prime_numbers.append(2)\n i = 3\n while len(self.__prime_numbers) < prime_size:\n j = 1\n prime_flag = True\n while j < len(self.__prime_numbers):\n if (i % self.__prime_numbers[j]) == 0:\n prime_flag = False\n break\n j += 1\n if prime_flag:\n self.__prime_numbers.append(i)\n i += 2\n\n def __get_hash(self, x, i):\n return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size\n\n def add(self, key: int):\n i = 0\n while i < self.hash_numbers:\n self.__bitarray.add_bit(self.__get_hash(key, i))\n i += 1\n\n def search(self, key: int):\n i = 0\n while i < self.hash_numbers:\n if not self.__bitarray.check_bit(self.__get_hash(key, i)):\n return False\n i += 1\n return True\n\n def print(self):\n return self.__bitarray.print()\n\n\nbloom_filter = 0\n\nwhile True:\n try:\n line = input().split()\n if len(line) == 0:\n continue\n else:\n if line[0] == \"set\":\n try:\n elements_number = int(line[1])\n probability = float(line[2])\n if (elements_number <= 0) | (probability <= 0) | (probability >= 1):\n print(\"error\")\n continue\n bloom_filter = BloomFilter(elements_number, probability)\n if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):\n print(\"error\")\n continue\n break\n except TypeError:\n print(\"error\")\n continue\n else:\n print(\"error\")\n continue\n except EOFError:\n exit()\n\nprint(bloom_filter.size, bloom_filter.hash_numbers)\n\nwhile True:\n try:\n line = input().split()\n if len(line) == 0:\n continue\n elif line[0] == \"print\":\n print(bloom_filter.print())\n elif (line[0] == \"add\") & (line[1].isnumeric()):\n bloom_filter.add(int(line[1]))\n elif (line[0] == \"search\") & (line[1].isnumeric()):\n print(int(bloom_filter.search(int(line[1]))))\n else:\n print(\"error\")\n except EOFError:\n 
break\n"},"avg_line_length":{"kind":"number","value":34.7985611511,"string":"34.798561"},"max_line_length":{"kind":"number","value":116,"string":"116"},"alphanum_fraction":{"kind":"number","value":0.5519950382,"string":"0.551995"},"count_classes":{"kind":"number","value":2671,"string":"2,671"},"score_classes":{"kind":"number","value":0.4697502638058389,"string":"0.46975"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2126,"string":"2,126"},"score_documentation":{"kind":"number","value":0.3739008090045726,"string":"0.373901"}}},{"rowIdx":3770,"cells":{"hexsha":{"kind":"string","value":"b996ad8d5f407e5b1769d9b50ca7be5705a211e8"},"size":{"kind":"number","value":1937,"string":"1,937"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pyzmq/examples/pubsub/subscriber.py"},"max_stars_repo_name":{"kind":"string","value":"Surfndez/source-publish"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c3838b303c1a0806f21cd4e8d8c207015b3ce9c8"},"max_stars_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n \"Intel\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pyzmq/examples/pubsub/subscriber.py"},"max_issues_repo_name":{"kind":"string","value":"Surfndez/source-publish"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c3838b303c1a0806f21cd4e8d8c207015b3ce9c8"},"max_issues_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n \"Intel\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-01-21T17:43:33.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-01-21T17:43:33.000Z"},"max_forks_repo_path":{"kind":"string","value":"pyzmq/examples/pubsub/subscriber.py"},"max_forks_repo_name":{"kind":"string","value":"Surfndez/source-publish"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c3838b303c1a0806f21cd4e8d8c207015b3ce9c8"},"max_forks_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n \"Intel\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"A test that subscribes to NumPy arrays.\n\nUses REQ/REP (on PUB/SUB socket + 1) to synchronize\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2010 Brian Granger\n#\n# Distributed under the terms of the New BSD License. 
The full license is in\n# the file COPYING.BSD, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n\nimport sys\nimport time\n\nimport zmq\nimport numpy\n\ndef sync(connect_to):\n # use connect socket + 1\n sync_with = ':'.join(connect_to.split(':')[:-1] +\n [str(int(connect_to.split(':')[-1]) + 1)]\n )\n ctx = zmq.Context.instance()\n s = ctx.socket(zmq.REQ)\n s.connect(sync_with)\n s.send('READY')\n s.recv()\n\ndef main():\n if len (sys.argv) != 3:\n print 'usage: subscriber '\n sys.exit (1)\n\n try:\n connect_to = sys.argv[1]\n array_count = int (sys.argv[2])\n except (ValueError, OverflowError), e:\n print 'array-count must be integers'\n sys.exit (1)\n\n ctx = zmq.Context()\n s = ctx.socket(zmq.SUB)\n s.connect(connect_to)\n s.setsockopt(zmq.SUBSCRIBE,'')\n\n sync(connect_to)\n\n start = time.clock()\n\n print \"Receiving arrays...\"\n for i in range(array_count):\n a = s.recv_pyobj()\n print \" Done.\"\n\n end = time.clock()\n\n elapsed = (end - start) * 1000000\n if elapsed == 0:\n \telapsed = 1\n throughput = (1000000.0 * float (array_count)) / float (elapsed)\n message_size = a.nbytes\n megabits = float (throughput * message_size * 8) / 1000000\n\n print \"message size: %.0f [B]\" % (message_size, )\n print \"array count: %.0f\" % (array_count, )\n print \"mean throughput: %.0f [msg/s]\" % (throughput, )\n print \"mean throughput: %.3f [Mb/s]\" % (megabits, )\n\n time.sleep(1.0)\n\nif __name__ == \"__main__\":\n main()\n"},"avg_line_length":{"kind":"number","value":25.8266666667,"string":"25.826667"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.5451729479,"string":"0.545173"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":694,"string":"694"},"score_documentation":{"kind":"number","value":0.3582860092927207,"string":"0.358286"}}},{"rowIdx":3771,"cells":{"hexsha":{"kind":"string","value":"b997c70668ace413cc27502883f737e007e56239"},"size":{"kind":"number","value":1006,"string":"1,006"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Doc/includes/sqlite3/load_extension.py"},"max_stars_repo_name":{"kind":"string","value":"livioso/cpython"},"max_stars_repo_head_hexsha":{"kind":"string","value":"077061a7b24917aaf31057885c69919c5a553c88"},"max_stars_repo_licenses":{"kind":"list like","value":["PSF-2.0"],"string":"[\n \"PSF-2.0\"\n]"},"max_stars_count":{"kind":"number","value":36,"string":"36"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-06-07T20:44:06.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-23T06:19:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"Doc/includes/sqlite3/load_extension.py"},"max_issues_repo_name":{"kind":"string","value":"livioso/cpython"},"max_issues_repo_head_hexsha":{"kind":"string","value":"077061a7b24917aaf31057885c69919c5a553c88"},"max_issues_repo_licenses":{"kind":"list 
like","value":["PSF-2.0"],"string":"[\n \"PSF-2.0\"\n]"},"max_issues_count":{"kind":"number","value":49,"string":"49"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-02-29T17:59:52.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-05-05T04:59:26.000Z"},"max_forks_repo_path":{"kind":"string","value":"Doc/includes/sqlite3/load_extension.py"},"max_forks_repo_name":{"kind":"string","value":"livioso/cpython"},"max_forks_repo_head_hexsha":{"kind":"string","value":"077061a7b24917aaf31057885c69919c5a553c88"},"max_forks_repo_licenses":{"kind":"list like","value":["PSF-2.0"],"string":"[\n \"PSF-2.0\"\n]"},"max_forks_count":{"kind":"number","value":28,"string":"28"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-06-27T04:11:27.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-11T06:27:44.000Z"},"content":{"kind":"string","value":"import sqlite3\n\ncon = sqlite3.connect(\":memory:\")\n\n# enable extension loading\ncon.enable_load_extension(True)\n\n# Load the fulltext search extension\ncon.execute(\"select load_extension('./fts3.so')\")\n\n# alternatively you can load the extension using an API call:\n# con.load_extension(\"./fts3.so\")\n\n# disable extension loading again\ncon.enable_load_extension(False)\n\n# example from SQLite wiki\ncon.execute(\"create virtual table recipe using fts3(name, ingredients)\")\ncon.executescript(\"\"\"\n insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');\n insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');\n insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');\n insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');\n \"\"\")\nfor row in con.execute(\"select rowid, name, ingredients from recipe where name match 'pie'\"):\n print(row)\n"},"avg_line_length":{"kind":"number","value":37.2592592593,"string":"37.259259"},"max_line_length":{"kind":"number","value":104,"string":"104"},"alphanum_fraction":{"kind":"number","value":0.7445328032,"string":"0.744533"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":801,"string":"801"},"score_documentation":{"kind":"number","value":0.7962226640159046,"string":"0.796223"}}},{"rowIdx":3772,"cells":{"hexsha":{"kind":"string","value":"b9982b7f935a0931c3a9dc4e8ec48b12b5523acb"},"size":{"kind":"number","value":22060,"string":"22,060"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"lingvo/core/inference_graph_exporter.py"},"max_stars_repo_name":{"kind":"string","value":"RunzheYang/lingvo"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1291e29812f9ee9836f9cacbb05db9ec6b095234"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-09-02T18:04:13.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-09-02T18:04:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"lingvo/core/inference_graph_exporter.py"},"max_issues_repo_name":{"kind":"string","value":"RunzheYang/lingvo"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1291e29812f9ee9836f9cacbb05db9ec6b095234"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"lingvo/core/inference_graph_exporter.py"},"max_forks_repo_name":{"kind":"string","value":"RunzheYang/lingvo"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1291e29812f9ee9836f9cacbb05db9ec6b095234"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility for exporting an InferenceGraph proto from model params.\"\"\"\n\nimport collections\nimport contextlib\nimport re\nimport lingvo.compat as tf\nfrom lingvo.core import base_model\nfrom lingvo.core import bfloat16_variables\nfrom lingvo.core import inference_graph_pb2\nfrom lingvo.core import py_utils\nimport six\n\nfrom google.protobuf import text_format\n\nFLAGS = tf.flags.FLAGS\n\n# InferenceDeviceOptions contains options to configure inference on the device.\n# device: Device to infer on.\n# retain_device_placement: If true, the specified device in the generated\n# inference graph nodes will be retained. Otherwise, the specified device\n# will be cleared, so that the runtime can choose automatically.\n# var_options: Options on handling variables. For TPUs, variables can be\n# either placed on device through 'ON_DEVICE' option, or treated as\n# constants with AS_CONSTANTS.\n# gen_init_op: Whether to serialize initialization ops for the device. For TPUs,\n# servers can be initialized globally once, in which case this should be\n# turned off to avoid tripping initialization checks.\n# dtype_override: Whether to override the dtype to use for activations and\n# weights in the model. 
Options supported are None or tf.bfloat16.\nInferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [\n 'device', 'retain_device_placement', 'var_options', 'gen_init_op',\n 'dtype_override', 'fprop_dtype_override'\n])\n\n_CONST_GUARANTEE = None\n\n\n@contextlib.contextmanager\ndef NoConstGuaranteeScope():\n \"\"\"Disallow const gauranteeing variable with-in scope.\"\"\"\n global _CONST_GUARANTEE\n var_scope = tf.get_variable_scope()\n old_caching_device = var_scope.caching_device\n old_val = _CONST_GUARANTEE\n var_scope.set_caching_device(None)\n _CONST_GUARANTEE = False\n yield\n _CONST_GUARANTEE = old_val\n var_scope.set_caching_device(old_caching_device)\n\n\n# Marks variable as constants for compilation\ndef MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):\n global _CONST_GUARANTEE\n if _CONST_GUARANTEE:\n with tf.control_dependencies(None):\n return tf.guarantee_const(\n getter(name, *args, **kwargs), name=name + '/GuaranteeConst')\n else:\n return getter(name, *args, **kwargs)\n\n\n@contextlib.contextmanager\ndef ConstGuaranteeScope():\n \"\"\"Treats all variables under this scope as constants.\"\"\"\n global _CONST_GUARANTEE\n var_scope = tf.get_variable_scope()\n old_custom_getter = var_scope.custom_getter\n old_caching_device = var_scope.caching_device\n old_val = _CONST_GUARANTEE\n var_scope.set_custom_getter(MaybeGuaranteeConstGetter)\n var_scope.set_caching_device(lambda op: op.device)\n _CONST_GUARANTEE = True\n yield\n _CONST_GUARANTEE = old_val\n var_scope.set_custom_getter(old_custom_getter)\n var_scope.set_caching_device(old_caching_device)\n\n\n@contextlib.contextmanager\ndef _DummyScope():\n yield None\n\n\ndef _GetVarName(v):\n return v.name[:-len(':0')]\n\n\ndef _MakeVariableDictionary(variables):\n \"\"\"Returns a dictionary with name -> tf.Variable() mapping.\"\"\"\n vars_dict = {}\n for v in variables:\n vars_dict[_GetVarName(v)] = v\n return vars_dict\n\n\ndef IsTpu(device_options):\n return device_options.device == 'tpu'\n\n\ndef ShouldForceBfloat16ForWeightsAndActivations(device_options):\n return device_options.dtype_override == tf.bfloat16\n\n\ndef ShouldForceBfloat16ForActivations(device_options):\n return device_options.fprop_dtype_override == tf.bfloat16\n\n\ndef ConvertSubgraphDictToProto(subgraphs_dict):\n \"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph.\n\n Args:\n subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a\n NestedMap.\n\n Returns:\n Equivalent InferenceGraph.\n \"\"\"\n # Build the output inference graph.\n inference_graph_proto = inference_graph_pb2.InferenceGraph()\n for subgraph_name, tensors in subgraphs_dict.items():\n fetches = tensors[0]\n feeds = tensors[1]\n\n # Rewrite fetches and feeds to map to their tensor name instead of\n # Tensor instance.\n named_fetches = {k: v.name for k, v in fetches.items() if v is not None}\n named_feeds = {k: v.name for k, v in feeds.items() if v is not None}\n\n # Export as subgraph.\n inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches)\n inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds)\n return inference_graph_proto\n\n\ndef GetOutputOpNames(graph,\n inference_graph_proto,\n subgraphs=None,\n preserve_colocation_nodes=True,\n preserve_saver_restore_nodes=False,\n preserve_extra_ops=None):\n \"\"\"Gets output op names from an inference graph.\n\n Args:\n graph: The tf graph.\n inference_graph_proto: an InferenceGraph proto.\n subgraphs: an optional list of subgraph names. 
If provided, only output ops\n from these subgraphs are preserved. Otherwise, all subgraphs are included.\n preserve_colocation_nodes: a Python bool, default to True. Preserves nodes\n colocating with the closure of output ops in the returned array.\n preserve_saver_restore_nodes: a Python bool, default to False. Preserves\n nodes for restoring according to inference_graph_proto.saver_def.\n preserve_extra_ops: an optional list of extra op names to preserve as long\n as they present in the graph.\n\n Returns:\n Array of tf op names that should be preserved in the graph.\n \"\"\"\n output_op_names = set()\n\n def _GetOpName(tensor_or_op_name):\n \"\"\"Returns the op name of the given node name.\"\"\"\n # Tensor names have format :. Some inference\n # graphs put tensors and others put ops in the feeds/fetches (depends\n # on how it is used). We differentiate here. We still do the lookup in\n # the graph to sanity check (versus relying on the text manipulation).\n # If this logic ever breaks, TensorFlow will raise a ValueError with\n # a description of the syntax of each.\n if re.search(r':[0-9]+$', tensor_or_op_name):\n # Tensor-name.\n t = graph.get_tensor_by_name(tensor_or_op_name)\n return t.op.name\n else:\n op = graph.get_operation_by_name(tensor_or_op_name)\n return op.name\n\n for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():\n if subgraphs and subgraph_name not in subgraphs:\n tf.logging.info('Skip subgraph %s.', subgraph_name)\n continue\n # Sometimes feeds aren't connected to any outputs but keep them in the graph\n # anyways to avoid errors.\n for tensor_or_op_name in (list(subgraph.feeds.values()) +\n list(subgraph.fetches.values())):\n output_op_names.add(_GetOpName(tensor_or_op_name))\n\n if preserve_saver_restore_nodes:\n # Only nodes for restoring is preserved. saver_def.save_tensor_name is\n # skipped because it's only used for saving.\n saver_def = inference_graph_proto.saver_def\n for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:\n try:\n output_op_names.add(_GetOpName(op_name))\n except KeyError:\n tf.logging.info('Op/tensor %s not in the graph. Ignoring.' 
% op_name)\n\n if not preserve_colocation_nodes and not preserve_extra_ops:\n return sorted(list(output_op_names))\n\n # We also need to preserve any nodes that are used for colocation.\n # E.g., a node may have this attr:\n # attr {\n # key: \"_class\"\n # value {\n # list {\n # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\"\n # }\n # }\n # }\n #\n # In this case, we need to make sure the node\n # inference/embedding_lookup/Read/ReadVariableOp is not pruned.\n #\n # TODO(zhifengc): It's possible that it's better to fix in\n # tf.graph_util.extract_sub_graph.\n graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),\n list(output_op_names))\n reachable_vars = [node.name for node in graph_def.node]\n\n for node in graph.get_operations():\n if preserve_extra_ops and node.name in preserve_extra_ops:\n output_op_names.add(node.name)\n elif preserve_colocation_nodes and '_class' in node.node_def.attr:\n for loc in node.node_def.attr['_class'].list.s:\n loc = six.ensure_text(loc, 'utf-8')\n if loc.startswith('loc:@'):\n loc_name = loc[5:]\n if loc_name not in reachable_vars:\n # Skip nodes that cannot be reached from the pruned graph.\n continue\n output_op_names.add(node.name)\n\n return sorted(list(output_op_names))\n\n\ndef _ParamExists(param_obj, param_name):\n \"\"\"Tests whether param_name is contained in param_obj.\"\"\"\n if not param_obj:\n return\n for k, _ in param_obj.IterParams():\n if k == param_name:\n return True\n return False\n\n\ndef _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):\n \"\"\"Freezes a graph from a checkpoint.\n\n Args:\n graph: tf.Graph.\n saver: The tf.Saver to use for restoration.\n checkpoint: The checkpoint to restore.\n output_op_names: Names of output ops.\n\n Returns:\n Resulting tf.GraphDef.\n \"\"\"\n sess = tf.Session(graph=graph, config=py_utils.SessionConfig())\n saver.restore(sess, checkpoint)\n return tf.graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), output_op_names)\n\n\ndef _FreezeDefaults(graph, output_op_names):\n \"\"\"Default initializes a graph and freezes it.\n\n Args:\n graph: tf.Graph.\n output_op_names: Names of output ops.\n\n Returns:\n Resulting tf.GraphDef.\n \"\"\"\n with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:\n sess.run(graph.get_operation_by_name('init_all_variables'))\n return tf.graph_util.convert_variables_to_constants(sess,\n graph.as_graph_def(),\n output_op_names)\n\n\nclass InferenceGraphExporter:\n \"\"\"Class for exporting inference graphs.\"\"\"\n\n @classmethod\n def Export(cls,\n model_cfg,\n model_task_name=None,\n device_options=InferenceDeviceOptions(\n device='',\n retain_device_placement=False,\n var_options=None,\n gen_init_op=True,\n dtype_override=None,\n fprop_dtype_override=None),\n freeze_checkpoint=None,\n freeze_defaults=False,\n export_path=None,\n subgraph_filter=None,\n random_seed=None,\n disable_packed_input=True):\n \"\"\"Exports a InferenceGraph proto with piecewise subgraphs.\n\n Sets FLAGS.enable_asserts to False unless user explicitly sets it to True.\n\n Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing\n and multi-core inference on TPUs work properly.\n\n Args:\n model_cfg: a Params instance as returned by\n model_registry.GetParams(modelname, 'Test') or model_params.Model().\n model_task_name: The task to generate an inference graph for. 
Should be\n None for single-task models.\n device_options: Device options for the accelerator used for serving.\n freeze_checkpoint: The checkpoint to load. Loads and freezes the model if\n given.\n freeze_defaults: Default initializes the graph and freeze. Useful for\n early testing of downstream tools without having a checkpoint.\n export_path: If not None, write the inference graph in ASCII to this path.\n subgraph_filter: A string or a list of subgraph names. If not None or\n empty, export only this list of inference subgraphs.\n random_seed: Fixes the random seed in the exported inference graph.\n disable_packed_input: Disable packed input for inference writing purposes.\n\n Returns:\n InferenceGraph proto.\n\n Raises:\n ValueError: if the model does not support the listed subgraphs.\n \"\"\"\n assert issubclass(model_cfg.cls, base_model.BaseModel)\n if device_options.dtype_override and device_options.fprop_dtype_override:\n raise ValueError(\n 'device_options{dtype_override,fprop_dtype_override) can not both be'\n 'set.')\n if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):\n subgraph_filter = [subgraph_filter]\n\n # Disable assertions unless user explicitly enables it.\n if FLAGS['enable_asserts'].using_default_value:\n FLAGS.enable_asserts = False\n\n # TODO(laurenzo): Work out how much we need to specify here in terms of\n # cluster configuration.\n cls._SetClusterParams(model_cfg.cluster, device_options)\n\n # Configure the model.\n model_cfg.random_seed = random_seed\n model_cfg.is_inference = True\n\n if disable_packed_input:\n\n def _DisablePackedInput(task):\n if (_ParamExists(task, 'encoder') and\n _ParamExists(task.encoder, 'packed_input')):\n task.encoder.packed_input = False\n if (_ParamExists(task, 'decoder') and\n _ParamExists(task.decoder, 'packed_input')):\n task.decoder.packed_input = False\n\n if issubclass(model_cfg.cls, base_model.MultiTaskModel):\n for _, task_param in model_cfg.task_params.IterParams():\n _DisablePackedInput(task_param)\n else:\n _DisablePackedInput(model_cfg.task)\n\n tf.logging.debug('Model %s params:', model_cfg.name)\n for line in model_cfg.ToText().split('\\n'):\n tf.logging.debug('%s', line)\n\n # Instantiate the graph.\n graph = tf.Graph()\n with graph.as_default():\n tf.random.set_seed(random_seed)\n cluster = model_cfg.cluster.Instantiate()\n device = cluster.GetPlacer()\n tpu_const_scope = _DummyScope()\n if (IsTpu(device_options) and\n device_options.var_options == 'AS_CONSTANTS'):\n # Do not specify devices for variables if we are marking them as\n # constants.\n device = ''\n tpu_const_scope = ConstGuaranteeScope()\n\n with cluster, tf.device(device), tpu_const_scope:\n\n bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(\n device_options)\n\n if bfloat16_override:\n py_utils.UpdateDtype(model_cfg, tf.bfloat16)\n py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)\n\n act_bfloat16_override = ShouldForceBfloat16ForActivations(\n device_options)\n if act_bfloat16_override:\n py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)\n\n # Hard-code TPU-related flags prior to instantiating model.\n old_enable_asserts = FLAGS.enable_asserts\n old_xla_device = FLAGS.xla_device\n if IsTpu(device_options):\n FLAGS.enable_asserts = False\n FLAGS.xla_device = 'tpu'\n\n try:\n mdl = model_cfg.Instantiate()\n task = mdl.GetTask(model_task_name)\n\n variables_to_restore = (\n _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else\n mdl.ema.variables_to_restore(mdl.variables_for_ema))\n\n if 
bfloat16_override:\n saver_var_spec = (\n bfloat16_variables\n .get_saver_spec_for_variables_with_bf16_overrides(\n variables_to_restore))\n else:\n saver_var_spec = variables_to_restore\n\n saver = tf.train.Saver(saver_var_spec)\n tf.variables_initializer(\n tf.global_variables(), name='init_all_variables')\n if IsTpu(device_options) and device_options.gen_init_op:\n tf.group(tf.tpu.initialize_system(), name='tpu_init_op')\n\n if freeze_checkpoint or freeze_defaults:\n # Replace variables with tensors using tf.identity in theta before\n # freezing to avoid the graph referencing types of DT_RESOURCE.\n def AddIdentityToTheta(layer):\n layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access\n layer.children.Transform(AddIdentityToTheta)\n\n AddIdentityToTheta(task)\n\n inference_graph_proto = inference_graph_pb2.InferenceGraph()\n subgraphs_proto = task.Inference()\n if isinstance(subgraphs_proto, dict):\n subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)\n for name, subgraph in subgraphs_proto.subgraphs.items():\n if not subgraph_filter or name in subgraph_filter:\n inference_graph_proto.subgraphs[name].CopyFrom(subgraph)\n\n # Yes, graph collections are bad, however this seems to be the\n # easiest way to get this assets registered from\n # TextFileInitializer.\n assets_collection = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.ASSET_FILEPATHS)\n for asset in assets_collection:\n if asset.op.type == 'Const' and asset.op.get_attr(\n 'dtype') == tf.dtypes.string:\n constant_value = asset.op.get_attr('value')\n if constant_value.string_val:\n tf.logging.info('Found asset file_path: %s',\n constant_value.string_val[0])\n asset_file_def = inference_graph_proto.asset_file_def.add()\n asset_file_def.tensor_info.name = asset.name\n asset_file_def.filename = constant_value.string_val[0]\n\n # Add a table init op and global variable init op to the graph.\n # Tables can be declared anywhere in the graph, so this op has to be\n # added last.\n tf.tables_initializer(name='init_all_tables')\n finally:\n # Reset TPU-related flags after model instantiation.\n FLAGS.enable_asserts = old_enable_asserts\n FLAGS.xla_device = old_xla_device\n\n tf.logging.info('Graph contains ops: %r',\n [op.name for op in graph.get_operations()])\n\n # Collection defs\n if not tf.executing_eagerly():\n meta_graph = tf.train.export_meta_graph(graph=graph)\n for key in meta_graph.collection_def:\n tf.logging.info('copying collection %s', key)\n inference_graph_proto.collection_def[key].CopyFrom(\n meta_graph.collection_def[key])\n else:\n tf.logging.warning('Not exporting collection defs '\n 'since operating in eager mode.')\n\n # Freezing.\n if freeze_defaults or freeze_checkpoint:\n output_op_names = GetOutputOpNames(\n graph,\n inference_graph_proto,\n preserve_colocation_nodes=False,\n preserve_saver_restore_nodes=False)\n if cls._DeviceSupportsFreezing(device_options):\n raise ValueError('freeze_checkpoint cannot be used with device ' +\n device_options.device)\n if freeze_checkpoint:\n tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)\n graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,\n output_op_names)\n elif freeze_defaults:\n tf.logging.info('Default initializing graph and freezing.')\n graph_def = _FreezeDefaults(graph, output_op_names)\n else:\n inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())\n output_op_names = GetOutputOpNames(graph, inference_graph_proto)\n\n # Prune the graph to just the 
parts we need.\n # To support restoring, we have to not prune out the restore node.\n output_op_names.append('init_all_tables')\n output_op_names.append('init_all_variables')\n output_op_names.append('save/control_dependency')\n output_op_names.append('save/restore_all')\n if IsTpu(device_options) and device_options.gen_init_op:\n output_op_names.append('tpu_init_op')\n graph_def = graph.as_graph_def()\n tf.logging.info('Pruning graph to output ops: %r', output_op_names)\n graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)\n\n if not device_options.retain_device_placement:\n # Clear the device so that the runtime can choose.\n tf.logging.info('Clearing device placement for: %s',\n device_options.device)\n for node in graph_def.node:\n node.ClearField('device')\n for function in graph_def.library.function:\n for node_def in function.node_def:\n node_def.ClearField('device')\n\n inference_graph_proto.graph_def.CopyFrom(graph_def)\n\n if export_path:\n with tf.io.gfile.GFile(export_path, 'w') as f:\n f.write(text_format.MessageToString(inference_graph_proto))\n return inference_graph_proto\n\n @classmethod\n def _SetClusterParams(cls, cluster_params, device_options):\n \"\"\"Sets cluster params.\n\n Args:\n cluster_params: Model().cluster config.\n device_options: InferenceDeviceOptions.\n \"\"\"\n\n def Update(p):\n \"\"\"Update cluster params `p`.\"\"\"\n p.name = '/job:localhost'\n p.replicas = 1\n p.tpus_per_replica = 1 if IsTpu(device_options) else 0\n p.gpus_per_replica = 0\n p.devices_per_split = 1\n\n cluster_params.mode = 'sync'\n cluster_params.job = 'decoder'\n cluster_params.add_summary = False\n cluster_params.do_eval = True\n Update(cluster_params.controller)\n Update(cluster_params.worker)\n Update(cluster_params.ps)\n Update(cluster_params.evaler)\n Update(cluster_params.decoder)\n Update(cluster_params.input)\n\n @classmethod\n def _DeviceSupportsFreezing(cls, device_options):\n return IsTpu(device_options)\n"},"avg_line_length":{"kind":"number","value":38.1001727116,"string":"38.100173"},"max_line_length":{"kind":"number","value":116,"string":"116"},"alphanum_fraction":{"kind":"number","value":0.6943336355,"string":"0.694334"},"count_classes":{"kind":"number","value":11523,"string":"11,523"},"score_classes":{"kind":"number","value":0.5223481414324569,"string":"0.522348"},"count_generators":{"kind":"number","value":958,"string":"958"},"score_generators":{"kind":"number","value":0.04342701722574796,"string":"0.043427"},"count_decorators":{"kind":"number","value":12475,"string":"12,475"},"score_decorators":{"kind":"number","value":0.5655031731640979,"string":"0.565503"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":8165,"string":"8,165"},"score_documentation":{"kind":"number","value":0.3701269265639166,"string":"0.370127"}}},{"rowIdx":3773,"cells":{"hexsha":{"kind":"string","value":"b9982e3e4e7a4b4799e5780bd7629d5235cc1b40"},"size":{"kind":"number","value":1836,"string":"1,836"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/preprocessing/annual_hc_by_crime_loc.py"},"max_stars_repo_name":{"kind":"string","value":"VijayKalmath/USCrimeAnalysis"},"max_stars_repo_head_hexsha":{"kind":"string","value":"14c96aae52547a4f7ea140395c62a621a97def50"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/preprocessing/annual_hc_by_crime_loc.py"},"max_issues_repo_name":{"kind":"string","value":"VijayKalmath/USCrimeAnalysis"},"max_issues_repo_head_hexsha":{"kind":"string","value":"14c96aae52547a4f7ea140395c62a621a97def50"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/preprocessing/annual_hc_by_crime_loc.py"},"max_forks_repo_name":{"kind":"string","value":"VijayKalmath/USCrimeAnalysis"},"max_forks_repo_head_hexsha":{"kind":"string","value":"14c96aae52547a4f7ea140395c62a621a97def50"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#! usr/env/bin python\nimport glob\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef main():\n # Fetch File Paths\n file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')\n # Sort them according to year\n file_paths.sort(key = lambda x: int(x[-8:-4]))\n # Create a result dataframe to store the data\n df_res = get_place_crime_count(file_paths[0])\n # Iterate over the rest of the files\n for p in tqdm(file_paths[1:]):\n df_temp = get_place_crime_count(p)\n df_res = pd.merge(df_res, df_temp, on = \"Place\", how = \"left\")\n # Save the result to disk\n df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False)\n\n\n\ndef get_place_crime_count(path:str)->pd.DataFrame:\n \"\"\"\n Function to return \n \"\"\"\n # Extracting the table name from and year from the given file path\n t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\"))\n t_year = path[path.index(\".xls\")-4:path.index(\".xls\")]\n\n try:\n # Read the Excel spreadsheet\n df = pd.read_excel(path,sheet_name=t_name)\n # Get the start and end indices of the interested datapoints\n start = df.index[df[t_name] == \"Total\"][0] + 1\n end = df.index[df[t_name] == \"Multiple locations\"][0] \n # Slice the dataset\n df = df.iloc[start:end,0:2]\n # Reset the index for the reduced dataframe\n df.reset_index(drop = True, inplace = True)\n # Rename the columns\n df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace = True)\n # Return the value\n return df\n except:\n # If there is no such data return an empty dataframe\n i_list = list(range(0,47))\n return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year])\n\n\nif __name__ == '__main__':\n 
main()\n"},"avg_line_length":{"kind":"number","value":33.3818181818,"string":"33.381818"},"max_line_length":{"kind":"number","value":84,"string":"84"},"alphanum_fraction":{"kind":"number","value":0.6377995643,"string":"0.6378"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":720,"string":"720"},"score_documentation":{"kind":"number","value":0.39215686274509803,"string":"0.392157"}}},{"rowIdx":3774,"cells":{"hexsha":{"kind":"string","value":"b998534e368ce74be309448b790e384f839c6d4a"},"size":{"kind":"number","value":1672,"string":"1,672"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py"},"max_stars_repo_name":{"kind":"string","value":"ethanjperez/allennlp"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e520993f16f0da7e2c40f6e44b8dc56338f46b57"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":24,"string":"24"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-09-16T00:10:54.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-09-08T19:31:51.000Z"},"max_issues_repo_path":{"kind":"string","value":"allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py"},"max_issues_repo_name":{"kind":"string","value":"ethanjperez/allennlp"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e520993f16f0da7e2c40f6e44b8dc56338f46b57"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py"},"max_forks_repo_name":{"kind":"string","value":"ethanjperez/allennlp"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e520993f16f0da7e2c40f6e44b8dc56338f46b57"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":7,"string":"7"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-16T02:37:31.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-09-01T06:06:17.000Z"},"content":{"kind":"string","value":"# pylint: disable=no-self-use,invalid-name\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport torch\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\nfrom allennlp.common.testing import AllenNlpTestCase\n\n\nclass TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):\n def setUp(self):\n super(TestBagOfWordCountsTokenEmbedder, self).setUp()\n self.vocab = Vocabulary()\n self.vocab.add_token_to_namespace(\"1\")\n 
self.vocab.add_token_to_namespace(\"2\")\n self.vocab.add_token_to_namespace(\"3\")\n self.vocab.add_token_to_namespace(\"4\")\n\n def test_forward_calculates_bow_properly(self):\n params = Params({})\n embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)\n numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])\n inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)\n embedder_output = embedder(inputs)\n numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])\n manual_output = torch.from_numpy(numpy_tensor).float()\n assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())\n\n def test_projects_properly(self):\n params = Params({\"projection_dim\": 50})\n embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)\n numpy_tensor = np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\", \"3\"]])\n inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)\n embedder_output = embedder(inputs)\n assert embedder_output.shape[1] == 50\n"},"avg_line_length":{"kind":"number","value":45.1891891892,"string":"45.189189"},"max_line_length":{"kind":"number","value":93,"string":"93"},"alphanum_fraction":{"kind":"number","value":0.7063397129,"string":"0.70634"},"count_classes":{"kind":"number","value":1349,"string":"1,349"},"score_classes":{"kind":"number","value":0.8068181818181818,"string":"0.806818"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":79,"string":"79"},"score_documentation":{"kind":"number","value":0.04724880382775119,"string":"0.047249"}}},{"rowIdx":3775,"cells":{"hexsha":{"kind":"string","value":"b998e92d411833a80bc4657adf0243c90d5c6084"},"size":{"kind":"number","value":5457,"string":"5,457"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"demo/demo_shapenet.py"},"max_stars_repo_name":{"kind":"string","value":"hengkaiz/meshrcnn"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"demo/demo_shapenet.py"},"max_issues_repo_name":{"kind":"string","value":"hengkaiz/meshrcnn"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"demo/demo_shapenet.py"},"max_forks_repo_name":{"kind":"string","value":"hengkaiz/meshrcnn"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import argparse\nimport logging\nimport multiprocessing as mp\nimport logging\nimport os\nfrom detectron2.evaluation import inference_context\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom detectron2.utils.collect_env import collect_env_info\nfrom detectron2.utils.logger import setup_logger\nfrom fvcore.common.file_io import PathManager\nfrom pathlib import Path\nfrom pytorch3d.io import save_obj\n\nfrom shapenet.config.config import get_shapenet_cfg\nfrom shapenet.data.utils import imagenet_preprocess\nfrom shapenet.modeling.heads import voxel_head\nfrom shapenet.modeling.mesh_arch import build_model\nfrom shapenet.utils.checkpoint import clean_state_dict\n\nimport torchvision.transforms as T\n\nimport glob\nfrom PIL import Image\n\nimport trimesh\nimport pyvista as pv\nimport pyacvd\nimport numpy as np\n\nlogger = logging.getLogger('demo')\n\ndef setup_cfgs(args):\n cfg = get_shapenet_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n return cfg\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/shapenet/voxmesh_R50.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--input\", help=\"A path to an input main folder\")\n # parser.add_argument(\"--output\", help=\"A directory to save output visualizations\")\n parser.add_argument(\n \"--focal-length\", type=float, default=20.0, help=\"Focal length for the image\"\n )\n parser.add_argument(\n \"--onlyhighest\", action=\"store_true\", help=\"will return only the highest scoring detection\"\n )\n\n parser.add_argument(\n \"opts\",\n help=\"Modify model config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\ndef resample_mesh(mesh, count=2466):\n pv_mesh = pv.wrap(mesh)\n # logger.info('Original mesh:')\n # print(pv_mesh)\n \n clus = pyacvd.Clustering(pv_mesh)\n clus.subdivide(3)\n clus.cluster(count)\n\n # remesh\n remesh = clus.create_mesh()\n\n # verts = remesh.points\n # faces = remesh.faces.reshape((-1, 4))[:, 1:]\n \n return remesh\n\nif __name__ == \"__main__\":\n mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n\n device = torch.device(\"cuda:%d\" % 0)\n\n logger = setup_logger(name=\"demo shapenet\")\n logger.info(\"Arguments: \" + str(args))\n\n cfg = setup_cfgs(args)\n\n # load checkpoing and build model\n if cfg.MODEL.CHECKPOINT == \"\":\n raise ValueError(\"Invalid checkpoing provided\")\n logger.info(\"Loading model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT))\n cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))\n state_dict = clean_state_dict(cp[\"best_states\"][\"model\"])\n model = build_model(cfg)\n model.load_state_dict(state_dict)\n logger.info(\"Model loaded\")\n model.to(device)\n\n sub_dir = sorted(os.listdir(args.input))\n\n for sd in sub_dir:\n curr_path = os.path.join(args.input, sd)\n images = glob.glob(curr_path + \"/*.png\")\n \n for img_dir in images:\n # load image\n transform = [T.ToTensor()]\n transform.append(imagenet_preprocess())\n transform = T.Compose(transform)\n \n im_name = img_dir.split(\"/\")[-1].split(\".\")[0]\n\n with PathManager.open(img_dir, \"rb\") as f:\n img = 
Image.open(f).convert(\"RGB\")\n\n img = transform(img)\n img = img[None, :, :, :]\n img = img.to(device)\n\n with inference_context(model):\n img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)\n\n # Save voxel_score\n voxel_odir = os.path.join(curr_path, \"voxel_score\")\n if not Path(voxel_odir).is_dir():\n os.mkdir(voxel_odir)\n\n voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name))\n torch.save(voxel_scores, voxel_file)\n\n # Save image features\n imgfeat_odir = os.path.join(curr_path, \"img_feat\")\n if not Path(imgfeat_odir).is_dir():\n os.mkdir(imgfeat_odir)\n\n img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name))\n torch.save(img_feats, img_feat_file)\n\n # Save P\n p_odir = os.path.join(curr_path, \"P\")\n if not Path(p_odir).is_dir():\n os.mkdir(p_odir)\n\n p_file = os.path.join(p_odir, \"%s.pt\" % (im_name))\n torch.save(P, p_file)\n\n # Save cubified mesh\n cmesh_odir = os.path.join(curr_path, \"cube_mesh\")\n if not Path(cmesh_odir).is_dir():\n os.mkdir(cmesh_odir)\n\n cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name))\n c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)\n save_obj(cube_mesh_file, c_verts, c_faces)\n\n # Save predicted mesh\n mesh_odir = os.path.join(curr_path, \"final_mesh\")\n if not Path(mesh_odir).is_dir():\n os.mkdir(mesh_odir)\n\n save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name))\n verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)\n save_obj(save_file, verts, faces)\n logger.info(\"Predictions saved for %s/%s\" % (curr_path.split('/')[-1], im_name))\n"},"avg_line_length":{"kind":"number","value":31.9122807018,"string":"31.912281"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.6377130291,"string":"0.637713"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":956,"string":"956"},"score_documentation":{"kind":"number","value":0.17518783214220268,"string":"0.175188"}}},{"rowIdx":3776,"cells":{"hexsha":{"kind":"string","value":"b998f6994cf6e83702b501cd661bb37f91b59317"},"size":{"kind":"number","value":7854,"string":"7,854"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"proglearn/voters.py"},"max_stars_repo_name":{"kind":"string","value":"jshin13/progressive-learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"proglearn/voters.py"},"max_issues_repo_name":{"kind":"string","value":"jshin13/progressive-learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"proglearn/voters.py"},"max_forks_repo_name":{"kind":"string","value":"jshin13/progressive-learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\n\n# from sklearn.ensemble import BaggingClassifier\n# from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.utils.validation import (\n check_X_y,\n check_array,\n NotFittedError,\n)\n\nfrom sklearn.utils.multiclass import check_classification_targets, type_of_target\n\nfrom .base import BaseVoter\n\nfrom tensorflow import keras\nfrom keras import layers\n\n\nclass TreeClassificationVoter(BaseVoter):\n def __init__(self, finite_sample_correction=False):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n self.finite_sample_correction = finite_sample_correction\n self._is_fitted = False\n self.multilabel = False\n\n def fit(self, X, y):\n \"\"\"\n Doc strings here.\n \"\"\"\n check_classification_targets(y)\n\n if type_of_target(y) == 'multilabel-indicator':\n # Fit multilabel binary task.\n self.multilabel = True\n return self.fit_multilabel(X, y)\n\n num_classes = len(np.unique(y))\n self.uniform_posterior = np.ones(num_classes) / num_classes\n\n self.leaf_to_posterior = {}\n\n for leaf_id in np.unique(X):\n idxs_in_leaf = np.where(X == leaf_id)[0]\n class_counts = [\n len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)\n ]\n posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))\n\n if self.finite_sample_correction:\n posteriors = self._finite_sample_correction(\n posteriors, len(idxs_in_leaf), len(np.unique(y))\n )\n\n self.leaf_to_posterior[leaf_id] = posteriors\n\n self._is_fitted = True\n\n return self\n\n def fit_multilabel(self, X, y):\n\n num_labels = y.shape[1]\n self.uniform_posterior = y.sum(axis=0) / len(y)\n \n # Each posterior is now a num_labels size vector or binary probabilities.\n self.leaf_to_posterior = {}\n\n for leaf_id in np.unique(X):\n idxs_in_leaf = np.where(X == leaf_id)[0]\n label_counts = [\n len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)\n ]\n posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))\n\n # TODO: multilabel finite sample correction.\n\n self.leaf_to_posterior[leaf_id] = posteriors\n\n self._is_fitted = True\n\n return self\n\n def vote(self, X):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n if not self.is_fitted():\n msg = (\n \"This %(name)s instance is not fitted yet. 
Call 'fit' with \"\n \"appropriate arguments before using this voter.\"\n )\n raise NotFittedError(msg % {\"name\": type(self).__name__})\n\n votes_per_example = []\n for x in X:\n if x in list(self.leaf_to_posterior.keys()):\n votes_per_example.append(self.leaf_to_posterior[x])\n else:\n votes_per_example.append(self.uniform_posterior)\n return np.array(votes_per_example)\n\n def is_fitted(self):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n return self._is_fitted\n\n def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):\n \"\"\"\n encourage posteriors to approach uniform when there is low data\n \"\"\"\n correction_constant = 1 / (num_classes * num_points_in_partition)\n\n zero_posterior_idxs = np.where(posteriors == 0)[0]\n posteriors[zero_posterior_idxs] = correction_constant\n\n posteriors /= sum(posteriors)\n\n return posteriors\n\n\nclass KNNClassificationVoter(BaseVoter):\n def __init__(self, k, kwargs={}):\n \"\"\"\n Doc strings here.\n \"\"\"\n self._is_fitted = False\n self.k = k\n self.kwargs = kwargs\n\n def fit(self, X, y):\n \"\"\"\n Doc strings here.\n \"\"\"\n X, y = check_X_y(X, y)\n self.knn = KNeighborsClassifier(self.k, **self.kwargs)\n self.knn.fit(X, y)\n self._is_fitted = True\n\n return self\n\n def vote(self, X):\n \"\"\"\n Doc strings here.\n \"\"\"\n if not self.is_fitted():\n msg = (\n \"This %(name)s instance is not fitted yet. Call 'fit' with \"\n \"appropriate arguments before using this transformer.\"\n )\n raise NotFittedError(msg % {\"name\": type(self).__name__})\n\n X = check_array(X)\n return self.knn.predict_proba(X)\n\n def is_fitted(self):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n return self._is_fitted\n\n\nclass NeuralRegressionVoter(BaseVoter):\n def __init__(\n self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False,\n ):\n \"\"\"\n Doc strings here.\n \"\"\"\n self.validation_split = validation_split\n self.loss = loss\n self.epochs = epochs\n self.lr = lr\n self.verbose = verbose\n self._is_fitted = False\n\n def fit(self, X, y):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n X, y = check_X_y(X, y)\n\n self.voter = keras.Sequential()\n self.voter.add(\n layers.Dense(\n 1,\n activation=\"linear\",\n input_shape=(X.shape[1],),\n name=\"transform_to_vote\",\n )\n )\n self.voter.compile(\n loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr)\n )\n self.voter.fit(\n X,\n y,\n epochs=self.epochs,\n callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")],\n verbose=self.verbose,\n validation_split=self.validation_split,\n shuffle=True,\n )\n\n self._is_fitted = True\n return self\n\n def vote(self, X):\n \"\"\"\n Doc strings here.\n \"\"\"\n if not self.is_fitted():\n msg = (\n \"This %(name)s instance is not fitted yet. 
Call 'fit' with \"\n \"appropriate arguments before using this transformer.\"\n )\n raise NotFittedError(msg % {\"name\": type(self).__name__})\n\n X = check_array(X)\n return self.voter.predict(X)\n\n def is_fitted(self):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n return self._is_fitted\n\n\nclass TreeRegressionVoter(BaseVoter):\n def __init__(self):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n self._is_fitted = False\n\n\n def fit(self, X, y):\n \"\"\"\n Doc strings here.\n \"\"\"\n \n \n self.leaf_to_yhat = {}\n self.global_yhat = np.mean(y)\n\n for leaf_id in np.unique(X):\n idxs_in_leaf = np.where(X == leaf_id)[0]\n # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)]\n self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))\n\n self._is_fitted = True\n\n return self\n\n\n def vote(self, X):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n if not self.is_fitted():\n msg = (\n \"This %(name)s instance is not fitted yet. Call 'fit' with \"\n \"appropriate arguments before using this voter.\"\n )\n raise NotFittedError(msg % {\"name\": type(self).__name__})\n \n votes_per_example = []\n for x in X:\n if x in list(self.leaf_to_yhat.keys()):\n votes_per_example.append(self.leaf_to_yhat[x])\n else:\n votes_per_example.append(self.global_yhat)\n return np.array(votes_per_example)\n\n\n def is_fitted(self):\n \"\"\"\n Doc strings here.\n \"\"\"\n\n return self._is_fitted"},"avg_line_length":{"kind":"number","value":26.8054607509,"string":"26.805461"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.5562770563,"string":"0.556277"},"count_classes":{"kind":"number","value":7412,"string":"7,412"},"score_classes":{"kind":"number","value":0.9437229437229437,"string":"0.943723"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1610,"string":"1,610"},"score_documentation":{"kind":"number","value":0.20499108734402852,"string":"0.204991"}}},{"rowIdx":3777,"cells":{"hexsha":{"kind":"string","value":"b999024320e50c940c8f273e6f0536039450c829"},"size":{"kind":"number","value":1949,"string":"1,949"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"config.py"},"max_stars_repo_name":{"kind":"string","value":"jhattat/photoBooth"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f6fe3ab418bb917792e10349597401ed34078766"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"config.py"},"max_issues_repo_name":{"kind":"string","value":"jhattat/photoBooth"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f6fe3ab418bb917792e10349597401ed34078766"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"config.py"},"max_forks_repo_name":{"kind":"string","value":"jhattat/photoBooth"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f6fe3ab418bb917792e10349597401ed34078766"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Tumblr Setup\n# Replace the values with your information\n# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info\nconsumer_key='ShbOqI5zErQXOL7Qnd5XduXpY9XQUlBgJDpCLeq1OYqnY2KzSt' #replace with your key\nconsumer_secret='ulZradkbJGksjpl2MMlshAfJgEW6TNeSdZucykqeTp8jvwgnhu' #replace with your secret code\noath_token='uUcBuvJx8yhk4HJIZ39sfcYo0W4VoqcvUetR2EwcI5Sn8SLgNt' #replace with your oath token\noath_secret='iNJlqQJI6dwhAGmdNbMtD9u7VazmX2Rk5uW0fuIozIEjk97lz4' #replace with your oath secret code\ntumblr_blog = 'soniaetjeremie' # replace with your tumblr account name without .tumblr.com\ntagsForTumblr = \"photobooth\" # change to tags you want, separated with commas\n\n#Config settings to change behavior of photo booth\nmonitor_w = 800 # width of the display monitor\nmonitor_h = 480 # height of the display monitor\nfile_path = '/home/pi/photobooth/pics/' # path to save images\nclear_on_startup = False # True will clear previously stored photos as the program launches. False will leave all previous photos.\ndebounce = 0.3 # how long to debounce the button. Add more time if the button triggers too many times.\npost_online = True # True to upload images. False to store locally only.\ncapture_count_pics = True # if true, show a photo count between taking photos. If false, do not. False is faster.\nmake_gifs = True # True to make an animated gif. False to post 4 jpgs into one post.\nhi_res_pics = False # True to save high res pics from camera.\n # If also uploading, the program will also convert each image to a smaller image before making the gif.\n # False to first capture low res pics. False is faster.\n # Careful, each photo costs against your daily Tumblr upload max.\ncamera_iso = 400 # adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400. 
Dark is 800 max.\n # available options: 100, 200, 320, 400, 500, 640, 800"},"avg_line_length":{"kind":"number","value":77.96,"string":"77.96"},"max_line_length":{"kind":"number","value":130,"string":"130"},"alphanum_fraction":{"kind":"number","value":0.758337609,"string":"0.758338"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1561,"string":"1,561"},"score_documentation":{"kind":"number","value":0.8009235505387378,"string":"0.800924"}}},{"rowIdx":3778,"cells":{"hexsha":{"kind":"string","value":"b9991711cbe60fa3459b0fb4cb64d023132610e8"},"size":{"kind":"number","value":896,"string":"896"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"accounts/admin.py"},"max_stars_repo_name":{"kind":"string","value":"GuilhemN/site-interludes"},"max_stars_repo_head_hexsha":{"kind":"string","value":"69873810d5b0168aa57277ba51805117e6c53874"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"accounts/admin.py"},"max_issues_repo_name":{"kind":"string","value":"GuilhemN/site-interludes"},"max_issues_repo_head_hexsha":{"kind":"string","value":"69873810d5b0168aa57277ba51805117e6c53874"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-03-24T10:41:10.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-24T12:39:30.000Z"},"max_forks_repo_path":{"kind":"string","value":"accounts/admin.py"},"max_forks_repo_name":{"kind":"string","value":"GuilhemN/site-interludes"},"max_forks_repo_head_hexsha":{"kind":"string","value":"69873810d5b0168aa57277ba51805117e6c53874"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-03-23T22:30:12.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-23T22:30:12.000Z"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom django.contrib.auth.models import Group\n\nfrom accounts.models import EmailUser\nfrom shared.admin import ExportCsvMixin\n\n# no need for groups - we only have regular users and superusers\nadmin.site.unregister(Group)\n\n@admin.register(EmailUser)\nclass EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):\n\t\"\"\"option d'affichage des activités dans la vue django admin\"\"\"\n\tfilename = \"export_utilisateurs.csv\"\n\tlist_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",)\n\tlist_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",)\n\tfields = (\"email\", \"last_name\", \"first_name\", 
\"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\",\n\t\t(\"date_joined\", \"last_login\",),\n\t)\n\tordering = (\"last_name\", \"first_name\")\n\treadonly_fields = (\"date_joined\", \"last_login\",)\n\tlist_per_page = 200\n\n\tcsv_export_exclude = [\"password\"]\n"},"avg_line_length":{"kind":"number","value":37.3333333333,"string":"37.333333"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.7533482143,"string":"0.753348"},"count_classes":{"kind":"number","value":616,"string":"616"},"score_classes":{"kind":"number","value":0.6867335562987736,"string":"0.686734"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":643,"string":"643"},"score_decorators":{"kind":"number","value":0.7168338907469343,"string":"0.716834"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":432,"string":"432"},"score_documentation":{"kind":"number","value":0.4816053511705686,"string":"0.481605"}}},{"rowIdx":3779,"cells":{"hexsha":{"kind":"string","value":"b9993aa0d134cc4869bfe49fd1ecd6dc8c6b0b96"},"size":{"kind":"number","value":23640,"string":"23,640"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"rotkehlchen/exchanges/coinbase.py"},"max_stars_repo_name":{"kind":"string","value":"vnavascues/rotki"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"rotkehlchen/exchanges/coinbase.py"},"max_issues_repo_name":{"kind":"string","value":"vnavascues/rotki"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"rotkehlchen/exchanges/coinbase.py"},"max_forks_repo_name":{"kind":"string","value":"vnavascues/rotki"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import hashlib\nimport hmac\nimport logging\nimport time\nfrom json.decoder import JSONDecodeError\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple\nfrom urllib.parse import urlencode\n\nimport requests\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.assets.converters import asset_from_coinbase\nfrom rotkehlchen.constants.misc import ZERO\nfrom rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset\nfrom rotkehlchen.exchanges.data_structures import AssetMovement, Trade\nfrom 
rotkehlchen.exchanges.exchange import ExchangeInterface\nfrom rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.serialization.deserialize import (\n deserialize_asset_amount,\n deserialize_asset_amount_force_positive,\n deserialize_asset_movement_category,\n deserialize_fee,\n deserialize_timestamp_from_date,\n deserialize_trade_type,\n)\nfrom rotkehlchen.typing import (\n ApiKey,\n ApiSecret,\n AssetMovementCategory,\n Fee,\n Location,\n Price,\n Timestamp,\n TradePair,\n)\nfrom rotkehlchen.user_messages import MessagesAggregator\nfrom rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock\nfrom rotkehlchen.utils.serialization import rlk_jsonloads_dict\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\ndef trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]:\n \"\"\"Turns a coinbase transaction into a rotkehlchen Trade.\n\n https://developers.coinbase.com/api/v2?python#buys\n If the coinbase transaction is not a trade related transaction returns None\n\n Throws:\n - UnknownAsset due to Asset instantiation\n - DeserializationError due to unexpected format of dict entries\n - KeyError due to dict entires missing an expected entry\n \"\"\"\n\n if raw_trade['status'] != 'completed':\n # We only want to deal with completed trades\n return None\n\n if raw_trade['instant']:\n raw_time = raw_trade['created_at']\n else:\n raw_time = raw_trade['payout_at']\n timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase')\n trade_type = deserialize_trade_type(raw_trade['resource'])\n tx_amount = deserialize_asset_amount(raw_trade['amount']['amount'])\n tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp)\n native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount'])\n native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp)\n # in coinbase you are buying/selling tx_asset for native_asset\n pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}')\n amount = tx_amount\n # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency\n rate = Price(native_amount / tx_amount)\n fee_amount = deserialize_fee(raw_trade['fee']['amount'])\n fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp)\n\n return Trade(\n timestamp=timestamp,\n location=Location.COINBASE,\n pair=pair,\n trade_type=trade_type,\n amount=amount,\n rate=rate,\n fee=fee_amount,\n fee_currency=fee_asset,\n link=str(raw_trade['id']),\n )\n\n\nclass CoinbasePermissionError(Exception):\n pass\n\n\nclass Coinbase(ExchangeInterface):\n\n def __init__(\n self,\n api_key: ApiKey,\n secret: ApiSecret,\n database: 'DBHandler',\n msg_aggregator: MessagesAggregator,\n ):\n super(Coinbase, self).__init__('coinbase', api_key, secret, database)\n self.apiversion = 'v2'\n self.base_uri = 'https://api.coinbase.com'\n self.msg_aggregator = msg_aggregator\n\n def first_connection(self) -> None:\n self.first_connection_made = True\n\n def _validate_single_api_key_action(\n self,\n method_str: str,\n ignore_pagination: bool = False,\n ) -> Tuple[Optional[List[Any]], str]:\n try:\n result = self._api_query(method_str, ignore_pagination=ignore_pagination)\n\n except CoinbasePermissionError as e:\n error = str(e)\n if 
'transactions' in method_str:\n permission = 'wallet:transactions:read'\n elif 'buys' in method_str:\n permission = 'wallet:buys:read'\n elif 'sells' in method_str:\n permission = 'wallet:sells:read'\n elif 'deposits' in method_str:\n permission = 'wallet:deposits:read'\n elif 'withdrawals' in method_str:\n permission = 'wallet:withdrawals:read'\n elif 'trades' in method_str:\n permission = 'wallet:trades:read'\n # the accounts elif should be at the end since the word appears\n # in other endpoints\n elif 'accounts' in method_str:\n permission = 'wallet:accounts:read'\n else:\n raise AssertionError(\n f'Unexpected coinbase method {method_str} at API key validation',\n )\n msg = (\n f'Provided Coinbase API key needs to have {permission} permission activated. '\n f'Please log into your coinbase account and set all required permissions: '\n f'wallet:accounts:read, wallet:transactions:read, '\n f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, '\n f'wallet:deposits:read, wallet:trades:read'\n )\n\n return None, msg\n except RemoteError as e:\n error = str(e)\n if 'invalid signature' in error:\n return None, 'Failed to authenticate with the Provided API key/secret'\n elif 'invalid api key' in error:\n return None, 'Provided API Key is invalid'\n else:\n # any other remote error\n return None, error\n\n return result, ''\n\n def validate_api_key(self) -> Tuple[bool, str]:\n \"\"\"Validates that the Coinbase API key is good for usage in Rotki\n\n Makes sure that the following permissions are given to the key:\n wallet:accounts:read, wallet:transactions:read,\n wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,\n wallet:deposits:read\n \"\"\"\n result, msg = self._validate_single_api_key_action('accounts')\n if result is None:\n return False, msg\n\n # now get the account ids\n account_ids = self._get_account_ids(result)\n if len(account_ids) != 0:\n # and now try to get all transactions of an account to see if that's possible\n method = f'accounts/{account_ids[0]}/transactions'\n result, msg = self._validate_single_api_key_action(method)\n if result is None:\n return False, msg\n\n # and now try to get all buys of an account to see if that's possible\n method = f'accounts/{account_ids[0]}/buys'\n result, msg = self._validate_single_api_key_action(method)\n if result is None:\n return False, msg\n\n # and now try to get all sells of an account to see if that's possible\n method = f'accounts/{account_ids[0]}/sells'\n result, msg = self._validate_single_api_key_action(method)\n if result is None:\n return False, msg\n\n # and now try to get all deposits of an account to see if that's possible\n method = f'accounts/{account_ids[0]}/deposits'\n result, msg = self._validate_single_api_key_action(method)\n if result is None:\n return False, msg\n\n # and now try to get all withdrawals of an account to see if that's possible\n method = f'accounts/{account_ids[0]}/withdrawals'\n result, msg = self._validate_single_api_key_action(method)\n if result is None:\n return False, msg\n\n return True, ''\n\n def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]:\n \"\"\"Gets the account ids out of the accounts response\"\"\"\n account_ids = []\n for account_data in accounts:\n if 'id' not in account_data:\n self.msg_aggregator.add_error(\n 'Found coinbase account entry without an id key. Skipping it. 
',\n )\n continue\n\n if not isinstance(account_data['id'], str):\n self.msg_aggregator.add_error(\n f'Found coinbase account entry with a non string id: '\n f'{account_data[\"id\"]}. Skipping it. ',\n )\n continue\n\n account_ids.append(account_data['id'])\n\n return account_ids\n\n def _api_query(\n self,\n endpoint: str,\n options: Optional[Dict[str, Any]] = None,\n pagination_next_uri: str = None,\n ignore_pagination: bool = False,\n ) -> List[Any]:\n \"\"\"Performs a coinbase API Query for endpoint\n\n You can optionally provide extra arguments to the endpoint via the options argument.\n If this is an ongoing paginating call then provide pagination_next_uri.\n If you want just the first results then set ignore_pagination to True.\n \"\"\"\n request_verb = \"GET\"\n if pagination_next_uri:\n request_url = pagination_next_uri\n else:\n request_url = f'/{self.apiversion}/{endpoint}'\n if options:\n request_url += urlencode(options)\n\n timestamp = str(int(time.time()))\n message = timestamp + request_verb + request_url\n\n signature = hmac.new(\n self.secret,\n message.encode(),\n hashlib.sha256,\n ).hexdigest()\n log.debug('Coinbase API query', request_url=request_url)\n\n self.session.headers.update({\n 'CB-ACCESS-SIGN': signature,\n 'CB-ACCESS-TIMESTAMP': timestamp,\n 'CB-ACCESS-KEY': self.api_key,\n # This is needed to guarantee the up to the given date\n # API version response.\n 'CB-VERSION': '2019-08-25',\n })\n full_url = self.base_uri + request_url\n try:\n response = self.session.get(full_url)\n except requests.exceptions.RequestException as e:\n raise RemoteError(f'Coinbase API request failed due to {str(e)}')\n\n if response.status_code == 403:\n raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')\n\n if response.status_code != 200:\n raise RemoteError(\n f'Coinbase query {full_url} responded with error status code: '\n f'{response.status_code} and text: {response.text}',\n )\n\n try:\n json_ret = rlk_jsonloads_dict(response.text)\n except JSONDecodeError:\n raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}')\n\n if 'data' not in json_ret:\n raise RemoteError(f'Coinbase json response does not contain data: {response.text}')\n\n final_data = json_ret['data']\n\n # If we got pagination and this is the first query, gather all the subsequent queries\n if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:\n if 'next_uri' not in json_ret['pagination']:\n raise RemoteError('Coinbase json response contained no \"next_uri\" key')\n\n next_uri = json_ret['pagination']['next_uri']\n if not next_uri:\n # As per the docs: https://developers.coinbase.com/api/v2?python#pagination\n # once we get an empty next_uri we are done\n return final_data\n\n additional_data = self._api_query(\n endpoint=endpoint,\n options=options,\n pagination_next_uri=next_uri,\n )\n final_data.extend(additional_data)\n\n return final_data\n\n @protect_with_lock()\n @cache_response_timewise()\n def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:\n try:\n resp = self._api_query('accounts')\n except RemoteError as e:\n msg = (\n 'Coinbase API request failed. Could not reach coinbase due '\n 'to {}'.format(e)\n )\n log.error(msg)\n return None, msg\n\n returned_balances: Dict[Asset, Dict[str, Any]] = {}\n for account in resp:\n try:\n if not account['balance']:\n continue\n\n amount = deserialize_asset_amount(account['balance']['amount'])\n\n # ignore empty balances. 
Coinbase returns zero balances for everything\n # a user does not own\n if amount == ZERO:\n continue\n\n asset = asset_from_coinbase(account['balance']['currency'])\n\n try:\n usd_price = Inquirer().find_usd_price(asset=asset)\n except RemoteError as e:\n self.msg_aggregator.add_error(\n f'Error processing coinbase balance entry due to inability to '\n f'query USD price: {str(e)}. Skipping balance entry',\n )\n continue\n\n if asset in returned_balances:\n amount = returned_balances[asset]['amount'] + amount\n else:\n returned_balances[asset] = {}\n\n returned_balances[asset]['amount'] = amount\n usd_value = returned_balances[asset]['amount'] * usd_price\n returned_balances[asset]['usd_value'] = usd_value\n\n except UnknownAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase balance result with unknown asset '\n f'{e.asset_name}. Ignoring it.',\n )\n continue\n except UnsupportedAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase balance result with unsupported asset '\n f'{e.asset_name}. Ignoring it.',\n )\n continue\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_error(\n 'Error processing a coinbase account balance. Check logs '\n 'for details. Ignoring it.',\n )\n log.error(\n 'Error processing a coinbase account balance',\n account_balance=account,\n error=msg,\n )\n continue\n\n return returned_balances, ''\n\n def query_online_trade_history(\n self,\n start_ts: Timestamp,\n end_ts: Timestamp,\n ) -> List[Trade]:\n account_data = self._api_query('accounts')\n # now get the account ids and for each one query buys/sells\n # Looking at coinbase's API no other type of transaction\n # https://developers.coinbase.com/api/v2?python#list-transactions\n # consitutes something that Rotkehlchen would need to return in query_trade_history\n account_ids = self._get_account_ids(account_data)\n\n raw_data = []\n for account_id in account_ids:\n raw_data.extend(self._api_query(f'accounts/{account_id}/buys'))\n raw_data.extend(self._api_query(f'accounts/{account_id}/sells'))\n log.debug('coinbase buys/sells history result', results_num=len(raw_data))\n\n trades = []\n for raw_trade in raw_data:\n try:\n trade = trade_from_coinbase(raw_trade)\n except UnknownAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase transaction with unknown asset '\n f'{e.asset_name}. Ignoring it.',\n )\n continue\n except UnsupportedAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase trade with unsupported asset '\n f'{e.asset_name}. Ignoring it.',\n )\n continue\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_error(\n 'Error processing a coinbase trade. Check logs '\n 'for details. 
Ignoring it.',\n )\n log.error(\n 'Error processing a coinbase trade',\n trade=raw_trade,\n error=msg,\n )\n continue\n\n # limit coinbase trades in the requested time range here since there\n # is no argument in the API call\n if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts:\n trades.append(trade)\n\n return trades\n\n def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]:\n \"\"\"Processes a single deposit/withdrawal from coinbase and deserializes it\n\n Can log error/warning and return None if something went wrong at deserialization\n \"\"\"\n try:\n if raw_data['status'] != 'completed':\n return None\n\n payout_date = raw_data.get('payout_at', None)\n if payout_date:\n timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase')\n else:\n timestamp = deserialize_timestamp_from_date(\n raw_data['created_at'],\n 'iso8601',\n 'coinbase',\n )\n\n # Only get address/transaction id for \"send\" type of transactions\n address = None\n transaction_id = None\n # movement_category: Union[Literal['deposit'], Literal['withdrawal']]\n if 'type' in raw_data:\n # Then this should be a \"send\" which is the way Coinbase uses to send\n # crypto outside of the exchange\n # https://developers.coinbase.com/api/v2?python#transaction-resource\n msg = 'Non \"send\" type found in coinbase deposit/withdrawal processing'\n assert raw_data['type'] == 'send', msg\n movement_category = AssetMovementCategory.WITHDRAWAL\n # Can't see the fee being charged from the \"send\" resource\n\n amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])\n asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)\n # Fees dont appear in the docs but from an experiment of sending ETH\n # to an address from coinbase there is the network fee in the response\n fee = Fee(ZERO)\n raw_network = raw_data.get('network', None)\n if raw_network:\n raw_fee = raw_network.get('transaction_fee', None)\n\n if raw_fee:\n # Since this is a withdrawal the fee should be the same as the moved asset\n if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp):\n # If not we set ZERO fee and ignore\n log.error(\n f'In a coinbase withdrawal of {asset.identifier} the fee'\n f'is denoted in {raw_fee[\"currency\"]}',\n )\n else:\n fee = deserialize_fee(raw_fee['amount'])\n\n if 'network' in raw_data:\n transaction_id = get_key_if_has_val(raw_data['network'], 'hash')\n if 'to' in raw_data:\n address = deserialize_asset_movement_address(raw_data['to'], 'address', asset)\n else:\n movement_category = deserialize_asset_movement_category(raw_data['resource'])\n amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])\n fee = deserialize_fee(raw_data['fee']['amount'])\n asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)\n\n return AssetMovement(\n location=Location.COINBASE,\n category=movement_category,\n address=address,\n transaction_id=transaction_id,\n timestamp=timestamp,\n asset=asset,\n amount=amount,\n fee_asset=asset,\n fee=fee,\n link=str(raw_data['id']),\n )\n except UnknownAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase deposit/withdrawal with unknown asset '\n f'{e.asset_name}. Ignoring it.',\n )\n except UnsupportedAsset as e:\n self.msg_aggregator.add_warning(\n f'Found coinbase deposit/withdrawal with unsupported asset '\n f'{e.asset_name}. 
Ignoring it.',\n )\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_error(\n 'Unexpected data encountered during deserialization of a coinbase '\n 'asset movement. Check logs for details and open a bug report.',\n )\n log.error(\n f'Unexpected data encountered during deserialization of coinbase '\n f'asset_movement {raw_data}. Error was: {str(e)}',\n )\n\n return None\n\n def query_online_deposits_withdrawals(\n self,\n start_ts: Timestamp,\n end_ts: Timestamp,\n ) -> List[AssetMovement]:\n account_data = self._api_query('accounts')\n account_ids = self._get_account_ids(account_data)\n raw_data = []\n for account_id in account_ids:\n raw_data.extend(self._api_query(f'accounts/{account_id}/deposits'))\n raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals'))\n # also get transactions to get the \"sends\", which in Coinbase is the\n # way to send Crypto out of the exchange\n txs = self._api_query(f'accounts/{account_id}/transactions')\n for tx in txs:\n if 'type' not in tx:\n continue\n if tx['type'] == 'send':\n raw_data.append(tx)\n\n log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data))\n\n movements = []\n for raw_movement in raw_data:\n movement = self._deserialize_asset_movement(raw_movement)\n # limit coinbase deposit/withdrawals in the requested time range\n # here since there is no argument in the API call\n if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts:\n movements.append(movement)\n\n return movements\n"},"avg_line_length":{"kind":"number","value":40.6884681583,"string":"40.688468"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.5920473773,"string":"0.592047"},"count_classes":{"kind":"number","value":20182,"string":"20,182"},"score_classes":{"kind":"number","value":0.8537225042301184,"string":"0.853723"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":2906,"string":"2,906"},"score_decorators":{"kind":"number","value":0.12292724196277496,"string":"0.122927"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":7542,"string":"7,542"},"score_documentation":{"kind":"number","value":0.3190355329949239,"string":"0.319036"}}},{"rowIdx":3780,"cells":{"hexsha":{"kind":"string","value":"b9994eb6b47f29e07dc9f474ab82878fdc8ae029"},"size":{"kind":"number","value":3533,"string":"3,533"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"lib/python3.7/site-packages/ldap/controls/deref.py"},"max_stars_repo_name":{"kind":"string","value":"aonrobot/MSC-thug-auth-provider"},"max_stars_repo_head_hexsha":{"kind":"string","value":"aef37ef5a000586b8502cc536244f31e08b9c2db"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-06-21T11:51:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-06-21T11:51:26.000Z"},"max_issues_repo_path":{"kind":"string","value":"lib/python3.7/site-packages/ldap/controls/deref.py"},"max_issues_repo_name":{"kind":"string","value":"aonrobot/MSC-thug-auth-provider"},"max_issues_repo_head_hexsha":{"kind":"string","value":"aef37ef5a000586b8502cc536244f31e08b9c2db"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":13,"string":"13"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-07-03T21:28:31.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-26T10:42:05.000Z"},"max_forks_repo_path":{"kind":"string","value":"lib/python3.7/site-packages/ldap/controls/deref.py"},"max_forks_repo_name":{"kind":"string","value":"aonrobot/MSC-thug-auth-provider"},"max_forks_repo_head_hexsha":{"kind":"string","value":"aef37ef5a000586b8502cc536244f31e08b9c2db"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-02-11T09:34:39.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-11-10T14:41:32.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nldap.controls.deref - classes for\n(see https://tools.ietf.org/html/draft-masarati-ldap-deref)\n\nSee https://www.python-ldap.org/ for project details.\n\"\"\"\n\n__all__ = [\n 'DEREF_CONTROL_OID',\n 'DereferenceControl',\n]\n\nimport ldap.controls\nfrom ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS\n\nimport pyasn1_modules.rfc2251\nfrom pyasn1.type import namedtype,univ,tag\nfrom pyasn1.codec.ber import encoder,decoder\nfrom pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue\n\n\nDEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'\n\n\n# Request types\n#---------------------------------------------------------------------------\n\n# For compatibility with ASN.1 declaration in I-D\nAttributeList = AttributeDescriptionList\n\nclass DerefSpec(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType(\n 'derefAttr',\n AttributeDescription()\n ),\n namedtype.NamedType(\n 'attributes',\n AttributeList()\n ),\n )\n\nclass DerefSpecs(univ.SequenceOf):\n componentType = DerefSpec()\n\n# Response types\n#---------------------------------------------------------------------------\n\n\nclass AttributeValues(univ.SetOf):\n componentType = AttributeValue()\n\n\nclass PartialAttribute(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('type', AttributeDescription()),\n namedtype.NamedType('vals', AttributeValues()),\n )\n\n\nclass PartialAttributeList(univ.SequenceOf):\n componentType = PartialAttribute()\n tagSet = univ.Sequence.tagSet.tagImplicitly(\n tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)\n )\n\n\nclass DerefRes(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('derefAttr', AttributeDescription()),\n namedtype.NamedType('derefVal', LDAPDN()),\n namedtype.OptionalNamedType('attrVals', PartialAttributeList()),\n )\n\n\nclass DerefResultControlValue(univ.SequenceOf):\n 
componentType = DerefRes()\n\n\nclass DereferenceControl(LDAPControl):\n controlType = DEREF_CONTROL_OID\n\n def __init__(self,criticality=False,derefSpecs=None):\n LDAPControl.__init__(self,self.controlType,criticality)\n self.derefSpecs = derefSpecs or {}\n\n def _derefSpecs(self):\n deref_specs = DerefSpecs()\n i = 0\n for deref_attr,deref_attribute_names in self.derefSpecs.items():\n deref_spec = DerefSpec()\n deref_attributes = AttributeList()\n for j in range(len(deref_attribute_names)):\n deref_attributes.setComponentByPosition(j,deref_attribute_names[j])\n deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr))\n deref_spec.setComponentByName('attributes',deref_attributes)\n deref_specs.setComponentByPosition(i,deref_spec)\n i += 1\n return deref_specs\n\n def encodeControlValue(self):\n return encoder.encode(self._derefSpecs())\n\n def decodeControlValue(self,encodedControlValue):\n decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue())\n self.derefRes = {}\n for deref_res in decodedValue:\n deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2]\n partial_attrs_dict = {\n str(tv[0]): [str(v) for v in tv[1]]\n for tv in deref_vals or []\n }\n try:\n self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict))\n except KeyError:\n self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)]\n\nKNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl\n"},"avg_line_length":{"kind":"number","value":29.4416666667,"string":"29.441667"},"max_line_length":{"kind":"number","value":102,"string":"102"},"alphanum_fraction":{"kind":"number","value":0.7115765638,"string":"0.711577"},"count_classes":{"kind":"number","value":2552,"string":"2,552"},"score_classes":{"kind":"number","value":0.7223322954995754,"string":"0.722332"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":566,"string":"566"},"score_documentation":{"kind":"number","value":0.1602037928106425,"string":"0.160204"}}},{"rowIdx":3781,"cells":{"hexsha":{"kind":"string","value":"b999aec7c34874ef90e0f30812ac97217ce90cca"},"size":{"kind":"number","value":3145,"string":"3,145"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"emoji.py"},"max_stars_repo_name":{"kind":"string","value":"notagoat/Deepmoji"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1ab922306c3647f9c7ea98caa2660a53b18fe4b6"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-19T20:09:00.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-03-19T20:09:00.000Z"},"max_issues_repo_path":{"kind":"string","value":"emoji.py"},"max_issues_repo_name":{"kind":"string","value":"notagoat/Deepmoji"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1ab922306c3647f9c7ea98caa2660a53b18fe4b6"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"emoji.py"},"max_forks_repo_name":{"kind":"string","value":"notagoat/Deepmoji"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1ab922306c3647f9c7ea98caa2660a53b18fe4b6"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import requests\nimport urllib.request\nimport os.path\nimport shutil\nimport csv\n\ndef main():\n with open(\"data.csv\") as i: #Open the data.csv file\n instances = i.readlines() #Write them into memory\n instances = [x.strip() for x in instances] #Strip any weird issues from writing\n \n instances.sort() #Sort them alphabetically\n setup(instances) #Run setup to create all the necessary files and subfolders\n count = len(instances) #Get the count just for fun\n i = 0 \n try:\n for name in instances:\n try:\n i += 1\n print(\"-----!\"+name+\"!-----\")\n print(str(i) +\" of \" + str(count) + \" remaining!\")\n fetch(name) #Run the fetching code\n except Exception as e:\n print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc\n pass #Don't stop the beat\n except Exception as e:\n print(\"Instance Error\")\n print(e)\n pass\n clone(instances) #Clone all of them into one big folder for ease of access\n\n\ndef fetch(name):\n r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data\n path = \"emoji/%s/\" % name #Because of the clone function we know all of these folders will exist\n try:\n for emoji in r.json(): #Emoji = the json code from the request\n try:\n if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it exists. \n pass\n else:\n if \"ms_\" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them). 
#Mutant standard is huge and common\n #print(emoji['shortcode'] + \" found!\")\n emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json\n open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an image in the filesystem\n except Exception as e:\n print(\"Did not get: \" + emoji['url']) #If somethings fucky throw a nice error then keep going.\n print(e)\n pass\n except Exception as e:\n print(e)\n\ndef setup(instances): \n if (os.path.isdir(\"emoji/\")): #Check to see if emoji/ exists \n pass \n else:\n os.mkdir(\"emoji/\") #make it if it doesnt\n\n for name in instances:\n if (os.path.isdir(\"emoji/%s/\"%name)):\n pass\n else: os.mkdir(\"emoji/%s/\"%name)\n \n if (os.path.isdir(\"emoji/all\")):\n pass\n else:\n os.mkdir(\"emoji/all\")\n\ndef clone(instances):\n for name in instances:\n print(\"Copying emoji for: %s\"% name)\n path = \"emoji/%s/\" % name\n files = os.listdir(path)\n for name in files: #This gets alll files\n try: \n shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the all folder\n except Exception as e:\n print(e)\n pass\n \n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":37.4404761905,"string":"37.440476"},"max_line_length":{"kind":"number","value":151,"string":"151"},"alphanum_fraction":{"kind":"number","value":0.5742448331,"string":"0.574245"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1256,"string":"1,256"},"score_documentation":{"kind":"number","value":0.3993640699523052,"string":"0.399364"}}},{"rowIdx":3782,"cells":{"hexsha":{"kind":"string","value":"b99add86778172fa08bc930ed29f8f26a88ec4d3"},"size":{"kind":"number","value":943,"string":"943"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"String/640.One Edit Distance/Solution_DP.py"},"max_stars_repo_name":{"kind":"string","value":"Zhenye-Na/LxxxCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"afd79d790d0a7495d75e6650f80adaa99bd0ff07"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":12,"string":"12"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-05-04T04:21:27.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-02T07:06:57.000Z"},"max_issues_repo_path":{"kind":"string","value":"String/640.One Edit Distance/Solution_DP.py"},"max_issues_repo_name":{"kind":"string","value":"Zhenye-Na/LxxxCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"afd79d790d0a7495d75e6650f80adaa99bd0ff07"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-07-24T18:43:53.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-07-24T18:43:53.000Z"},"max_forks_repo_path":{"kind":"string","value":"String/640.One Edit Distance/Solution_DP.py"},"max_forks_repo_name":{"kind":"string","value":"Zhenye-Na/LxxxCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"afd79d790d0a7495d75e6650f80adaa99bd0ff07"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":10,"string":"10"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-07-01T04:03:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-09T03:57:37.000Z"},"content":{"kind":"string","value":"class Solution:\n \"\"\"\n @param s: a string\n @param t: a string\n @return: true if they are both one edit distance apart or false\n \"\"\"\n def isOneEditDistance(self, s, t):\n # write your code here\n if s == t:\n return False\n\n if abs(len(s) - len(t)) > 1:\n return False\n\n n, m = len(s), len(t)\n f = [[0] * (m + 1) for _ in range(2)]\n\n for j in range(m + 1):\n f[0][j] = j\n\n for i in range(1, n + 1):\n f[i % 2][0] = i\n for j in range(1, m + 1):\n if s[i - 1] == t[j - 1]:\n f[i % 2][j] = min(f[(i - 1) % 2][j - 1],\n f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)\n else:\n f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,\n f[(i - 1) % 2][j] + 1, f[i % 2][j - 1] + 1)\n\n return f[n % 2][m] == 1\n"},"avg_line_length":{"kind":"number","value":29.46875,"string":"29.46875"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.3414634146,"string":"0.341463"},"count_classes":{"kind":"number","value":942,"string":"942"},"score_classes":{"kind":"number","value":0.9989395546129375,"string":"0.99894"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":147,"string":"147"},"score_documentation":{"kind":"number","value":0.15588547189819724,"string":"0.155885"}}},{"rowIdx":3783,"cells":{"hexsha":{"kind":"string","value":"b99b1d1ec6004cbeeb91e19410dbbb1e2216c45e"},"size":{"kind":"number","value":1478,"string":"1,478"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"nsq/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"jehiah/pynsq"},"max_stars_repo_head_hexsha":{"kind":"string","value":"899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-05-25T00:23:53.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2015-05-25T00:23:53.000Z"},"max_issues_repo_path":{"kind":"string","value":"nsq/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"barkinet/pynsq"},"max_issues_repo_head_hexsha":{"kind":"string","value":"899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"nsq/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"barkinet/pynsq"},"max_forks_repo_head_hexsha":{"kind":"string","value":"899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from __future__ import absolute_import\n\nimport signal\nimport tornado.ioloop\nimport logging\n\nfrom .protocol import (\n Error,\n unpack_response,\n decode_message,\n valid_topic_name,\n valid_channel_name,\n identify,\n subscribe,\n ready,\n finish,\n touch,\n requeue,\n nop,\n pub,\n mpub,\n FRAME_TYPE_RESPONSE,\n FRAME_TYPE_ERROR,\n FRAME_TYPE_MESSAGE,\n)\nfrom .message import Message\nfrom .backoff_timer import BackoffTimer\nfrom .sync import SyncConn\nfrom .async import AsyncConn\nfrom .reader import Reader\nfrom .legacy_reader import LegacyReader\nfrom .writer import Writer\nfrom .version import __version__ # NOQA\n\n\ndef _handle_term_signal(sig_num, frame):\n logging.getLogger(__name__).info(\n 'TERM Signal handler called with signal %r', sig_num)\n tornado.ioloop.IOLoop.instance().stop()\n\n\ndef run():\n \"\"\"\n Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`\n \"\"\"\n signal.signal(signal.SIGTERM, _handle_term_signal)\n tornado.ioloop.IOLoop.instance().start()\n\n\n__author__ = \"Matt Reiferson \"\n__all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\",\n \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\",\n \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\",\n \"valid_topic_name\", \"valid_channel_name\",\n \"FRAME_TYPE_RESPONSE\", \"FRAME_TYPE_ERROR\", 
\"FRAME_TYPE_MESSAGE\"]\n"},"avg_line_length":{"kind":"number","value":26.3928571429,"string":"26.392857"},"max_line_length":{"kind":"number","value":96,"string":"96"},"alphanum_fraction":{"kind":"number","value":0.6962110961,"string":"0.696211"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":450,"string":"450"},"score_documentation":{"kind":"number","value":0.3044654939106901,"string":"0.304465"}}},{"rowIdx":3784,"cells":{"hexsha":{"kind":"string","value":"b99b2da4f2ac2ca37d2ded7c72545cef1cab4228"},"size":{"kind":"number","value":5356,"string":"5,356"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"scripts/summaryPlot.py"},"max_stars_repo_name":{"kind":"string","value":"Hespian/ParFastKer"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5ddf1685c0652e73c889cfc64c7ec1fd827f905c"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause","MIT"],"string":"[\n \"BSD-3-Clause\",\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-08-10T08:24:19.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-08-12T07:16:03.000Z"},"max_issues_repo_path":{"kind":"string","value":"scripts/summaryPlot.py"},"max_issues_repo_name":{"kind":"string","value":"Hespian/ParFastKer"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5ddf1685c0652e73c889cfc64c7ec1fd827f905c"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause","MIT"],"string":"[\n \"BSD-3-Clause\",\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"scripts/summaryPlot.py"},"max_forks_repo_name":{"kind":"string","value":"Hespian/ParFastKer"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5ddf1685c0652e73c889cfc64c7ec1fd827f905c"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause","MIT"],"string":"[\n \"BSD-3-Clause\",\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import get_data_ours\nimport get_data_akiba\nimport get_data_NearLinear\nimport get_data_LinearTime\nimport os\nimport matplotlib.pyplot as plt\n\n# graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"]\ngraphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"]\nlinearTimeDir = 
\"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\"\npartitioningDir = \"../../LinearTimeKernels/partitions\"\nourTimeDir = \"../../results/LinearTimeKernelsScalingAll\"\nnearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\"\nakibaDir = \"../../akiba_vertex_cover/results\"\n\ndef getOurTimeAndSizeSequential(graph):\n res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)\n result = dict()\n result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"]\n result[\"size\"] = res[\"sequential_quasikernel_size\"]\n return result\n\ndef getOurTimeAndSizeParallel(graph):\n res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)\n result = dict()\n result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"]\n result[\"size\"] = res[\"parallel_quasikernel_size\"]\n return result\n\ndef getAkibaTimeAndSize(graph):\n return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)\n\ndef getNearLinearTimeAndSize(graph):\n return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)\n\ndef getLinearTimeTimeAndSize(graph):\n return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)\n\ndef minProperty(graph, prop):\n oursequential = getOurTimeAndSizeSequential(graph)[prop]\n ourparallel = getOurTimeAndSizeParallel(graph)[prop]\n akiba = getAkibaTimeAndSize(graph)[prop]\n nearLinear = getNearLinearTimeAndSize(graph)[prop]\n linearTime = getLinearTimeTimeAndSize(graph)[prop]\n data = [oursequential, ourparallel, akiba, nearLinear, linearTime]\n # data = [oursequential, ourparallel, akiba, nearLinear]\n data = filter(lambda x : x >= 0, data)\n minimum = min(data)\n if minimum == 0:\n return 1\n return minimum\n\noursizeSequential = []\nourtimeSequential = []\noursizeParallel = []\nourtimeParallel = []\nakibasize = []\nakibatime = []\nnearlinearsize = []\nnearlineartime = []\nlineartimesize = []\nlineartimetime = []\n\nfor graph in graphs:\n minsize = getAkibaTimeAndSize(graph)[\"size\"]\n mintime = getAkibaTimeAndSize(graph)[\"time\"]\n\n oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize\n # print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"]))\n ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime\n if oss > 0 and ots > 0:\n oursizeSequential.append(oss)\n ourtimeSequential.append(ots)\n\n osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize\n # print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"]))\n otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime\n if osp > 0 and otp > 0:\n oursizeParallel.append(osp)\n ourtimeParallel.append(otp)\n\n aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize\n akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime\n if aks > 0 and akt > 0:\n akibasize.append(aks)\n akibatime.append(akt)\n\n nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize\n nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime\n if nls > 0 and nlt > 0:\n nearlinearsize.append(nls)\n nearlineartime.append(nlt)\n\n lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize\n ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime\n if nls > 0 and nlt > 0:\n lineartimesize.append(lts)\n lineartimetime.append(ltt)\n\n# print(\"We\")\n# print(oursizeSequential)\n# print(ourtimeSequential)\n\n# print(\"We (parallel)\")\n# print(oursizeParallel)\n# 
print(ourtimeParallel)\n\n# print(\"Akiba\")\n# print(akibasize)\n# print(akibatime)\n\n# print(\"NearLinear\")\n# print(nearlinearsize)\n# print(nearlineartime)\n\n# print(\"LinearTime\")\n# print(lineartimesize)\n# print(lineartimetime)\n\nplt.rc('font', size=14)\nfig = plt.figure(figsize=(3.2, 2.4))\nax = fig.add_subplot(1,1,1)\nplt.title(\"Summary\", fontsize=14)\nax.set_yscale(\"log\")\nax.set_xscale(\"log\")\nax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\")\nax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\")\n# ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\")\nax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\")\nax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\")\nplt.xlabel(\"time / VCSolver time\")\nplt.ylabel(\"size / VCSolver size\")\nplt.xticks([0.0001, 0.01, 1])\nax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\")\nplt.savefig(\"summaryplot_vcsolver_baseline.pdf\", bbox_inches=\"tight\")\n# plt.show()\n"},"avg_line_length":{"kind":"number","value":39.0948905109,"string":"39.094891"},"max_line_length":{"kind":"number","value":234,"string":"234"},"alphanum_fraction":{"kind":"number","value":0.7201269604,"string":"0.720127"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1795,"string":"1,795"},"score_documentation":{"kind":"number","value":0.33513816280806574,"string":"0.335138"}}},{"rowIdx":3785,"cells":{"hexsha":{"kind":"string","value":"b99c2305beceab596bedee8ad399b6faa3216070"},"size":{"kind":"number","value":3587,"string":"3,587"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"bouncer/cli/base.py"},"max_stars_repo_name":{"kind":"string","value":"lrnt/git-bouncer"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3015e11a5d2c90986124de73bf1fd0f5a8563360"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"bouncer/cli/base.py"},"max_issues_repo_name":{"kind":"string","value":"lrnt/git-bouncer"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3015e11a5d2c90986124de73bf1fd0f5a8563360"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"bouncer/cli/base.py"},"max_forks_repo_name":{"kind":"string","value":"lrnt/git-bouncer"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3015e11a5d2c90986124de73bf1fd0f5a8563360"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import configparser\nimport sys\nimport inspect\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\n\ndef opt(*args, **kwargs):\n def decorator(method):\n if not hasattr(method, 'options'):\n method.options = []\n method.options.append((args, kwargs))\n return method\n return decorator\n\n\ndef noopts(method):\n method.options = []\n return method\n\n\nclass HelpMixin(object):\n def help(self):\n print('available commands:')\n for name, command in self.commands.items():\n description = str(command.__doc__ or '').strip('\\n')\n print(' ', name.ljust(10), description)\n return 1\n\n\nclass SubParser(HelpMixin):\n def __init__(self, commands):\n self.commands = self._commands(commands)\n\n def _commands(self, commands):\n prog = sys.argv[0]\n result = {}\n for cmd in commands:\n name = getattr(cmd, '_name', None)\n if not name:\n continue\n cmd.prog = prog\n result[name] = cmd\n return result\n\n def run(self):\n args = sys.argv[1:]\n for index, arg in enumerate(args):\n if arg in self.commands.keys():\n args.pop(index)\n return self.commands[arg](args)\n return self.help()\n\n\nclass Command(HelpMixin):\n def __init__(self):\n self.global_options = []\n self.commands = self._methods_with_opts()\n\n def _methods_with_opts(self):\n result = {}\n for name in dir(self):\n if name.startswith('__'):\n continue\n method = getattr(self, name)\n if not hasattr(method, 'options'):\n continue\n result[name] = method\n return result\n\n def _parse_args(self, method, args):\n prog = '{} {} {}'.format(self.prog, self._name, method.__name__)\n parser = ArgumentParser(\n prog=prog,\n description=(method.__doc__ or ''),\n formatter_class=RawDescriptionHelpFormatter\n )\n\n for opt in method.options + self.global_options:\n parser.add_argument(*opt[0], **opt[1])\n\n return vars(parser.parse_args(args))\n\n def _call_method(self, method, args):\n # Find out which arguments the method expects\n expected_args, _, _, _ = inspect.getargspec(method)\n expected_args.remove('self')\n\n self_args = self._parse_args(method, args)\n method_args = {}\n\n # Get the expected method arguments, ignore rest\n for name in expected_args:\n if name in args:\n method_args[name] = args.pop(name)\n\n # Put rest of the arguments in self\n for name, value in self_args.items():\n setattr(self, name, value)\n\n self.pre_command()\n\n return method(**method_args)\n\n def __call__(self, args):\n for index, arg in enumerate(args):\n if arg in self.commands.keys():\n args.pop(index)\n return self._call_method(self.commands[arg], args)\n return self.help()\n\n def opt(self, *args, **kwargs):\n self.global_options.append((args, kwargs))\n\n def pre_command(self):\n pass\n\n\nclass BaseCommand(Command):\n def __init__(self):\n super(BaseCommand, self).__init__()\n self.opt(\n '-c', dest='config_path', help='Configuration file',\n default='~/.test.conf'\n 
)\n\n def pre_command(self):\n config = configparser.ConfigParser()\n config.read(self.config_path)\n print(config.sections())\n"},"avg_line_length":{"kind":"number","value":27.3816793893,"string":"27.381679"},"max_line_length":{"kind":"number","value":72,"string":"72"},"alphanum_fraction":{"kind":"number","value":0.5781990521,"string":"0.578199"},"count_classes":{"kind":"number","value":3180,"string":"3,180"},"score_classes":{"kind":"number","value":0.8865347086701979,"string":"0.886535"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":257,"string":"257"},"score_documentation":{"kind":"number","value":0.07164761639252858,"string":"0.071648"}}},{"rowIdx":3786,"cells":{"hexsha":{"kind":"string","value":"b99c4d9fb380e0635cac67dff2a6820b500bf34f"},"size":{"kind":"number","value":13728,"string":"13,728"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Examples/ExampleCodes_ssccoorriinngg.py"},"max_stars_repo_name":{"kind":"string","value":"MahdadJafarzadeh/ssccoorriinngg"},"max_stars_repo_head_hexsha":{"kind":"string","value":"63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-04-28T12:50:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-05-13T08:52:42.000Z"},"max_issues_repo_path":{"kind":"string","value":"Examples/ExampleCodes_ssccoorriinngg.py"},"max_issues_repo_name":{"kind":"string","value":"MahdadJafarzadeh/ssccoorriinngg"},"max_issues_repo_head_hexsha":{"kind":"string","value":"63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Examples/ExampleCodes_ssccoorriinngg.py"},"max_forks_repo_name":{"kind":"string","value":"MahdadJafarzadeh/ssccoorriinngg"},"max_forks_repo_head_hexsha":{"kind":"string","value":"63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-07-14T13:48:56.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-07-14T13:48:56.000Z"},"content":{"kind":"string","value":"#%% Import libs\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\nimport h5py\nimport time\nfrom ssccoorriinngg import ssccoorriinngg\nimport numpy as np\nfrom sklearn.model_selection import cross_validate\n\n\n#%% Picking featureset of interest and apply classification\nObject = ssccoorriinngg(filename='', channel='', fs = 200, T = 30)\npath = 
'C:/PhD/ML in depression/'\nfname = 'feat42_Fp1-Fp2_train'\nfeats = 'featureset'\nlabels = 'labels'\n# Train set\nX_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)\n# Test set\nfname = 'feat42_Fp1-Fp2_test'\nX_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)\n\n# Define the scoring criteria:\nscoring = {'accuracy' : make_scorer(accuracy_score), \n 'precision' : make_scorer(precision_score),\n 'recall' : make_scorer(recall_score), \n 'f1_score' : make_scorer(f1_score)} \n# Cross-validation using logistic Random Forests\ny_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10)\nAcc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)\n# Cross-validation using XGBoost\ny_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, \n cv = 10 , max_depth=3, learning_rate=.1)\nAcc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb)\n#%% Outcome measures\n# Defien required metrics here:\nMetrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']\nfor metric in Metrics:\n #RF\n r1 = results_RF[metric].mean()\n std1 = results_RF[metric].std()\n print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')\n # xgb\n r2 = results_xgb[metric].mean()\n std2 = results_xgb[metric].std()\n print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}')\n # SVM\n r3 = results_SVM[metric].mean()\n std3 = results_SVM[metric].std()\n print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}')\n # LR\n r4 = results_LR[metric].mean()\n std4 = results_LR[metric].std()\n print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}')\n#%% Applying Randomized grid search to find the best config. 
of RF\n\nBestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,\n estimator = RandomForestClassifier(), scoring = scoring,\n n_estimators = [int(x) for x in np.arange(10, 500, 20)],\n max_features = ['log2', 'sqrt'],\n max_depth = [int(x) for x in np.arange(10, 100, 30)],\n min_samples_split = [2, 5, 10],\n min_samples_leaf = [1, 2, 4],\n bootstrap = [True, False],\n n_iter = 100, cv = 10)\n\n#%% Test feature selection methods ##\n# PCA\nPCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)\n# Boruta\nranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7)\n# Lasso\nFeat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)\n#ANOVA\nFeat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80)\n#Recruisive\nranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20)\n#### NOW TEST CLASSIFIERS WITH SELECTED FEATS\nresults_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10)\n\n\n#%% Example save featureset\npath = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'\nObject.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')\n\n#%% Example load features:\nX, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',\n fname = 'feat42_N3_fp2-M1', \n feats = 'featureset', \n labels = 'labels')\n\n#%% Combining some REM and SWS epochs\n\nObject.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',\n ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1',\n REM_fname = 'tr90_fp1-M2_fp2-M1',\n saving = True, fname_save = 'tr90_N3&REM_fp1-M2')\n\n#%% How to save some results?\ndirectory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' \nfname = '42feats_N3'\nwith h5py.File((directory+fname + '.h5'), 'w') as wf:\n # Accuracies\n dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])\n dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy'])\n dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])\n dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy'])\n # Precision\n dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])\n dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])\n dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])\n dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision'])\n # Recall\n dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])\n dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])\n dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall'])\n dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall'])\n # f1-score\n dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score'])\n dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])\n dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])\n 
dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score'])\n\n#%% Extracting features from more than one channel:\ntic = time.time() \n ########### Central electrodes #############\nmain_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\"\nsave_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'\n\nfname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\")\nfname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\")\nch_C4 = 'C4-M1'\nch_C3 = 'C3-M2'\n\nObject_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)\nX_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() \nObject_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')\n \nObject_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)\nX_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() \nObject_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')\n\nObject_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)\nX_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() \nObject_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3')\n\nObject_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)\nX_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() \nObject_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3')\n\n\n ########### Occipital electrodes #############\nmain_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\"\nfname_O_N3 = (main_path+\"tr90_N3_O1-M2_O2-M1.h5\")\nfname_O_REM = (main_path+\"tr90_REM_O1-M2_O2-M1.h5\")\nch_O2 = 'O2-M1'\nch_O1 = 'O1-M2'\nObject_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)\nX_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction() \nObject_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM')\n \nObject_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30)\nX_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() \nObject_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM')\n\nObject_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)\nX_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() \nObject_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3')\n\nObject_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)\nX_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() \nObject_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')\n\n ########### Fp electrodes #############\nmain_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\"\nfname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\")\nfname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\")\nch_fp2 = 'fp2-M1'\nch_fp1 = 'fp1-M2'\nObject_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)\nX_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() \nObject_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')\n \nObject_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)\nX_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() \nObject_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')\n\nObject_fp1_N3 = 
ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)\nX_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() \nObject_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')\n\nObject_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)\nX_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() \nObject_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3')\ntoc = time.time()\nprint(f'time taken: {toc - tic}')\n########## Concatenate all features #########\n# RIGHT hemisphere - REM\nX_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))\nX_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))\n# RIGHT hemisphere - N3\nX_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3))\nX_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3))\n# LEFT hemisphere - REM\nX_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))\nX_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))\n# LEFT hemisphere - N3\nX_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))\nX_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))\n\n# Both sides - REM\nX_REM = np.column_stack((X_rh_REM, X_lh_REM))\n# Both sides - N3\nX_N3 = np.column_stack((X_rh_N3, X_lh_N3))\n# Combine SWS and REM\nX_SWS_REM = np.row_stack((X_N3, X_REM))\ny_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))\n# SAVE ALL COMBINATIONS\nObject = ML_Depression(filename='', channel='', fs = 200, T = 30)\n# one hemisphere\nObject.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')\nObject.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')\nObject.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3')\nObject.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')\n# Both hemisphere\nObject.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')\nObject.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')\n# Both hemispheres- SWS &REM combination\nObject.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM')\n\n#%% Load features from different brain regions, sleep stage and combine them\nObject = ML_Depression(filename='', channel='', fs = 200, T = 30)\npath = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'\nsave_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'\nfeats = 'featureset'\nlabels = 'labels'\n# Pick right hemisphere N3\nfname_rh_N3 = 'feat42_rh_N3'\nX_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)\n# Pick left hemisphere N3\nfname_lh_N3 = 'feat42_lh_N3'\nX_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)\n# Pick right hemisphere REM\nfname_rh_REM = 'feat42_rh_REM'\nX_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)\n# Pick LEFT hemisphere REM\nfname_lh_REM = 'feat42_lh_REM'\nX_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)\n# Combine them\nX_N3 = np.column_stack((X_rh_N3, X_lh_N3))\n\nX_REM = np.column_stack((X_rh_REM, X_lh_REM))\n# Save combination\nObject.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')\nObject.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename = 
'feat42_l&rh_REM')\n"},"avg_line_length":{"kind":"number","value":53.2093023256,"string":"53.209302"},"max_line_length":{"kind":"number","value":127,"string":"127"},"alphanum_fraction":{"kind":"number","value":0.682983683,"string":"0.682984"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":3709,"string":"3,709"},"score_documentation":{"kind":"number","value":0.27017773892773894,"string":"0.270178"}}},{"rowIdx":3787,"cells":{"hexsha":{"kind":"string","value":"b99d08420cae81be117acdda96af821aba38eea2"},"size":{"kind":"number","value":6891,"string":"6,891"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"igibson/examples/behavior/behavior_demo_collection.py"},"max_stars_repo_name":{"kind":"string","value":"suresh-guttikonda/iGibson"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a69e623058180146466cd52d4bb3c00d1facdacf"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"igibson/examples/behavior/behavior_demo_collection.py"},"max_issues_repo_name":{"kind":"string","value":"suresh-guttikonda/iGibson"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a69e623058180146466cd52d4bb3c00d1facdacf"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"igibson/examples/behavior/behavior_demo_collection.py"},"max_forks_repo_name":{"kind":"string","value":"suresh-guttikonda/iGibson"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a69e623058180146466cd52d4bb3c00d1facdacf"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nMain BEHAVIOR demo collection entrypoint\n\"\"\"\n\nimport argparse\nimport copy\nimport datetime\nimport os\n\nimport bddl\nimport numpy as np\n\nimport igibson\nfrom igibson.activity.activity_base import iGBEHAVIORActivityInstance\nfrom igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings\nfrom igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings\nfrom igibson.simulator import Simulator\nfrom igibson.utils.ig_logging import IGLogWriter\n\nPOST_TASK_STEPS = 200\nPHYSICS_WARMING_STEPS = 200\n\n\ndef parse_args():\n scene_choices = [\n \"Beechwood_0_int\",\n \"Beechwood_1_int\",\n \"Benevolence_0_int\",\n \"Benevolence_1_int\",\n \"Benevolence_2_int\",\n \"Ihlen_0_int\",\n \"Ihlen_1_int\",\n \"Merom_0_int\",\n \"Merom_1_int\",\n \"Pomaria_0_int\",\n 
\"Pomaria_1_int\",\n \"Pomaria_2_int\",\n \"Rs_int\",\n \"Wainscott_0_int\",\n \"Wainscott_1_int\",\n ]\n\n task_id_choices = [0, 1]\n parser = argparse.ArgumentParser(description=\"Run and collect an ATUS demo\")\n parser.add_argument(\n \"--task\", type=str, required=True, nargs=\"?\", help=\"Name of ATUS activity matching parent folder in bddl.\"\n )\n parser.add_argument(\n \"--task_id\",\n type=int,\n required=True,\n choices=task_id_choices,\n nargs=\"?\",\n help=\"BDDL integer ID, matching suffix of bddl.\",\n )\n parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of vr log\")\n parser.add_argument(\n \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson interactive scenes.\"\n )\n parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving logfiles.\")\n parser.add_argument(\n \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using pre-initialized scene caches.\"\n )\n parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling data.\")\n parser.add_argument(\n \"--no_vr\", action=\"store_true\", help=\"Whether to turn off VR recording and save random actions.\"\n )\n parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of steps to record before stopping.\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n bddl.set_backend(\"iGibson\")\n collect_demo(\n args.task,\n args.task_id,\n args.scene,\n args.vr_log_path,\n args.disable_save,\n args.max_steps,\n args.no_vr,\n args.disable_scene_cache,\n args.profile,\n )\n\n\ndef collect_demo(\n task,\n task_id,\n scene,\n vr_log_path=None,\n disable_save=False,\n max_steps=-1,\n no_vr=False,\n disable_scene_cache=False,\n profile=False,\n):\n # HDR files for PBR rendering\n hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\")\n hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\")\n light_modulation_map_filename = os.path.join(\n igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\"\n )\n background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\")\n\n # VR rendering settings\n vr_rendering_settings = MeshRendererSettings(\n optimized=True,\n fullscreen=False,\n env_texture_filename=hdr_texture,\n env_texture_filename2=hdr_texture2,\n env_texture_filename3=background_texture,\n light_modulation_map_filename=light_modulation_map_filename,\n enable_shadow=True,\n enable_pbr=True,\n msaa=False,\n light_dimming_factor=1.0,\n )\n\n # VR system settings\n mode = \"headless\" if no_vr else \"vr\"\n s = Simulator(\n mode=mode,\n rendering_settings=vr_rendering_settings,\n vr_settings=VrSettings(use_vr=True),\n physics_timestep=1 / 300.0,\n render_timestep=1 / 30.0,\n )\n igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)\n\n scene_kwargs = None\n online_sampling = True\n\n if not disable_scene_cache:\n scene_kwargs = {\n \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id),\n }\n online_sampling = False\n\n igbhvr_act_inst.initialize_simulator(\n simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling\n )\n vr_agent = igbhvr_act_inst.simulator.robots[0]\n\n if not no_vr:\n vr_cs = VrConditionSwitcher(\n igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, 
igbhvr_act_inst.iterate_instruction\n )\n\n log_writer = None\n if not disable_save:\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n if vr_log_path is None:\n vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp)\n log_writer = IGLogWriter(\n s,\n log_filepath=vr_log_path,\n task=igbhvr_act_inst,\n store_vr=False if no_vr else True,\n vr_robot=vr_agent,\n profiling_mode=profile,\n filter_objects=True,\n )\n log_writer.set_up_data_storage()\n\n satisfied_predicates_cached = {}\n post_task_steps = copy.deepcopy(POST_TASK_STEPS)\n physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)\n\n steps = 0\n while max_steps < 0 or steps < max_steps:\n igbhvr_act_inst.simulator.step(print_stats=profile)\n task_done, satisfied_predicates = igbhvr_act_inst.check_success()\n\n if no_vr:\n if steps < 2:\n action = np.zeros((28,))\n action[19] = 1\n action[27] = 1\n else:\n action = np.random.uniform(-0.01, 0.01, size=(28,))\n else:\n action = igbhvr_act_inst.simulator.gen_vr_robot_action()\n if steps < physics_warming_steps:\n action = np.zeros_like(action)\n\n vr_agent.update(action)\n\n if not no_vr:\n if satisfied_predicates != satisfied_predicates_cached:\n vr_cs.refresh_condition(switch=False)\n satisfied_predicates_cached = satisfied_predicates\n\n if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"):\n vr_cs.refresh_condition()\n\n if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"):\n vr_cs.toggle_show_state()\n\n if log_writer and not disable_save:\n log_writer.process_frame()\n\n if task_done:\n post_task_steps -= 1\n if post_task_steps == 0:\n break\n\n steps += 1\n\n if log_writer and not disable_save:\n log_writer.end_log_session()\n\n s.disconnect()\n\n\nif __name__ == \"__main__\":\n main()\n"},"avg_line_length":{"kind":"number","value":31.4657534247,"string":"31.465753"},"max_line_length":{"kind":"number","value":120,"string":"120"},"alphanum_fraction":{"kind":"number","value":0.6528805689,"string":"0.652881"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":1308,"string":"1,308"},"score_documentation":{"kind":"number","value":0.18981279930343928,"string":"0.189813"}}},{"rowIdx":3788,"cells":{"hexsha":{"kind":"string","value":"b99e3b0ee335439a781ae231769595415a1dc6ec"},"size":{"kind":"number","value":546,"string":"546"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"wagtail/wagtailadmin/menu.py"},"max_stars_repo_name":{"kind":"string","value":"digitalmarmalade/wagtail"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ac4d23172ff3f42746625630583b17d243fb9822"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-11-05T18:02:04.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2015-11-05T18:02:04.000Z"},"max_issues_repo_path":{"kind":"string","value":"wagtail/wagtailadmin/menu.py"},"max_issues_repo_name":{"kind":"string","value":"digitalmarmalade/wagtail"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ac4d23172ff3f42746625630583b17d243fb9822"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"wagtail/wagtailadmin/menu.py"},"max_forks_repo_name":{"kind":"string","value":"digitalmarmalade/wagtail"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ac4d23172ff3f42746625630583b17d243fb9822"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.utils.text import slugify\nfrom django.utils.html import format_html\n\n\nclass MenuItem(object):\n def __init__(self, label, url, name=None, classnames='', order=1000):\n self.label = label\n self.url = url\n self.classnames = classnames\n self.name = (name or slugify(unicode(label)))\n self.order = order\n\n def render_html(self):\n return format_html(\n u\"\"\"
<li class=\"menu-{0}\"><a href=\"{1}\" class=\"{2}\">{3}</a></li>
  • \"\"\",\n self.name, self.url, self.classnames, self.label)\n"},"avg_line_length":{"kind":"number","value":32.1176470588,"string":"32.117647"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.6117216117,"string":"0.611722"},"count_classes":{"kind":"number","value":463,"string":"463"},"score_classes":{"kind":"number","value":0.847985347985348,"string":"0.847985"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":68,"string":"68"},"score_documentation":{"kind":"number","value":0.12454212454212454,"string":"0.124542"}}},{"rowIdx":3789,"cells":{"hexsha":{"kind":"string","value":"b99ee5dfe9849188796ff8d2b024b524adedb8d2"},"size":{"kind":"number","value":1950,"string":"1,950"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"django_mfa/migrations/0001_initial.py"},"max_stars_repo_name":{"kind":"string","value":"timgates42/django-mfa"},"max_stars_repo_head_hexsha":{"kind":"string","value":"89eeb83f7da3ea24f205b40b13c7f9d33ea15b99"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"django_mfa/migrations/0001_initial.py"},"max_issues_repo_name":{"kind":"string","value":"timgates42/django-mfa"},"max_issues_repo_head_hexsha":{"kind":"string","value":"89eeb83f7da3ea24f205b40b13c7f9d33ea15b99"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"django_mfa/migrations/0001_initial.py"},"max_forks_repo_name":{"kind":"string","value":"timgates42/django-mfa"},"max_forks_repo_head_hexsha":{"kind":"string","value":"89eeb83f7da3ea24f205b40b13c7f9d33ea15b99"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Generated by Django 2.1.5 on 2019-03-26 11:35\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='U2FKey',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('last_used_at', models.DateTimeField(null=True)),\n ('public_key', models.TextField(unique=True)),\n ('key_handle', models.TextField()),\n ('app_id', models.TextField()),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', 
to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='UserOTP',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),\n ('secret_key', models.CharField(blank=True, max_length=100)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='UserRecoveryCodes',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('secret_code', models.CharField(max_length=10)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),\n ],\n ),\n ]\n"},"avg_line_length":{"kind":"number","value":41.4893617021,"string":"41.489362"},"max_line_length":{"kind":"number","value":143,"string":"143"},"alphanum_fraction":{"kind":"number","value":0.598974359,"string":"0.598974"},"count_classes":{"kind":"number","value":1791,"string":"1,791"},"score_classes":{"kind":"number","value":0.9184615384615384,"string":"0.918462"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":272,"string":"272"},"score_documentation":{"kind":"number","value":0.13948717948717948,"string":"0.139487"}}},{"rowIdx":3790,"cells":{"hexsha":{"kind":"string","value":"b99f21827c3ba7ccbcab4806c878cdacfa139e20"},"size":{"kind":"number","value":317,"string":"317"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"app/logger_example/main.py"},"max_stars_repo_name":{"kind":"string","value":"khanh-nguyen-code/my-collection"},"max_stars_repo_head_hexsha":{"kind":"string","value":"31581ef0b1dae67aafb1f4e64b9973a38cc01edf"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"app/logger_example/main.py"},"max_issues_repo_name":{"kind":"string","value":"khanh-nguyen-code/my-collection"},"max_issues_repo_head_hexsha":{"kind":"string","value":"31581ef0b1dae67aafb1f4e64b9973a38cc01edf"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"app/logger_example/main.py"},"max_forks_repo_name":{"kind":"string","value":"khanh-nguyen-code/my-collection"},"max_forks_repo_head_hexsha":{"kind":"string","value":"31581ef0b1dae67aafb1f4e64b9973a38cc01edf"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from my_collection import logger\n\nif __name__ == \"__main__\":\n 
logger.now().debug(\"debug1\")\n logger.now().debug(\"debug2\")\n logger.now().info(\"hello1\")\n logger.now().info(\"hello2\")\n logger.now().with_field(\"key\", \"val\").error(\"with field1\")\n logger.now().with_field(\"key\", \"val\").error(\"with field2\")\n"},"avg_line_length":{"kind":"number","value":31.7,"string":"31.7"},"max_line_length":{"kind":"number","value":62,"string":"62"},"alphanum_fraction":{"kind":"number","value":0.6466876972,"string":"0.646688"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":88,"string":"88"},"score_documentation":{"kind":"number","value":0.277602523659306,"string":"0.277603"}}},{"rowIdx":3791,"cells":{"hexsha":{"kind":"string","value":"b9a14f8cda479b51cbe9296c63d8ae7397078bc7"},"size":{"kind":"number","value":760,"string":"760"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"robotframework_iperf3/__main__.py"},"max_stars_repo_name":{"kind":"string","value":"scathaig/robotframework-iperf3"},"max_stars_repo_head_hexsha":{"kind":"string","value":"cfeeb3e265777403d7eb06fcfa6d69650f2a5e67"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"robotframework_iperf3/__main__.py"},"max_issues_repo_name":{"kind":"string","value":"scathaig/robotframework-iperf3"},"max_issues_repo_head_hexsha":{"kind":"string","value":"cfeeb3e265777403d7eb06fcfa6d69650f2a5e67"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"robotframework_iperf3/__main__.py"},"max_forks_repo_name":{"kind":"string","value":"scathaig/robotframework-iperf3"},"max_forks_repo_head_hexsha":{"kind":"string","value":"cfeeb3e265777403d7eb06fcfa6d69650f2a5e67"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import argparse\nfrom robotremoteserver import RobotRemoteServer\nfrom .iperf3 import Iperf3\n\n\nif __name__ == '__main__':\n\n # create commandline parser\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.prog = 'python3 -m robotframework_iperf3'\n\n # add parser options\n parser.add_argument(\n \"-a\",\n \"--address\",\n type=str,\n help=\"server listen address\",\n default='0.0.0.0')\n\n parser.add_argument(\n \"-p\",\n \"--port\",\n type=int,\n help=\"server listen port\",\n default=8270)\n\n args = parser.parse_args()\n\n server = RobotRemoteServer(\n 
Iperf3(),\n host=args.address,\n port=args.port\n )\n\n server.serve()\n"},"avg_line_length":{"kind":"number","value":21.1111111111,"string":"21.111111"},"max_line_length":{"kind":"number","value":92,"string":"92"},"alphanum_fraction":{"kind":"number","value":0.6197368421,"string":"0.619737"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":170,"string":"170"},"score_documentation":{"kind":"number","value":0.2236842105263158,"string":"0.223684"}}},{"rowIdx":3792,"cells":{"hexsha":{"kind":"string","value":"b9a1ae11b40a499e6f6854e1a273c2ff226ef650"},"size":{"kind":"number","value":692,"string":"692"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py"},"max_stars_repo_name":{"kind":"string","value":"ahmedengu/h2o-3"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":6098,"string":"6,098"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-05-22T02:46:12.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T16:54:51.000Z"},"max_issues_repo_path":{"kind":"string","value":"h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py"},"max_issues_repo_name":{"kind":"string","value":"ahmedengu/h2o-3"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":2517,"string":"2,517"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2015-05-23T02:10:54.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-30T17:03:39.000Z"},"max_forks_repo_path":{"kind":"string","value":"h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py"},"max_forks_repo_name":{"kind":"string","value":"ahmedengu/h2o-3"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2199,"string":"2,199"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2015-05-22T04:09:55.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-28T22:20:45.000Z"},"content":{"kind":"string","value":"df8.cbind(df9)\n\n# A B C D A0 B0 C0 D0\n# ----- ------ ------ ------ ------ ----- ----- -----\n# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86\n# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27\n# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25\n# 0.58 -0.516 -1.598 -1.346 0.711 
1.09 0.05 0.63 \n# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52\n# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09\n# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63\n# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42\n# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45\n# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05\n# \n# [100 rows x 8 columns]"},"avg_line_length":{"kind":"number","value":43.25,"string":"43.25"},"max_line_length":{"kind":"number","value":54,"string":"54"},"alphanum_fraction":{"kind":"number","value":0.460982659,"string":"0.460983"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":663,"string":"663"},"score_documentation":{"kind":"number","value":0.958092485549133,"string":"0.958092"}}},{"rowIdx":3793,"cells":{"hexsha":{"kind":"string","value":"b9a1dbb5125acea57356714e95e66c8e3a612e30"},"size":{"kind":"number","value":1101,"string":"1,101"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"FluentPython/dynamic_attr_and_prop/frozen_json.py"},"max_stars_repo_name":{"kind":"string","value":"xu6148152/Binea_Python_Project"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d943eb5f4685d08f080b372dcf1a7cbd5d63efed"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"FluentPython/dynamic_attr_and_prop/frozen_json.py"},"max_issues_repo_name":{"kind":"string","value":"xu6148152/Binea_Python_Project"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d943eb5f4685d08f080b372dcf1a7cbd5d63efed"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"FluentPython/dynamic_attr_and_prop/frozen_json.py"},"max_forks_repo_name":{"kind":"string","value":"xu6148152/Binea_Python_Project"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d943eb5f4685d08f080b372dcf1a7cbd5d63efed"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nfrom collections import abc\nfrom keyword import iskeyword\n\n\nclass FronzenJSON:\n def __init__(self, mapping):\n self._data = {}\n for key, value in mapping.items():\n if iskeyword(key):\n key += '_'\n # self._data = dict(mapping)\n self._data[key] = value\n\n def __getattr__(self, name):\n if hasattr(self._data, name):\n return getattr(self._data, name)\n else:\n # return FronzenJSON.build(self._data[name])\n return FronzenJSON(self._data[name])\n\n 
@classmethod\n def build(cls, obj):\n if isinstance(obj, abc.Mapping):\n return cls(obj)\n elif isinstance(obj, abc.MutableMapping):\n return [cls.build(item) for item in obj]\n else:\n return obj\n\n def __new__(cls, arg):\n if isinstance(arg, abc.Mapping):\n return super().__new__(cls)\n elif isinstance(arg, abc.MutableSequence):\n return [cls[item] for item in arg]\n else:\n return arg\n"},"avg_line_length":{"kind":"number","value":27.525,"string":"27.525"},"max_line_length":{"kind":"number","value":56,"string":"56"},"alphanum_fraction":{"kind":"number","value":0.5613079019,"string":"0.561308"},"count_classes":{"kind":"number","value":990,"string":"990"},"score_classes":{"kind":"number","value":0.8991825613079019,"string":"0.899183"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":246,"string":"246"},"score_decorators":{"kind":"number","value":0.22343324250681199,"string":"0.223433"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":122,"string":"122"},"score_documentation":{"kind":"number","value":0.11080835603996367,"string":"0.110808"}}},{"rowIdx":3794,"cells":{"hexsha":{"kind":"string","value":"b9a20089dfb3f5c8a3472d1f3be189af236d4d44"},"size":{"kind":"number","value":4062,"string":"4,062"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pomdp_problems/tag/models/transition_model.py"},"max_stars_repo_name":{"kind":"string","value":"Semanti1/pomdp_findit"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b96c1c06aab4b485fa005654cf6438ff63718083"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pomdp_problems/tag/models/transition_model.py"},"max_issues_repo_name":{"kind":"string","value":"Semanti1/pomdp_findit"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b96c1c06aab4b485fa005654cf6438ff63718083"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pomdp_problems/tag/models/transition_model.py"},"max_forks_repo_name":{"kind":"string","value":"Semanti1/pomdp_findit"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b96c1c06aab4b485fa005654cf6438ff63718083"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"The Tag problem. Implemented according to the paper `Anytime Point-Based\r\nApproximations for Large POMDPs `_.\r\n\r\nTransition model: the robot moves deterministically. The target's movement\r\n depends on the robot; With Pr=0.8 the target moves away from the robot,\r\n and with Pr=0.2, the target stays at the same place. 
The target never\r\n moves closer to the robot.\r\n\"\"\"\r\nimport copy\r\nimport pomdp_py\r\nimport pomdp_problems.util as util\r\nimport pomdp_problems.tag.constants as constants\r\nfrom pomdp_problems.tag.domain.action import *\r\n\r\nclass TagTransitionModel(pomdp_py.TransitionModel):\r\n\r\n def __init__(self,\r\n grid_map,\r\n target_motion_policy):\r\n self._grid_map = grid_map\r\n self.target_motion_policy = target_motion_policy\r\n\r\n @classmethod\r\n def if_move_by(cls, grid_map, position, action):\r\n if isinstance(action, MotionAction):\r\n dx, dy = action.motion\r\n next_position = (position[0] + dx,\r\n position[1] + dy)\r\n if grid_map.valid_pose(next_position):\r\n return next_position\r\n return position\r\n\r\n def probability(self, next_state, state, action, **kwargs):\r\n # Robot motion\r\n expected_robot_position = TagTransitionModel.if_move_by(self._grid_map,\r\n state.robot_position,\r\n action)\r\n if expected_robot_position != next_state.robot_position:\r\n return constants.EPSILON\r\n\r\n if isinstance(action, TagAction):\r\n if next_state.target_position == next_state.robot_position:\r\n if next_state.target_found:\r\n return 1.0 - constants.EPSILON\r\n else:\r\n return constants.EPSILON\r\n else:\r\n if next_state.target_found:\r\n return constants.EPSILON\r\n else:\r\n return 1.0 - constants.EPSILON\r\n\r\n # Target motion\r\n valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)\r\n return self.target_motion_policy.probability(next_state.target_position,\r\n state.target_position,\r\n state.robot_position,\r\n valid_target_motion_actions)\r\n \r\n def sample(self, state, action, argmax=False):\r\n # Robot motion\r\n next_state = copy.deepcopy(state)\r\n next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map,\r\n state.robot_position,\r\n action)\r\n\r\n # If Tag action\r\n if isinstance(action, TagAction):\r\n if not state.target_found:\r\n if state.robot_position == state.target_position:\r\n next_state.target_found = True\r\n return next_state\r\n\r\n # Target motion\r\n valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) \r\n if not argmax:\r\n next_state.target_position = self.target_motion_policy.random(state.robot_position,\r\n state.target_position,\r\n valid_target_motion_actions)\r\n else:\r\n next_state.target_position = self.target_motion_policy.mpe(state.robot_position,\r\n state.target_position,\r\n valid_target_motion_actions)\r\n return next_state\r\n\r\n def argmax(self, state, action, **kwargs):\r\n return self.sample(state, action, argmax=True)\r\n 
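# ---------------------------------------------------------------------------
# Illustrative sketch only -- not part of pomdp_py or of the file above.
# The docstring above describes the target-motion rule for the Tag problem:
# with Pr=0.8 the target moves away from the robot, with Pr=0.2 it stays
# where it is, and it never moves closer. A minimal, self-contained way to
# sample such a move on a 4-connected grid with Manhattan distance is shown
# below; the names (`sample_target_move`, `manhattan`) are hypothetical and
# chosen for illustration, not taken from the repository.
# ---------------------------------------------------------------------------
import random


def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])


def sample_target_move(target, robot, motions, pr_move=0.8):
    # Candidate positions that do not bring the target closer to the robot.
    candidates = [
        (target[0] + dx, target[1] + dy)
        for dx, dy in motions
        if manhattan((target[0] + dx, target[1] + dy), robot) >= manhattan(target, robot)
    ]
    # With Pr=0.2 (or when cornered) the target stays put.
    if not candidates or random.random() > pr_move:
        return target
    return random.choice(candidates)


# Example: the sampled move never reduces the distance to the robot at (0, 2).
print(sample_target_move((2, 2), (0, 2), [(-1, 0), (1, 0), (0, -1), (0, 1)]))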
\r\n"},"avg_line_length":{"kind":"number","value":45.6404494382,"string":"45.640449"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.5379123584,"string":"0.537912"},"count_classes":{"kind":"number","value":3466,"string":"3,466"},"score_classes":{"kind":"number","value":0.8532742491383555,"string":"0.853274"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":359,"string":"359"},"score_decorators":{"kind":"number","value":0.08838010832102412,"string":"0.08838"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":494,"string":"494"},"score_documentation":{"kind":"number","value":0.12161496799606106,"string":"0.121615"}}},{"rowIdx":3795,"cells":{"hexsha":{"kind":"string","value":"b9a21ff5a8c4fcb07930580d031f6847ecfaed43"},"size":{"kind":"number","value":4731,"string":"4,731"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"packit/fedpkg.py"},"max_stars_repo_name":{"kind":"string","value":"bocekm/packit"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b5da23c0fa3f205537551b9ed212d8f77d00d705"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"packit/fedpkg.py"},"max_issues_repo_name":{"kind":"string","value":"bocekm/packit"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b5da23c0fa3f205537551b9ed212d8f77d00d705"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"packit/fedpkg.py"},"max_forks_repo_name":{"kind":"string","value":"bocekm/packit"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b5da23c0fa3f205537551b9ed212d8f77d00d705"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# MIT License\n#\n# Copyright (c) 2019 Red Hat, Inc.\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom packit.exceptions import PackitCommandFailedError\n\nfrom packit.utils import commands # so we can mock utils\nfrom packit.utils.logging import logger\n\n\nclass FedPKG:\n \"\"\"\n Part of the code is from release-bot:\n\n https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py\n \"\"\"\n\n def __init__(\n self, fas_username: str = None, directory: str = None, stage: bool = False\n ):\n self.fas_username = fas_username\n self.directory = directory\n self.stage = stage\n self.fedpkg_exec = \"fedpkg-stage\" if stage else \"fedpkg\"\n\n def __repr__(self):\n return (\n \"FedPKG(\"\n f\"fas_username='{self.fas_username}', \"\n f\"directory='{self.directory}', \"\n f\"stage='{self.stage}')\"\n )\n\n def new_sources(self, sources=\"\", fail=True):\n if not Path(self.directory).is_dir():\n raise Exception(\"Cannot access fedpkg repository:\")\n\n return commands.run_command_remote(\n cmd=[self.fedpkg_exec, \"new-sources\", sources],\n cwd=self.directory,\n error_message=\"Adding new sources failed:\",\n fail=fail,\n )\n\n def build(\n self,\n scratch: bool = False,\n nowait: bool = False,\n koji_target: Optional[str] = None,\n srpm_path: Optional[Path] = None,\n ):\n \"\"\"\n build in koji\n\n :param scratch: scratch (temporary) build or not?\n :param nowait: False == wait for the build to finish\n :param koji_target: koji target to build in (`koji list-targets`)\n :param srpm_path: use selected SRPM for build, not dist-git repo & ref\n :return:\n \"\"\"\n cmd = [self.fedpkg_exec, \"build\"]\n if scratch:\n cmd.append(\"--scratch\")\n if nowait:\n cmd.append(\"--nowait\")\n if koji_target:\n cmd += [\"--target\", koji_target]\n if srpm_path:\n cmd += [\"--srpm\", str(srpm_path)]\n\n try:\n commands.run_command_remote(\n cmd=cmd,\n cwd=self.directory,\n error_message=\"Submission of build to koji failed.\",\n fail=True,\n )\n\n except PackitCommandFailedError as ex:\n # fail on the fedpkg side, the build is triggered\n if (\n \"watch_tasks() got an unexpected keyword argument 'ki_handler'\"\n in ex.stderr_output\n ):\n logger.info(\n \"The 'fedpkg build' command crashed which is a known issue: \"\n \"the build is submitted in koji anyway.\"\n )\n logger.debug(ex.stdout_output)\n\n else:\n raise\n\n def clone(self, package_name: str, target_path: str, anonymous: bool = False):\n \"\"\"\n clone a dist-git repo; this has to be done in current env\n b/c we don't have the keytab in sandbox\n \"\"\"\n cmd = [self.fedpkg_exec]\n if self.fas_username:\n cmd += [\"--user\", self.fas_username]\n cmd += [\"-q\", \"clone\"]\n if anonymous:\n cmd += [\"-a\"]\n cmd += [package_name, target_path]\n\n error_msg = (\n f\"Packit failed to clone the repository {package_name}; \"\n \"please make sure that you are authorized to clone repositories \"\n \"from Fedora dist-git - this may require SSH keys set up or \"\n \"Kerberos ticket being active.\"\n )\n commands.run_command(cmd=cmd, 
error_message=error_msg)\n"},"avg_line_length":{"kind":"number","value":35.0444444444,"string":"35.044444"},"max_line_length":{"kind":"number","value":82,"string":"82"},"alphanum_fraction":{"kind":"number","value":0.6098076517,"string":"0.609808"},"count_classes":{"kind":"number","value":3412,"string":"3,412"},"score_classes":{"kind":"number","value":0.7212005918410485,"string":"0.721201"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":2434,"string":"2,434"},"score_documentation":{"kind":"number","value":0.5144789685056014,"string":"0.514479"}}},{"rowIdx":3796,"cells":{"hexsha":{"kind":"string","value":"b9a3c97262cf3c50a695832e8941374463a78067"},"size":{"kind":"number","value":901,"string":"901"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"tests/test_MaskedArrayCollection.py"},"max_stars_repo_name":{"kind":"string","value":"ahaldane/NDducktype_tests"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4876416e5fbff7ba0d85445c0eeae432d6e80014"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-06-18T14:18:39.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-07-22T18:05:52.000Z"},"max_issues_repo_path":{"kind":"string","value":"tests/test_MaskedArrayCollection.py"},"max_issues_repo_name":{"kind":"string","value":"ahaldane/NDducktype_tests"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4876416e5fbff7ba0d85445c0eeae432d6e80014"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-07-19T15:44:09.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-07-28T23:22:21.000Z"},"max_forks_repo_path":{"kind":"string","value":"tests/test_MaskedArrayCollection.py"},"max_forks_repo_name":{"kind":"string","value":"ahaldane/NDducktype_tests"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4876416e5fbff7ba0d85445c0eeae432d6e80014"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-06-20T00:20:13.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-09-20T21:42:52.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\nfrom ndarray_ducktypes.ArrayCollection import ArrayCollection\nfrom ndarray_ducktypes.MaskedArray import MaskedArray\nfrom ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection\nimport numpy as np\n\n# Tests for Masked ArrayCollections.\n#\n# First try: Simply make an arraycollection of MaskedArrays. Downside: this\n# strategy does not give a \"filled\" method. 
Probably to get a masked\n# ArrayCollection we should really subclass ArrayCollection to have a\n# fill_value and a filled() method\n\n#a = MaskedArray(np.arange(10), np.arange(10)%3)\n#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)\n\n#c = ArrayCollection([('age', a), ('weight', b)])\n#print(repr(c))\n#c['age'] += 100\n#print(repr(c))\n\n## second try: Subclass of ArrayCollection\n\n#c = MaskedArrayCollection([('age', a), ('weight', b)])\n#print(repr(c))\n#c['age'] += 100\n#print(repr(c))\n#print(repr(c.filled()))\n"},"avg_line_length":{"kind":"number","value":31.0689655172,"string":"31.068966"},"max_line_length":{"kind":"number","value":75,"string":"75"},"alphanum_fraction":{"kind":"number","value":0.7380688124,"string":"0.738069"},"count_classes":{"kind":"number","value":0,"string":"0"},"score_classes":{"kind":"number","value":0,"string":"0"},"count_generators":{"kind":"number","value":0,"string":"0"},"score_generators":{"kind":"number","value":0,"string":"0"},"count_decorators":{"kind":"number","value":0,"string":"0"},"score_decorators":{"kind":"number","value":0,"string":"0"},"count_async_functions":{"kind":"number","value":0,"string":"0"},"score_async_functions":{"kind":"number","value":0,"string":"0"},"count_documentation":{"kind":"number","value":668,"string":"668"},"score_documentation":{"kind":"number","value":0.7413984461709212,"string":"0.741398"}}},{"rowIdx":3797,"cells":{"hexsha":{"kind":"string","value":"b9a4cbf5401cd86949e3f94c13bc464c4725fcee"},"size":{"kind":"number","value":192704,"string":"192,704"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"rpc/gen/core_pb2.py"},"max_stars_repo_name":{"kind":"string","value":"jasonjoo2010/core"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7c05ddbdac2e05a3d96db28f8bdfacf661907b82"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"rpc/gen/core_pb2.py"},"max_issues_repo_name":{"kind":"string","value":"jasonjoo2010/core"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7c05ddbdac2e05a3d96db28f8bdfacf661907b82"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"rpc/gen/core_pb2.py"},"max_forks_repo_name":{"kind":"string","value":"jasonjoo2010/core"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7c05ddbdac2e05a3d96db28f8bdfacf661907b82"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: core.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='core.proto',\n package='pb',\n syntax='proto3',\n serialized_pb=_b('\\n\\ncore.proto\\x12\\x02pb\\\"\\x07\\n\\x05\\x45mpty\\\"\\xb4\\x01\\n\\x15ListContainersOptions\\x12\\x0f\\n\\x07\\x61ppname\\x18\\x01 \\x01(\\t\\x12\\x12\\n\\nentrypoint\\x18\\x02 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x03 \\x01(\\t\\x12\\x35\\n\\x06labels\\x18\\x04 \\x03(\\x0b\\x32%.pb.ListContainersOptions.LabelsEntry\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"L\\n\\x13\\x44\\x65ployStatusOptions\\x12\\x0f\\n\\x07\\x61ppname\\x18\\x01 \\x01(\\t\\x12\\x12\\n\\nentrypoint\\x18\\x02 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x03 \\x01(\\t\\\"v\\n\\x13\\x44\\x65ployStatusMessage\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x61ppname\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nentrypoint\\x18\\x03 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x04 \\x01(\\t\\x12\\n\\n\\x02id\\x18\\x05 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x61ta\\x18\\x06 \\x01(\\x0c\\\"0\\n\\x03Pod\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x65sc\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x66\\x61vor\\x18\\x03 \\x01(\\t\\\"\\x1d\\n\\x04Pods\\x12\\x15\\n\\x04pods\\x18\\x01 \\x03(\\x0b\\x32\\x07.pb.Pod\\\"\\xfc\\x02\\n\\x0bPodResource\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12%\\n\\x03\\x63pu\\x18\\x02 \\x03(\\x0b\\x32\\x18.pb.PodResource.CpuEntry\\x12+\\n\\x06memory\\x18\\x03 \\x03(\\x0b\\x32\\x1b.pb.PodResource.MemoryEntry\\x12\\'\\n\\x04\\x64iff\\x18\\x04 \\x03(\\x0b\\x32\\x19.pb.PodResource.DiffEntry\\x12+\\n\\x06\\x64\\x65tail\\x18\\x05 \\x03(\\x0b\\x32\\x1b.pb.PodResource.DetailEntry\\x1a*\\n\\x08\\x43puEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x01:\\x02\\x38\\x01\\x1a-\\n\\x0bMemoryEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x01:\\x02\\x38\\x01\\x1a+\\n\\tDiffEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x02\\x38\\x01\\x1a-\\n\\x0b\\x44\\x65tailEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"5\\n\\x12ListNetworkOptions\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06\\x64river\\x18\\x02 \\x01(\\t\\\"(\\n\\x07Network\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07subnets\\x18\\x02 \\x03(\\t\\\")\\n\\x08Networks\\x12\\x1d\\n\\x08networks\\x18\\x01 \\x03(\\x0b\\x32\\x0b.pb.Network\\\"\\x9e\\x03\\n\\x04Node\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x65ndpoint\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07podname\\x18\\x03 \\x01(\\t\\x12\\x1e\\n\\x03\\x63pu\\x18\\x04 \\x03(\\x0b\\x32\\x11.pb.Node.CpuEntry\\x12\\x10\\n\\x08\\x63pu_used\\x18\\x05 \\x01(\\x01\\x12\\x0e\\n\\x06memory\\x18\\x06 \\x01(\\x03\\x12\\x13\\n\\x0bmemory_used\\x18\\x07 \\x01(\\x03\\x12\\x11\\n\\tavailable\\x18\\x08 \\x01(\\x08\\x12$\\n\\x06labels\\x18\\t \\x03(\\x0b\\x32\\x14.pb.Node.LabelsEntry\\x12\\x13\\n\\x0binit_memory\\x18\\n 
\\x01(\\x03\\x12\\'\\n\\x08init_cpu\\x18\\x0b \\x03(\\x0b\\x32\\x15.pb.Node.InitCpuEntry\\x12\\x0c\\n\\x04info\\x18\\x0c \\x01(\\t\\x1a*\\n\\x08\\x43puEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a.\\n\\x0cInitCpuEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\" \\n\\x05Nodes\\x12\\x17\\n\\x05nodes\\x18\\x01 \\x03(\\x0b\\x32\\x08.pb.Node\\\"E\\n\\rNodeAvailable\\x12\\x10\\n\\x08nodename\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07podname\\x18\\x02 \\x01(\\t\\x12\\x11\\n\\tavailable\\x18\\x03 \\x01(\\x08\\\"\\xb8\\x03\\n\\tContainer\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07podname\\x18\\x02 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04name\\x18\\x04 \\x01(\\t\\x12#\\n\\x03\\x63pu\\x18\\x05 \\x03(\\x0b\\x32\\x16.pb.Container.CpuEntry\\x12\\r\\n\\x05quota\\x18\\x06 \\x01(\\x01\\x12\\x0e\\n\\x06memory\\x18\\x07 \\x01(\\x03\\x12\\x12\\n\\nprivileged\\x18\\x08 \\x01(\\x08\\x12)\\n\\x06labels\\x18\\t \\x03(\\x0b\\x32\\x19.pb.Container.LabelsEntry\\x12+\\n\\x07publish\\x18\\n \\x03(\\x0b\\x32\\x1a.pb.Container.PublishEntry\\x12\\r\\n\\x05image\\x18\\x0b \\x01(\\t\\x12\\x0f\\n\\x07inspect\\x18\\x0c \\x01(\\x0c\\x12\\x13\\n\\x0bstatus_data\\x18\\r \\x01(\\x0c\\x1a*\\n\\x08\\x43puEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a.\\n\\x0cPublishEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"k\\n\\x18\\x43ontainerDeployedOptions\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x61ppname\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nentrypoint\\x18\\x03 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x04 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x61ta\\x18\\x05 \\x01(\\x0c\\\"/\\n\\nContainers\\x12!\\n\\ncontainers\\x18\\x01 \\x03(\\x0b\\x32\\r.pb.Container\\\"\\x19\\n\\x0b\\x43ontainerID\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\\"\\x1b\\n\\x0c\\x43ontainerIDs\\x12\\x0b\\n\\x03ids\\x18\\x01 \\x03(\\t\\\"4\\n\\x16RemoveContainerOptions\\x12\\x0b\\n\\x03ids\\x18\\x01 \\x03(\\t\\x12\\r\\n\\x05\\x66orce\\x18\\x02 \\x01(\\x08\\\"7\\n\\x0eReallocOptions\\x12\\x0b\\n\\x03ids\\x18\\x01 \\x03(\\t\\x12\\x0b\\n\\x03\\x63pu\\x18\\x02 \\x01(\\x01\\x12\\x0b\\n\\x03mem\\x18\\x03 \\x01(\\x03\\\":\\n\\rAddPodOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05\\x66\\x61vor\\x18\\x02 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x65sc\\x18\\x03 \\x01(\\t\\\" \\n\\x10RemovePodOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\\"\\x1d\\n\\rGetPodOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\\"\\xf7\\x01\\n\\x0e\\x41\\x64\\x64NodeOptions\\x12\\x10\\n\\x08nodename\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x65ndpoint\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07podname\\x18\\x03 \\x01(\\t\\x12\\n\\n\\x02\\x63\\x61\\x18\\x04 \\x01(\\t\\x12\\x0c\\n\\x04\\x63\\x65rt\\x18\\x05 \\x01(\\t\\x12\\x0b\\n\\x03key\\x18\\x06 \\x01(\\t\\x12\\x0b\\n\\x03\\x63pu\\x18\\x07 \\x01(\\x05\\x12\\r\\n\\x05share\\x18\\x08 \\x01(\\x05\\x12\\x0e\\n\\x06memory\\x18\\t \\x01(\\x03\\x12.\\n\\x06labels\\x18\\n \\x03(\\x0b\\x32\\x1e.pb.AddNodeOptions.LabelsEntry\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 
\\x01(\\t:\\x02\\x38\\x01\\\"6\\n\\x11RemoveNodeOptions\\x12\\x10\\n\\x08nodename\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07podname\\x18\\x02 \\x01(\\t\\\"3\\n\\x0eGetNodeOptions\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x02 \\x01(\\t\\\"0\\n\\x10ListNodesOptions\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03\\x61ll\\x18\\x02 \\x01(\\x08\\\"\\x8e\\x04\\n\\x05\\x42uild\\x12\\x0c\\n\\x04\\x62\\x61se\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04repo\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07version\\x18\\x03 \\x01(\\t\\x12\\x0b\\n\\x03\\x64ir\\x18\\x04 \\x01(\\t\\x12\\x11\\n\\tsubmodule\\x18\\x05 \\x01(\\x08\\x12\\x10\\n\\x08\\x63ommands\\x18\\x06 \\x03(\\t\\x12!\\n\\x04\\x65nvs\\x18\\x07 \\x03(\\x0b\\x32\\x13.pb.Build.EnvsEntry\\x12!\\n\\x04\\x61rgs\\x18\\x08 \\x03(\\x0b\\x32\\x13.pb.Build.ArgsEntry\\x12%\\n\\x06labels\\x18\\t \\x03(\\x0b\\x32\\x15.pb.Build.LabelsEntry\\x12+\\n\\tartifacts\\x18\\n \\x03(\\x0b\\x32\\x18.pb.Build.ArtifactsEntry\\x12#\\n\\x05\\x63\\x61\\x63he\\x18\\x0b \\x03(\\x0b\\x32\\x14.pb.Build.CacheEntry\\x1a+\\n\\tEnvsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a+\\n\\tArgsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a\\x30\\n\\x0e\\x41rtifactsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a,\\n\\nCacheEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"z\\n\\x06\\x42uilds\\x12\\x0e\\n\\x06stages\\x18\\x01 \\x03(\\t\\x12&\\n\\x06\\x62uilds\\x18\\x02 \\x03(\\x0b\\x32\\x16.pb.Builds.BuildsEntry\\x1a\\x38\\n\\x0b\\x42uildsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\x18\\n\\x05value\\x18\\x02 \\x01(\\x0b\\x32\\t.pb.Build:\\x02\\x38\\x01\\\"s\\n\\x11\\x42uildImageOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04user\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03uid\\x18\\x03 \\x01(\\x05\\x12\\x0c\\n\\x04tags\\x18\\x04 \\x03(\\t\\x12\\x1a\\n\\x06\\x62uilds\\x18\\x05 \\x01(\\x0b\\x32\\n.pb.Builds\\x12\\x0b\\n\\x03tar\\x18\\x06 \\x01(\\x0c\\\"F\\n\\x0bHookOptions\\x12\\x13\\n\\x0b\\x61\\x66ter_start\\x18\\x01 \\x03(\\t\\x12\\x13\\n\\x0b\\x62\\x65\\x66ore_stop\\x18\\x02 \\x03(\\t\\x12\\r\\n\\x05\\x66orce\\x18\\x03 \\x01(\\x08\\\"U\\n\\x12HealthCheckOptions\\x12\\x11\\n\\ttcp_ports\\x18\\x01 \\x03(\\t\\x12\\x11\\n\\thttp_port\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04\\x63ode\\x18\\x04 \\x01(\\x05\\\"u\\n\\nLogOptions\\x12\\x0c\\n\\x04type\\x18\\x01 \\x01(\\t\\x12*\\n\\x06\\x63onfig\\x18\\x02 \\x03(\\x0b\\x32\\x1a.pb.LogOptions.ConfigEntry\\x1a-\\n\\x0b\\x43onfigEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"\\xca\\x02\\n\\x11\\x45ntrypointOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ommand\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nprivileged\\x18\\x03 \\x01(\\x08\\x12\\x0b\\n\\x03\\x64ir\\x18\\x04 \\x01(\\t\\x12\\x1b\\n\\x03log\\x18\\x05 \\x01(\\x0b\\x32\\x0e.pb.LogOptions\\x12\\x0f\\n\\x07publish\\x18\\x06 \\x03(\\t\\x12+\\n\\x0bhealthcheck\\x18\\x07 \\x01(\\x0b\\x32\\x16.pb.HealthCheckOptions\\x12\\x1d\\n\\x04hook\\x18\\x08 \\x01(\\x0b\\x32\\x0f.pb.HookOptions\\x12\\x16\\n\\x0erestart_policy\\x18\\t \\x01(\\t\\x12\\x33\\n\\x07sysctls\\x18\\n 
\\x03(\\x0b\\x32\\\".pb.EntrypointOptions.SysctlsEntry\\x1a.\\n\\x0cSysctlsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"\\x88\\x06\\n\\rDeployOptions\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12)\\n\\nentrypoint\\x18\\x02 \\x01(\\x0b\\x32\\x15.pb.EntrypointOptions\\x12\\x0f\\n\\x07podname\\x18\\x03 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x04 \\x01(\\t\\x12\\r\\n\\x05image\\x18\\x05 \\x01(\\t\\x12\\x12\\n\\nextra_args\\x18\\x06 \\x01(\\t\\x12\\x11\\n\\tcpu_quota\\x18\\x07 \\x01(\\x01\\x12\\x0e\\n\\x06memory\\x18\\x08 \\x01(\\x03\\x12\\r\\n\\x05\\x63ount\\x18\\t \\x01(\\x05\\x12\\x0b\\n\\x03\\x65nv\\x18\\n \\x03(\\t\\x12\\x0b\\n\\x03\\x64ns\\x18\\x0b \\x03(\\t\\x12\\x13\\n\\x0b\\x65xtra_hosts\\x18\\x0c \\x03(\\t\\x12\\x0f\\n\\x07volumes\\x18\\r \\x03(\\t\\x12\\x31\\n\\x08networks\\x18\\x0e \\x03(\\x0b\\x32\\x1f.pb.DeployOptions.NetworksEntry\\x12\\x13\\n\\x0bnetworkmode\\x18\\x0f \\x01(\\t\\x12\\x0c\\n\\x04user\\x18\\x10 \\x01(\\t\\x12\\r\\n\\x05\\x64\\x65\\x62ug\\x18\\x11 \\x01(\\x08\\x12\\x11\\n\\topenStdin\\x18\\x12 \\x01(\\x08\\x12-\\n\\x06labels\\x18\\x13 \\x03(\\x0b\\x32\\x1d.pb.DeployOptions.LabelsEntry\\x12\\x35\\n\\nnodelabels\\x18\\x14 \\x03(\\x0b\\x32!.pb.DeployOptions.NodelabelsEntry\\x12\\x15\\n\\rdeploy_method\\x18\\x15 \\x01(\\t\\x12)\\n\\x04\\x64\\x61ta\\x18\\x16 \\x03(\\x0b\\x32\\x1b.pb.DeployOptions.DataEntry\\x12\\x11\\n\\tsoftlimit\\x18\\x17 \\x01(\\x08\\x12\\x13\\n\\x0bnodes_limit\\x18\\x18 \\x01(\\x05\\x1a/\\n\\rNetworksEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a-\\n\\x0bLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a\\x31\\n\\x0fNodelabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a+\\n\\tDataEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x0c:\\x02\\x38\\x01\\\"\\xb5\\x02\\n\\x0eReplaceOptions\\x12$\\n\\tdeployOpt\\x18\\x01 \\x01(\\x0b\\x32\\x11.pb.DeployOptions\\x12\\r\\n\\x05\\x66orce\\x18\\x02 \\x01(\\x08\\x12;\\n\\rfilter_labels\\x18\\x03 \\x03(\\x0b\\x32$.pb.ReplaceOptions.FilterLabelsEntry\\x12*\\n\\x04\\x63opy\\x18\\x04 \\x03(\\x0b\\x32\\x1c.pb.ReplaceOptions.CopyEntry\\x12\\x0b\\n\\x03ids\\x18\\x05 \\x03(\\t\\x12\\x16\\n\\x0enetworkinherit\\x18\\x06 \\x01(\\x08\\x1a\\x33\\n\\x11\\x46ilterLabelsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\x1a+\\n\\tCopyEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"T\\n\\x11\\x43\\x61\\x63heImageOptions\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x02 \\x01(\\t\\x12\\x0e\\n\\x06images\\x18\\x03 \\x03(\\t\\x12\\x0c\\n\\x04step\\x18\\x04 \\x01(\\x05\\\"d\\n\\x12RemoveImageOptions\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x02 \\x01(\\t\\x12\\x0e\\n\\x06images\\x18\\x03 \\x03(\\t\\x12\\x0c\\n\\x04step\\x18\\x04 \\x01(\\x05\\x12\\r\\n\\x05prune\\x18\\x05 \\x01(\\x08\\\"\\x1a\\n\\tCopyPaths\\x12\\r\\n\\x05paths\\x18\\x01 \\x03(\\t\\\"{\\n\\x0b\\x43opyOptions\\x12-\\n\\x07targets\\x18\\x01 \\x03(\\x0b\\x32\\x1c.pb.CopyOptions.TargetsEntry\\x1a=\\n\\x0cTargetsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\x1c\\n\\x05value\\x18\\x02 \\x01(\\x0b\\x32\\r.pb.CopyPaths:\\x02\\x38\\x01\\\",\\n\\x0b\\x45rrorDetail\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 
\\x01(\\x03\\x12\\x0f\\n\\x07message\\x18\\x02 \\x01(\\t\\\"\\x87\\x01\\n\\x11\\x42uildImageMessage\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06status\\x18\\x02 \\x01(\\t\\x12\\x10\\n\\x08progress\\x18\\x03 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x04 \\x01(\\t\\x12\\x0e\\n\\x06stream\\x18\\x05 \\x01(\\t\\x12%\\n\\x0c\\x65rror_detail\\x18\\x06 \\x01(\\x0b\\x32\\x0f.pb.ErrorDetail\\\"\\xea\\x02\\n\\x16\\x43reateContainerMessage\\x12\\x0f\\n\\x07podname\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08nodename\\x18\\x02 \\x01(\\t\\x12\\n\\n\\x02id\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04name\\x18\\x04 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x05 \\x01(\\t\\x12\\x0f\\n\\x07success\\x18\\x06 \\x01(\\x08\\x12\\x30\\n\\x03\\x63pu\\x18\\x07 \\x03(\\x0b\\x32#.pb.CreateContainerMessage.CpuEntry\\x12\\r\\n\\x05quota\\x18\\x08 \\x01(\\x01\\x12\\x0e\\n\\x06memory\\x18\\t \\x01(\\x03\\x12\\x38\\n\\x07publish\\x18\\n \\x03(\\x0b\\x32\\'.pb.CreateContainerMessage.PublishEntry\\x12\\x0c\\n\\x04hook\\x18\\x0b \\x01(\\x0c\\x1a*\\n\\x08\\x43puEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\x1a.\\n\\x0cPublishEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\t:\\x02\\x38\\x01\\\"\\x80\\x01\\n\\x17ReplaceContainerMessage\\x12*\\n\\x06\\x63reate\\x18\\x01 \\x01(\\x0b\\x32\\x1a.pb.CreateContainerMessage\\x12*\\n\\x06remove\\x18\\x02 \\x01(\\x0b\\x32\\x1a.pb.RemoveContainerMessage\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\\"7\\n\\x11RunAndWaitMessage\\x12\\x14\\n\\x0c\\x63ontainer_id\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x61ta\\x18\\x02 \\x01(\\x0c\\\"V\\n\\x11\\x43\\x61\\x63heImageMessage\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07success\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08nodename\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07message\\x18\\x04 \\x01(\\t\\\"F\\n\\x12RemoveImageMessage\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07success\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08messages\\x18\\x03 \\x03(\\t\\\"C\\n\\x16RemoveContainerMessage\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07success\\x18\\x02 \\x01(\\x08\\x12\\x0c\\n\\x04hook\\x18\\x03 \\x01(\\t\\\"5\\n\\x16ReallocResourceMessage\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07success\\x18\\x02 \\x01(\\x08\\\"b\\n\\x0b\\x43opyMessage\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06status\\x18\\x02 \\x01(\\t\\x12\\x0c\\n\\x04name\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04path\\x18\\x04 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x05 \\x01(\\t\\x12\\x0c\\n\\x04\\x64\\x61ta\\x18\\x06 \\x01(\\x0c\\\"J\\n\\x11RunAndWaitOptions\\x12(\\n\\rDeployOptions\\x18\\x01 \\x01(\\x0b\\x32\\x11.pb.DeployOptions\\x12\\x0b\\n\\x03\\x43md\\x18\\x02 \\x01(\\x0c\\\"4\\n\\x17\\x43ontrolContainerOptions\\x12\\x0b\\n\\x03ids\\x18\\x01 \\x03(\\t\\x12\\x0c\\n\\x04type\\x18\\x02 \\x01(\\t\\\"B\\n\\x17\\x43ontrolContainerMessage\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x02 \\x01(\\t\\x12\\x0c\\n\\x04hook\\x18\\x03 
\\x01(\\x0c\\x32\\xcb\\x0c\\n\\x07\\x43oreRPC\\x12!\\n\\x08ListPods\\x12\\t.pb.Empty\\x1a\\x08.pb.Pods\\\"\\x00\\x12&\\n\\x06\\x41\\x64\\x64Pod\\x12\\x11.pb.AddPodOptions\\x1a\\x07.pb.Pod\\\"\\x00\\x12.\\n\\tRemovePod\\x12\\x14.pb.RemovePodOptions\\x1a\\t.pb.Empty\\\"\\x00\\x12&\\n\\x06GetPod\\x12\\x11.pb.GetPodOptions\\x1a\\x07.pb.Pod\\\"\\x00\\x12\\x36\\n\\x0eGetPodResource\\x12\\x11.pb.GetPodOptions\\x1a\\x0f.pb.PodResource\\\"\\x00\\x12)\\n\\x07\\x41\\x64\\x64Node\\x12\\x12.pb.AddNodeOptions\\x1a\\x08.pb.Node\\\"\\x00\\x12.\\n\\nRemoveNode\\x12\\x15.pb.RemoveNodeOptions\\x1a\\x07.pb.Pod\\\"\\x00\\x12\\x31\\n\\x10SetNodeAvailable\\x12\\x11.pb.NodeAvailable\\x1a\\x08.pb.Node\\\"\\x00\\x12)\\n\\x07GetNode\\x12\\x12.pb.GetNodeOptions\\x1a\\x08.pb.Node\\\"\\x00\\x12\\x30\\n\\x0cGetContainer\\x12\\x0f.pb.ContainerID\\x1a\\r.pb.Container\\\"\\x00\\x12\\x33\\n\\rGetContainers\\x12\\x10.pb.ContainerIDs\\x1a\\x0e.pb.Containers\\\"\\x00\\x12/\\n\\rGetNodeByName\\x12\\x12.pb.GetNodeOptions\\x1a\\x08.pb.Node\\\"\\x00\\x12\\x31\\n\\x0cListPodNodes\\x12\\x14.pb.ListNodesOptions\\x1a\\t.pb.Nodes\\\"\\x00\\x12\\x36\\n\\x0cListNetworks\\x12\\x16.pb.ListNetworkOptions\\x1a\\x0c.pb.Networks\\\"\\x00\\x12=\\n\\x0eListContainers\\x12\\x19.pb.ListContainersOptions\\x1a\\x0e.pb.Containers\\\"\\x00\\x12:\\n\\x12ListNodeContainers\\x12\\x12.pb.GetNodeOptions\\x1a\\x0e.pb.Containers\\\"\\x00\\x12>\\n\\x11\\x43ontainerDeployed\\x12\\x1c.pb.ContainerDeployedOptions\\x1a\\t.pb.Empty\\\"\\x00\\x12,\\n\\x04\\x43opy\\x12\\x0f.pb.CopyOptions\\x1a\\x0f.pb.CopyMessage\\\"\\x00\\x30\\x01\\x12>\\n\\nBuildImage\\x12\\x15.pb.BuildImageOptions\\x1a\\x15.pb.BuildImageMessage\\\"\\x00\\x30\\x01\\x12>\\n\\nCacheImage\\x12\\x15.pb.CacheImageOptions\\x1a\\x15.pb.CacheImageMessage\\\"\\x00\\x30\\x01\\x12\\x41\\n\\x0bRemoveImage\\x12\\x16.pb.RemoveImageOptions\\x1a\\x16.pb.RemoveImageMessage\\\"\\x00\\x30\\x01\\x12\\x44\\n\\x0c\\x44\\x65ployStatus\\x12\\x17.pb.DeployStatusOptions\\x1a\\x17.pb.DeployStatusMessage\\\"\\x00\\x30\\x01\\x12@\\n\\nRunAndWait\\x12\\x15.pb.RunAndWaitOptions\\x1a\\x15.pb.RunAndWaitMessage\\\"\\x00(\\x01\\x30\\x01\\x12\\x44\\n\\x0f\\x43reateContainer\\x12\\x11.pb.DeployOptions\\x1a\\x1a.pb.CreateContainerMessage\\\"\\x00\\x30\\x01\\x12G\\n\\x10ReplaceContainer\\x12\\x12.pb.ReplaceOptions\\x1a\\x1b.pb.ReplaceContainerMessage\\\"\\x00\\x30\\x01\\x12M\\n\\x0fRemoveContainer\\x12\\x1a.pb.RemoveContainerOptions\\x1a\\x1a.pb.RemoveContainerMessage\\\"\\x00\\x30\\x01\\x12P\\n\\x10\\x43ontrolContainer\\x12\\x1b.pb.ControlContainerOptions\\x1a\\x1b.pb.ControlContainerMessage\\\"\\x00\\x30\\x01\\x12\\x45\\n\\x0fReallocResource\\x12\\x12.pb.ReallocOptions\\x1a\\x1a.pb.ReallocResourceMessage\\\"\\x00\\x30\\x01\\x62\\x06proto3')\n)\n\n\n\n\n_EMPTY = _descriptor.Descriptor(\n name='Empty',\n full_name='pb.Empty',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=18,\n serialized_end=25,\n)\n\n\n_LISTCONTAINERSOPTIONS_LABELSENTRY = _descriptor.Descriptor(\n name='LabelsEntry',\n full_name='pb.ListContainersOptions.LabelsEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.ListContainersOptions.LabelsEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n 
message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.ListContainersOptions.LabelsEntry.value', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=163,\n serialized_end=208,\n)\n\n_LISTCONTAINERSOPTIONS = _descriptor.Descriptor(\n name='ListContainersOptions',\n full_name='pb.ListContainersOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='appname', full_name='pb.ListContainersOptions.appname', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='entrypoint', full_name='pb.ListContainersOptions.entrypoint', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.ListContainersOptions.nodename', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='labels', full_name='pb.ListContainersOptions.labels', index=3,\n number=4, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[_LISTCONTAINERSOPTIONS_LABELSENTRY, ],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=28,\n serialized_end=208,\n)\n\n\n_DEPLOYSTATUSOPTIONS = _descriptor.Descriptor(\n name='DeployStatusOptions',\n full_name='pb.DeployStatusOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='appname', full_name='pb.DeployStatusOptions.appname', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='entrypoint', full_name='pb.DeployStatusOptions.entrypoint', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', 
full_name='pb.DeployStatusOptions.nodename', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=210,\n serialized_end=286,\n)\n\n\n_DEPLOYSTATUSMESSAGE = _descriptor.Descriptor(\n name='DeployStatusMessage',\n full_name='pb.DeployStatusMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='action', full_name='pb.DeployStatusMessage.action', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='appname', full_name='pb.DeployStatusMessage.appname', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='entrypoint', full_name='pb.DeployStatusMessage.entrypoint', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.DeployStatusMessage.nodename', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.DeployStatusMessage.id', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='data', full_name='pb.DeployStatusMessage.data', index=5,\n number=6, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=288,\n serialized_end=406,\n)\n\n\n_POD = _descriptor.Descriptor(\n name='Pod',\n full_name='pb.Pod',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='name', full_name='pb.Pod.name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='desc', full_name='pb.Pod.desc', index=1,\n number=2, type=9, cpp_type=9, 
# ---------------------------------------------------------------------------
# protoc-generated message descriptors for package `pb` (Descriptor /
# FieldDescriptor boilerplate plus serialized byte offsets).  The proto3
# schema encoded by this block is summarised below; map<...> fields are
# backed by the synthetic *Entry nested descriptors, and message-typed
# fields are inferred from the corresponding field names.
#
# message Pod                 { ...; string favor = 3; }   # earlier Pod fields precede this span
# message Pods                { repeated Pod pods = 1; }
# message PodResource         { string name = 1;
#                               map<string, double> cpu = 2;
#                               map<string, double> memory = 3;
#                               map<string, bool>   diff = 4;
#                               map<string, string> detail = 5; }
# message ListNetworkOptions  { string podname = 1; string driver = 2; }
# message Network             { string name = 1; repeated string subnets = 2; }
# message Networks            { repeated Network networks = 1; }
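# --- Illustrative usage (not part of the generated code) -------------------
# A minimal sketch of how the map-typed fields above behave on the generated
# message classes, assuming the usual module-level classes (PodResource, ...)
# that this generated module defines further down via the reflection calls.
# Values such as 'demo-pod' and 'node1' are made up for illustration.
def _example_pod_resource():  # hypothetical helper; never called at import time
    res = PodResource(name='demo-pod')       # noqa: F821 (class defined later in this module)
    res.cpu['node1'] = 3.5                   # map<string, double>
    res.diff['node1'] = True                 # map<string, bool>
    payload = res.SerializeToString()        # standard protobuf round-trip
    return PodResource.FromString(payload)   # noqa: F821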
# message Node                { string name = 1; string endpoint = 2; string podname = 3;
#                               map<string, int32> cpu = 4; double cpu_used = 5;
#                               int64 memory = 6; int64 memory_used = 7; bool available = 8;
#                               map<string, string> labels = 9; int64 init_memory = 10;
#                               map<string, int32> init_cpu = 11; string info = 12; }
# message Nodes               { repeated Node nodes = 1; }
# message NodeAvailable       { string nodename = 1; string podname = 2; bool available = 3; }
# message Container           { string id = 1; string podname = 2; string nodename = 3;
#                               string name = 4; map<string, int32> cpu = 5; double quota = 6;
#                               int64 memory = 7; bool privileged = 8;
#                               map<string, string> labels = 9; map<string, string> publish = 10;
#                               string image = 11; bytes inspect = 12; bytes status_data = 13; }
# message ContainerDeployedOptions { string id = 1; string appname = 2; string entrypoint = 3;
#                                    string nodename = 4; bytes data = 5; }
# message Containers          { repeated Container containers = 1; }
# message ContainerID         { string id = 1; }
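# --- Illustrative usage (not part of the generated code) -------------------
# A minimal sketch of building a Container and collecting it into a Containers
# message, again assuming the module-level classes protoc emits at the end of
# this file.  All ids, names, labels and addresses here are invented.
def _example_containers():  # hypothetical helper; never called at import time
    c = Container(id='abc123', podname='dev',          # noqa: F821
                  nodename='node1', memory=512 * 1024 * 1024)
    c.labels['app'] = 'demo'                 # map<string, string>
    c.publish['5000/tcp'] = '10.0.0.1:32768' # map<string, string>
    group = Containers()                     # noqa: F821
    group.containers.extend([c])             # repeated Container
    return group.SerializeToString()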
# message ContainerIDs        { repeated string ids = 1; }
# message RemoveContainerOptions { repeated string ids = 1; bool force = 2; }
# message ReallocOptions      { repeated string ids = 1; double cpu = 2; int64 mem = 3; }
# message AddPodOptions       { string name = 1; string favor = 2; string desc = 3; }
# message RemovePodOptions    { string name = 1; }
# message GetPodOptions       { string name = 1; }
# message AddNodeOptions      { string nodename = 1; string endpoint = 2; string podname = 3;
#                               string ca = 4; string cert = 5; string key = 6;
#                               int32 cpu = 7; int32 share = 8; int64 memory = 9;
#                               map<string, string> labels = 10; }
# message RemoveNodeOptions   { string nodename = 1; string podname = 2; }
# message GetNodeOptions      { string podname = 1; string nodename = 2; }
# message ListNodesOptions    { string podname = 1; bool all = 2; }
# message Build               { string base = 1; string repo = 2; string version = 3; string dir = 4;
#                               bool submodule = 5; repeated string commands = 6;
#                               map<string, string> envs = 7; map<string, string> args = 8;
#                               map<string, string> labels = 9; map<string, string> artifacts = 10;
#                               map<string, string> cache = 11; }
# message Builds              { repeated string stages = 1; map<string, Build> builds = 2; }
# message BuildImageOptions   { string name = 1; string user = 2; int32 uid = 3;
#                               repeated string tags = 4; Builds builds = 5; bytes tar = 6; }
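# --- Illustrative usage (not part of the generated code) -------------------
# A minimal sketch of assembling Builds / BuildImageOptions, showing how a
# message-valued map entry (map<string, Build>) is created on first access and
# how a singular message field is set with CopyFrom.  Class names come from
# this generated module; stage names, images and commands are invented.
def _example_build_image_options():  # hypothetical helper; never called at import time
    builds = Builds(stages=['compile'])                       # noqa: F821
    builds.builds['compile'].base = 'golang:alpine'           # entry auto-created on access
    builds.builds['compile'].commands.extend(['make build'])  # repeated string inside Build
    opts = BuildImageOptions(name='demo', user='root', uid=0) # noqa: F821
    opts.builds.CopyFrom(builds)                              # singular message field
    opts.tags.extend(['latest'])
    return opts.SerializeToString()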
# message HookOptions         { repeated string after_start = 1; repeated string before_stop = 2;
#                               bool force = 3; }
# message HealthCheckOptions  { repeated string tcp_ports = 1; string http_port = 2;
#                               string url = 3; int32 code = 4; }
# message LogOptions          { string type = 1; map<string, string> config = 2; }
# message EntrypointOptions   { string name = 1; string command = 2; bool privileged = 3;
#                               string dir = 4; LogOptions log = 5; repeated string publish = 6;
#                               HealthCheckOptions healthcheck = 7; HookOptions hook = 8;
#                               string restart_policy = 9; map<string, string> sysctls = 10; }
# message DeployOptions       (only its map entry descriptors fall in this span: the networks,
#                              labels, nodelabels and data maps; the remaining DeployOptions
#                              fields follow below)
is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.DeployOptions.DataEntry.value', index=1,\n number=2, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=4951,\n serialized_end=4994,\n)\n\n_DEPLOYOPTIONS = _descriptor.Descriptor(\n name='DeployOptions',\n full_name='pb.DeployOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='name', full_name='pb.DeployOptions.name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='entrypoint', full_name='pb.DeployOptions.entrypoint', index=1,\n number=2, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='podname', full_name='pb.DeployOptions.podname', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.DeployOptions.nodename', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='image', full_name='pb.DeployOptions.image', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='extra_args', full_name='pb.DeployOptions.extra_args', index=5,\n number=6, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='cpu_quota', full_name='pb.DeployOptions.cpu_quota', index=6,\n number=7, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='memory', full_name='pb.DeployOptions.memory', index=7,\n number=8, type=3, cpp_type=2, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n 
_descriptor.FieldDescriptor(\n name='count', full_name='pb.DeployOptions.count', index=8,\n number=9, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='env', full_name='pb.DeployOptions.env', index=9,\n number=10, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='dns', full_name='pb.DeployOptions.dns', index=10,\n number=11, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='extra_hosts', full_name='pb.DeployOptions.extra_hosts', index=11,\n number=12, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='volumes', full_name='pb.DeployOptions.volumes', index=12,\n number=13, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='networks', full_name='pb.DeployOptions.networks', index=13,\n number=14, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='networkmode', full_name='pb.DeployOptions.networkmode', index=14,\n number=15, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='user', full_name='pb.DeployOptions.user', index=15,\n number=16, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='debug', full_name='pb.DeployOptions.debug', index=16,\n number=17, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='openStdin', full_name='pb.DeployOptions.openStdin', index=17,\n number=18, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='labels', full_name='pb.DeployOptions.labels', index=18,\n number=19, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, 
file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodelabels', full_name='pb.DeployOptions.nodelabels', index=19,\n number=20, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='deploy_method', full_name='pb.DeployOptions.deploy_method', index=20,\n number=21, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='data', full_name='pb.DeployOptions.data', index=21,\n number=22, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='softlimit', full_name='pb.DeployOptions.softlimit', index=22,\n number=23, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodes_limit', full_name='pb.DeployOptions.nodes_limit', index=23,\n number=24, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[_DEPLOYOPTIONS_NETWORKSENTRY, _DEPLOYOPTIONS_LABELSENTRY, _DEPLOYOPTIONS_NODELABELSENTRY, _DEPLOYOPTIONS_DATAENTRY, ],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=4218,\n serialized_end=4994,\n)\n\n\n_REPLACEOPTIONS_FILTERLABELSENTRY = _descriptor.Descriptor(\n name='FilterLabelsEntry',\n full_name='pb.ReplaceOptions.FilterLabelsEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.ReplaceOptions.FilterLabelsEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.ReplaceOptions.FilterLabelsEntry.value', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5210,\n serialized_end=5261,\n)\n\n_REPLACEOPTIONS_COPYENTRY = _descriptor.Descriptor(\n name='CopyEntry',\n full_name='pb.ReplaceOptions.CopyEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.ReplaceOptions.CopyEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n 
has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.ReplaceOptions.CopyEntry.value', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5263,\n serialized_end=5306,\n)\n\n_REPLACEOPTIONS = _descriptor.Descriptor(\n name='ReplaceOptions',\n full_name='pb.ReplaceOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='deployOpt', full_name='pb.ReplaceOptions.deployOpt', index=0,\n number=1, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='force', full_name='pb.ReplaceOptions.force', index=1,\n number=2, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='filter_labels', full_name='pb.ReplaceOptions.filter_labels', index=2,\n number=3, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='copy', full_name='pb.ReplaceOptions.copy', index=3,\n number=4, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='ids', full_name='pb.ReplaceOptions.ids', index=4,\n number=5, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='networkinherit', full_name='pb.ReplaceOptions.networkinherit', index=5,\n number=6, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[_REPLACEOPTIONS_FILTERLABELSENTRY, _REPLACEOPTIONS_COPYENTRY, ],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=4997,\n serialized_end=5306,\n)\n\n\n_CACHEIMAGEOPTIONS = _descriptor.Descriptor(\n name='CacheImageOptions',\n full_name='pb.CacheImageOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='podname', full_name='pb.CacheImageOptions.podname', index=0,\n number=1, type=9, cpp_type=9, label=1,\n 
has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.CacheImageOptions.nodename', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='images', full_name='pb.CacheImageOptions.images', index=2,\n number=3, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='step', full_name='pb.CacheImageOptions.step', index=3,\n number=4, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5308,\n serialized_end=5392,\n)\n\n\n_REMOVEIMAGEOPTIONS = _descriptor.Descriptor(\n name='RemoveImageOptions',\n full_name='pb.RemoveImageOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='podname', full_name='pb.RemoveImageOptions.podname', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.RemoveImageOptions.nodename', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='images', full_name='pb.RemoveImageOptions.images', index=2,\n number=3, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='step', full_name='pb.RemoveImageOptions.step', index=3,\n number=4, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='prune', full_name='pb.RemoveImageOptions.prune', index=4,\n number=5, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5394,\n serialized_end=5494,\n)\n\n\n_COPYPATHS = _descriptor.Descriptor(\n name='CopyPaths',\n full_name='pb.CopyPaths',\n 
filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='paths', full_name='pb.CopyPaths.paths', index=0,\n number=1, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5496,\n serialized_end=5522,\n)\n\n\n_COPYOPTIONS_TARGETSENTRY = _descriptor.Descriptor(\n name='TargetsEntry',\n full_name='pb.CopyOptions.TargetsEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.CopyOptions.TargetsEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.CopyOptions.TargetsEntry.value', index=1,\n number=2, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5586,\n serialized_end=5647,\n)\n\n_COPYOPTIONS = _descriptor.Descriptor(\n name='CopyOptions',\n full_name='pb.CopyOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='targets', full_name='pb.CopyOptions.targets', index=0,\n number=1, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[_COPYOPTIONS_TARGETSENTRY, ],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5524,\n serialized_end=5647,\n)\n\n\n_ERRORDETAIL = _descriptor.Descriptor(\n name='ErrorDetail',\n full_name='pb.ErrorDetail',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='code', full_name='pb.ErrorDetail.code', index=0,\n number=1, type=3, cpp_type=2, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='message', full_name='pb.ErrorDetail.message', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5649,\n serialized_end=5693,\n)\n\n\n_BUILDIMAGEMESSAGE = _descriptor.Descriptor(\n 
name='BuildImageMessage',\n full_name='pb.BuildImageMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.BuildImageMessage.id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='status', full_name='pb.BuildImageMessage.status', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='progress', full_name='pb.BuildImageMessage.progress', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error', full_name='pb.BuildImageMessage.error', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='stream', full_name='pb.BuildImageMessage.stream', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error_detail', full_name='pb.BuildImageMessage.error_detail', index=5,\n number=6, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5696,\n serialized_end=5831,\n)\n\n\n_CREATECONTAINERMESSAGE_CPUENTRY = _descriptor.Descriptor(\n name='CpuEntry',\n full_name='pb.CreateContainerMessage.CpuEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.CreateContainerMessage.CpuEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.CreateContainerMessage.CpuEntry.value', index=1,\n number=2, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=1290,\n 
serialized_end=1332,\n)\n\n_CREATECONTAINERMESSAGE_PUBLISHENTRY = _descriptor.Descriptor(\n name='PublishEntry',\n full_name='pb.CreateContainerMessage.PublishEntry',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='key', full_name='pb.CreateContainerMessage.PublishEntry.key', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='value', full_name='pb.CreateContainerMessage.PublishEntry.value', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\\001')),\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=1929,\n serialized_end=1975,\n)\n\n_CREATECONTAINERMESSAGE = _descriptor.Descriptor(\n name='CreateContainerMessage',\n full_name='pb.CreateContainerMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='podname', full_name='pb.CreateContainerMessage.podname', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.CreateContainerMessage.nodename', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.CreateContainerMessage.id', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='name', full_name='pb.CreateContainerMessage.name', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error', full_name='pb.CreateContainerMessage.error', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='success', full_name='pb.CreateContainerMessage.success', index=5,\n number=6, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='cpu', 
full_name='pb.CreateContainerMessage.cpu', index=6,\n number=7, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='quota', full_name='pb.CreateContainerMessage.quota', index=7,\n number=8, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='memory', full_name='pb.CreateContainerMessage.memory', index=8,\n number=9, type=3, cpp_type=2, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='publish', full_name='pb.CreateContainerMessage.publish', index=9,\n number=10, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='hook', full_name='pb.CreateContainerMessage.hook', index=10,\n number=11, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[_CREATECONTAINERMESSAGE_CPUENTRY, _CREATECONTAINERMESSAGE_PUBLISHENTRY, ],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=5834,\n serialized_end=6196,\n)\n\n\n_REPLACECONTAINERMESSAGE = _descriptor.Descriptor(\n name='ReplaceContainerMessage',\n full_name='pb.ReplaceContainerMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='create', full_name='pb.ReplaceContainerMessage.create', index=0,\n number=1, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='remove', full_name='pb.ReplaceContainerMessage.remove', index=1,\n number=2, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error', full_name='pb.ReplaceContainerMessage.error', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6199,\n serialized_end=6327,\n)\n\n\n_RUNANDWAITMESSAGE = _descriptor.Descriptor(\n name='RunAndWaitMessage',\n full_name='pb.RunAndWaitMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='container_id', 
full_name='pb.RunAndWaitMessage.container_id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='data', full_name='pb.RunAndWaitMessage.data', index=1,\n number=2, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6329,\n serialized_end=6384,\n)\n\n\n_CACHEIMAGEMESSAGE = _descriptor.Descriptor(\n name='CacheImageMessage',\n full_name='pb.CacheImageMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='image', full_name='pb.CacheImageMessage.image', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='success', full_name='pb.CacheImageMessage.success', index=1,\n number=2, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nodename', full_name='pb.CacheImageMessage.nodename', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='message', full_name='pb.CacheImageMessage.message', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6386,\n serialized_end=6472,\n)\n\n\n_REMOVEIMAGEMESSAGE = _descriptor.Descriptor(\n name='RemoveImageMessage',\n full_name='pb.RemoveImageMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='image', full_name='pb.RemoveImageMessage.image', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='success', full_name='pb.RemoveImageMessage.success', index=1,\n number=2, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='messages', full_name='pb.RemoveImageMessage.messages', index=2,\n 
number=3, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6474,\n serialized_end=6544,\n)\n\n\n_REMOVECONTAINERMESSAGE = _descriptor.Descriptor(\n name='RemoveContainerMessage',\n full_name='pb.RemoveContainerMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.RemoveContainerMessage.id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='success', full_name='pb.RemoveContainerMessage.success', index=1,\n number=2, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='hook', full_name='pb.RemoveContainerMessage.hook', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6546,\n serialized_end=6613,\n)\n\n\n_REALLOCRESOURCEMESSAGE = _descriptor.Descriptor(\n name='ReallocResourceMessage',\n full_name='pb.ReallocResourceMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.ReallocResourceMessage.id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='success', full_name='pb.ReallocResourceMessage.success', index=1,\n number=2, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6615,\n serialized_end=6668,\n)\n\n\n_COPYMESSAGE = _descriptor.Descriptor(\n name='CopyMessage',\n full_name='pb.CopyMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.CopyMessage.id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='status', full_name='pb.CopyMessage.status', index=1,\n number=2, type=9, 
cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='name', full_name='pb.CopyMessage.name', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='path', full_name='pb.CopyMessage.path', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error', full_name='pb.CopyMessage.error', index=4,\n number=5, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='data', full_name='pb.CopyMessage.data', index=5,\n number=6, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6670,\n serialized_end=6768,\n)\n\n\n_RUNANDWAITOPTIONS = _descriptor.Descriptor(\n name='RunAndWaitOptions',\n full_name='pb.RunAndWaitOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='DeployOptions', full_name='pb.RunAndWaitOptions.DeployOptions', index=0,\n number=1, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='Cmd', full_name='pb.RunAndWaitOptions.Cmd', index=1,\n number=2, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6770,\n serialized_end=6844,\n)\n\n\n_CONTROLCONTAINEROPTIONS = _descriptor.Descriptor(\n name='ControlContainerOptions',\n full_name='pb.ControlContainerOptions',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='ids', full_name='pb.ControlContainerOptions.ids', index=0,\n number=1, type=9, cpp_type=9, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='type', full_name='pb.ControlContainerOptions.type', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n 
message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6846,\n serialized_end=6898,\n)\n\n\n_CONTROLCONTAINERMESSAGE = _descriptor.Descriptor(\n name='ControlContainerMessage',\n full_name='pb.ControlContainerMessage',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='id', full_name='pb.ControlContainerMessage.id', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='error', full_name='pb.ControlContainerMessage.error', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='hook', full_name='pb.ControlContainerMessage.hook', index=2,\n number=3, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=6900,\n serialized_end=6966,\n)\n\n_LISTCONTAINERSOPTIONS_LABELSENTRY.containing_type = _LISTCONTAINERSOPTIONS\n_LISTCONTAINERSOPTIONS.fields_by_name['labels'].message_type = _LISTCONTAINERSOPTIONS_LABELSENTRY\n_PODS.fields_by_name['pods'].message_type = _POD\n_PODRESOURCE_CPUENTRY.containing_type = _PODRESOURCE\n_PODRESOURCE_MEMORYENTRY.containing_type = _PODRESOURCE\n_PODRESOURCE_DIFFENTRY.containing_type = _PODRESOURCE\n_PODRESOURCE_DETAILENTRY.containing_type = _PODRESOURCE\n_PODRESOURCE.fields_by_name['cpu'].message_type = _PODRESOURCE_CPUENTRY\n_PODRESOURCE.fields_by_name['memory'].message_type = _PODRESOURCE_MEMORYENTRY\n_PODRESOURCE.fields_by_name['diff'].message_type = _PODRESOURCE_DIFFENTRY\n_PODRESOURCE.fields_by_name['detail'].message_type = _PODRESOURCE_DETAILENTRY\n_NETWORKS.fields_by_name['networks'].message_type = _NETWORK\n_NODE_CPUENTRY.containing_type = _NODE\n_NODE_LABELSENTRY.containing_type = _NODE\n_NODE_INITCPUENTRY.containing_type = _NODE\n_NODE.fields_by_name['cpu'].message_type = _NODE_CPUENTRY\n_NODE.fields_by_name['labels'].message_type = _NODE_LABELSENTRY\n_NODE.fields_by_name['init_cpu'].message_type = _NODE_INITCPUENTRY\n_NODES.fields_by_name['nodes'].message_type = _NODE\n_CONTAINER_CPUENTRY.containing_type = _CONTAINER\n_CONTAINER_LABELSENTRY.containing_type = _CONTAINER\n_CONTAINER_PUBLISHENTRY.containing_type = _CONTAINER\n_CONTAINER.fields_by_name['cpu'].message_type = _CONTAINER_CPUENTRY\n_CONTAINER.fields_by_name['labels'].message_type = _CONTAINER_LABELSENTRY\n_CONTAINER.fields_by_name['publish'].message_type = _CONTAINER_PUBLISHENTRY\n_CONTAINERS.fields_by_name['containers'].message_type = _CONTAINER\n_ADDNODEOPTIONS_LABELSENTRY.containing_type = _ADDNODEOPTIONS\n_ADDNODEOPTIONS.fields_by_name['labels'].message_type = 
Tail of the page's embedded JSON payload, condensed here from the raw escaped form. It carried three more dataset rows plus viewer state:

- A truncated, protoc-generated gRPC binding (core_pb2.py, package pb): descriptor wiring for the nested map entries of Build, Builds, LogOptions, EntrypointOptions, DeployOptions, ReplaceOptions, CopyOptions and CreateContainerMessage; _sym_db.RegisterMessage(...) calls for every message type from Empty through ControlContainerMessage; and the CoreRPC service descriptor with 28 methods (ListPods, AddPod, RemovePod, GetPod, GetPodResource, AddNode, RemoveNode, SetNodeAvailable, GetNode, GetContainer, GetContainers, GetNodeByName, ListPodNodes, ListNetworks, ListContainers, ListNodeContainers, ContainerDeployed, Copy, BuildImage, CacheImage, RemoveImage, DeployStatus, RunAndWait, CreateContainer, ReplaceContainer, RemoveContainer, ControlContainer, ReallocResource). Row stats: avg_line_length 38.875126, max_line_length 16,249, alphanum_fraction 0.731339, documentation 40,959 (score 0.212549).

- rowIdx 3798: src/models/text_node.py from moevm/nosql1h19-text-graph @ 410f156ad4f232f8aa060d43692ab020610ddfd4 (MIT, 2,217 bytes). A neomodel TextNode(StructuredNode) with order_id, label, text and alg_results properties and an ALG relationship, plus helpers short(), describe() (an HTML table with Russian captions reporting character, word, sentence and link counts), preview(), words_num(), character_num() and sentences_num(). Row stats: avg_line_length 31.671429, max_line_length 73, alphanum_fraction 0.488498, classes 2,088 (score 0.903114), documentation 1,105 (score 0.477941).

- rowIdx 3799: tests/test_bishop_generate.py from otaviocarvalho/chess-negamax @ 21f1066611e581dac3257d3f46c71ca2b09b5964 (MIT, 2,655 bytes, 6 stars). A unittest.TestCase checking bishop move generation on a stub board along the four diagonals (top-right, top-left, bottom-left, bottom-right) and the total number of generated moves. Row stats: avg_line_length 30.872093, max_line_length 58, alphanum_fraction 0.581921, classes 2,530 (score 0.952919), documentation 110 (score 0.041431).

The payload ended with viewer state (pageIndex 37, 100 items per page, offset 3,700, 12,962,249 rows in total) and a session token, omitted here.
Dataset schema (column, dtype, observed range as listed in the table header):

  hexsha                                      stringlengths  40 .. 40
  size                                        int64          5 .. 2.06M
  ext                                         stringclasses  10 values
  lang                                        stringclasses  1 value
  max_stars_repo_path                         stringlengths  3 .. 248
  max_stars_repo_name                         stringlengths  5 .. 125
  max_stars_repo_head_hexsha                  stringlengths  40 .. 78
  max_stars_repo_licenses                     listlengths    1 .. 10
  max_stars_count                             int64          1 .. 191k
  max_stars_repo_stars_event_min_datetime     stringlengths  24 .. 24
  max_stars_repo_stars_event_max_datetime     stringlengths  24 .. 24
  max_issues_repo_path                        stringlengths  3 .. 248
  max_issues_repo_name                        stringlengths  5 .. 125
  max_issues_repo_head_hexsha                 stringlengths  40 .. 78
  max_issues_repo_licenses                    listlengths    1 .. 10
  max_issues_count                            int64          1 .. 67k
  max_issues_repo_issues_event_min_datetime   stringlengths  24 .. 24
  max_issues_repo_issues_event_max_datetime   stringlengths  24 .. 24
  max_forks_repo_path                         stringlengths  3 .. 248
  max_forks_repo_name                         stringlengths  5 .. 125
  max_forks_repo_head_hexsha                  stringlengths  40 .. 78
  max_forks_repo_licenses                     listlengths    1 .. 10
  max_forks_count                             int64          1 .. 105k
  max_forks_repo_forks_event_min_datetime     stringlengths  24 .. 24
  max_forks_repo_forks_event_max_datetime     stringlengths  24 .. 24
  content                                     stringlengths  5 .. 2.06M
  avg_line_length                             float64        1 .. 1.02M
  max_line_length                             int64          3 .. 1.03M
  alphanum_fraction                           float64        0 .. 1
  count_classes                               int64          0 .. 1.6M
  score_classes                               float64        0 .. 1
  count_generators                            int64          0 .. 651k
  score_generators                            float64        0 .. 1
  count_decorators                            int64          0 .. 990k
  score_decorators                            float64        0 .. 1
  count_async_functions                       int64          0 .. 235k
  score_async_functions                       float64        0 .. 1
  count_documentation                         int64          0 .. 1.04M
  score_documentation                         float64        0 .. 1
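Taken together, the count_* and score_* columns describe how much of each file consists of classes, generators, decorators, async functions and documentation. As a minimal sketch of how rows with this schema could be filtered, assuming they have been loaded into a pandas DataFrame named df with exactly these column names (the threshold values are arbitrary, my own illustration rather than anything defined by the dataset):

import pandas as pd

def select_documented_oo_files(df: pd.DataFrame) -> pd.DataFrame:
    # Keep files that are reasonably documented and class-heavy, and that
    # are not dominated by a single very long (likely generated) line.
    mask = (
        (df["score_documentation"] >= 0.2)
        & (df["score_classes"] >= 0.5)
        & (df["max_line_length"] <= 200)
    )
    cols = ["max_stars_repo_name", "max_stars_repo_path", "size", "score_documentation"]
    return df.loc[mask, cols].sort_values("score_documentation", ascending=False)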
Row: account/views.py
  repo: KimSoungRyoul/drf_unitteset_study_project @ 9a0d824bdc6343eeba6209299c077a6e9d280516 (MIT)
  hexsha b95f2f6c2258ef8998ac2a053019013dbf870640, 2,351 bytes, ext py, lang Python
  stars / issues / forks: counts and event dates all null
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins

from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer


@method_decorator(name='retrieve', decorator=swagger_auto_schema(
    operation_description="회원 개인정보 조회 API",
    filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
    operation_description="회원 가입 API",
))
@method_decorator(name='update', decorator=swagger_auto_schema(
    operation_description="회원 정보 수정 API",
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
    operation_description="회원 탈퇴 API",
))
class CustomerAPIViewSet(mixins.CreateModelMixin,
                         mixins.DestroyModelMixin,
                         mixins.RetrieveModelMixin,
                         mixins.UpdateModelMixin,
                         viewsets.GenericViewSet):
    queryset: QuerySet = Customer.objects
    permission_classes = (IsAuthenticated,)
    http_method_names = ['get', 'post', 'put', 'delete']

    def get_serializer_class(self):
        if self.request.method == 'POST':
            return SignUpFormSerializer
        elif self.request.method == 'GET':
            return CustomerInfoSerializer
        elif self.request.method == 'PUT':
            return SignUpFormSerializer
        elif self.request.method == 'DELETE':
            return SignUpFormSerializer

    def get_permissions(self):
        if self.request.method == 'POST':
            permission_classes = [AllowAny]
        return [permission() for permission in permission_classes]

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
Row stats: avg_line_length 40.534483, max_line_length 103, alphanum_fraction 0.722671; classes 1,317 (score 0.549896), generators 0 (score 0), decorators 1,854 (score 0.774113), async functions 0 (score 0), documentation 218 (score 0.091023)
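One detail worth flagging in the sample above: as reconstructed, get_permissions only binds permission_classes when the request method is POST, so any other verb reaches the return with the name unbound. A common DRF pattern, shown here as a hypothetical variant rather than as part of the original file, is to fall back to the class-level permissions:

    def get_permissions(self):
        # Hypothetical fix, not in the original row: open sign-up (POST),
        # keep the class default (IsAuthenticated) for every other method.
        if self.request.method == 'POST':
            return [AllowAny()]
        return [permission() for permission in self.permission_classes]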
Row: src/front-door/azext_front_door/_validators.py
  repo: Mannan2812/azure-cli-extensions @ e2b34efe23795f6db9c59100534a40f0813c3d95 (MIT)
  hexsha b95fe9aa9fab4f285d9028f8b01c9820d83254e4, 3,831 bytes, ext py, lang Python
  stars 207 (2017-11-29 .. 2022-03-31), issues 4,061 (2017-10-27 .. 2022-03-31), forks 802 (2017-10-11 .. 2022-03-31)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse


def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors',
                             resource_namespace='Microsoft.Network',
                             resource_name_dest='front_door_name'):
    def _validate_name_or_id(cmd, namespace):
        from azure.cli.core.commands.client_factory import get_subscription_id
        from msrestazure.tools import is_valid_resource_id, resource_id
        subscription_id = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        names_or_ids = getattr(namespace, dest)
        is_list = True
        # treat single values as a list, but convert back in the end
        if not isinstance(names_or_ids, list):
            is_list = False
            names_or_ids = [names_or_ids]
        if names_or_ids == [None] or not names_or_ids:
            return
        ids = []
        for val in names_or_ids:
            id_params = {
                'subscription': subscription_id,
                'resource_group': resource_group,
                'namespace': resource_namespace,
                'type': resource_type,
                'name': getattr(namespace, resource_name_dest) if child_type else val,
                'child_type_1': child_type,
                'child_name_1': val if child_type else None
            }
            if not is_valid_resource_id(val):
                val = resource_id(**id_params)
            ids.append(val)
        setattr(namespace, dest, ids if is_list else ids[0])

    return _validate_name_or_id


def validate_waf_policy(cmd, namespace):
    get_name_or_id_validator(
        dest='waf_policy',
        resource_type='WebApplicationFirewallPolicy'
    )(cmd, namespace)


def validate_keyvault(cmd, namespace):
    get_name_or_id_validator(
        dest='vault',
        resource_type='vaults',
        resource_namespace='Microsoft.Keyvault'
    )(cmd, namespace)


def validate_load_balancing_settings(cmd, namespace):
    get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)


def validate_probe_settings(cmd, namespace):
    get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)


def validate_frontend_endpoints(cmd, namespace):
    get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)


def validate_backend_pool(cmd, namespace):
    get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)


def validate_rules_engine(cmd, namespace):
    get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)


# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):

    # pylint: disable=no-self-use
    def parse_match_condition(self, values):
        from azext_front_door.vendored_sdks.models import MatchCondition
        if not isinstance(values, list):
            values = values.split(' ')
        try:
            return MatchCondition(
                match_variable=values[0],
                operator=values[1],
                match_value=values[2:]
            )
        except IndexError:
            from knack.util import CLIError
            raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')

    def __call__(self, parser, namespace, values, option_string=None):
        match_condition = self.parse_match_condition(values)
        super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
Row stats: avg_line_length 35.472222, max_line_length 119, alphanum_fraction 0.645262; classes 871 (score 0.227356), generators 0 (score 0), decorators 0 (score 0), async functions 0 (score 0), documentation 923 (score 0.240929)
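The validator factory in this row normalizes bare names into full ARM resource IDs before the command runs. As a rough illustration of what that expansion produces (the subscription, resource group and names below are made up for the example), the same resource_id call used inside _validate_name_or_id builds a child resource ID under the Front Door named by front_door_name; values that already pass is_valid_resource_id are left untouched:

from msrestazure.tools import resource_id

# Hypothetical inputs, for illustration only.
expanded = resource_id(
    subscription='00000000-0000-0000-0000-000000000000',
    resource_group='my-rg',
    namespace='Microsoft.Network',
    type='Frontdoors',
    name='my-fd',
    child_type_1='frontendEndpoints',
    child_name_1='my-endpoint',
)
# expanded == '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
#             '/providers/Microsoft.Network/Frontdoors/my-fd/frontendEndpoints/my-endpoint'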
Row: mimesis/data/int/development.py
  repo: DevAerial/mimesis @ 33c58ae43e2f6ebc11e5ea7ebe8ac8917b2e1c0b (MIT)
  hexsha b960f3f5be88ef82754359823e7c6a9b7ed78089, 7,763 bytes, ext py, lang Python
  stars null, issues 1 (2022-03-26), forks null
    """Provides all the data related to the development.""" LICENSES = [ "Apache License, 2.0 (Apache-2.0)", "The BSD 3-Clause License", "The BSD 2-Clause License", "GNU General Public License (GPL)", "General Public License (LGPL)", "MIT License (MIT)", "Mozilla Public License 2.0 (MPL-2.0)", "Common Development and Distribution License (CDDL-1.0)", "Eclipse Public License (EPL-1.0)", ] PROGRAMMING_LANGS = [ "ASP", "Assembly", "AutoIt", "Awk", "Bash", "C", "C Shell", "C#", "C++", "Caml", "Ceylon", "Clojure", "CoffeeScript", "Common Lisp", "D", "Dart", "Delphi", "Dylan", "ECMAScript", "Elixir", "Emacs Lisp", "Erlang", "F#", "Falcon", "Fortran", "GNU Octave", "Go", "Groovy", "Haskell", "haXe", "Io", "J#", "Java", "JavaScript", "Julia", "Kotlin", "Lisp", "Lua", "Mathematica", "Objective-C", "OCaml", "Perl", "PHP", "PL-I", "PL-SQL", "PowerShell", "Prolog", "Python", "R", "Racket", "Ruby", "Rust", "Scala", "Scheme", "Smalltalk", "Tcl", "Tex", "Transact-SQL", "TypeScript", "Z shell", ] OS = [ "Arch", "CentOS", "Debian", "Fedora", "FreeBSD", "Gentoo", "Kali", "Lubuntu", "Manjaro", "Mint", "OS X", "macOS", "OpenBSD", "PCLinuxOS", "Slackware", "Ubuntu", "Windows 10", "Windows 7", "Windows 8", "Windows 8.1", "Zorin", "elementaryOS", "macOS", "openSUSE", ] FOLDERS = [ "Development", "Downloads", "Documents", "Music", "Video", "Work", "Pictures", "Desktop", "Study", ] PROJECT_NAMES = [ "aardonyx", "abelisaurus", "achelousaurus", "achillobator", "acrocanthosaurus", "aegyptosaurus", "afrovenator", "agilisaurus", "alamosaurus", "albertaceratops", "albertosaurus", "alectrosaurus", "alioramus", "allosaurus", "alvarezsaurus", "amargasaurus", "ammosaurus", "ampelosaurus", "amygdalodon", "anatotitan", "anchiceratops", "anchisaurus", "ankylosaurus", "anserimimus", "antarctopelta", "antarctosaurus", "apatosaurus", "aragosaurus", "aralosaurus", "archaeoceratops", "archaeopteryx", "archaeornithomimus", "argentinosaurus", "arrhinoceratops", "atlascopcosaurus", "aucasaurus", "austrosaurus", "avaceratops", "avalonia", "avimimus", "azendohsaurus", "bactrosaurus", "bagaceratops", "bambiraptor", "barapasaurus", "barosaurus", "baryonyx", "becklespinax", "beipiaosaurus", "bellusaurus", "borogovia", "brachiosaurus", "brachyceratops", "bugenasaura", "buitreraptor", "camarasaurus", "camptosaurus", "carnotaurus", "caudipteryx", "cedarpelta", "centrosaurus", "ceratosaurus", "cetiosauriscus", "cetiosaurus", "chaoyangsaurus", "chasmosaurus", "chialingosaurus", "chindesaurus", "chinshakiangosaurus", "chirostenotes", "chubutisaurus", "chungkingosaurus", "citipati", "coelophysis", "coelurus", "coloradisaurus", "compsognathus", "conchoraptor", "confuciusornis", "corythosaurus", "cryolophosaurus", "dacentrurus", "daspletosaurus", "datousaurus", "deinocheirus", "deinonychus", "deltadromeus", "diceratops", "dicraeosaurus", "dilophosaurus", "diplodocus", "dracorex", "dravidosaurus", "dromaeosaurus", "dromiceiomimus", "dryosaurus", "dryptosaurus", "dubreuillosaurus", "edmontonia", "edmontosaurus", "einiosaurus", "elaphrosaurus", "emausaurus", "eolambia", "eoraptor", "eotyrannus", "equijubus", "erketu", "erlikosaurus", "euhelopus", "euoplocephalus", "europasaurus", "euskelosaurus", "eustreptospondylus", "fukuiraptor", "fukuisaurus", "gallimimus", "gargoyleosaurus", "garudimimus", "gasosaurus", "gasparinisaura", "gastonia", "giganotosaurus", "gilmoreosaurus", "giraffatitan", "gobisaurus", "gorgosaurus", "goyocephale", "graciliceratops", "gryposaurus", "guaibasaurus", "guanlong", "hadrosaurus", "hagryphus", "haplocanthosaurus", 
"harpymimus", "herrerasaurus", "hesperosaurus", "heterodontosaurus", "homalocephale", "huayangosaurus", "hylaeosaurus", "hypacrosaurus", "hypselosaurus", "hypsilophodon", "iguanodon", "indosuchus", "ingenia", "irritator", "isisaurus", "janenschia", "jaxartosaurus", "jingshanosaurus", "jinzhousaurus", "jobaria", "juravenator", "kentrosaurus", "khaan", "kotasaurus", "kritosaurus", "lamaceratops", "lambeosaurus", "lapparentosaurus", "leaellynasaura", "leptoceratops", "lesothosaurus", "lexovisaurus", "liaoceratops", "liaoxiornis", "ligabuesaurus", "liliensternus", "lophorhothon", "lophostropheus", "lufengosaurus", "lurdusaurus", "lycorhinus", "magyarosaurus", "maiasaura", "majungatholus", "malawisaurus", "mamenchisaurus", "mapusaurus", "marshosaurus", "masiakasaurus", "massospondylus", "maxakalisaurus", "megalosaurus", "melanorosaurus", "metriacanthosaurus", "microceratops", "micropachycephalosaurus", "microraptor", "minmi", "monolophosaurus", "mononykus", "mussaurus", "muttaburrasaurus", "nanotyrannus", "nanshiungosaurus", "nemegtosaurus", "neovenator", "neuquenosaurus", "nigersaurus", "nipponosaurus", "noasaurus", "nodosaurus", "nomingia", "nothronychus", "nqwebasaurus", "omeisaurus", "ornitholestes", "ornithomimus", "orodromeus", "oryctodromeus", "othnielia", "ouranosaurus", "oviraptor", "rebbachisaurus", "rhabdodon", "rhoetosaurus", "rinchenia", "riojasaurus", "rugops", "saichania", "saltasaurus", "saltopus", "sarcosaurus", "saurolophus", "sauropelta", "saurophaganax", "saurornithoides", "scelidosaurus", "scutellosaurus", "secernosaurus", "segisaurus", "segnosaurus", "seismosaurus", "shamosaurus", "shanag", "shantungosaurus", "shunosaurus", "shuvuuia", "silvisaurus", "sinocalliopteryx", "sinornithosaurus", "sinosauropteryx", "sinraptor", "sinvenator", "zalmoxes", "zephyrosaurus", "zuniceratops", "byzantine", "svengali", "accolade", "acrimony", "angst", "anomaly", "antidote", "baroque", "bona_fide", "bourgeois", "bravado", "brogue", "brusque", "cacophony", "caustic", "charisma", "cloying", "deja-vu", "dichotomy", "elan", "ennui", "epitome", "esoteric", "euphemism", "faux pas", "fiasco", "finagle", "glib", "harbinger", "hedonist", "heresy", "idyllic", "insidious", "junket", "kitsch", "litany", "lurid", "malaise", "malinger", "mantra", "maudlin", "mercenary", "misnomer", "nirvana", "oblivion", "ogle", "ostracize", "panacea", "paradox", "peevish", "propriety", "revel", "rhetoric", "spartan", "stigma", "stoic", "suave", "sycophant", "tirade", "tryst", "untenable", "vicarious", "vile", "waft", "zealous", ]
    17.845977
    61
    0.551977
    0
    0
    0
    0
    0
    0
    0
    0
    5,167
    0.665593
    b962302fa813576c8cf57a4deea0db5f25dfb918
    620
    py
    Python
    docs/mathparse.py
    pcmoritz/flow
    bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
    [ "MIT" ]
    16
    2018-05-25T06:30:28.000Z
    2020-08-08T00:03:47.000Z
    docs/mathparse.py
    pcmoritz/flow
    bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
    [ "MIT" ]
    46
    2018-05-22T21:32:55.000Z
    2019-06-12T13:10:02.000Z
    docs/mathparse.py
    pcmoritz/flow
    bc97132e9e2d05262bb6bbad5bda173fd9f4ae92
    [ "MIT" ]
    6
    2018-06-22T14:59:14.000Z
    2019-08-29T06:00:34.000Z
    """ A preliminary attempt at parsing an RST file's math syntax in order to make math render as inline rather than display mode. This doesn't work as of yet but might be useful. It could, however, be not useful if there's a pandoc option for converting .md to .rst that makes math inline and not display. Keeping it around, though. """ import re s = """Define .. math:: v_{des} as the desired velocity, .. math:: 1^k a vector of ones of length""" with open('/Users/nishant/Downloads/tutorialtest.rst', 'r') as myfile: s = myfile.read() print([elem[11:-2] for elem in re.findall('\n.. math:: *\S*\n\n', s)])
    22.962963
    70
    0.693548
    0
    0
    0
    0
    0
    0
    0
    0
    504
    0.812903
    b96253f9f9bc87e42d80842aebed3aa7dacb859b
    1,994
    py
    Python
    lib/layout/primitives.py
    tailhook/pyzza
    610be6ee4bea9b64f8226faf7338523fdafdf2cf
    [ "MIT" ]
    2
    2015-08-07T15:39:25.000Z
    2019-03-31T12:45:37.000Z
    lib/layout/primitives.py
    tailhook/pyzza
    610be6ee4bea9b64f8226faf7338523fdafdf2cf
    [ "MIT" ]
    null
    null
    null
    lib/layout/primitives.py
    tailhook/pyzza
    610be6ee4bea9b64f8226faf7338523fdafdf2cf
    [ "MIT" ]
    null
    null
    null
# Note: this file is written in the pyzza dialect (Python-like syntax compiled
# to Flash); helpers such as values(), .length and @package come from that
# runtime, not from standard Python.
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement


@package('layout')
class Poly(Shape):
    __slots__ = ('fillcolor', 'sequence')

    def __init__(self, name, fillcolor, seq, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor
        self.sequence = seq

    def draw(self, w, h):
        g = self.graphics
        g.clear()
        for line in values(self.sequence):
            g.beginFill(self.fillcolor)
            g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
            for idx in range(1, line.length):
                g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
            g.endFill()


@package('layout')
class RoundRect(Shape):
    __slots__ = ('fillcolor', 'radius')

    def __init__(self, name, fillcolor, radius, states):
        super().__init__(name, states)
        self.fillcolor = fillcolor
        self.radius = radius

    def draw(self, width, height):
        g = self.graphics
        g.clear()
        g.beginFill(self.fillcolor)
        g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
        g.endFill()


@package('layout')
class TextLine(Widget):
    __slots__ = ('format', 'text', 'textline')

    def __init__(self, format, text, name, states):
        self.format = format
        self.text = text
        super().__init__(name, states)

    def draw(self, width, height):
        if self.textline:
            self.removeChild(self.textline)
        tb = TextBlock()
        tb.content = TextElement(self.text, self.format)
        self.textline = tb.createTextLine(None, width)
        self.addChild(self.textline)


@package('layout')
class CenteredLine(TextLine):
    def __init__(self, format, text, name, states):
        super().__init__(format, text, name, states)

    def draw(self, width, height):
        super().draw(width, height)
        self.textline.x = int((width - self.textline.width)/2)
        self.textline.y = int((height - self.textline.height)/2)
    32.688525
    70
    0.609829
    1,824
    0.914744
    0
    0
    1,900
    0.952859
    0
    0
    96
    0.048144
    b963a238595dc05d6bc40e6f5888099b52a8fc14
    20,515
    py
    Python
    tests/testing_server.py
    ImportTaste/WebRequest
    0cc385622624de16ec980e0c12d9080d593cab74
    [ "WTFPL" ]
    null
    null
    null
    tests/testing_server.py
    ImportTaste/WebRequest
    0cc385622624de16ec980e0c12d9080d593cab74
    [ "WTFPL" ]
    null
    null
    null
    tests/testing_server.py
    ImportTaste/WebRequest
    0cc385622624de16ec980e0c12d9080d593cab74
    [ "WTFPL" ]
    null
    null
    null
    import traceback import uuid import socket import logging import os import base64 import zlib import gzip import time import datetime from http import cookies from http.server import BaseHTTPRequestHandler from http.server import HTTPServer from threading import Thread import WebRequest def capture_expected_headers(expected_headers, test_context, is_chromium=False, is_selenium_garbage_chromium=False, is_annoying_pjs=False, skip_header_checks=False): # print("Capturing expected headers:") # print(expected_headers) assert isinstance(expected_headers, dict), "expected_headers must be a dict. Passed a %s" & type(expected_headers) for key, val in expected_headers.items(): assert isinstance(key, str) assert isinstance(val, str) cookie_key = uuid.uuid4().hex log = logging.getLogger("Main.TestServer") sucuri_reqs_1 = 0 sucuri_reqs_2 = 0 sucuri_reqs_3 = 0 class MockServerRequestHandler(BaseHTTPRequestHandler): def log_message(self, format, *args): return def validate_headers(self): for key, value in expected_headers.items(): if (is_annoying_pjs or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Encoding': # So PhantomJS monkeys with accept-encoding headers # Just ignore that particular header, I guess. pass # Selenium is fucking retarded, and I can't override the user-agent # and other assorted parameters via their API at all. elif (is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept-Language': pass elif (is_annoying_pjs or is_chromium or is_selenium_garbage_chromium or skip_header_checks) and key == 'Accept': pass elif not skip_header_checks: v1 = value.replace(" ", "") v2 = self.headers[key] if v2 is None: v2 = "" v2 = v2.replace(" ", "") test_context.assertEqual(v1, v2, msg="Mismatch in header parameter '{}' : '{}' -> '{}' ({})".format( key, value, self.headers[key], { 'is_annoying_pjs' : is_annoying_pjs, 'is_chromium' : is_chromium, 'is_selenium_garbage_chromium' : is_selenium_garbage_chromium, 'skip_header_checks' : skip_header_checks, }, ) ) def _get_handler(self): # Process an HTTP GET request and return a response with an HTTP 200 status. # print("Path: ", self.path) # print("Headers: ", self.headers) # print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[])) try: self.validate_headers() except Exception: self.send_response(500) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Headers failed validation!") raise if self.path == "/": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/favicon.ico": self.send_response(404) self.end_headers() elif self.path == "/raw-txt": self.send_response(200) self.send_header('Content-type', "text/plain") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/html-decode": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"Root OK?") elif self.path == "/html/real": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Root OK?</body></html>") elif self.path == "/compressed/deflate": self.send_response(200) self.send_header('Content-Encoding', 'deflate') self.send_header('Content-type', "text/html") self.end_headers() inb = b"Root OK?" 
cobj = zlib.compressobj(wbits=-zlib.MAX_WBITS) t1 = cobj.compress(inb) + cobj.flush() self.wfile.write(t1) elif self.path == "/compressed/gzip": self.send_response(200) self.send_header('Content-Encoding', 'gzip') self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(gzip.compress(b"Root OK?")) elif self.path == "/json/invalid": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"LOLWAT") elif self.path == "/json/valid": self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b'{"oh" : "hai"}') elif self.path == "/json/no-coding": self.send_response(200) self.end_headers() self.wfile.write(b'{"oh" : "hai"}') elif self.path == "/filename/path-only.txt": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename/path-only-trailing-slash/": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename/content-disposition": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.txt") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/path-only.txt": self.send_response(200) self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.txt") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-html-suffix": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.html") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-1": self.send_response(200) self.send_header('Content-Disposition', "filename='lolercoaster.html'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-2": self.send_response(200) self.send_header('Content-Disposition', "filename=\'lolercoaster.html\'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-spaces-1": self.send_response(200) self.send_header('Content-Disposition', "filename='loler coaster.html'") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/content-disposition-quotes-spaces-2": self.send_response(200) self.send_header('Content-Disposition', "filename=\"loler coaster.html\"") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/filename_mime/explicit-html-mime": self.send_response(200) self.send_header('Content-Disposition', "filename=lolercoaster.html") self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"LOLWAT?") elif self.path == "/redirect/bad-1": self.send_response(302) self.end_headers() elif self.path == "/redirect/bad-2": self.send_response(302) self.send_header('location', "bad-2") self.end_headers() elif self.path == "/redirect/bad-3": self.send_response(302) self.send_header('location', "gopher://www.google.com") self.end_headers() elif self.path == "/redirect/from-1": self.send_response(302) self.send_header('location', "to-1") self.end_headers() elif self.path == "/redirect/to-1": self.send_response(200) self.end_headers() self.wfile.write(b"Redirect-To-1") elif self.path == "/redirect/from-2": self.send_response(302) self.send_header('uri', "to-2") self.end_headers() elif self.path == "/redirect/to-2": self.send_response(200) 
self.end_headers() self.wfile.write(b"Redirect-To-2") elif self.path == "/redirect/from-3": self.send_response(302) newurl = "http://{}:{}".format(self.server.server_address[0], self.server.server_address[1]) self.send_header('uri', newurl) self.end_headers() elif self.path == "/password/expect": # print("Password") # print(self.headers) self.send_response(200) self.end_headers() if not 'Authorization' in self.headers: self.wfile.write(b"Password not sent!!") return val = self.headers['Authorization'] passval = val.split(" ")[-1] passstr = base64.b64decode(passval) if passstr == b'lol:wat': self.wfile.write(b"Password Ok?") else: self.wfile.write(b"Password Bad!") elif self.path == "/content/have-title": self.send_response(200) self.end_headers() self.wfile.write(b"<html><head><title>I can haz title?</title></head><body>This page has a title!</body></html>") elif self.path == "/content/no-title": self.send_response(200) self.end_headers() self.wfile.write(b"<html><head></head><body>This page has no title. Sadface.jpg</body></html>") elif self.path == "/binary_ctnt": self.send_response(200) self.send_header('Content-type', "image/jpeg") self.end_headers() self.wfile.write(b"Binary!\x00\x01\x02\x03") elif self.path == "/binary_ctnt": self.send_response(200) self.send_header('Content-type', "image/jpeg") self.end_headers() self.wfile.write(b"Binary!\x00\x01\x02\x03") ################################################################################################################################## # Cookie stuff ################################################################################################################################## elif self.path == '/cookie_test': cook = cookies.SimpleCookie() cook['cookie_test_key'] = cookie_key cook['cookie_test_key']['path'] = "/" cook['cookie_test_key']['domain'] = "" expiration = datetime.datetime.now() + datetime.timedelta(days=30) cook['cookie_test_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST") self.send_response(200) self.send_header('Content-type', "text/html") self.send_header('Set-Cookie', cook['cookie_test_key'].OutputString()) self.end_headers() self.wfile.write(b"<html><body>CF Cookie Test</body></html>") elif self.path == '/cookie_require': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cookie_test_key' and cook_value == cookie_key: self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Cookie forwarded properly!</body></html>") return self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><body>Cookie is missing</body></html>") ################################################################################################################################## # Sucuri validation ################################################################################################################################## elif self.path == '/sucuri_shit_3': # I'd like to get this down to just 2 requests (cookie bounce, and fetch). # Doing that requires pulling html content out of chromium, though. # Annoying. nonlocal sucuri_reqs_3 sucuri_reqs_3 += 1 if sucuri_reqs_3 > 3: raise RuntimeError("Too many requests to sucuri_shit_3 (%s)!" 
% sucuri_reqs_3) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p3)?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) elif self.path == '/sucuri_shit_2': # This particular path is the one we should already have a cookie for. # As such, we expect one request only nonlocal sucuri_reqs_2 sucuri_reqs_2 += 1 if sucuri_reqs_2 > 1: raise RuntimeError("Too many requests to sucuri_shit_2 (%s)!" % sucuri_reqs_2) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target preemptive Sucuri page!</title></head><body>Preemptive waf circumvented OK (p2)?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) elif self.path == '/sucuri_shit': nonlocal sucuri_reqs_1 sucuri_reqs_1 += 1 if sucuri_reqs_1 > 4: raise RuntimeError("Too many requests to sucuri_shit (%s)!" 
% sucuri_reqs_1) # print("Fetch for ", self.path) # print("Cookies:", self.headers.get_all('Cookie', failobj=[])) if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'sucuri_cloudproxy_uuid_6293e0004' and cook_value == '04cbb56494ebedbcd19a61b2d728c478': # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target Sucuri page!</title></head><body>Sucuri Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'sucuri_garbage.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(plain_contents) ################################################################################################################################## # Cloudflare validation ################################################################################################################################## elif self.path == '/cloudflare_under_attack_shit_2': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key: # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.server_version = "cloudflare is garbage" self.send_response(503) self.send_header('Server', "cloudflare is garbage") self.send_header('Content-type','text/html') self.end_headers() self.wfile.write(plain_contents) elif self.path == '/cloudflare_under_attack_shit': if self.headers.get_all('Cookie', failobj=[]): cook = self.headers.get_all('Cookie', failobj=[])[0] cook_key, cook_value = cook.split("=", 1) if cook_key == 'cloudflare_validate_key' and cook_value == cookie_key: # if cook[''] self.send_response(200) self.send_header('Content-type', "text/html") self.end_headers() self.wfile.write(b"<html><head><title>At target CF page!</title></head><body>CF Redirected OK?</body></html>") return container_dir = os.path.dirname(__file__) fpath = os.path.join(container_dir, "waf_garbage", 'cf_js_challenge_03_12_2018.html') with open(fpath, "rb") as fp: plain_contents = fp.read() self.server_version = "cloudflare is garbage" self.send_response(503) self.send_header('Server', "cloudflare is garbage") self.send_header('Content-type','text/html') self.end_headers() self.wfile.write(plain_contents) elif self.path == '/cdn-cgi/l/chk_jschl?jschl_vc=427c2b1cd4fba29608ee81b200e94bfa&pass=1543827239.915-44n9IE20mS&jschl_answer=9.66734594': cook = cookies.SimpleCookie() cook['cloudflare_validate_key'] = cookie_key cook['cloudflare_validate_key']['path'] = "/" cook['cloudflare_validate_key']['domain'] = "" expiration = datetime.datetime.now() + datetime.timedelta(days=30) cook['cloudflare_validate_key']["expires"] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S PST") self.send_response(200) self.send_header('Content-type', "text/html") self.send_header('Set-Cookie', 
cook['cloudflare_validate_key'].OutputString()) self.end_headers() body = "<html><body>Setting cookies.<script>window.location.href='/cloudflare_under_attack_shit'</script></body></html>" self.wfile.write(body.encode("utf-8")) ################################################################################################################################## # Handle requests for an unknown path ################################################################################################################################## else: test_context.assertEqual(self.path, "This shouldn't happen!") def do_GET(self): # Process an HTTP GET request and return a response with an HTTP 200 status. log.info("Request for URL path: '%s'", self.path) # print("Headers: ", self.headers) # print("Cookie(s): ", self.headers.get_all('Cookie', failobj=[])) try: return self._get_handler() except Exception as e: log.error("Exception in handler!") for line in traceback.format_exc().split("\n"): log.error(line) raise e return MockServerRequestHandler def get_free_port(): s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) s.bind(('localhost', 0)) address, port = s.getsockname() s.close() return port def start_server(assertion_class, from_wg, port_override = None, is_chromium = None, is_selenium_garbage_chromium = False, is_annoying_pjs = False, skip_header_checks = False ): # Configure mock server. if port_override: mock_server_port = port_override else: mock_server_port = get_free_port() expected_headers = dict(from_wg.browserHeaders) print(from_wg) print(expected_headers) assert isinstance(expected_headers, dict) captured_server = capture_expected_headers( expected_headers = expected_headers, test_context = assertion_class, is_chromium = is_chromium, is_selenium_garbage_chromium = is_selenium_garbage_chromium, is_annoying_pjs = is_annoying_pjs, skip_header_checks = skip_header_checks ) retries = 4 for x in range(retries + 1): try: mock_server = HTTPServer(('0.0.0.0', mock_server_port), captured_server) break except OSError: time.sleep(0.2) if x >= retries: raise # Start running mock server in a separate thread. # Daemon threads automatically shut down when the main process exits. mock_server_thread = Thread(target=mock_server.serve_forever) mock_server_thread.setDaemon(True) mock_server_thread.start() return mock_server_port, mock_server, mock_server_thread if __name__ == '__main__': wg = WebRequest.WebGetRobust() srv = start_server( assertion_class = None, from_wg = wg, skip_header_checks = True) print("running server on port: ", srv) while 1: time.sleep(1)
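A hedged sketch of how a test might drive the mock server above: WebGetRobust and start_server appear in this module, while the module name testing_server, the getpage() fetch call, and the exact response handling are assumptions.

import unittest

import WebRequest
from testing_server import start_server  # module name assumed from the file path

class TestRootPath(unittest.TestCase):
    def test_root(self):
        wg = WebRequest.WebGetRobust()
        port, server, thread = start_server(
            assertion_class=self, from_wg=wg, skip_header_checks=True)
        try:
            # getpage() is assumed to be the WebGetRobust fetch method.
            body = wg.getpage("http://localhost:{}/".format(port))
            self.assertIn("Root OK?", str(body))  # the "/" handler writes b"Root OK?"
        finally:
            server.shutdown()

if __name__ == "__main__":
    unittest.main()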
    32.929374
    165
    0.640653
    17,801
    0.867707
    0
    0
    0
    0
    0
    0
    7,644
    0.372605
    b963e6196b8baa521ce89adb40142bf81a9183a6
    3,770
    py
    Python
    calcgrades.py
    qrowsxi/calcgrades
    93c71c1afef8dde5174726ae1702b71ccba633de
    [ "MIT" ]
    null
    null
    null
    calcgrades.py
    qrowsxi/calcgrades
    93c71c1afef8dde5174726ae1702b71ccba633de
    [ "MIT" ]
    null
    null
    null
    calcgrades.py
    qrowsxi/calcgrades
    93c71c1afef8dde5174726ae1702b71ccba633de
    [ "MIT" ]
    null
    null
    null
    import csv import math import numpy as np import pandas import scipy.optimize import sys import argparse def ineq_constraint_1(v): return np.array([vi for vi in v]) def ineq_constraint_2(v): return np.array([-vi + 30 for vi in v]) class WeightAverage: def __init__(self, mean, csv): self.df = pandas.read_csv(csv) self.course = self.df['name'] self.expected_mean = mean self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0] self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))]) self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0] self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0] self.tot_credits = sum(self.owned_credits) + sum(self.credits) def weight_average(self, v): term1 = 0 term2 = 0 for i in range(0, len(self.owned_grades)): term1 = term1 + self.owned_grades[i] * self.owned_credits[i] for i in range(0, len(v)): term2 = term2 + v[i] * self.credits[i] return (term1 + term2) / self.tot_credits def eq_constraint(self, v): return self.weight_average(v) - self.expected_mean def solve(self): cons = ( {'type': 'eq', 'fun': self.eq_constraint}, {'type': 'ineq', 'fun': ineq_constraint_1}, {'type': 'ineq', 'fun': ineq_constraint_2}) res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons) if not res.success: return None return res.x def error_no_solution(): print("Mean not possible with current vote :(") exit(0) def output_result(solver, sol): avg = solver.weight_average(sol) df = solver.df print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110") if sol is None: print("Not Possible with current grades :(") exit() for index, row in df.query('grade > 0').iterrows(): print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}") i = 0 for index, row in df.query('grade == 0').iterrows(): print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}") i += 1 return 0 def main(): name = "calcGrades" description = """CalcGrades is an utility which purpose is to compute the minimum grades required to get a certain weight average of the grades over the credits, given the desired output and the grades already owned.""" parser = argparse.ArgumentParser(name, description=description) parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean') parser.add_argument('--file',dest='file', default='courses.csv', type=str, help='path to the csv file containing the courses (default: courses.csv)') parser.add_argument('--floor', default=False, action='store_true', help='apply floor operation instead of round to solution') parser.add_argument('--ceil', default=False, action='store_true', help='apply ceil operation instead of round to solution') args = parser.parse_args() mean = args.mean courses = args.file solver = WeightAverage(mean, courses) sol = solver.solve() if sol is None: error_no_solution() if args.ceil: sol = [math.ceil(x) for x in sol] elif args.floor: sol = [math.floor(x) for x in sol] else: sol = [round(x) for x in sol] output_result(solver, sol) return 0 if __name__ == '__main__': main()
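The description above boils down to one quantity: the credit-weighted mean over both earned and pending courses, sum(grade_i * credits_i) / total_credits, which WeightAverage.weight_average evaluates and SLSQP minimizes subject to hitting the requested mean with every grade kept in [0, 30]. A hand-worked numeric sketch (all numbers made up for illustration):

# Hand-worked version of the objective the solver evaluates; numbers are illustrative.
owned = [(27, 6), (30, 9)]       # (grade, credits) already earned
pending_credits = [6, 12]        # credits of courses still without a grade
candidate = [24, 28]             # grades the solver might propose for them

total_credits = sum(c for _, c in owned) + sum(pending_credits)
mean = (sum(g * c for g, c in owned)
        + sum(g * c for g, c in zip(candidate, pending_credits))) / total_credits
print(round(mean, 3))            # 27.636 for these made-up numbers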
    35.566038
    116
    0.609284
    1,464
    0.388329
    0
    0
    0
    0
    0
    0
    991
    0.262865
    b9652ceb78b45d3bef98c61d48e3cd4630133615
    19,317
    py
    Python
    sdk/python/pulumi_google_native/testing/v1/test_matrix.py
    AaronFriel/pulumi-google-native
    75d1cda425e33d4610348972cd70bddf35f1770d
    [ "Apache-2.0" ]
    44
    2021-04-18T23:00:48.000Z
    2022-02-14T17:43:15.000Z
    sdk/python/pulumi_google_native/testing/v1/test_matrix.py
    AaronFriel/pulumi-google-native
    75d1cda425e33d4610348972cd70bddf35f1770d
    [ "Apache-2.0" ]
    354
    2021-04-16T16:48:39.000Z
    2022-03-31T17:16:39.000Z
    sdk/python/pulumi_google_native/testing/v1/test_matrix.py
    AaronFriel/pulumi-google-native
    75d1cda425e33d4610348972cd70bddf35f1770d
    [ "Apache-2.0" ]
    8
    2021-04-24T17:46:51.000Z
    2022-01-05T10:40:21.000Z
    # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['TestMatrixArgs', 'TestMatrix'] @pulumi.input_type class TestMatrixArgs: def __init__(__self__, *, environment_matrix: pulumi.Input['EnvironmentMatrixArgs'], result_storage: pulumi.Input['ResultStorageArgs'], test_specification: pulumi.Input['TestSpecificationArgs'], client_info: Optional[pulumi.Input['ClientInfoArgs']] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a TestMatrix resource. :param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on. :param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written. :param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test. :param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test. :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. :param pulumi.Input[str] project: The cloud project that owns the test matrix. """ pulumi.set(__self__, "environment_matrix", environment_matrix) pulumi.set(__self__, "result_storage", result_storage) pulumi.set(__self__, "test_specification", test_specification) if client_info is not None: pulumi.set(__self__, "client_info", client_info) if fail_fast is not None: pulumi.set(__self__, "fail_fast", fail_fast) if flaky_test_attempts is not None: pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts) if project is not None: pulumi.set(__self__, "project", project) if request_id is not None: pulumi.set(__self__, "request_id", request_id) @property @pulumi.getter(name="environmentMatrix") def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']: """ The devices the tests are being executed on. """ return pulumi.get(self, "environment_matrix") @environment_matrix.setter def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']): pulumi.set(self, "environment_matrix", value) @property @pulumi.getter(name="resultStorage") def result_storage(self) -> pulumi.Input['ResultStorageArgs']: """ Where the results for the matrix are written. 
""" return pulumi.get(self, "result_storage") @result_storage.setter def result_storage(self, value: pulumi.Input['ResultStorageArgs']): pulumi.set(self, "result_storage", value) @property @pulumi.getter(name="testSpecification") def test_specification(self) -> pulumi.Input['TestSpecificationArgs']: """ How to run the test. """ return pulumi.get(self, "test_specification") @test_specification.setter def test_specification(self, value: pulumi.Input['TestSpecificationArgs']): pulumi.set(self, "test_specification", value) @property @pulumi.getter(name="clientInfo") def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]: """ Information about the client which invoked the test. """ return pulumi.get(self, "client_info") @client_info.setter def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]): pulumi.set(self, "client_info", value) @property @pulumi.getter(name="failFast") def fail_fast(self) -> Optional[pulumi.Input[bool]]: """ If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. """ return pulumi.get(self, "fail_fast") @fail_fast.setter def fail_fast(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "fail_fast", value) @property @pulumi.getter(name="flakyTestAttempts") def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]: """ The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. """ return pulumi.get(self, "flaky_test_attempts") @flaky_test_attempts.setter def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "flaky_test_attempts", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The cloud project that owns the test matrix. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="requestId") def request_id(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "request_id") @request_id.setter def request_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "request_id", value) class TestMatrix(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None, environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None, result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None, test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None, __props__=None): """ Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. 
May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices. Auto-naming is currently not supported for this resource. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test. :param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on. :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. :param pulumi.Input[str] project: The cloud project that owns the test matrix. :param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written. :param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test. """ ... @overload def __init__(__self__, resource_name: str, args: TestMatrixArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices. Auto-naming is currently not supported for this resource. Note - this resource's API doesn't support deletion. When deleted, the resource will persist on Google Cloud even though it will be deleted from Pulumi state. :param str resource_name: The name of the resource. :param TestMatrixArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None, environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None, fail_fast: Optional[pulumi.Input[bool]] = None, flaky_test_attempts: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, request_id: Optional[pulumi.Input[str]] = None, result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None, test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TestMatrixArgs.__new__(TestMatrixArgs) __props__.__dict__["client_info"] = client_info if environment_matrix is None and not opts.urn: raise TypeError("Missing required property 'environment_matrix'") __props__.__dict__["environment_matrix"] = environment_matrix __props__.__dict__["fail_fast"] = fail_fast __props__.__dict__["flaky_test_attempts"] = flaky_test_attempts __props__.__dict__["project"] = project __props__.__dict__["request_id"] = request_id if result_storage is None and not opts.urn: raise TypeError("Missing required property 'result_storage'") __props__.__dict__["result_storage"] = result_storage if test_specification is None and not opts.urn: raise TypeError("Missing required property 'test_specification'") __props__.__dict__["test_specification"] = test_specification __props__.__dict__["invalid_matrix_details"] = None __props__.__dict__["outcome_summary"] = None __props__.__dict__["state"] = None __props__.__dict__["test_executions"] = None __props__.__dict__["test_matrix_id"] = None __props__.__dict__["timestamp"] = None super(TestMatrix, __self__).__init__( 'google-native:testing/v1:TestMatrix', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix': """ Get an existing TestMatrix resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = TestMatrixArgs.__new__(TestMatrixArgs) __props__.__dict__["client_info"] = None __props__.__dict__["environment_matrix"] = None __props__.__dict__["fail_fast"] = None __props__.__dict__["flaky_test_attempts"] = None __props__.__dict__["invalid_matrix_details"] = None __props__.__dict__["outcome_summary"] = None __props__.__dict__["project"] = None __props__.__dict__["result_storage"] = None __props__.__dict__["state"] = None __props__.__dict__["test_executions"] = None __props__.__dict__["test_matrix_id"] = None __props__.__dict__["test_specification"] = None __props__.__dict__["timestamp"] = None return TestMatrix(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="clientInfo") def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']: """ Information about the client which invoked the test. """ return pulumi.get(self, "client_info") @property @pulumi.getter(name="environmentMatrix") def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']: """ The devices the tests are being executed on. """ return pulumi.get(self, "environment_matrix") @property @pulumi.getter(name="failFast") def fail_fast(self) -> pulumi.Output[bool]: """ If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation. """ return pulumi.get(self, "fail_fast") @property @pulumi.getter(name="flakyTestAttempts") def flaky_test_attempts(self) -> pulumi.Output[int]: """ The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns. """ return pulumi.get(self, "flaky_test_attempts") @property @pulumi.getter(name="invalidMatrixDetails") def invalid_matrix_details(self) -> pulumi.Output[str]: """ Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state. """ return pulumi.get(self, "invalid_matrix_details") @property @pulumi.getter(name="outcomeSummary") def outcome_summary(self) -> pulumi.Output[str]: """ Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED. """ return pulumi.get(self, "outcome_summary") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The cloud project that owns the test matrix. """ return pulumi.get(self, "project") @property @pulumi.getter(name="resultStorage") def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']: """ Where the results for the matrix are written. """ return pulumi.get(self, "result_storage") @property @pulumi.getter def state(self) -> pulumi.Output[str]: """ Indicates the current progress of the test matrix. """ return pulumi.get(self, "state") @property @pulumi.getter(name="testExecutions") def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]: """ The list of test executions that the service creates for this matrix. """ return pulumi.get(self, "test_executions") @property @pulumi.getter(name="testMatrixId") def test_matrix_id(self) -> pulumi.Output[str]: """ Unique id set by the service. 
""" return pulumi.get(self, "test_matrix_id") @property @pulumi.getter(name="testSpecification") def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']: """ How to run the test. """ return pulumi.get(self, "test_specification") @property @pulumi.getter def timestamp(self) -> pulumi.Output[str]: """ The time this test matrix was initially created. """ return pulumi.get(self, "timestamp")
    50.436031
    458
    0.67671
    18,864
    0.976549
    0
    0
    15,506
    0.802713
    0
    0
    9,637
    0.498887
    b965c021bcb2dac479172708e85ad9ed89f09ef2
    5,427
    py
    Python
    View/View.py
    MoriokaReimen/ConfigHeaderGenerator
    73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
    [ "MIT" ]
    null
    null
    null
    View/View.py
    MoriokaReimen/ConfigHeaderGenerator
    73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
    [ "MIT" ]
    null
    null
    null
    View/View.py
    MoriokaReimen/ConfigHeaderGenerator
    73ba5d3bd5269d7e6881ec79b6fc0121ff2fb03e
    [ "MIT" ]
    null
    null
    null
    import tkinter as tk import tkinter.messagebox from Control import Control class View: def __init__(self, control : Control.Control): self.control = control # Init Window self.root = tk.Tk() self.root.title(u"Header File Generator") self.root.geometry("700x800") self.config_frame = tk.Frame(self.root) # Config Table lb_symbol = tk.Label(self.config_frame, width = 20) lb_symbol["text"] = "Symbol" lb_symbol.grid(row = 0, column = 0) lb_description = tk.Label(self.config_frame, width = 40) lb_description["text"] = "Detail" lb_description.grid(row = 0, column = 1) lb_enable = tk.Label(self.config_frame, width = 10) lb_enable["text"] = "Enable" lb_enable.grid(row = 0, column = 2) for i, config in enumerate(self.control.getConfigs()): symbol_entry = tk.Entry(self.config_frame, width=20) symbol_entry.insert(tk.END, config.symbol) symbol_entry.config(state = tk.DISABLED) symbol_entry.config(disabledforeground = "black", disabledbackground = "white") symbol_entry.grid(row= i + 1, column = 0) detail_entry = tk.Entry(self.config_frame, width=40) detail_entry.insert(tk.END, config.detail) detail_entry.config(state = tk.DISABLED) detail_entry.config(disabledforeground = "black", disabledbackground = "white") detail_entry.grid(row= i + 1, column = 1) bt_enable = tk.Button(self.config_frame, text="ON", width= 5) bt_enable["text"] = "ON" if config.enable else "OFF" color = "green" if config.enable else "red" bt_enable.config(bg=color, activebackground = color) bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button) bt_enable.grid(row = i + 1, column = 2) self.config_frame.pack(side=tk.TOP, anchor=tk.NW) self.value_config_frame = tk.Frame(self.root) # Config Table lb_symbol = tk.Label(self.value_config_frame, width = 20) lb_symbol["text"] = "Symbol" lb_symbol.grid(row = 0, column = 0) lb_description = tk.Label(self.value_config_frame, width = 40) lb_description["text"] = "Detail" lb_description.grid(row = 0, column = 1) lb_value = tk.Label(self.value_config_frame, width = 10) lb_value["text"] = "Value" lb_value.grid(row = 0, column = 2) lb_enable = tk.Label(self.value_config_frame, width = 10) lb_enable["text"] = "Enable" lb_enable.grid(row = 0, column = 3) for i, val_config in enumerate(self.control.getValConfigs()): symbol_entry = tk.Entry(self.value_config_frame, width=20) symbol_entry.insert(tk.END, val_config.symbol) symbol_entry.config(state = tk.DISABLED) symbol_entry.config(disabledforeground = "black", disabledbackground = "white") symbol_entry.grid(row= i + 1, column = 0) detail_entry = tk.Entry(self.value_config_frame, width=40) detail_entry.insert(tk.END, val_config.detail) detail_entry.config(state = tk.DISABLED) detail_entry.config(disabledforeground = "black", disabledbackground = "white") detail_entry.grid(row= i + 1, column = 1) value_entry = tk.Entry(self.value_config_frame, width=10) value_entry.insert(tk.END, val_config.value) value_entry.config(state = tk.DISABLED) value_entry.config(disabledforeground = "black", disabledbackground = "white") value_entry.grid(row= i + 1, column = 2) bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5) bt_enable["text"] = "ON" if val_config.enable else "OFF" color = "green" if val_config.enable else "red" bt_enable.config(bg=color, activebackground = color) bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button) bt_enable.grid(row = i + 1, column = 3) self.value_config_frame.pack(side=tk.TOP, anchor=tk.W) # Generator Button self.bt_generate = 
tk.Button(self.root) self.bt_generate["text"] = "Generate Header" self.bt_generate["command"] = self.generateHeader self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE) def start(self): self.root.mainloop() def generateHeader(self): self.control.generateHeader() tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path)) def update(self): pass def toggle_config_enable(self, id, button : tk.Button): config = self.control.getConfigs()[id] config.enable = not config.enable button["text"] = "ON" if config.enable else "OFF" color = "green" if config.enable else "red" button.config(bg=color, activebackground = color) def toggle_val_config_enable(self, id, button : tk.Button): val_config = self.control.getValConfigs()[id] val_config.enable = not val_config.enable button["text"] = "ON" if val_config.enable else "OFF" color = "green" if val_config.enable else "red" button.config(bg=color, activebackground = color)
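A minimal, hedged sketch of wiring this View to its controller; the Control constructor arguments are not visible in this file and are assumed to be none here.

from Control import Control
from View.View import View

control = Control.Control()   # constructor signature assumed
view = View(control)
view.start()                  # enters the Tk main loop defined above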
    43.071429
    112
    0.629445
    5,350
    0.985812
    0
    0
    0
    0
    0
    0
    463
    0.085314
    b9669e29ffa745ca4256305d7461bcbe497cc930
    1,428
    py
    Python
    tests/bugs/core_3355_test.py
    FirebirdSQL/firebird-qa
    96af2def7f905a06f178e2a80a2c8be4a4b44782
    [ "MIT" ]
    1
    2022-02-05T11:37:13.000Z
    2022-02-05T11:37:13.000Z
    tests/bugs/core_3355_test.py
    FirebirdSQL/firebird-qa
    96af2def7f905a06f178e2a80a2c8be4a4b44782
    [ "MIT" ]
    1
    2021-09-03T11:47:00.000Z
    2021-09-03T12:42:10.000Z
    tests/bugs/core_3355_test.py
    FirebirdSQL/firebird-qa
    96af2def7f905a06f178e2a80a2c8be4a4b44782
    [ "MIT" ]
    1
    2021-06-30T14:14:16.000Z
    2021-06-30T14:14:16.000Z
#coding:utf-8
#
# id:           bugs.core_3355
# title:        Wrong comparison of DATE and TIMESTAMP if index is used
# description:
# tracker_id:   CORE-3355
# min_versions: ['2.1.5']
# versions:     3.0
# qmid:         None

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 3.0
# resources: None

substitutions_1 = []

init_script_1 = """create table tdate (id integer not null primary key, val date);
create index tdateix1 on tdate (val);
commit;
insert into tdate values (0, '1997-12-31');
insert into tdate values (1, '1998-01-01');
insert into tdate values (2, '1998-01-02');
insert into tdate values (3, '1998-01-03');
insert into tdate values (4, '1998-01-04');
insert into tdate values (5, '1998-01-05');
commit;
"""

db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)

test_script_1 = """select count(*) from tdate where val >= timestamp'1998-01-04 12:00:00.0000';
select count(*) from tdate where val < timestamp'1998-01-04 12:00:00.0000';
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
COUNT
=====================
1

COUNT
=====================
5
"""

@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
    25.052632
    95
    0.641457
    0
    0
    0
    0
    183
    0.128151
    0
    0
    953
    0.667367
    b967ba0197b144171458b230c2dfe31844ba0b72
    5,231
    py
    Python
    dags/download_decrypt_transfer_files.py
    hms-dbmi/bch-pic-sure-airflow-dags
    0c1e6f07da4e270581942e551ac30284474921d4
    [ "Apache-2.0" ]
    null
    null
    null
    dags/download_decrypt_transfer_files.py
    hms-dbmi/bch-pic-sure-airflow-dags
    0c1e6f07da4e270581942e551ac30284474921d4
    [ "Apache-2.0" ]
    null
    null
    null
    dags/download_decrypt_transfer_files.py
    hms-dbmi/bch-pic-sure-airflow-dags
    0c1e6f07da4e270581942e551ac30284474921d4
    [ "Apache-2.0" ]
    null
    null
    null
    """ @author: anilkdegala """ import os from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator, BranchPythonOperator from datetime import date, timedelta, datetime from collections import OrderedDict from scripts.dag_pebbles import DagPebbles from airflow.configuration import conf from scripts.configurations import * from airflow.operators.dummy_operator import DummyOperator default_args = { "owner": "anilkdegala", "depends_on_past": True, "max_active_runs": 1, "start_date": datetime(2015, 6, 1), "is_active": True, "is_paused_upon_creation": False, } def begin_pipeline(**kwargs): print("begin_pipeline:") files = kwargs['dag_run'].conf.get('files') download_decrypt_arguments = '' transfer_arguments_list = [] for f in files: print("download_decrypt_transfer_files: file: ", f['name'], ', location: ', f['path']) output = f['name']+','+f['path']+','+f['final_name'] download_decrypt_arguments = download_decrypt_arguments + " " + output transfer_arguments_list.append(DATA_LOCATION + "/"+f['final_name']) transfer_arguments = ",".join(transfer_arguments_list) print("final download_decrypt_arguments: ",download_decrypt_arguments) print("final transfer_arguments: ",transfer_arguments) kwargs["ti"].xcom_push(key="download_decrypt_arguments", value=download_decrypt_arguments) kwargs["ti"].xcom_push(key="transfer_arguments", value=transfer_arguments) def pipeline_enable_check(**kwargs): dp = DagPebbles() if dp.pipeline_enable_check('DATA_LOAD'): return "pipeline_check_passed" else: return "pipeline_check_skipped" def pipeline_check_passed(**kwargs): print("pipeline_check_passed:") def end_pipeline(**kwargs): print("end_pipeline:") def pipeline_check_skipped(**kwargs): print("pipeline_check_skipped:") def cleanup(**kwargs): dp = DagPebbles() print("cleanup") def notify(**kwargs): dp = DagPebbles() print("notify") def end(**kwargs): dp = DagPebbles() print("end") with DAG( "DOWNLOAD_DECRYPT_TRANSFER", description="Download, Decrypt, Transfer files (Source: S3, Staging: EC2: Target: RDS Oracle)", default_args=default_args, schedule_interval=None, catchup=False, orientation="TB", tags=['Utils'], dagrun_timeout=timedelta(hours=240) ) as dag: t_pipeline_begin = PythonOperator( task_id="begin_pipeline", python_callable=begin_pipeline, provide_context=True, dag=dag, ) t_check_pipeline = BranchPythonOperator( task_id="check_pipeline", python_callable=pipeline_enable_check, provide_context=True, dag=dag, ) t_pipeline_check_passed = PythonOperator( task_id="pipeline_check_passed", python_callable=pipeline_check_passed, provide_context=True, dag=dag, ) t_pipeline_check_skipped = PythonOperator( task_id="pipeline_check_skipped", python_callable=pipeline_check_skipped, provide_context=True, dag=dag, ) download_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/download_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}}" t_download_files = BashOperator( task_id='download_files', bash_command=download_files_cmd, dag=dag) decrypt_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/decrypt_files.sh "+"{{ ti.xcom_pull(key='download_decrypt_arguments')}} " t_decrypt_files = BashOperator( task_id='decrypt_files', bash_command=decrypt_files_cmd, dag=dag) transfer_files_cmd = "/opt/bitnami/airflow/airflow-data/scripts/transfer_files_rds.pl "+"{{ ti.xcom_pull(key='transfer_arguments')}} " t_transfer_files = BashOperator( task_id='transfer_files', bash_command=transfer_files_cmd, dag=dag) t_end_pipeline = 
PythonOperator( task_id="end_pipeline", python_callable=end_pipeline, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_notify = PythonOperator( task_id="send_notifications", python_callable=notify, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_cleanup = PythonOperator( task_id="cleanup", python_callable=cleanup, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_end = PythonOperator( task_id="end", python_callable=end, provide_context=True, trigger_rule="none_failed", dag=dag, ) t_pipeline_begin >> t_check_pipeline t_check_pipeline >> t_pipeline_check_skipped >> t_end_pipeline t_check_pipeline >> t_pipeline_check_passed >> t_download_files >> t_decrypt_files >> t_transfer_files >> t_end_pipeline t_end_pipeline >> t_cleanup >> t_notify >> t_end
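A minimal sketch of the dag_run.conf payload that begin_pipeline() above expects and of the two strings it pushes to XCom. The file names, S3 paths, and the DATA_LOCATION value are hypothetical; in the DAG, DATA_LOCATION comes from scripts.configurations.

# Illustrative only: the conf shape consumed by begin_pipeline() (hypothetical values).
example_conf = {
    "files": [
        {"name": "export_a.csv.enc", "path": "s3://bucket/in", "final_name": "export_a.csv"},
        {"name": "export_b.csv.enc", "path": "s3://bucket/in", "final_name": "export_b.csv"},
    ]
}

DATA_LOCATION = "/opt/data"  # assumed placeholder; the real value is defined in scripts.configurations

# begin_pipeline() concatenates "name,path,final_name" triples for the download/decrypt
# scripts and a comma-separated list of staged paths for the transfer script:
download_decrypt_arguments = " ".join(
    f"{f['name']},{f['path']},{f['final_name']}" for f in example_conf["files"]
)
transfer_arguments = ",".join(
    DATA_LOCATION + "/" + f["final_name"] for f in example_conf["files"]
)
print(download_decrypt_arguments)
print(transfer_arguments)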
    30.770588
    171
    0.664118
    0
    0
    0
    0
    0
    0
    0
    0
    1,239
    0.236857
    b96834dcae4311b040352e86ae4bdc019619193a
    7,518
    py
    Python
    keystone-moon/keystone/endpoint_policy/controllers.py
    hashnfv/hashnfv-moon
    daaba34fa2ed4426bc0fde359e54a5e1b872208c
    [ "Apache-2.0" ]
    null
    null
    null
    keystone-moon/keystone/endpoint_policy/controllers.py
    hashnfv/hashnfv-moon
    daaba34fa2ed4426bc0fde359e54a5e1b872208c
    [ "Apache-2.0" ]
    null
    null
    null
    keystone-moon/keystone/endpoint_policy/controllers.py
    hashnfv/hashnfv-moon
    daaba34fa2ed4426bc0fde359e54a5e1b872208c
    [ "Apache-2.0" ]
    1
    2021-03-21T11:38:30.000Z
    2021-03-21T11:38:30.000Z
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystone.common import controller
from keystone.common import dependency
from keystone import notifications


@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
class EndpointPolicyV3Controller(controller.V3Controller):
    collection_name = 'endpoints'
    member_name = 'endpoint'

    def __init__(self):
        super(EndpointPolicyV3Controller, self).__init__()
        notifications.register_event_callback(
            'deleted', 'endpoint', self._on_endpoint_delete)
        notifications.register_event_callback(
            'deleted', 'service', self._on_service_delete)
        notifications.register_event_callback(
            'deleted', 'region', self._on_region_delete)
        notifications.register_event_callback(
            'deleted', 'policy', self._on_policy_delete)

    def _on_endpoint_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_endpoint(
            payload['resource_info'])

    def _on_service_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_service(
            payload['resource_info'])

    def _on_region_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_region(
            payload['resource_info'])

    def _on_policy_delete(self, service, resource_type, operation, payload):
        self.endpoint_policy_api.delete_association_by_policy(
            payload['resource_info'])

    @controller.protected()
    def create_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Create an association between a policy and an endpoint."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def check_policy_association_for_endpoint(self, context,
                                              policy_id, endpoint_id):
        """Check an association between a policy and an endpoint."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def delete_policy_association_for_endpoint(self, context,
                                               policy_id, endpoint_id):
        """Delete an association between a policy and an endpoint."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_endpoint(endpoint_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, endpoint_id=endpoint_id)

    @controller.protected()
    def create_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Create an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def check_policy_association_for_service(self, context,
                                             policy_id, service_id):
        """Check an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def delete_policy_association_for_service(self, context,
                                              policy_id, service_id):
        """Delete an association between a policy and a service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id)

    @controller.protected()
    def create_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Create an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.create_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def check_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Check an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.check_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def delete_policy_association_for_region_and_service(
            self, context, policy_id, service_id, region_id):
        """Delete an association between a policy and region+service."""
        self.policy_api.get_policy(policy_id)
        self.catalog_api.get_service(service_id)
        self.catalog_api.get_region(region_id)
        self.endpoint_policy_api.delete_policy_association(
            policy_id, service_id=service_id, region_id=region_id)

    @controller.protected()
    def get_policy_for_endpoint(self, context, endpoint_id):
        """Get the effective policy for an endpoint."""
        self.catalog_api.get_endpoint(endpoint_id)
        ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
        # NOTE(henry-nash): since the collection and member for this class is
        # set to endpoints, we have to handle wrapping this policy entity
        # ourselves.
        self._add_self_referential_link(context, ref)
        return {'policy': ref}

    # NOTE(henry-nash): As in the catalog controller, we must ensure that the
    # legacy_endpoint_id does not escape.

    @classmethod
    def filter_endpoint(cls, ref):
        if 'legacy_endpoint_id' in ref:
            ref.pop('legacy_endpoint_id')
        return ref

    @classmethod
    def wrap_member(cls, context, ref):
        ref = cls.filter_endpoint(ref)
        return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)

    @controller.protected()
    def list_endpoints_for_policy(self, context, policy_id):
        """List endpoints with the effective association to a policy."""
        self.policy_api.get_policy(policy_id)
        refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
        return EndpointPolicyV3Controller.wrap_collection(context, refs)
    45.017964
    79
    0.699654
    6,754
    0.898377
    0
    0
    6,827
    0.908087
    0
    0
    1,723
    0.229183
    b96893ff0c22487256e91c812d37a56c2c479eb3
    11,886
    py
    Python
    src/nibetaseries/cli/run.py
    ipacheco-uy/NiBetaSeries
    3d8716552f22f925524d80af9aace09469c22d4d
    [ "MIT" ]
    1
    2019-10-03T21:20:48.000Z
    2019-10-03T21:20:48.000Z
    src/nibetaseries/cli/run.py
    ipacheco-uy/NiBetaSeries
    3d8716552f22f925524d80af9aace09469c22d4d
    [ "MIT" ]
    null
    null
    null
    src/nibetaseries/cli/run.py
    ipacheco-uy/NiBetaSeries
    3d8716552f22f925524d80af9aace09469c22d4d
    [ "MIT" ]
    null
    null
    null
    #!/usr/bin/env python # -*- coding: utf-8 -*- """ Module that contains the command line app. Why does this file exist, and why not put this in __main__? You might be tempted to import things from __main__ later, but that will cause problems: the code will get executed twice: - When you run `python -m nibetaseries` python will execute ``__main__.py`` as a script. That means there won't be any ``nibetaseries.__main__`` in ``sys.modules``. - When you import __main__ it will get executed again (as a module) because there's no ``nibetaseries.__main__`` in ``sys.modules``. Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration """ from __future__ import absolute_import import os import argparse from argparse import RawTextHelpFormatter from glob import glob from multiprocessing import cpu_count from nipype import config as ncfg def get_parser(): """Build parser object""" from ..__init__ import __version__ import sys verstr = 'nibs v{}'.format(__version__) parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments', formatter_class=RawTextHelpFormatter) parser.add_argument('bids_dir', help='The directory with the input dataset ' 'formatted according to the BIDS standard.') parser.add_argument('derivatives_pipeline', help='The pipeline that contains ' 'minimally preprocessed img, brainmask, and confounds.tsv') parser.add_argument('output_dir', help='The directory where the output directory ' 'and files should be stored. If you are running group level analysis ' 'this folder should be prepopulated with the results of the' 'participant level analysis.') parser.add_argument('analysis_level', choices=['participant', 'group'], help='Level of the analysis that will be performed ' 'Multiple participant level analyses can be run independently ' '(in parallel) using the same output_dir') parser.add_argument('-v', '--version', action='version', version=verstr) # Atlas Arguments (Required Options) atlas_args = parser.add_argument_group('Required Atlas Arguments') atlas_args.add_argument('-a', '--atlas-img', action='store', required=('-l' in sys.argv or '--atlas-lut' in sys.argv), help='input atlas nifti where each voxel within a "region" ' 'is labeled with the same integer and there is a unique ' 'integer associated with each region of interest.') atlas_args.add_argument('-l', '--atlas-lut', action='store', required=('-a' in sys.argv or '--atlas-img' in sys.argv), help='atlas look up table (tsv) formatted with the columns: ' 'index, regions which correspond to the regions in the ' 'nifti file specified by --atlas-img.') # preprocessing options proc_opts = parser.add_argument_group('Options for processing') proc_opts.add_argument('--estimator', default='lss', choices=['lss', 'lsa'], help='beta series modeling method') proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0, help='select a smoothing kernel (mm)') proc_opts.add_argument('-hp', '--high-pass', action='store', type=float, default=0.0078125, help='high pass filter (Hz)') proc_opts.add_argument('-c', '--confounds', help='The confound column names ' 'that are to be included in nuisance regression. 
' 'write the confounds you wish to include separated by a space', nargs="+") proc_opts.add_argument('--hrf-model', default='glover', choices=['glover', 'spm', 'fir', 'glover + derivative', 'glover + derivative + dispersion', 'spm + derivative', 'spm + derivative + dispersion'], help='convolve your regressors ' 'with one of the following hemodynamic response functions') proc_opts.add_argument('--fir-delays', default=None, nargs='+', type=int, help='FIR delays in volumes', metavar='VOL') proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files ' 'are stored (i.e. non-essential files). ' 'This directory can be deleted once you are reasonably ' 'certain nibs finished as expected.') # Image Selection options image_opts = parser.add_argument_group('Options for selecting images') parser.add_argument('--participant-label', nargs="+", help='The label(s) of the participant(s) ' 'that should be analyzed. The label ' 'corresponds to sub-<participant_label> from the BIDS spec ' '(so it does not include "sub-"). If this parameter is not ' 'provided all subjects should be analyzed. Multiple ' 'participants can be specified with a space separated list.') image_opts.add_argument('--session-label', action='store', default=None, help='select a session to analyze') image_opts.add_argument('-t', '--task-label', action='store', default=None, help='select a specific task to be processed') image_opts.add_argument('--run-label', action='store', default=None, help='select a run to analyze') image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym', choices=['MNI152NLin2009cAsym'], help='select a bold derivative in a specific space to be used') image_opts.add_argument('--description-label', action='store', default=None, help='select a bold file with particular ' '`desc` label to process') image_opts.add_argument('--exclude-description-label', action='store_true', default=False, help='exclude this `desc` label from nibetaseries') # performance options g_perfm = parser.add_argument_group('Options to handle performance') g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int, help='maximum number of threads across all processes') g_perfm.add_argument('--use-plugin', action='store', default=None, help='nipype plugin configuration file') # misc options misc = parser.add_argument_group('misc options') misc.add_argument('--graph', action='store_true', default=False, help='generates a graph png of the workflow') return parser def main(): from ..workflows.base import init_nibetaseries_participant_wf # get commandline options opts = get_parser().parse_args() # check inputs if (opts.hrf_model == 'fir') and (opts.fir_delays is None): raise ValueError('If the FIR HRF model is selected, ' 'FIR delays must be provided.') # Set up directories # TODO: set up some sort of versioning system bids_dir = os.path.abspath(opts.bids_dir) derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline) output_dir = os.path.abspath(opts.output_dir) os.makedirs(output_dir, exist_ok=True) log_dir = os.path.join(output_dir, 'logs') os.makedirs(log_dir, exist_ok=True) if opts.work_dir: work_dir = os.path.abspath(opts.work_dir) else: work_dir = os.path.join(os.getcwd(), 'nibetaseries_work') os.makedirs(work_dir, exist_ok=True) # only for a subset of subjects if opts.participant_label: subject_list = opts.participant_label # for all subjects else: subject_dirs = glob(os.path.join(bids_dir, "sub-*")) subject_list = [subject_dir.split("-")[-1] for 
subject_dir in subject_dirs] # Nipype plugin configuration # Load base plugin_settings from file if --use-plugin if opts.use_plugin is not None: from yaml import load as loadyml with open(opts.use_plugin) as f: plugin_settings = loadyml(f) plugin_settings.setdefault('plugin_args', {}) else: # Defaults plugin_settings = { 'plugin': 'MultiProc', 'plugin_args': { 'raise_insufficient': False, 'maxtasksperchild': 1, } } # Resource management options # Note that we're making strong assumptions about valid plugin args # This may need to be revisited if people try to use batch plugins nthreads = plugin_settings['plugin_args'].get('n_procs') # Permit overriding plugin config with specific CLI options if nthreads is None or opts.nthreads is not None: nthreads = opts.nthreads if nthreads is None or nthreads < 1: nthreads = cpu_count() plugin_settings['plugin_args']['n_procs'] = nthreads # Nipype config (logs and execution) ncfg.update_config({ 'logging': {'log_directory': log_dir, 'log_to_file': True}, 'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt', 'parameterize_dirs': False}, }) # running participant level if opts.analysis_level == "participant": nibetaseries_participant_wf = init_nibetaseries_participant_wf( estimator=opts.estimator, atlas_img=os.path.abspath(opts.atlas_img), atlas_lut=os.path.abspath(opts.atlas_lut), bids_dir=bids_dir, derivatives_pipeline_dir=derivatives_pipeline_dir, exclude_description_label=opts.exclude_description_label, fir_delays=opts.fir_delays, hrf_model=opts.hrf_model, high_pass=opts.high_pass, output_dir=output_dir, run_label=opts.run_label, selected_confounds=opts.confounds, session_label=opts.session_label, smoothing_kernel=opts.smoothing_kernel, space_label=opts.space_label, subject_list=subject_list, task_label=opts.task_label, description_label=opts.description_label, work_dir=work_dir, ) if opts.graph: nibetaseries_participant_wf.write_graph(graph2use='colored', format='svg', simple_form=True) try: nibetaseries_participant_wf.run(**plugin_settings) except RuntimeError as e: if "Workflow did not execute cleanly" in str(e): print("Workflow did not execute cleanly") else: raise e elif opts.analysis_level == "group": raise NotImplementedError('group analysis not currently implemented') def init(): if __name__ == "__main__": raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n" "Please `pip install` NiBetaSeries and use the `nibs` command") init()
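A small sketch of how the argparse parser defined above might be exercised, assuming the nibetaseries package is installed so that nibetaseries.cli.run is importable. All paths and labels are made up for the example; it only parses arguments and does not run the workflow.

# Illustrative only: parse a hypothetical participant-level invocation.
from nibetaseries.cli.run import get_parser

opts = get_parser().parse_args([
    '/data/bids',                  # bids_dir
    'fmriprep',                    # derivatives_pipeline
    '/data/out',                   # output_dir
    'participant',                 # analysis_level
    '-a', '/data/atlas.nii.gz',    # --atlas-img (paired with --atlas-lut)
    '-l', '/data/atlas.tsv',       # --atlas-lut
    '--estimator', 'lss',
    '-sm', '6.0',
])
print(opts.analysis_level, opts.estimator, opts.smoothing_kernel)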
    46.611765
    98
    0.595406
    0
    0
    0
    0
    0
    0
    0
    0
    4,930
    0.414774
    b9693ae1ef191dd2735a2abba99bb1bc689af26f
    2,727
    py
    Python
    custom_components/senz/config_flow.py
    astrandb/senz_hass
    6725d37fd9c6d250ac10a16e68c56908bf1c8404
    [ "MIT" ]
    2
    2022-01-15T09:55:58.000Z
    2022-02-10T10:13:35.000Z
    custom_components/senz/config_flow.py
    astrandb/senz_hass
    6725d37fd9c6d250ac10a16e68c56908bf1c8404
    [ "MIT" ]
    4
    2022-01-15T19:41:28.000Z
    2022-02-14T16:01:47.000Z
    custom_components/senz/config_flow.py
    astrandb/senz_hass
    6725d37fd9c6d250ac10a16e68c56908bf1c8404
    [ "MIT" ]
    null
    null
    null
    """Config flow for SENZ WiFi.""" from __future__ import annotations import logging from typing import Any import voluptuous as vol from homeassistant.components import persistent_notification from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers import config_entry_oauth2_flow from .const import DOMAIN from .pysenz import PreAPI class OAuth2FlowHandler( config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN ): """Config flow to handle SENZ WiFi OAuth2 authentication.""" DOMAIN = DOMAIN @property def logger(self) -> logging.Logger: """Return logger.""" return logging.getLogger(__name__) @property def extra_authorize_data(self) -> dict: """Extra data that needs to be appended to the authorize url.""" return { "scope": "restapi offline_access", } async def async_step_reauth( self, entry: dict[str, Any] | None = None ) -> FlowResult: """Perform reauth upon an API authentication error.""" self.entry = entry persistent_notification.async_create( self.hass, f"Senz integration for account {entry['auth_implementation']} needs to be re-authenticated. Please go to the [integrations page](/config/integrations) to re-configure it.", "Senz re-authentication", "senz_reauth", ) return await self.async_step_reauth_confirm() async def async_step_reauth_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Dialog that informs the user that reauth is required.""" if user_input is None: return self.async_show_form( step_id="reauth_confirm", description_placeholders={"account": self.entry["auth_implementation"]}, data_schema=vol.Schema({}), errors={}, ) persistent_notification.async_dismiss(self.hass, "senz_reauth") return await self.async_step_user() async def async_oauth_create_entry(self, data: dict) -> dict: """Create an oauth config entry or update existing entry for reauth.""" pre_api = PreAPI(self.hass) resp = await pre_api.getAccount(data["token"]["access_token"]) account = resp["userName"] existing_entry = await self.async_set_unique_id(account) if existing_entry: self.hass.config_entries.async_update_entry(existing_entry, data=data) await self.hass.config_entries.async_reload(existing_entry.entry_id) return self.async_abort(reason="reauth_successful") return self.async_create_entry(title=account, data=data)
    34.518987
    184
    0.671067
    2,363
    0.86652
    0
    0
    321
    0.117712
    1,829
    0.6707
    708
    0.259626
    b9697b05a9b44247d80463465fa92118d707fb98
    6,465
    py
    Python
    astropy_helpers/git_helpers.py
    bsipocz/astropy-helpers
    4999df1cfb6a5022347b0cef9caf8a556517c625
    [ "PSF-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
    9
    2019-12-06T13:12:33.000Z
    2021-10-05T12:47:15.000Z
    astropy_helpers/git_helpers.py
    bsipocz/astropy-helpers
    4999df1cfb6a5022347b0cef9caf8a556517c625
    [ "PSF-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
    2
    2019-11-28T17:20:27.000Z
    2019-12-09T18:44:35.000Z
    astropy_helpers/git_helpers.py
    bsipocz/astropy-helpers
    4999df1cfb6a5022347b0cef9caf8a556517c625
    [ "PSF-2.0", "BSD-2-Clause", "BSD-3-Clause" ]
    3
    2019-11-28T17:04:22.000Z
    2021-10-19T13:12:34.000Z
    # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Utilities for retrieving revision information from a project's git repository. """ # Do not remove the following comment; it is used by # astropy_helpers.version_helpers to determine the beginning of the code in # this module # BEGIN import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 128: # git returns 128 if the command is not run from within a git # repository tree. 
In this case, a warning is produced above but we # return the default dev version of '0'. return '0' elif not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() # This function is tested but it is only ever executed within a subprocess when # creating a fake package, so it doesn't get picked up by coverage metrics. def _get_repo_path(pathname, levels=None): # pragma: no cover """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None
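A brief usage sketch for the two public helpers above, based on their docstrings. The import path mirrors the file's location (astropy_helpers/git_helpers.py) and the checkout path is hypothetical.

# Illustrative only: query revision info for a hypothetical git checkout.
from astropy_helpers.git_helpers import get_git_devstr, update_git_devstr

version = update_git_devstr('1.2.dev', path='/path/to/checkout')   # e.g. '1.2.dev123' inside a repo
sha = get_git_devstr(sha=True, show_warning=False, path='/path/to/checkout')   # full SHA1 or ''
count = get_git_devstr(sha=False, path='/path/to/checkout')        # commit count as a string
print(version, sha[:8], count)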
    33.324742
    79
    0.612065
    0
    0
    0
    0
    0
    0
    0
    0
    3,176
    0.491261
    b96b280416f0d557826ffa670a7914f2d45e5fc5
    526
    py
    Python
    src/sot_talos_balance/test/test_feet_admittance.py
    imaroger/sot-talos-balance
    5e56700b4e105273ecf6feb3474789beac469a77
    [ "BSD-2-Clause" ]
    null
    null
    null
    src/sot_talos_balance/test/test_feet_admittance.py
    imaroger/sot-talos-balance
    5e56700b4e105273ecf6feb3474789beac469a77
    [ "BSD-2-Clause" ]
    null
    null
    null
    src/sot_talos_balance/test/test_feet_admittance.py
    imaroger/sot-talos-balance
    5e56700b4e105273ecf6feb3474789beac469a77
    [ "BSD-2-Clause" ]
    null
    null
    null
'''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient

try:
    # Python 2
    input = raw_input  # noqa
except NameError:
    pass

run_test('appli_feet_admittance.py')

run_ft_calibration('robot.ftc')
input("Wait before running the test")

print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')

input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
    25.047619
    97
    0.752852
    0
    0
    0
    0
    0
    0
    0
    0
    266
    0.505703
    b96bb8e94e8bbfe556cc0ad3a314b6991573aa47
    544
    py
    Python
    tests/test_db.py
    davebryson/py-tendermint
    ec6a38a54950d9841759b0f2ed93659b58948a03
    [ "Apache-2.0" ]
    24
    2017-08-18T20:36:27.000Z
    2020-03-27T08:55:39.000Z
    tests/test_db.py
    davebryson/py-tendermint
    ec6a38a54950d9841759b0f2ed93659b58948a03
    [ "Apache-2.0" ]
    6
    2017-10-14T05:50:34.000Z
    2019-06-03T08:39:49.000Z
    tests/test_db.py
    davebryson/py-tendermint
    ec6a38a54950d9841759b0f2ed93659b58948a03
    [ "Apache-2.0" ]
    5
    2018-01-09T11:07:06.000Z
    2019-06-02T14:34:34.000Z
import os

from tendermint.db import VanillaDB
from tendermint.utils import home_dir


def test_database():
    dbfile = home_dir('temp', 'test.db')
    db = VanillaDB(dbfile)

    db.set(b'dave', b'one')
    result = db.get(b'dave')
    assert(b'one' == result)

    db.set(b'dave', b'two')
    result = db.get(b'dave')
    assert(b'two' == result)

    assert(None == db.get(b'doesntexist'))

    assert(db.exists(b'dave'))
    db.delete(b'dave')
    assert(db.exists(b'dave') == False)

    if os.path.exists(dbfile):
        os.remove(dbfile)
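For readers without the tendermint package, a purely illustrative, dict-backed stand-in with the same set/get/exists/delete interface that the test above exercises; it is not tendermint's VanillaDB.

# Hypothetical stand-in (NOT tendermint's VanillaDB): same key/value interface as the test uses.
class InMemoryDB:
    def __init__(self):
        self._data = {}

    def set(self, key, value):
        self._data[key] = value

    def get(self, key):
        return self._data.get(key)  # returns None for missing keys, like the test expects

    def exists(self, key):
        return key in self._data

    def delete(self, key):
        self._data.pop(key, None)


db = InMemoryDB()
db.set(b'dave', b'one')
assert db.get(b'dave') == b'one'
db.delete(b'dave')
assert not db.exists(b'dave')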
    20.923077
    42
    0.621324
    0
    0
    0
    0
    0
    0
    0
    0
    102
    0.1875
    b96d766a7c5eab27eb3785b1277b6beccda7c9ed
    1,446
    py
    Python
    auth/tests/test_views.py
    asb29/Redundant
    ee816fd41f9217610bd11f757cf9175288723c70
    [ "MIT" ]
    null
    null
    null
    auth/tests/test_views.py
    asb29/Redundant
    ee816fd41f9217610bd11f757cf9175288723c70
    [ "MIT" ]
    null
    null
    null
    auth/tests/test_views.py
    asb29/Redundant
    ee816fd41f9217610bd11f757cf9175288723c70
    [ "MIT" ]
    null
    null
    null
from django.test import TestCase
from django.test import Client


class RegisterTestCase(TestCase):
    def test_register(self):
        c = Client()

        # on success redirects to /
        response = c.post('/accounts/register/', {
            'username': 'asdas',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        self.assertRedirects(response, '/')

        # passwords don't match
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd1',
            'password1': 'asdasdasd1',
            'password2': 'asdasdasd2'
        })
        self.assertEquals(response.status_code, 200)

        # username is empty
        response = c.post('/accounts/register/', {
            'username': '',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        self.assertEquals(response.status_code, 200)

        # no password
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd',
            'password1': '',
            'password2': ''
        })
        self.assertEquals(response.status_code, 200)

        # username and password are similar
        response = c.post('/accounts/register/', {
            'username': 'asdasdasd0',
            'password1': 'asdasdasd1',
            'password2': 'asdasdasd1'
        })
        self.assertEquals(response.status_code, 200)
    30.125
    52
    0.53527
    1,379
    0.953665
    0
    0
    0
    0
    0
    0
    533
    0.368603
    b96f6c5854c1e905c9ad5d8f08d016972c710a1f
    4,134
    py
    Python
    projects/OneNet/onenet/head.py
    iFighting/OneNet
    6e33b46d2aa13131262833c75f0fd1c3d224ef03
    [ "MIT" ]
    2
    2021-06-16T01:31:17.000Z
    2021-11-25T15:27:28.000Z
    projects/OneNet/onenet/head.py
    xieenze/OneNet
    3b06ad6832727cef4c0262389de4cdbb2a666197
    [ "MIT" ]
    null
    null
    null
    projects/OneNet/onenet/head.py
    xieenze/OneNet
    3b06ad6832727cef4c0262389de4cdbb2a666197
    [ "MIT" ]
    1
    2021-02-04T06:38:42.000Z
    2021-02-04T06:38:42.000Z
#
# Modified by Peize Sun
# Contact: [email protected]
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.

Copy-paste from torch.nn.Transformer with modifications:
    * positional encodings are passed in MHattention
    * extra LN at the end of encoder is removed
    * decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List

import torch
from torch import nn, Tensor
import torch.nn.functional as F

from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes

from .deconv import CenternetDeconv


class Head(nn.Module):

    def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
        super().__init__()

        # Build heads.
        num_classes = cfg.MODEL.OneNet.NUM_CLASSES
        d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
        activation = cfg.MODEL.OneNet.ACTIVATION

        self.deconv = CenternetDeconv(cfg, backbone_shape)

        self.num_classes = num_classes
        self.d_model = d_model
        self.num_classes = num_classes
        self.activation = _get_activation_fn(activation)

        self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
        self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
        self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)

        # Init parameters.
        prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
        self.bias_value = -math.log((1 - prior_prob) / prior_prob)
        self._reset_parameters()

    def _reset_parameters(self):
        # init all parameters.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

            # initialize the bias for focal loss.
            if p.shape[-1] == self.num_classes:
                nn.init.constant_(p, self.bias_value)

    def forward(self, features_list):
        features = self.deconv(features_list)
        locations = self.locations(features)[None]

        feat = self.activation(self.feat1(features))

        class_logits = self.cls_score(feat)
        pred_ltrb = F.relu(self.ltrb_pred(feat))
        pred_bboxes = self.apply_ltrb(locations, pred_ltrb)

        return class_logits, pred_bboxes

    def apply_ltrb(self, locations, pred_ltrb):
        """
        :param locations: (1, 2, H, W)
        :param pred_ltrb: (N, 4, H, W)
        """
        pred_boxes = torch.zeros_like(pred_ltrb)
        pred_boxes[:, 0, :, :] = locations[:, 0, :, :] - pred_ltrb[:, 0, :, :]  # x1
        pred_boxes[:, 1, :, :] = locations[:, 1, :, :] - pred_ltrb[:, 1, :, :]  # y1
        pred_boxes[:, 2, :, :] = locations[:, 0, :, :] + pred_ltrb[:, 2, :, :]  # x2
        pred_boxes[:, 3, :, :] = locations[:, 1, :, :] + pred_ltrb[:, 3, :, :]  # y2

        return pred_boxes

    @torch.no_grad()
    def locations(self, features, stride=4):
        """
        Arguments:
            features: (N, C, H, W)
        Return:
            locations: (2, H, W)
        """
        h, w = features.size()[-2:]
        device = features.device

        shifts_x = torch.arange(
            0, w * stride,
            step=stride,
            dtype=torch.float32, device=device
        )
        shifts_y = torch.arange(
            0, h * stride,
            step=stride,
            dtype=torch.float32, device=device
        )
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
        shift_x = shift_x.reshape(-1)
        shift_y = shift_y.reshape(-1)
        locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2

        locations = locations.reshape(h, w, 2).permute(2, 0, 1)

        return locations


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
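A small numeric check, assuming only torch, that reproduces the grid-center logic of Head.locations for a tiny 2x3 feature map with stride 4 and then decodes one LTRB prediction the same way apply_ltrb does. The tensor values are made up for illustration.

# Illustrative only: grid centers and one decoded box for a 2x3 map, stride 4.
import torch

stride, h, w = 4, 2, 3
ys = torch.arange(0, h * stride, step=stride, dtype=torch.float32)
xs = torch.arange(0, w * stride, step=stride, dtype=torch.float32)
shift_y, shift_x = torch.meshgrid(ys, xs)
locations = torch.stack((shift_x.reshape(-1), shift_y.reshape(-1)), dim=1) + stride // 2
locations = locations.reshape(h, w, 2).permute(2, 0, 1)   # (2, H, W): x-centers, then y-centers

# One "prediction": distances (l, t, r, b) of 1 pixel around every center.
pred_ltrb = torch.ones(1, 4, h, w)
x1 = locations[0] - pred_ltrb[0, 0]
y1 = locations[1] - pred_ltrb[0, 1]
x2 = locations[0] + pred_ltrb[0, 2]
y2 = locations[1] + pred_ltrb[0, 3]
# Top-left cell has center (2., 2.), so the decoded box is (1., 1., 3., 3.).
print(locations[:, 0, 0], (x1[0, 0], y1[0, 0], x2[0, 0], y2[0, 0]))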
    32.296875
    94
    0.600871
    3,152
    0.762458
    0
    0
    859
    0.207789
    0
    0
    830
    0.200774
    b96fae5c29fd446ea7199733a629bbe0f6190046
    49,876
    py
    Python
    mermaid/utils.py
    HastingsGreer/mermaid
    bd13c5fc427eb8cd9054973a8eaaeb302078182d
    [ "Apache-2.0" ]
    120
    2019-10-29T23:53:02.000Z
    2022-03-30T02:59:58.000Z
    mermaid/utils.py
    AlexanderChristgau/mermaid
    ba07883cc3cb5982e4655048a434b4495cb49c6d
    [ "Apache-2.0" ]
    10
    2019-11-05T09:28:35.000Z
    2022-01-09T19:12:51.000Z
    mermaid/utils.py
    AlexanderChristgau/mermaid
    ba07883cc3cb5982e4655048a434b4495cb49c6d
    [ "Apache-2.0" ]
    19
    2019-11-10T13:34:39.000Z
    2022-03-13T20:30:10.000Z
    """Various utility functions. .. todo:: Reorganize this package in a more meaningful way. """ from __future__ import print_function from __future__ import absolute_import # from builtins import str # from builtins import range import torch from torch.nn.parameter import Parameter from torch.autograd import Variable from .libraries.modules.stn_nd import STN_ND_BCXYZ from .data_wrapper import AdaptVal from .data_wrapper import MyTensor from . import smoother_factory as sf from .data_wrapper import USE_CUDA import numpy as np from . import finite_differences as fd import torch.nn as nn import torch.nn.init as init from . import module_parameters as pars from .spline_interpolation import SplineInterpolation_ND_BCXYZ import os try: from .libraries.functions.nn_interpolation import get_nn_interpolation except ImportError: print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). ' 'Some functionality may not be available.') def my_hasnan(x): """Check if any input elements are NaNs. :param x: numpy array :return: True if NaNs are present, False else """ return (x != x).any() def create_symlink_with_correct_ext(sf, tf): abs_s = os.path.abspath(sf) ext_s = os.path.splitext(abs_s)[1] abs_t = os.path.abspath(tf) root_t,ext_t = os.path.splitext(abs_t) abs_t_with_right_ext = root_t + ext_s if os.path.isfile(abs_t_with_right_ext): if os.path.samefile(abs_s,abs_t_with_right_ext): # nothing to do here, these are already the same file return else: os.remove(abs_t_with_right_ext) # now we can do the symlink os.symlink(abs_s,abs_t_with_right_ext) def combine_dict(d1,d2): """Creates a dictionary which has entries from both of them. :param d1: dictionary 1 :param d2: dictionary 2 :return: resulting dictionary """ d = d1.copy() d.update(d2) return d def get_parameter_list_from_parameter_dict(pd): """Takes a dictionary which contains key value pairs for model parameters and converts it into a list of parameters that can be used as an input to an optimizer. :param pd: parameter dictionary :return: list of parameters """ pl = [] for key in pd: pl.append(pd[key]) return pl def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd): """Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys based on memory id. 
:param pd: parameter dictionary :return: tuple of (parameter_list, name_dictionary) """ par_to_name_dict = dict() pl = [] for key in pd: pl.append(pd[key]) par_to_name_dict[pd[key]] = key return pl, par_to_name_dict def remove_infs_from_variable(v): # 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor # 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor # 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor # todo: maybe find a cleaner way of handling this # this is to make sure that subsequent sums work (hence will be smaller than it could be, # but values of this size should not occur in practice anyway sz = v.size() reduction_factor = np.prod(np.array(sz)) condition = True if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32: return torch.clamp(v, min=(np.asscalar(np.finfo('float32').min))/reduction_factor, max=(np.asscalar(np.finfo('float32').max))/reduction_factor) elif v.data.dtype == torch.DoubleTensor or type(v.data) == torch.cuda.DoubleTensor: return torch.clamp(v, min=(np.asscalar(np.finfo('float64').min))/reduction_factor, max=(np.asscalar(np.finfo('float64').max))/reduction_factor) elif v.data.dtype == torch.HalfTensor or type(v.data) == torch.cuda.HalfTensor: return torch.clamp(v, min=(np.asscalar(np.finfo('float16').min))/reduction_factor, max=(np.asscalar(np.finfo('float16').max))/reduction_factor) else: raise ValueError('Unknown data type: ' + str( type(v.data))) def lift_to_dimension(A, dim): """Creates a view of A of dimension dim (by adding dummy dimensions if necessary). :param A: numpy array :param dim: desired dimension of view :return: returns view of A of appropriate dimension """ current_dim = len(A.shape) if current_dim > dim: raise ValueError('Can only add dimensions, but not remove them') if current_dim == dim: return A else: return A.reshape([1]*(dim-current_dim)+list(A.shape)) def get_dim_of_affine_transform(Ab): """Returns the number of dimensions corresponding to an affine transformation of the form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply [a1;a2;a3;b], i.e., all columns stacked on top of each other. :param Ab: parameter vector :return: dimensionality of transform (1,2,or 3) """ nr = len(Ab) if nr==2: return 1 elif nr==6: return 2 elif nr==12: return 3 else: raise ValueError('Only supports dimensions 1, 2, and 3.') def set_affine_transform_to_identity(Ab): """Sets the affine transformation as given by the column vector Ab to the identity transform. :param Ab: Affine parameter vector (will be overwritten with the identity transform) :return: """ dim = get_dim_of_affine_transform(Ab) if dim==1: Ab.zero_() Ab[0]=1. elif dim==2: Ab.zero_() Ab[0]=1. Ab[3]=1. elif dim==3: Ab.zero_() Ab[0]=1. Ab[4]=1. Ab[8]=1. else: raise ValueError('Only supports dimensions 1, 2, and 3.') def set_affine_transform_to_identity_multiN(Ab): """Set the affine transforms to the identity (in the case of arbitrary batch size). :param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans. :return: """ sz = Ab.size() nr_of_images = sz[0] for nrI in range(nr_of_images): set_affine_transform_to_identity(Ab[nrI, :]) def get_inverse_affine_param(Ab): """Computes inverse of affine transformation. Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb :param Ab: B x pars (batch size x param. 
vector) :return: Inverse of affine parameters """ dim =0 if Ab.shape[1] == 2: dim = 1 elif Ab.shape[1] == 6: dim = 2 elif Ab.shape[1] == 12: dim = 3 if dim not in [1, 2, 3]: raise ValueError('Only supports dimensions 1, 2, and 3.') Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2) Ab_inv = torch.zeros_like(Ab) for n in range(Ab.shape[0]): tm_inv = torch.inverse(Ab[n, :, :dim]) Ab_inv[n, :, :dim] = tm_inv Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim]) inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1) return inv_affine_param def update_affine_param(Ab, Cd): """Update affine parameters. Formally: C(Ax+b)+d = CAx+Cb+d :param Ab: B x pars (batch size x param. vector) :return: Updated affine parameters """ dim = 0 if Ab.shape[1]==2: dim = 1 elif Ab.shape[1]==6: dim = 2 elif Ab.shape[1]==12: dim = 3 if dim not in [1, 2, 3]: raise ValueError('Only supports dimensions 1, 2, and 3.') Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2) Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2) updated_param = torch.zeros_like(Ab) for n in range(Ab.shape[0]): tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim]) updated_param[n,:,:dim] = tm_param updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim] updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1) return updated_param def apply_affine_transform_to_map(Ab,phi): """Applies an affine transform to a map. :param Ab: affine transform parameter column vector :param phi: map; format nrCxXxYxZ (nrC corresponds to dimension) :return: returns transformed map """ sz = phi.size() dim = len(sz) - 1 if dim not in [1,2,3]: raise ValueError('Only supports dimensions 1, 2, and 3.') phiR = MyTensor(sz).zero_().type_as(phi) if dim == 1: phiR = phi * Ab[0] + Ab[1] elif dim == 2: phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1 phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2 elif dim == 3: phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9] phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10] phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11] else: raise ValueError('Only supports dimensions 1, 2, and 3.') return phiR def apply_affine_transform_to_map_multiNC(Ab,phi): """Applies an affine transform to maps (for arbitrary batch size). :param Ab: affine transform parameter column vectors (batch size x param. vector) :param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension) :return: returns transformed maps """ sz = phi.size() dim = get_dim_of_affine_transform(Ab[0,:]) nr_of_images = Ab.size()[0] if nr_of_images != sz[0]: raise ValueError('Incompatible number of affine transforms') if dim != len(sz)-2: raise ValueError('Incompatible number of affine transforms') phiR = MyTensor(sz).zero_().type_as(phi) for nrI in range(nr_of_images): phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...]) return phiR def compute_normalized_gaussian(X, mu, sig): """Computes a normalized Gaussian. 
:param X: map with coordinates at which to evaluate :param mu: array indicating the mean :param sig: array indicating the standard deviations for the different dimensions :return: Normalized Gaussian evaluated at coordinates in X Example:: >>> mu, sig = [1,1], [1,1] >>> X = [0,0] >>> print(compute_normalized_gaussian(X, mu, sig) """ dim = len(mu) if dim == 1: g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.))) g = g/g.sum() return g elif dim == 2: g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.)) - np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))) g = g/g.sum() return g elif dim == 3: g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.)) -np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)) -np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.))) g = g / g.sum() return g else: raise ValueError('Can only compute Gaussians in dimensions 1-3') def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True): if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]: raise ValueError('Currently only orders 0 to 9 are supported') if spline_order == 0: # return get_warped_label_map(I0,phi,spacing) stn = STN_ND_BCXYZ(spacing, zero_boundary, use_bilinear=False, use_01_input=use_01_input) elif spline_order == 1: stn = STN_ND_BCXYZ(spacing,zero_boundary, use_bilinear=True, use_01_input=use_01_input) else: stn = SplineInterpolation_ND_BCXYZ(spacing, spline_order) I1_warped = stn(I0, phi) return I1_warped def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): """Warps image. :param I0: image to warp, image size XxYxZ :param phi: map for the warping, size dimxXxYxZ :param spacing: image spacing [dx,dy,dz] :return: returns the warped image of size XxYxZ """ # implements this by creating a different view (effectively adding dimensions) Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))), phi.view(torch.Size([1] + list(phi.size()))), spacing, spline_order, zero_boundary, use_01_input) return Iw.view(I0.size()) def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True): """Warps image. 
:param I0: image to warp, image size BxCxXxYxZ :param phi: map for the warping, size BxdimxXxYxZ :param spacing: image spacing [dx,dy,dz] :return: returns the warped image of size BxCxXxYxZ """ dim = I0.dim()-2 if dim == 1: return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) elif dim == 2: return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) elif dim == 3: return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input) else: raise ValueError('Images can only be warped in dimensions 1 to 3') def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize): """Computes spacing for the low-res parametrization from image spacing. :param spacing: image spacing :param sz: size of image :param lowResSize: size of low re parameterization :return: returns spacing of low res parameterization """ #todo: check that this is the correct way of doing it return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1) def _get_low_res_size_from_size(sz, factor): """Returns the corresponding low-res size from a (high-res) sz. :param sz: size (high-res) :param factor: low-res factor (needs to be <1) :return: low res size """ if (factor is None) or (factor >= 1): print('WARNING: Could not compute low_res_size as factor was ' + str(factor)) return np.array(sz) else: low_res_sz = np.array(sz) low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16') return low_res_sz def _compute_low_res_image(I, spacing, low_res_size, spline_order): import mermaid.image_sampling as IS sampler = IS.ResampleImage() low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order) return low_res_image def individual_parameters_to_model_parameters(ind_pars): model_pars = dict() if type(ind_pars) == type(dict()): # should already be in the right format model_pars = ind_pars else: # if ind_pars is not a dictionary assume that they come from the optimizer # (i.e., list and each list element has a dictionary with keys 'name' and 'model_params' for par in ind_pars: model_pars[par['name']] = par['model_params'] return model_pars def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing): """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`. :param lam: scalar momentum, BxCxXxYxZ :param I: image, BxCxXxYxZ :param sz: size of image :param spacing: spacing of image :return: returns the vector momentum """ nrOfI = sz[0] # number of images m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI) # attention that the second dimension here is image dim, not nrOfC nrOfC = sz[1] for c in range(nrOfC): # loop over all the channels and add the results m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...], I[:, c, ...], nrOfI, sz[2::], spacing) return m def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing): """Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`. 
:param lam: scalar momentum, batchxXxYxZ :param I: image, batchXxYxZ :param sz: size of image :param spacing: spacing of image :return: returns the vector momentum """ fdt = fd.FD_torch(spacing) dim = len(sz) m = create_ND_vector_field_variable_multiN(sz, nrOfI) if dim == 1: m[:, 0, :] = fdt.dXc(I)*lam elif dim == 2: m[:, 0, :, :] = fdt.dXc(I)*lam m[:, 1, :, :] = fdt.dYc(I)*lam elif dim == 3: m[:, 0, :, :, :] = fdt.dXc(I)*lam m[:, 1, :, :, :] = fdt.dYc(I)*lam m[:, 2, :, :, :] = fdt.dZc(I)*lam else: raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3') return m def create_ND_vector_field_variable_multiN(sz, nr_of_images=1): """ Create vector field torch Variable of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nr_of_images, dim]+list(csz)) return MyTensor(*(csz.tolist())).normal_(0., 1e-7) def create_ND_vector_field_variable(sz): """Create vector field torch Variable of given size. :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :return: returns vector field of size dimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([dim]+list(csz)) return MyTensor(*(csz.tolist())).normal_(0.,1e-7) def create_vector_parameter(nr_of_elements): """Creates a vector parameters with a specified number of elements. :param nr_of_elements: number of vector elements :return: returns the parameter vector """ return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7)) def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False): """Create vector field torch Parameter of given size. :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ dim = len(sz) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI, dim]+list(csz)) if get_field_from_external_network: tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7) tmp.requires_grad = True else: tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7)) return tmp def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False): """ Create vector field torch Parameter of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :return: returns vector field of size nrOfIxdimxXxYxZ """ nr_of_mg_weights = len(gaussian_std_weights) csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI,nr_of_mg_weights]+list(csz)) weights = torch.empty(*csz) # set the default if sched =='w_K_w': gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights] for g in range(nr_of_mg_weights): weights[:, g, ...] 
= gaussian_std_weights[g] tmp = AdaptVal(weights) if get_preweight_from_network: tmp.requires_grad = True else: tmp = Parameter(tmp) return tmp def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1): """ Create vector field torch Parameter of given size :param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D) :param nrOfI: number of images :param nrOfC: number of channels :return: returns vector field of size nrOfIxnrOfCxXxYxZ """ csz = np.array(sz) # just to make sure it is a numpy array csz = np.array([nrOfI,nrOfC]+list(csz)) return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7)) def centered_identity_map_multiN(sz, spacing, dtype='float32'): """ Create a centered identity map (shifted so it is centered around 0) :param sz: size of an image in BxCxXxYxZ format :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map """ dim = len(sz) - 2 nrOfI = sz[0] if dim == 1: id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype) elif dim == 2: id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype) elif dim == 3: id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype) else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') for n in range(nrOfI): id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype) return id def identity_map_multiN(sz,spacing,dtype='float32'): """ Create an identity map :param sz: size of an image in BxCxXxYxZ format :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map """ dim = len(sz)-2 nrOfI = int(sz[0]) if dim == 1: id = np.zeros([nrOfI,1,sz[2]],dtype=dtype) elif dim == 2: id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype) elif dim == 3: id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype) else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') for n in range(nrOfI): id[n,...] = identity_map(sz[2::],spacing,dtype=dtype) return id def centered_identity_map(sz, spacing, dtype='float32'): """ Returns a centered identity map (with 0 in the middle) if the sz is odd Otherwise shifts everything by 0.5*spacing :param sz: just the spatial dimensions, i.e., XxYxZ :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) 
:return: returns the identity map of dimension dimxXxYxZ """ dim = len(sz) if dim == 1: id = np.mgrid[0:sz[0]] elif dim == 2: id = np.mgrid[0:sz[0], 0:sz[1]] elif dim == 3: id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # now get it into range [0,(sz-1)*spacing]^d id = np.array(id.astype(dtype)) if dim == 1: id = id.reshape(1, sz[0]) # add a dummy first index for d in range(dim): id[d] *= spacing[d] if sz[d]%2==0: #even id[d] -= spacing[d]*(sz[d]//2) else: #odd id[d] -= spacing[d]*((sz[d]+1)//2) # and now store it in a dim+1 array if dim == 1: idnp = np.zeros([1, sz[0]], dtype=dtype) idnp[0, :] = id[0] elif dim == 2: idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) idnp[0, :, :] = id[0] idnp[1, :, :] = id[1] elif dim == 3: idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype) idnp[0, :, :, :] = id[0] idnp[1, :, :, :] = id[1] idnp[2, :, :, :] = id[2] else: raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map') return idnp # # def centered_min_normalized_identity_map(sz, spacing, dtype='float32'): # """ # Returns a centered identity map (with 0 in the middle) if the sz is odd # Otherwise shifts everything by 0.5*spacing # # :param sz: just the spatial dimensions, i.e., XxYxZ # :param spacing: list with spacing information [sx,sy,sz] # :param dtype: numpy data-type ('float32', 'float64', ...) # :return: returns the identity map of dimension dimxXxYxZ # """ # dim = len(sz) # if dim == 1: # id = np.mgrid[0:sz[0]] # elif dim == 2: # id = np.mgrid[0:sz[0], 0:sz[1]] # elif dim == 3: # id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]] # else: # raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # # min_spacing = np.min(spacing) # spacing_ratio = spacing/min_spacing # # # # now get it into range [0,(sz-1)*spacing]^d # id = np.array(id.astype(dtype)) # if dim == 1: # id = id.reshape(1, sz[0]) # add a dummy first index # # for d in range(dim): # id[d] *= spacing[d] # if sz[d]%2==0: # #even # id[d] -= spacing[d]*(sz[d]//2) # else: # #odd # id[d] -= spacing[d]*((sz[d]+1)//2) # # # and now store it in a dim+1 array and rescale by the ratio # if dim == 1: # idnp = np.zeros([1, sz[0]], dtype=dtype) # idnp[0, :] = id[0] * spacing_ratio[0] # elif dim == 2: # idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) # idnp[0, :, :] = id[0] * spacing_ratio[0] # idnp[1, :, :] = id[1] * spacing_ratio[1] # elif dim == 3: # idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype) # idnp[0, :, :, :] = id[0] * spacing_ratio[0] # idnp[1, :, :, :] = id[1] * spacing_ratio[1] # idnp[2, :, :, :] = id[2] * spacing_ratio[2] # else: # raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map') # # return idnp # # def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True): # if do_transform: # min_spacing = np.min(spacing) # spacing_ratio =min_spacing/spacing # dim = spacing.size # spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio)) # sp_sz = [1]+[dim] +[1]*dim # spacing_ratio_t = spacing_ratio_t.view(*sp_sz) # new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list] # else: # new_var_list = var_list # return new_var_list # def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True): # if do_transform: # min_spacing = np.min(spacing) # spacing_ratio =spacing/min_spacing # dim = spacing.size # spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio)) # sp_sz = [1]+[dim] +[1]*dim # 
spacing_ratio_t = spacing_ratio_t.view(*sp_sz) # new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list] # else: # new_var_list = var_list # return new_var_list # def identity_map(sz,spacing,dtype='float32'): """ Returns an identity map. :param sz: just the spatial dimensions, i.e., XxYxZ :param spacing: list with spacing information [sx,sy,sz] :param dtype: numpy data-type ('float32', 'float64', ...) :return: returns the identity map of dimension dimxXxYxZ """ dim = len(sz) if dim==1: id = np.mgrid[0:sz[0]] elif dim==2: id = np.mgrid[0:sz[0],0:sz[1]] elif dim==3: id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') # now get it into range [0,(sz-1)*spacing]^d id = np.array( id.astype(dtype) ) if dim==1: id = id.reshape(1,sz[0]) # add a dummy first index for d in range(dim): id[d]*=spacing[d] #id[d]*=2./(sz[d]-1) #id[d]-=1. # and now store it in a dim+1 array if dim==1: idnp = np.zeros([1, sz[0]], dtype=dtype) idnp[0,:] = id[0] elif dim==2: idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype) idnp[0,:, :] = id[0] idnp[1,:, :] = id[1] elif dim==3: idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype) idnp[0,:, :, :] = id[0] idnp[1,:, :, :] = id[1] idnp[2,:, :, :] = id[2] else: raise ValueError('Only dimensions 1-3 are currently supported for the identity map') return idnp def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05): """generate a smooth weight mask for the omt """ dim = len(img_sz) mask_sz = [1,1]+ list(img_sz) mask = AdaptVal(torch.ones(*mask_sz))*mask_value if dim ==2: mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1 elif dim==3: mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1 sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing) mask = sm.smooth(mask) return mask.detach() def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2): """generate a smooth weight mask for the omt """ dim = len(img_sz) mask_sz = [1,1]+ list(img_sz) mask = AdaptVal(torch.zeros(*mask_sz)) if dim ==2: mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1 elif dim==3: mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1 sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing) mask = sm.smooth(mask) if pow ==2: mask = mask**2 if pow ==3: mask = mask*mask*mask return mask # def compute_omt_const(stds,param,dim): # omt_power = param['forward_model']['smoother']['omt_power'] # omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty'] # min_std = torch.min(stds) # max_std = torch.max(stds) # omt_const = torch.abs(torch.log(max_std/stds))**omt_power # omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power) # omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2) # sz = [1]+ [len(stds)] +[1]*(dim+1) # return omt_const.view(*sz) def get_single_gaussian_smoother(gaussian_std,sz,spacing): s_m_params = pars.ParameterDict() s_m_params['smoother']['type'] = 'gaussian' s_m_params['smoother']['gaussian_std'] = gaussian_std s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params) return s_m def get_warped_label_map(label_map, phi, spacing, sched='nn'): if sched == 'nn': warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True) # check if here should be add assert assert abs(torch.sum(warped_label_map.data -warped_label_map.data.round()))< 0.1, "nn 
interpolation is not precise" else: raise ValueError(" the label warping method is not implemented") return warped_label_map def t2np(v): """ Takes a torch array and returns it as a numpy array on the cpu :param v: torch array :return: numpy array """ return (v.detach()).cpu().numpy() def cxyz_to_xyzc( v ): """ Takes a torch array and returns it as a numpy array on the cpu :param v: torch array :return: numpy array """ dim = len(v.shape)-2 if dim ==2: v = v.permute(0,2,3,1) if dim ==3: v = v.permute(0,2,3,4,1) return v def get_scalar(v): if isinstance(v, float): return v elif isinstance(v, np.ndarray) and v.size == 1: return float(v) def checkNan(x): """" input should be list of Variable """ return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x] def noramlized_spacing_to_smallest(spacing): min_sp = np.min(spacing) spacing[spacing>min_sp]=min_sp return spacing def time_warped_function(f): def __time_warped_function(input=None): start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() output = f(input) end.record() # Waits for everything to finish running torch.cuda.synchronize() print(start.elapsed_time(end)) return output return __time_warped_function def interoplate_boundary_right(tensor): dim = len(tensor.shape)-2 if dim==1: tensor[:,:,-1]= tensor[:,:-2]+ tensor[:,:-2]-tensor[:,:-3] if dim==2: tensor[:, :, -1,:] = tensor[:, :,-2,:] + tensor[:, :,-2,:] - tensor[:, :,-3,:] tensor[:, :, :,-1] = tensor[:, :, :,-2] + tensor[:, :, :,-2] - tensor[:, :, :,-3] if dim==3: tensor[:, :,:, -1,:, :] = tensor[:, :, -2, :] + tensor[:, :, -2, :] - tensor[:, :, -3, :] tensor[:, :,:, :, -1, :] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3] tensor[:, :,:, :, :, -1] = tensor[:, :, :, -2] + tensor[:, :, :, -2] - tensor[:, :, :, -3] def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None): """ :param I: B C X Y Z :param spacing: spx spy spz :param desiredSize: B C X Y Z :param spline_order: :param zero_boundary: :param identity_map: :return: """ if spacing is None: img_sz = I.shape[2:] spacing = 1. / (np.array(img_sz) - 1) if identity_map is not None: # todo will remove, currently fix for symmetric training if I.shape[0] != identity_map.shape[0]: n_batch = I.shape[0] desiredSize = desiredSize.copy() desiredSize[0] = n_batch identity_map = identity_map[:n_batch] resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order, zero_boundary=zero_boundary, identity_map=identity_map) return resampled def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None): """ Resample an image to a given desired size :param I: Input image (expected to be of BxCxXxYxZ format) :param spacing: array describing the spatial spacing :param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D) :return: returns a tuple: the downsampled image, the new spacing after downsampling """ desiredSize = desiredSize[2:] is_numpy = False if not isinstance(I, torch.Tensor): I = torch.Tensor(I) is_numpy = True sz = np.array(list(I.size())) # check that the batch size and the number of channels is the same nrOfI = sz[0] nrOfC = sz[1] desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize)) newspacing = spacing * ((sz[2::].astype('float') - 1.) 
/ ( desiredSizeNC[2::].astype('float') - 1.)) ########################################### if identity_map is not None: idDes = identity_map else: idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing))) # now use this map for resampling ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary) return ID if not is_numpy else ID.numpy(), newspacing def get_res_size_from_size(sz, factor): """ Returns the corresponding low-res size from a (high-res) sz :param sz: size (high-res) :param factor: low-res factor (needs to be <1) :return: low res size """ if (factor is None): print('WARNING: Could not compute low_res_size as factor was ' + str(factor)) return sz else: lowResSize = np.array(sz) if not isinstance(factor, list): lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16') else: lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16') if lowResSize[-1] % 2 != 0: lowResSize[-1] -= 1 print( '\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n') return lowResSize def get_res_spacing_from_spacing(spacing, sz, lowResSize): """ Computes spacing for the low-res parameterization from image spacing :param spacing: image spacing :param sz: size of image :param lowResSize: size of low re parameterization :return: returns spacing of low res parameterization """ # todo: check that this is the correct way of doing it return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1) ########################################## Adaptive Net ###################################################3 def space_normal(tensors, std=0.1): """ space normalize for the net kernel :param tensor: :param mean: :param std: :return: """ if isinstance(tensors, Variable): space_normal(tensors.data, std=std) return tensors for n in range(tensors.size()[0]): for c in range(tensors.size()[1]): dim = tensors[n][c].dim() sz = tensors[n][c].size() mus = np.zeros(dim) stds = std * np.ones(dim) print('WARNING: What should the spacing be here? 
Needed for new identity map code') raise ValueError('Double check the spacing here before running this code') spacing = np.ones(dim) centered_id = centered_identity_map(sz,spacing) g = compute_normalized_gaussian(centered_id, mus, stds) tensors[n,c] = torch.from_numpy(g) def weights_init_uniform(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.uniform(m.weight.data, 0.038, 0.042) elif classname.find('Linear') != -1: init.uniform(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_normal(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: space_normal(m.weight.data) elif classname.find('Linear') != -1: space_normal(m.weight.data) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_rd_normal(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.normal(m.weight.data) elif classname.find('Linear') != -1: init.normal(m.weight.data) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_xavier(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.xavier_normal(m.weight.data, gain=1) elif classname.find('Linear') != -1: init.xavier_normal(m.weight.data, gain=1) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_kaiming(m): classname = m.__class__.__name__ # print(classname) if classname.find('Conv') != -1: init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('Linear') != -1: init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def weights_init_orthogonal(m): classname = m.__class__.__name__ print(classname) if classname.find('Conv') != -1: init.orthogonal(m.weight.data, gain=1) elif classname.find('Linear') != -1: init.orthogonal(m.weight.data, gain=1) elif classname.find('BatchNorm2d') != -1: init.uniform(m.weight.data, 1.0, 0.02) init.constant(m.bias.data, 0.0) def init_weights(net, init_type='normal'): print('initialization method [%s]' % init_type) if init_type == 'rd_normal': net.apply(weights_init_rd_normal) elif init_type == 'normal': net.apply(weights_init_normal) elif init_type == 'uniform': net.apply(weights_init_uniform) elif init_type == 'xavier': net.apply(weights_init_xavier) elif init_type == 'kaiming': net.apply(weights_init_kaiming) elif init_type == 'orthogonal': net.apply(weights_init_orthogonal) else: raise NotImplementedError('initialization method [%s] is not implemented' % init_type) def organize_data(moving, target, sched='depth_concat'): if sched == 'depth_concat': input = torch.cat([moving, target], dim=1) elif sched == 'width_concat': input = torch.cat((moving, target), dim=3) elif sched == 'list_concat': input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0) elif sched == 'difference': input = moving-target return input def bh(m,gi,go): print("Grad Input") print((torch.sum(gi[0].data), torch.sum(gi[1].data))) print("Grad Output") print(torch.sum(go[0].data)) return gi[0], gi[1], gi[2] class ConvBnRel(nn.Module): # conv + bn (optional) + relu def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', 
same_padding=False, bn=False, reverse=False, bias=False): super(ConvBnRel, self).__init__() padding = int((kernel_size - 1) // 2) if same_padding else 0 if not reverse: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias) else: self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias) #y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta #When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants. self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.active_unit is not None: x = self.active_unit(x) return x class FcRel(nn.Module): # fc+ relu(option) def __init__(self, in_features, out_features, active_unit='relu'): super(FcRel, self).__init__() self.fc = nn.Linear(in_features, out_features) if active_unit == 'relu': self.active_unit = nn.ReLU(inplace=True) elif active_unit == 'elu': self.active_unit = nn.ELU(inplace=True) else: self.active_unit = None def forward(self, x): x = self.fc(x) if self.active_unit is not None: x = self.active_unit(x) return x class AdpSmoother(nn.Module): """ a simple conv. implementation, generate displacement field """ def __init__(self, inputs, dim, net_sched=None): # settings should include [using_bias, using bn, using elu] # inputs should be a dictionary could contain ['s'],['t'] super(AdpSmoother, self).__init__() self.dim = dim self.net_sched = 'm_only' self.s = inputs['s'].detach() self.t = inputs['t'].detach() self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True) self.get_net_sched() #self.net.register_backward_hook(bh) def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5): # return the self.net and self.net_input padding_size = (kernel_size-1)//2 if self.net_sched == 'm_only': if debugging: self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2) else: net = \ [ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched =='m_f_s': if debugging: self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_d_s': if debugging: self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_f_s_t': if debugging: self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, 
same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) elif self.net_sched == 'm_d_s_f_t': if debugging: self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False) else: net = \ [ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn), ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)] if using_sigmoid: net += [nn.Sigmoid()] self.net = nn.Sequential(*net) def prepare_data(self, m, new_s): input=None if self.net_sched == 'm_only': input = m elif self.net_sched == 'm_f_s': input = organize_data(m,self.s,sched='depth_concat') elif self.net_sched == 'm_d_s': input = organize_data(m, new_s, sched='depth_concat') elif self.net_sched == 'm_f_s_t': input = organize_data(m, self.s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') elif self.net_sched == 'm_f_s_t': input = organize_data(m, self.s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') elif self.net_sched == 'm_d_s_f_t': input = organize_data(m, new_s, sched='depth_concat') input = organize_data(input, self.t, sched='depth_concat') return input def forward(self, m,new_s=None): m = m * self.mask input = self.prepare_data(m,new_s) x= input x = self.net(x) return x
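To make the coordinate convention above concrete, here is a minimal NumPy-only sketch of what identity_map computes for a small 2-D grid; the size and spacing values are made-up examples, not values from the original project.

import numpy as np

sz = (4, 3)                      # example spatial size (X, Y); arbitrary
spacing = np.array([0.5, 2.0])   # example voxel spacing [sx, sy]; arbitrary

# np.mgrid yields integer indices of shape (dim, X, Y); scaling each axis by its
# spacing gives physical coordinates in [0, (sz[d]-1)*spacing[d]], which is what
# identity_map stores in its dim x X x Y output.
idx = np.mgrid[0:sz[0], 0:sz[1]].astype('float32')
for d in range(2):
    idx[d] *= spacing[d]

print(idx.shape)                    # (2, 4, 3)
print(idx[0].max(), idx[1].max())   # 1.5 4.0, i.e. (sz[d]-1)*spacing[d] per axis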
34.805304 | 130 | 0.602675 | 6,600 | 0.132328 | 0 | 0 | 0 | 0 | 0 | 0 | 17,362 | 0.348103
b96fca03cef0164231c4fa09bc83db6c5b2aa7db | 1,093 | py | Python
examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [ "BSD-3-Clause" ] | 3 | 2021-01-04T08:45:56.000Z .. 2021-05-19T12:25:59.000Z
examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [ "BSD-3-Clause" ] | 28 | 2020-05-07T00:58:34.000Z .. 2020-08-29T23:02:17.000Z
examples/io/plot_read_evoked.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [ "BSD-3-Clause" ] | 3 | 2019-01-28T13:48:00.000Z .. 2019-07-10T16:02:11.000Z
    """ ================================== Reading and writing an evoked file ================================== This script shows how to read and write evoked datasets. """ # Author: Alexandre Gramfort <[email protected]> # # License: BSD (3-clause) from mne import read_evokeds from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname = data_path + '/MEG/sample/sample_audvis-ave.fif' # Reading condition = 'Left Auditory' evoked = read_evokeds(fname, condition=condition, baseline=(None, 0), proj=True) ############################################################################### # Show result as a butterfly plot: # By using exclude=[] bad channels are not excluded and are shown in red evoked.plot(exclude=[], time_unit='s') # Show result as a 2D image (x: time, y: channels, color: amplitude) evoked.plot_image(exclude=[], time_unit='s') ############################################################################### # Use :func:`mne.Evoked.save` or :func:`mne.write_evokeds` to write the evoked # responses to a file.
29.540541 | 79 | 0.569076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.6871
b970d836b7397be4bc4d63762c0eec8adfb90a91 | 611 | py | Python
source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [ "bzip2-1.0.6" ] | 19 | 2016-05-11T05:15:31.000Z .. 2022-03-17T12:40:10.000Z
source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [ "bzip2-1.0.6" ] | 307 | 2015-08-27T11:22:33.000Z .. 2022-03-29T10:43:34.000Z
source/monkeyPatches/__init__.py | lukaszgo1/nvda | 38a2efd1e1bff7db4471cb7afa03ab1590b7adef | [ "bzip2-1.0.6" ] | 14 | 2016-03-28T07:31:49.000Z .. 2022-03-30T04:56:35.000Z
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2021 NV Access Limited
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.

from . import wxMonkeyPatches

applyWxMonkeyPatches = wxMonkeyPatches.apply


def applyMonkeyPatches():
    # Apply several monkey patches to comtypes
    # F401 - imported but unused: Patches are applied during import
    from . import comtypesMonkeyPatches  # noqa: F401

    # Apply patches to Enum, prevent cyclic references on ValueError during construction
    from . import enumPatches
    enumPatches.replace__new__()
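The NVDA module above does little more than import submodules for their side effects and swap replacement attributes onto third-party objects. A generic, self-contained sketch of that monkey-patching pattern follows; the class and function names are invented for illustration and are not NVDA's actual patches.

class _Widget:                     # stand-in for a third-party class
    def refresh(self):
        return "original refresh"

_original_refresh = _Widget.refresh

def _patched_refresh(self):
    # Do the extra work a patch needs, then fall through to the original behaviour.
    return _original_refresh(self) + " (patched)"

def applyWidgetPatch():
    # Mirrors how applyMonkeyPatches installs replacements at startup.
    _Widget.refresh = _patched_refresh

applyWidgetPatch()
print(_Widget().refresh())         # "original refresh (patched)"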
30.55 | 86 | 0.761047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.633388
b970f8ccb56e24dd8d65fd92869bbf7790f6e611 | 5,298 | py | Python
yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [ "Unlicense" ] | 11 | 2022-01-06T22:09:50.000Z .. 2022-03-12T22:26:22.000Z
yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [ "Unlicense" ] | 4 | 2022-02-25T08:20:18.000Z .. 2022-03-17T16:16:20.000Z
yt_dlp/extractor/ninenow.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [ "Unlicense" ] | 3 | 2022-02-19T08:59:13.000Z .. 2022-03-06T16:11:21.000Z
    from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, float_or_none, smuggle_url, str_or_none, try_get, unified_strdate, unified_timestamp, ) class NineNowIE(InfoExtractor): IE_NAME = '9now.com.au' _VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)' _GEO_COUNTRIES = ['AU'] _TESTS = [{ # clip 'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc', 'md5': '17cf47d63ec9323e562c9957a968b565', 'info_dict': { 'id': '16801', 'ext': 'mp4', 'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike', 'description': 'Is a boycott of the NAB Cup "on the table"?', 'uploader_id': '4460760524001', 'upload_date': '20160713', 'timestamp': 1468421266, }, 'skip': 'Only available in Australia', }, { # episode 'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19', 'only_matching': True, }, { # DRM protected 'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1', 'only_matching': True, }, { # episode of series 'url': 'https://www.9now.com.au/lego-masters/season-3/episode-3', 'info_dict': { 'id': '6249614030001', 'title': 'Episode 3', 'ext': 'mp4', 'season_number': 3, 'episode_number': 3, 'description': 'In the first elimination of the competition, teams will have 10 hours to build a world inside a snow globe.', 'uploader_id': '4460760524001', 'timestamp': 1619002200, 'upload_date': '20210421', }, 'expected_warnings': ['Ignoring subtitle tracks'], 'params':{ 'skip_download': True, } }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) page_data = self._parse_json(self._search_regex( r'window\.__data\s*=\s*({.*?});', webpage, 'page data', default='{}'), display_id, fatal=False) if not page_data: page_data = self._parse_json(self._parse_json(self._search_regex( r'window\.__data\s*=\s*JSON\.parse\s*\(\s*(".+?")\s*\)\s*;', webpage, 'page data'), display_id), display_id) for kind in ('episode', 'clip'): current_key = page_data.get(kind, {}).get( 'current%sKey' % kind.capitalize()) if not current_key: continue cache = page_data.get(kind, {}).get('%sCache' % kind, {}) if not cache: continue common_data = { 'episode': (cache.get(current_key) or list(cache.values())[0])[kind], 'season': (cache.get(current_key) or list(cache.values())[0]).get('season', None) } break else: raise ExtractorError('Unable to find video data') if not self.get_param('allow_unplayable_formats') and try_get(common_data, lambda x: x['episode']['video']['drm'], bool): self.report_drm(display_id) brightcove_id = try_get( common_data, lambda x: x['episode']['video']['brightcoveId'], compat_str) or 'ref:%s' % common_data['episode']['video']['referenceId'] video_id = str_or_none(try_get(common_data, lambda x: x['episode']['video']['id'])) or brightcove_id title = try_get(common_data, lambda x: x['episode']['name'], compat_str) season_number = try_get(common_data, lambda x: x['season']['seasonNumber'], int) episode_number = try_get(common_data, lambda x: x['episode']['episodeNumber'], int) timestamp = unified_timestamp(try_get(common_data, lambda x: x['episode']['airDate'], compat_str)) release_date = unified_strdate(try_get(common_data, lambda x: x['episode']['availability'], compat_str)) thumbnails_data = try_get(common_data, lambda x: x['episode']['image']['sizes'], dict) or {} thumbnails = [{ 'id': 
thumbnail_id, 'url': thumbnail_url, 'width': int_or_none(thumbnail_id[1:]), } for thumbnail_id, thumbnail_url in thumbnails_data.items()] return { '_type': 'url_transparent', 'url': smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': self._GEO_COUNTRIES}), 'id': video_id, 'title': title, 'description': try_get(common_data, lambda x: x['episode']['description'], compat_str), 'duration': float_or_none(try_get(common_data, lambda x: x['episode']['video']['duration'], float), 1000), 'thumbnails': thumbnails, 'ie_key': 'BrightcoveNew', 'season_number': season_number, 'episode_number': episode_number, 'timestamp': timestamp, 'release_date': release_date, }
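A short sketch of exercising this extractor through yt-dlp's Python API; the URL is taken from the extractor's own _TESTS above and, as those tests note, the content is geo-restricted to Australia, so extraction may fail elsewhere.

import yt_dlp

url = 'https://www.9now.com.au/lego-masters/season-3/episode-3'

# Metadata-only extraction; no media is downloaded.
with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(url, download=False)
    print(info.get('id'), info.get('title'), info.get('season_number'), info.get('episode_number'))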
43.073171 | 146 | 0.575123 | 5,058 | 0.9547 | 0 | 0 | 0 | 0 | 0 | 0 | 1,871 | 0.353152
b97242dec299cf214174fe1ceb1c2d4c7e16b595 | 4,783 | py | Python
apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [ "BSD-3-Clause" ] | 1 | 2020-05-05T01:37:42.000Z .. 2020-05-05T01:37:42.000Z
apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [ "BSD-3-Clause" ] | 1 | 2018-06-24T18:56:56.000Z .. 2018-06-24T18:56:56.000Z
apex/fp16_utils/fused_weight_norm.py | mcarilli/apex | 766e36c9e10fe4efd847c3f77c3b38974c89eab1 | [ "BSD-3-Clause" ] | 1 | 2020-07-03T00:37:20.000Z .. 2020-07-03T00:37:20.000Z
    import torch from torch.autograd import Variable from torch.autograd.function import Function, once_differentiable import apex_C def check_contig_cuda(tensors, names): for tensor, name in zip(tensors, names): if not tensor.is_contiguous(): raise RuntimeError(name+" with size {} is not contiguous" .format(tensor.size())) if not tensor.is_cuda: raise RuntimeError(name+".is_cuda = False." "Currently, only cuda tensors are supported.") class Fused_Weight_Norm(Function): """ Custom autograd function that implements weight norm, as presented in `<https://arxiv.org/abs/1602.07868>`_, along a tensor's slowest or fastest dimension using fused kernel launches for the forward and backward passes. Accepts fp32 or fp16 input; the output type will match the input type. Within the kernels, all calculations are performed in fp32 for numerical stability, regardless of input/output precision. """ @staticmethod def forward(ctx, input, g, dim=0): """ Args: input(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **v** in the paper. ``input`` should be contiguous. g(torch.cuda.FloatTensor or torch.cuda.HalfTensor): input tensor corresponding to **g** in the paper. ``g`` should be the same type as ``input``. dim(int, optional, default=0): Dimension across which to perform weightnorm. Currently, only the first or last dimension of the input tensor is supported. Returns: Output tensor corresponding to **w** in the paper. Output type and precision will match type and precision of ``input``. """ # torch.cuda.nvtx.range_push("FusedNorm.forward, input.size() = {}" # .format(input.size())) check_contig_cuda((input,g),("input","g")) """ This is ok, new() treats a torch.Size object properly. No need to unpack with an asterisk via new(*input.size()). """ output = input.new(input.size()).contiguous() """ For output with size (slow, faster, faster, ...fastest), we want norms with size (slow, 1, 1, ...1), so that if you want retrieve norms and apply the same normalizing factors to another Tensor "t" with the same size as output, "t/norms" will broadcast each element of norms across the corresponding slowest dim of t. """ if dim == 0: norm_size = (output.size(0),) + (1,)*(output.dim() - 1) elif dim == output.dim() - 1: norm_size = (1,)*(output.dim() - 1) + (output.size(-1),) else: raise RuntimeError("Currently, Fused_Weight_Norm only supports first or last dimension.") norms = torch.cuda.FloatTensor(*norm_size).contiguous() """ Beware: If you call the following: norms = torch.cuda.FloatTensor(norm_size).contiguous() the constructor sees a tuple: FloatTensor( (output_size(0),1,1,...) ) and creates a 1D tensor with values from the tuple: [output_size(0),1,1,...]. """ apex_C.weight_norm_fwd(output, norms, input, g, dim) ctx.save_for_backward(input, g) # save_for_backward can only save input or output tensors, # use ctx state to save the norms and dimension: ctx.norms = norms ctx.dim = dim return output @staticmethod @once_differentiable def backward(ctx, grad_output): """ Args: grad_output(torch.cuda.FloatTensor or torch.cuda.HalfTensor): Gradient of loss with respect to output **w**. ``grad_output`` should be contiguous for performance. Returns: Gradient of loss with respect to ``input`` and ``g``. The precision of these gradients will match the precision of ``grad_input``. """ check_contig_cuda((grad_output), ("grad_output")) savedInput, savedg = ctx.saved_tensors savedNorms = ctx.norms # We expect that these .contiguous() calls will be no-ops. They're present for safety. 
grad_output_contig = grad_output.contiguous() grad_input = grad_output_contig.new(grad_output.size()).contiguous() grad_g = savedg.new(savedg.size()).contiguous() apex_C.weight_norm_bwd(grad_input, grad_g, grad_output_contig, savedInput, savedg, savedNorms, ctx.dim) return grad_input, grad_g, None
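For reference, the arithmetic the fused kernels implement is ordinary weight normalization, w = g * v / ||v||, with one norm per slice along the chosen dimension. A plain PyTorch sketch of the dim=0 case (no apex, example shapes only):

import torch

v = torch.randn(8, 16)               # example "v"; the first dim is the slow dim
g = torch.randn(8, 1)                # one scale factor per slice along dim 0

norms = v.norm(dim=1, keepdim=True)  # shape (8, 1), analogous to the norm_size logic above
w = g * v / norms                    # the output the fused forward pass produces

print(w.shape)                       # torch.Size([8, 16])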
41.95614 | 175 | 0.604223 | 4,238 | 0.886055 | 0 | 0 | 3,733 | 0.780473 | 0 | 0 | 2,826 | 0.590843
b9724b70833f729e47c38eb018294247250b7282 | 23,312 | py | Python
bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [ "Apache-2.0" ] | 1 | 2019-01-15T17:23:58.000Z .. 2019-01-15T17:23:58.000Z
bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [ "Apache-2.0" ] | null | null | null
bzt/modules/grinder.py | gerardorf/taurus | 610872b4cf70af31d79a346db1aebd3466310d77 | [ "Apache-2.0" ] | null | null | null
    """ Module holds all stuff regarding Grinder tool usage Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re import time from bzt import TaurusConfigError, ToolError from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader from bzt.modules.console import WidgetProvider, ExecutorWidget from bzt.modules.java import TaurusJavaHelper from bzt.requests_model import HTTPRequest from bzt.six import iteritems from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable): """ Grinder executor module """ def __init__(self): super(GrinderExecutor, self).__init__() self.script = None self.exec_id = "grinder-bzt-%s" % id(self) self.properties_file = None self.kpi_file = None self.cmd_line = None self.process = None self.end_time = None self.retcode = None self.java_helper = None def __write_base_props(self, fds): """ write base properties and base properties file contents to fds :param fds: fds :return: """ base_props_file = self.settings.get("properties-file") if base_props_file: fds.write("# Base Properies File Start: %s\n" % base_props_file) with open(base_props_file) as bpf: fds.write(bpf.read()) fds.write("# Base Properies File End: %s\n\n" % base_props_file) # base props base_props = self.settings.get("properties") if base_props: fds.write("# Base Properies Start\n") for key, val in iteritems(base_props): fds.write("%s=%s\n" % (key, val)) fds.write("# Base Properies End\n\n") def __write_scenario_props(self, fds, scenario): """ Write scenario props and scenario file props to fds :param fds: :param scenario: dict :return: """ script_props_file = scenario.get("properties-file") if script_props_file: fds.write("# Script Properies File Start: %s\n" % script_props_file) with open(script_props_file) as spf: fds.write(spf.read()) fds.write("# Script Properies File End: %s\n\n" % script_props_file) # scenario props local_props = scenario.get("properties") if local_props: fds.write("# Scenario Properies Start\n") for key, val in iteritems(local_props): fds.write("%s=%s\n" % (key, val)) fds.write("# Scenario Properies End\n\n") def __write_bzt_props(self, fds): """ Write bzt properties to fds :param fds: :return: """ fds.write("# BZT Properies Start\n") fds.write("grinder.hostID=%s\n" % self.exec_id) fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/")) fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/")) load = self.get_load() if load.iterations or load.concurrency: fds.write("grinder.runs=%s\n" % load.iterations or 0) if load.concurrency: fds.write("grinder.threads=%s\n" % load.concurrency) if load.duration: fds.write("grinder.duration=%s\n" % int(load.duration * 1000)) 
fds.write("# taurus load values in case you need them\n") fds.write("taurus.concurrency=%s\n" % load.concurrency) fds.write("taurus.throughput=%s\n" % load.throughput) fds.write("taurus.ramp_up=%s\n" % load.ramp_up) fds.write("taurus.steps=%s\n" % load.steps) fds.write("taurus.hold_for=%s\n" % load.hold) fds.write("taurus.iterations=%s\n" % load.iterations) fds.write("# BZT Properies End\n") def prepare(self): self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w") self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w") self.install_required_tools() scenario = self.get_scenario() self.exec_id = self.label self.script = self.get_script_path() if not self.script: if "requests" in scenario: self.script = self.__scenario_from_requests() else: msg = "There must be a script file or requests for its generation " msg += "to run Grinder tool (%s)" % self.execution.get('scenario') raise TaurusConfigError(msg) self.properties_file = self.engine.create_artifact("grinder", ".properties") with open(self.properties_file, 'w') as fds: self.__write_base_props(fds) self.__write_scenario_props(fds, scenario) self.__write_bzt_props(fds) self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log") self.reader = DataLogReader(self.kpi_file, self.log) self.reader.report_by_url = self.settings.get("report-by-url", False) if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.engine.aggregator.add_underling(self.reader) # add logback configurations used by worker processes (logback-worker.xml) self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True) self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True) self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True) self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file] def startup(self): """ Should start the tool as fast as possible. """ self.env.set({"T_GRINDER_PREFIX": self.exec_id}) self.process = self.execute(self.cmd_line) def check(self): """ Checks if tool is still running. Also checks if resulting logs contains any data and throws exception otherwise. :return: bool :raise TaurusToolError: """ self.retcode = self.process.poll() if self.retcode is not None: if self.retcode != 0: raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics()) return True return False def shutdown(self): """ If tool is still running - let's stop it. 
""" shutdown_process(self.process, self.log) if self.start_time: self.end_time = time.time() self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time) def post_process(self): """ Collect data file artifact """ if self.kpi_file: self.engine.existing_artifact(self.kpi_file) super(GrinderExecutor, self).post_process() def __scenario_from_requests(self): """ Generate grinder scenario from requests :return: script """ script = self.engine.create_artifact("grinder_requests", ".py") builder = GrinderScriptBuilder(self.get_scenario(), self.log) builder.label = self.label builder.build_source_code() builder.save(script) return script def install_required_tools(self): grinder = self._get_tool(Grinder, config=self.settings) self.settings["path"] = grinder.tool_path self.java_helper = self._get_tool(TaurusJavaHelper) required_tools = [self._get_tool(TclLibrary), self._get_tool(JavaVM), self.java_helper, grinder] for tool in required_tools: if not tool.check_if_installed(): tool.install() def get_widget(self): if not self.widget: if self.script is not None: label = "Grinder: %s" % os.path.basename(self.script) else: label = None self.widget = ExecutorWidget(self, label) if self.get_load().ramp_up: self.widget.duration += self.get_load().ramp_up # because we have ramp-down equal to rampup return self.widget def resource_files(self): resource_files = [] script_file_path = self.get_script_path() if script_file_path: resource_files.append(script_file_path) prop_file = self.get_scenario().get("properties-file") if prop_file: resource_files.append(prop_file) return resource_files def get_error_diagnostics(self): diagnostics = [] if self.stdout is not None: with open(self.stdout.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("Grinder STDOUT:\n" + contents) if self.stderr is not None: with open(self.stderr.name) as fds: contents = fds.read().strip() if contents.strip(): diagnostics.append("Grinder STDOUT:\n" + contents) return diagnostics class DataLogReader(ResultsReader): """ Class to read KPI from data log """ DELIMITER = "," DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes") def __init__(self, filename, parent_logger): super(DataLogReader, self).__init__() self.report_by_url = False self.log = parent_logger.getChild(self.__class__.__name__) self.file = FileReader(filename=filename, parent_logger=self.log) self.idx = {} self.partial_buffer = "" self.start_time = 0 self.end_time = 0 self.concurrency = 0 self.test_names = {} self.known_threads = set() def _read(self, last_pass=False): """ Generator method that returns next portion of data :param last_pass: """ self.log.debug("Reading grinder results...") self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass)) lnum = None start = time.time() for lnum, line in enumerate(self.lines): if not self.idx: if not line.startswith('data.'): self.__split(line) # to capture early test name records continue line = line[line.find(' '):] header_list = line.strip().split(self.DELIMITER) for _ix, field in enumerate(header_list): self.idx[field.strip()] = _ix data_fields, worker_id = self.__split(line) if not data_fields: self.log.debug("Skipping line: %s", line.strip()) continue yield self.parse_line(data_fields, worker_id, lnum) if lnum is not None: duration = time.time() - start if duration < 0.001: duration = 0.001 self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration) def parse_line(self, data_fields, worker_id, lnum): worker_id = 
worker_id.split('.')[1] t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0) r_time = int(data_fields[self.idx["Test time"]]) / 1000.0 latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0 r_code = data_fields[self.idx["HTTP response code"]].strip() con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0 con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0 bytes_count = int(data_fields[self.idx["HTTP response length"]].strip()) test_id = data_fields[self.idx["Test"]].strip() thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip() if thread_id not in self.known_threads: self.known_threads.add(thread_id) self.concurrency += 1 url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count) if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]): if not error_msg: if r_code != '0': error_msg = "HTTP %s" % r_code else: error_msg = "Java exception calling TestRunner" else: error_msg = None # suppress errors if self.report_by_url: label = url elif test_id in self.test_names: label = self.test_names[test_id] else: label = "Test #%s" % test_id source_id = '' # maybe use worker_id somehow? return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count def __split(self, line): if not line.endswith("\n"): self.partial_buffer += line return None, None line = "%s%s" % (self.partial_buffer, line) self.partial_buffer = "" line = line.strip() if not line.startswith('data.'): line_parts = line.split(' ') if len(line_parts) > 1: if line_parts[1] == 'starting,': # self.concurrency += 1 pass elif line_parts[1] == 'finished': if self.concurrency > 0: self.concurrency -= 1 elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}: test_id = line_parts[5][:-1] test_name = ' '.join(line_parts[6:]) self.test_names[test_id] = test_name self.log.debug("Recognized test id %s => %s", test_id, test_name) return None, None worker_id = line[:line.find(' ')] line = line[line.find(' '):] data_fields = line.split(self.DELIMITER) if not data_fields[1].strip().isdigit(): return None, None if len(data_fields) < max(self.idx.values()): return None, None return data_fields, worker_id def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count): url = '' error_msg = None for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize? line = self.lines[lineNo].strip() matched = self.DETAILS_REGEX.match(line) if not matched: continue if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5): return matched.group(2), matched.group(4) return url, error_msg class Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?) 
VERSION = "3.11" LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar" def __init__(self, config=None, **kwargs): settings = config or {} grinder_path = settings.get("path", self.LOCAL_PATH) grinder_path = get_full_path(grinder_path) download_link = settings.get("download-link", "") super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs) self.version = self.VERSION self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version) def check_if_installed(self): self.log.debug("Trying %s: %s", self.tool_name, self.tool_path) try: out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"]) if err: out += err self.log.debug("%s stdout: %s", self.tool_name, out) return True except CALL_PROBLEMS as exc: self.log.warning("%s check failed: %s", self.tool_name, exc) return False def install(self): dest = get_full_path(self.tool_path, step_up=2) self.log.info("Will install %s into %s", self.tool_name, dest) grinder_dist = self._download(use_link=bool(self.download_link)) self.log.info("Unzipping %s", grinder_dist) unzip(grinder_dist, dest, 'grinder-' + self.version) os.remove(grinder_dist) self.log.info("Installed grinder successfully") if not self.check_if_installed(): raise ToolError("Unable to run %s after installation!" % self.tool_name) class GrinderMirrorsManager(MirrorsManager): MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \ "%203/{version}/grinder-{version}-binary.zip&dialog=true" DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \ "/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect" def __init__(self, http_client, parent_logger, grinder_version): self.grinder_version = grinder_version base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version) super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger) def _parse_mirrors(self): links = [] if self.page_source is not None: self.log.debug('Parsing mirrors...') base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \ "-binary.zip/download?use_mirror={mirror}" li_search_pattern = re.compile(r'<li id=".*?">') li_elements = li_search_pattern.findall(self.page_source) if li_elements: links = [base_link.format(version=self.grinder_version, mirror=link.strip('<li id="').strip('">')) for link in li_elements] default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version) if default_link not in links: links.append(default_link) self.log.debug('Total mirrors: %d', len(links)) return links class GrinderScriptBuilder(PythonGenerator): IMPORTS = """ from net.grinder.script import Test from net.grinder.script.Grinder import grinder from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities from HTTPClient import NVPair """ def __init__(self, scenario, parent_logger): super(GrinderScriptBuilder, self).__init__(scenario, parent_logger) self.label = "BZT Requests" def build_source_code(self): self.log.debug("Generating Python script for Grinder") self.root.append(self.gen_comment("This script was generated by Taurus", indent=0)) self.root.append(self.add_imports()) self.root.append(self.gen_new_line()) default_address = self.scenario.get("default-address") url_arg = "url=%r" % default_address if default_address else "" self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0)) 
self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0)) self.root.append(self.gen_statement('test.record(request)', indent=0)) self.root.append(self.gen_new_line()) self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0)) self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0)) headers = self.scenario.get_headers() if not self.scenario.get("keepalive", True): headers['Connection'] = 'close' if headers: self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0)) for header, value in iteritems(headers): self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4)) self.root.append(self.gen_statement("])", indent=0)) global_timeout = dehumanize_time(self.scenario.get("timeout", None)) if global_timeout: self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0)) cookie_flag = int(self.scenario.get("store-cookie", True)) self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0)) self.root.append(self.gen_new_line()) self.root.append(self.gen_runner_class()) @staticmethod def __list_to_nvpair_list(items): return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]" def gen_runner_class(self): runner_classdef = self.gen_class_definition("TestRunner", ["object"]) sleep_method = self.gen_method_definition("rampUpSleeper", ["self"]) sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return")) sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')")) sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)")) sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)")) sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)")) sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)")) sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')")) sleep_method.append(self.gen_new_line()) runner_classdef.append(sleep_method) main_method = self.gen_method_definition("__call__", ["self"]) main_method.append(self.gen_statement("self.rampUpSleeper()")) for req in self.scenario.get_requests(): if not isinstance(req, HTTPRequest): msg = "Grinder script generator doesn't support '%s' blocks, skipping" self.log.warning(msg, req.NAME) continue method = req.method.upper() url = req.url local_headers = req.headers params = "[]" headers = self.__list_to_nvpair_list(iteritems(local_headers)) main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers))) think_time = dehumanize_time(req.priority_option('think-time')) if think_time: main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000))) runner_classdef.append(main_method) return runner_classdef
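To make the generator above more tangible, this is roughly the shape of the Jython script GrinderScriptBuilder emits for a scenario with one GET request. It is a simplified illustration (the default address is an example URL), not the verbatim output, and it only runs inside Grinder's Jython worker, not under CPython.

from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
from HTTPClient import NVPair

request = HTTPRequest(url='http://example.com')   # example default-address
test = Test(1, "BZT Requests")
test.record(request)

defaults = HTTPPluginControl.getConnectionDefaults()
utilities = HTTPPluginControl.getHTTPUtilities()
defaults.setUseCookies(1)

class TestRunner(object):
    def rampUpSleeper(self):
        # Staggers thread start-up using the taurus.* properties written by the executor.
        if grinder.runNumber != 0: return
        tprops = grinder.properties.getPropertySubset('taurus.')
        inc = tprops.getDouble('ramp_up', 0) / tprops.getInt('concurrency', 1)
        grinder.sleep(int(1000 * grinder.threadNumber * inc), 0)

    def __call__(self):
        self.rampUpSleeper()
        request.GET('http://example.com/', [], [])   # one statement per request in the scenario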
40.82662 | 119 | 0.618823 | 22,046 | 0.945693 | 1,254 | 0.053792 | 150 | 0.006434 | 0 | 0 | 5,650 | 0.242364
b972e358701b6b26d8d3c931dfecc57580620c15 | 467 | py | Python
test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [ "MIT" ] | 1,403 | 2017-11-23T14:24:01.000Z .. 2022-03-30T20:59:39.000Z
test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [ "MIT" ] | 3,708 | 2017-11-27T13:47:12.000Z .. 2022-03-29T17:21:17.000Z
test/Fortran/fixture/myfortran_flags.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [ "MIT" ] | 281 | 2017-12-01T23:48:38.000Z .. 2022-03-31T15:25:44.000Z
import getopt
import sys

comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
length = len(comment)
for opt, arg in opts:
    if opt == '-o':
        out = arg
    elif opt not in ('-f', '-K'):
        optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write((optstring + "\n").encode())
for l in infile.readlines():
    if l[:length] != comment:
        outfile.write(l)
sys.exit(0)
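This fixture is a fake compiler used by the SCons test suite: argv[1] supplies the comment character, '-o' names the output file, other recognised flags are echoed into the output's first line, and comment lines are stripped. A hedged example of driving it from Python; the file names are placeholders and the script is assumed to sit in the current directory.

import subprocess
import sys

with open('in.f', 'w') as f:
    f.write('#g this line is treated as a comment and dropped\n      PROGRAM MAIN\n')

# 'g' is the comment character, '-x' is a pass-through flag, '-o' names the output.
subprocess.run([sys.executable, 'myfortran_flags.py', 'g', '-x', '-o', 'out.f', 'in.f'], check=True)

print(open('out.f').read())   # first line is ' -x', then the non-comment source lines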
27.470588 | 67 | 0.601713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.087794
b9736fc25869ac44481082e255dc93e0f52aa441 | 9,015 | py | Python
zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [ "MIT" ] | 30 | 2021-12-25T15:39:42.000Z .. 2022-02-25T04:53:44.000Z
zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [ "MIT" ] | 11 | 2022-01-02T22:10:07.000Z .. 2022-02-02T00:56:33.000Z
zen_knit/organizer/__init__.py | Zen-Reportz/zen_knit | 104c2693d2cc61520657131da769f5d59d2df8e9 | [ "MIT" ] | 2 | 2022-01-27T13:22:46.000Z .. 2022-01-30T05:01:59.000Z
    import io import os import base64 from pathlib import Path from nbconvert import filters from pygments.formatters.latex import LatexFormatter from zen_knit import formattor from zen_knit.data_types import ChunkOption, ExecutedData, OrganizedChunk, OrganizedData from zen_knit.formattor.html_formatter import HTMLFormatter mime_extensions = {"image/png" : "png", "image/jpg" : "jpg"} class BaseOrganizer: def __init__(self, executed_data: ExecutedData): self.format_started = False self.collected_string = "" self.fig_folder = None self.executed_data = executed_data self.formatted_doc = [] self.organized_data = OrganizedData( global_options = self.executed_data.global_options, chunks = [] ) self._create_output_folder_name() self._create_fig_folder() self._organize_doc() self._create_output_file_name() def _create_output_file_name(self): global_options = self.organized_data.global_options global_options.output.file_name = global_options.input.file_name.split(".")[0] + "."+ global_options.output.format def _create_output_folder_name(self): global_options = self.organized_data.global_options if global_options.output.dir is None: global_options.output.dir = global_options.input.dir def _create_fig_folder(self): output_folder = self.organized_data.global_options.output.dir Path(output_folder).mkdir(parents=True, exist_ok=True) fig_folder = os.path.join(output_folder, self.organized_data.global_options.output.fig_dir) self.fig_folder = fig_folder Path(fig_folder).mkdir(parents=True, exist_ok=True) def _parse_raw(self, data, output_type): if data.get("code_text_raw") is not None: if self._clean_up(data['code_text_raw']) is not None: if output_type in ("code"): t = {"type": "code", "str_data": data['code_text_raw'] } elif output_type in ("sql"): t = {"type": "sql", "str_data": data['code_text_raw'] } else: t = {"type": "markdown", "str_data": data['code_text_raw'] } self.organized_data.chunks.append(OrganizedChunk(**t)) return True else: return False def _coder_string(self, data): list_ = ["stream", "error"] if data["output_type"] is None: return False if data["output_type"] in list_: if data["output_type"] == "stream": if self._clean_up(data['text']) is not None: t = {"type": "se_data", "str_data": data['text'] } self.organized_data.chunks.append(OrganizedChunk(**t)) if data["output_type"] == "error": t = {"type": "se_data", "str_data": data["evalue"] + filters.strip_ansi("".join(data["traceback"])) } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _raw_string(self, data): if data["output_type"] is None: return False if data["output_type"] == "execute_result": if data.get("data") is not None: if 'matplotlib' in data["data"]["text/plain"]: # Doing nothing here return True else: if ((data["data"]["text/plain"][0] == "'") or (data["data"]["text/plain"][0] == '"')): temp = data["data"]["text/plain"][1:-1] else: temp = data["data"]["text/plain"] if "<table" in temp: t = {"type": "html_data", "str_data":temp.encode().decode() } self.organized_data.chunks.append(OrganizedChunk(**t)) return True # if "BokehJS" in temp: # t = {"type": "html_data", "str_data": "<script type='text/javascript'>" + temp.encode().decode() + "</script>" } # self.organized_data.chunks.append(OrganizedChunk(**t)) # return True if self._clean_up(temp) is not None: t = {"type": "e_data", "str_data":temp } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return True return False def _raw_plots(self, data, chunk_option:ChunkOption): if data["output_type"] is None: return False if 
data["output_type"] == "display_data": plot_infos = self._save_plots(data, chunk_option) t = {"type": "plot", "complex_data":{"plots": plot_infos, "options": chunk_option }} self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _save_plots(self, data, chunk_option:ChunkOption): figs = [] i = 1 for m in mime_extensions: if m in data["data"]: fig_full_path, fig_relative_path = self._build_file(mime_extensions[m], i, chunk_option.fig_caption, chunk_option.name) figs.append(fig_relative_path) bfig = base64.b64decode(data["data"][m]) with open(fig_full_path, "wb") as f: f.write(bfig) i += 1 return figs def _build_file(self, extension, index, fig_caption= None, name =None): fig_name = "" if fig_caption is not None: fig_name = fig_name + "_" + fig_caption if name is not None: fig_name = fig_name + "_" + name fig_name = fig_name + "_" + str(index) fig_name = fig_name + "." + extension return os.path.join(self.fig_folder, fig_name), os.path.join(self.fig_folder, fig_name) def _interactive_plots(self, data): if data["output_type"] is None: return False if data["output_type"] == "display_data": if "text/html" in data["data"]: print(self.executed_data.global_options.output.format) if self.executed_data.global_options.output.format != "html": raise Exception("output format is not HTML") else: t = {"type": "html_data", "str_data":data["data"]["text/html"].encode().decode() } self.organized_data.chunks.append(OrganizedChunk(**t)) return True return False def _organize_doc(self): for index, chunk in enumerate(self.executed_data.chunks): chunk_option = chunk.chunk.options if chunk_option.name: print(f"organizing {chunk_option.name}") else: print(f"organizing index {index}") results = chunk.results for result in results: data = result.data present = self._parse_raw(data, result.output_type) if present: continue present = self._coder_string(data) if present: continue present = self._raw_string(data) if present: continue present = self._interactive_plots(data) if present: continue present = self._raw_plots(data, chunk_option) if present: continue print("not supported format", data) t = [] c: OrganizedChunk for c in self.organized_data.chunks: last_chank: OrganizedChunk if len(t)> 0: last_chank = t[-1] else: last_chank = None if last_chank is None: t.append(c) else: if (c.type == last_chank.type) & (c.type != "plot"): last_chank.str_data = last_chank.str_data + "\n" + c.str_data else: t.append(c) self.organized_data.chunks = t @staticmethod def _clean_up(doc): d = doc.replace(" ", "").replace("\n", "") if len(d) != 0: return doc else: return None # markdown_file = self.executed_data.global_options.input_file_name.split(".")[0] + ".md" # markdown_file = os.path.join(self.executed_data.global_options.output_file_dir , markdown_file) # with open(markdown_file, "w") as f: # text = "\n".join(self.formatted_doc) # f.write(text)
    37.5625
    139
    0.533888
    8,590
    0.952856
    0
    0
    173
    0.01919
    0
    0
    1,463
    0.162285
    b974558759b358f82c2d72d79bab9c7dc3e35a76
    12,467
    py
    Python
    qibullet/robot_virtual.py
    mcaniot/qibullet
    9c5e1b319a18dd289263eb82f9d7303429bcbe21
    [ "Apache-2.0" ]
    null
    null
    null
    qibullet/robot_virtual.py
    mcaniot/qibullet
    9c5e1b319a18dd289263eb82f9d7303429bcbe21
    [ "Apache-2.0" ]
    null
    null
    null
    qibullet/robot_virtual.py
    mcaniot/qibullet
    9c5e1b319a18dd289263eb82f9d7303429bcbe21
    [ "Apache-2.0" ]
    null
    null
    null
    #!/usr/bin/env python # coding: utf-8 import sys import pybullet from qibullet.camera import * from qibullet.link import Link from qibullet.joint import Joint IS_VERSION_PYTHON_3 = sys.version_info[0] >= 3 class RobotVirtual: """ Mother class representing a virtual robot """ def __init__(self, description_file): """ Constructor Parameters: description_file - The file giving the description of the virtual robot. For now, only URDF is handled """ self.description_file = description_file self.physics_client = 0 self.active_camera = None self.camera_dict = dict() self.joint_dict = dict() self.link_dict = dict() def loadRobot(self, translation, quaternion, physicsClientId=0): """ Loads the robot into a simulation, loads the joints and the links descriptions. The joints are set to 0 rad. Parameters: translation - List containing 3 elements, the translation [x, y, z] of the robot in the WORLD frame quaternion - List containing 4 elements, the quaternion [x, y, z, q] of the robot in the WORLD frame physicsClientId - The id of the simulated instance in which the robot is supposed to be loaded Returns: boolean - True if the method ran correctly, False otherwise """ try: self.physics_client = physicsClientId self.robot_model = pybullet.loadURDF( self.description_file, translation, quaternion, useFixedBase=False, globalScaling=1.0, physicsClientId=self.physics_client, flags=pybullet.URDF_USE_SELF_COLLISION | pybullet.URDF_USE_MATERIAL_COLORS_FROM_MTL) except pybullet.error as e: raise pybullet.error("Cannot load robot model: " + str(e)) for i in range(pybullet.getNumJoints( self.robot_model, physicsClientId=self.physics_client)): if IS_VERSION_PYTHON_3: # PYTHON 3 version needs a conversion bytes to str joint_info = pybullet.getJointInfo( self.robot_model, i, physicsClientId=self.physics_client) self.link_dict[joint_info[12].decode('utf-8')] =\ Link(joint_info) if joint_info[2] == pybullet.JOINT_PRISMATIC or\ joint_info[2] == pybullet.JOINT_REVOLUTE: self.joint_dict[joint_info[1].decode('utf-8')] =\ Joint(joint_info) else: # PYTHON 2 Version joint_info = pybullet.getJointInfo( self.robot_model, i, physicsClientId=self.physics_client) self.link_dict[joint_info[12]] = Link(joint_info) if joint_info[2] == pybullet.JOINT_PRISMATIC or\ joint_info[2] == pybullet.JOINT_REVOLUTE: self.joint_dict[joint_info[1]] = Joint(joint_info) def getRobotModel(self): """ Returns the pybullet model to which the module is associated. Returns: robot_model - The pybullet model of the robot """ return self.robot_model def getPhysicsClientId(self): """ Returns the id of the simulated instance in which the module is loaded. Returns: physics_client - The id of the simulation in which the robot (possessing the module) is spawned """ return self.physics_client def setAngles(self, joint_names, joint_values, percentage_speeds): """ Set angles on the robot's joints. Tests have to be performed by the child class to guarantee the validity of the input parameters. 
Parameters: joint_names - List of string containing the name of the joints to be controlled joint_values - List of values corresponding to the angles in radians to be applied percentage_speeds - Percentages of the max speed to be used for each joint, has to be strictly superior to 0 and inferior or equal to 1 """ try: assert len(joint_names) ==\ len(joint_values) ==\ len(percentage_speeds) assert all( speed >= 0.0 and speed <= 1.0 for speed in percentage_speeds) except AssertionError: raise pybullet.error("Error in the setAngles parameters") for joint_name, joint_value, percentage_speed in zip( joint_names, joint_values, percentage_speeds): joint_speed =\ self.joint_dict[joint_name].getMaxVelocity() *\ percentage_speed pybullet.setJointMotorControl2( self.robot_model, self.joint_dict[joint_name].getIndex(), pybullet.POSITION_CONTROL, targetPosition=joint_value, maxVelocity=joint_speed, force=self.joint_dict[joint_name].getMaxEffort(), physicsClientId=self.physics_client) def getAnglesPosition(self, joint_names): """ Gets the position of the robot's joints in radians. If one of the joint doesn't exist, the method will raise a KeyError. Parameters: joint_names - List of string containing the names of the joints Returns: joint_positions - List of floats containing the joint's positions """ joint_positions = list() for joint_name in joint_names: joint_positions.append(pybullet.getJointState( self.robot_model, self.joint_dict[joint_name].getIndex(), physicsClientId=self.physics_client)[0]) return joint_positions def getAnglesVelocity(self, joint_names): """ Gets the velocity of the robot's joints in rad/s. If one of the joint doesn't exist, the method will raise a KeyError. Parameters: joint_names - List of string containing the names of the joints Returns: joint_velocities - List of floats containing the joint's velocities """ joint_velocities = list() for joint_name in joint_names: joint_velocities.append(pybullet.getJointState( self.robot_model, self.joint_dict[joint_name].getIndex(), physicsClientId=self.physics_client)[1]) return joint_velocities def subscribeCamera(self, camera_id, resolution=Camera.K_QVGA): """ Subscribe to the camera holding the camera id. WARNING: at the moment, only one camera can be subscribed. Parameters: camera_id - The id of the camera to be subscribed resolution - CameraResolution object, the resolution of the camera """ try: self.active_camera = self.camera_dict[camera_id] self.active_camera.subscribe(resolution=resolution) except KeyError: print("This camera does not exist, use a valid camera id") def unsubscribeCamera(self, camera_id): """ Unsubscribe from a camera, the one holding the camera id. Parameters: camera_id - The id of the camera to be unsubscribed """ try: # If no active camera is found, nothing is unsubscribed assert self.active_camera is not None if self.active_camera.getCameraId() == camera_id: self.active_camera.unsubscribe() self.active_camera = None except KeyError: print("This camera does not exist, use a valid camera id") except AssertionError: pass def getCameraFrame(self): """ Returns a camera frame. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. 
Returns: frame - The current camera frame as a formatted numpy array, directly exploitable from OpenCV """ try: assert self.active_camera is not None return self.active_camera.getFrame() except AssertionError: raise pybullet.error("No active camera, cannot retrieve any frame") def getCameraResolution(self): """ Returns the resolution of the active camera. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. Returns: resolution - a CameraResolution object describing the resolution of the active camera """ try: assert self.active_camera is not None return self.active_camera.getResolution() except KeyError: raise pybullet.error("No active camera, resolution unavailable") def getCameraLink(self): """ Returns the link of the active camera. Be advised that the subscribeCamera method needs to be called beforehand, otherwise a pybullet error will be raised. Returns: resolution - a Link object describing the link to which the active camera is attached """ try: assert self.active_camera is not None return self.active_camera.getCameraLink() except KeyError: raise pybullet.error("No active camera, cannot retrieve any link") def getActiveCamera(self): """ Returns the active camera of the robot. Returns: active_camera - Camera (CameraRgb or CameraDepth) object, the active camera of the robot. If there is no active camera, a None is returned """ return self.active_camera def getPosition(self): """ Gets the position of the robot's base in the world frame. Returns: x - The position of the robot's base on the x axis, in meters y - The positions of the robot's base on the y axis in meters theta - The rotation of the robot's base on the z axis in meters """ position, quaternions = pybullet.getBasePositionAndOrientation( self.robot_model, physicsClientId=self.physics_client) theta = pybullet.getEulerFromQuaternion(quaternions)[2] return position[0], position[1], theta def isSelfColliding(self, link_names): """ Specifies if a link is colliding with the rest of the virtual robot. Parameters: link_names - String or list of string containing the names of the links to be checked for self collision. WARNING: only the links with corresponding meshes should be used, otherwise the link cannot self collide Returns: self_colliding - Boolean, if True at least one of the links is self colliding """ try: if type(link_names) is str: assert link_names in self.link_dict.keys() names = [link_names] else: assert set(link_names).issubset(self.link_dict.keys()) names = list(link_names) for name in names: contact_tuple = pybullet.getContactPoints( bodyA=self.robot_model, bodyB=self.robot_model, linkIndexA=self.link_dict[name].getIndex(), physicsClientId=self.physics_client) contact_tuple += pybullet.getContactPoints( bodyA=self.robot_model, bodyB=self.robot_model, linkIndexB=self.link_dict[name].getIndex(), physicsClientId=self.physics_client) if len(contact_tuple) != 0: return True return False except AssertionError: raise pybullet.error( "Unauthorized link checking for self collisions")
    35.31728
    79
    0.593006
    12,256
    0.983075
    0
    0
    0
    0
    0
    0
    5,530
    0.443571
    b974d5d1bd35654f50415a8f7c66f3fb9a0316ab
    704
    py
    Python
    tests/test_formatter.py
    hbraux/kafkacli
    5f7ed23150932b66b484fb43dd6210b6c0968776
    [ "MIT" ]
    null
    null
    null
    tests/test_formatter.py
    hbraux/kafkacli
    5f7ed23150932b66b484fb43dd6210b6c0968776
    [ "MIT" ]
    null
    null
    null
    tests/test_formatter.py
    hbraux/kafkacli
    5f7ed23150932b66b484fb43dd6210b6c0968776
    [ "MIT" ]
    null
    null
    null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import pytest
import json

from kafkacli.formatter import Formatter

sampleJson = json.loads('{"a":"s", "b":1}')


def test_print_default(capsys):
    Formatter().print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{"a": "s", "b": 1}\n'


def test_print_idents(capsys):
    Formatter(indents=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'


def test_print_colors(capsys):
    Formatter(colors=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == \
        '{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
    24.275862
    62
    0.640625
    0
    0
    0
    0
    0
    0
    0
    0
    173
    0.245739
    b9750e636d7a3d49a65558af431533fc2e745edb
    187
    py
    Python
    src/jobs/forms.py
    arc198/DJANGO-JOB-SITE
    d9547c4ee85751677ba6458380b609973c3b4a8d
    [ "MIT" ]
    20
    2018-05-04T18:42:35.000Z
    2021-03-18T07:15:12.000Z
    src/jobs/forms.py
    fleepgeek/django-jobsite
    d9547c4ee85751677ba6458380b609973c3b4a8d
    [ "MIT" ]
    5
    2020-02-11T22:22:33.000Z
    2021-06-10T20:18:05.000Z
    src/jobs/forms.py
    arc198/DJANGO-JOB-SITE
    d9547c4ee85751677ba6458380b609973c3b4a8d
    [ "MIT" ]
    8
    2018-05-04T19:03:23.000Z
    2020-09-23T00:24:46.000Z
from django import forms

from .models import Application


class ApplicationForm(forms.ModelForm):
    class Meta:
        model = Application
        fields = ('resume', 'cover_letter',)
    23.375
    44
    0.700535
    128
    0.684492
    0
    0
    0
    0
    0
    0
    22
    0.117647
    b975e6fb7fb3fa8849afb4e4ce41618c2ce94c1b
    451
    py
    Python
    src/test/tests/unit/protocol.py
    ylee88/visit
    8e0920996d84fef70a7014b0d770360918d849d5
    [ "BSD-3-Clause" ]
    1
    2022-01-27T23:52:04.000Z
    2022-01-27T23:52:04.000Z
    src/test/tests/unit/protocol.py
    ylee88/visit
    8e0920996d84fef70a7014b0d770360918d849d5
    [ "BSD-3-Clause" ]
    null
    null
    null
    src/test/tests/unit/protocol.py
    ylee88/visit
    8e0920996d84fef70a7014b0d770360918d849d5
    [ "BSD-3-Clause" ]
    null
    null
    null
# ----------------------------------------------------------------------------
#  CLASSES: nightly
#
#  Test Case:  protocolo.py
#
#  Tests:      vistprotocol unit test
#
#  Mark C. Miller, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------

tapp = visit_bin_path("visitprotocol")
res = sexe(tapp, ret_output=True)
if res["return_code"] == 0:
    excode = 111
else:
    excode = 113
Exit(excode)
    26.529412
    78
    0.432373
    0
    0
    0
    0
    0
    0
    0
    0
    317
    0.702882
    b97645cb1bc48b7d30c6b37e139952912087b791
    3,348
    py
    Python
    pyMazeBacktrack.py
    Dozed12/pyMazeBacktrack
    aaa2a902fdca17dca6e2ee00e672b6bb38da5639
    [ "MIT" ]
    2
    2019-02-22T10:35:25.000Z
    2020-08-11T01:25:12.000Z
    pyMazeBacktrack.py
    Dozed12/pyMazeBacktrack
    aaa2a902fdca17dca6e2ee00e672b6bb38da5639
    [ "MIT" ]
    null
    null
    null
    pyMazeBacktrack.py
    Dozed12/pyMazeBacktrack
    aaa2a902fdca17dca6e2ee00e672b6bb38da5639
    [ "MIT" ]
    null
    null
    null
import libtcodpy as libtcod
from random import randint

nSquares = 30
nTiles = nSquares * 2 + 1

SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles

libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)


def CheckDir(x, y, size, direction, table):
    if direction == 1:
        if y - 2 <= 0:
            return 0
        if table[x][y-2] == white:
            return 0
    elif direction == 2:
        if x + 2 >= size:
            return 0
        if table[x+2][y] == white:
            return 0
    elif direction == 3:
        if y + 2 >= size:
            return 0
        if table[x][y+2] == white:
            return 0
    elif direction == 4:
        if x - 2 <= 0:
            return 0
        if table[x-2][y] == white:
            return 0
    return 1


def Possible(x, y, table, size):
    if x+2 < size:
        if table[x+2][y] == black:
            return 1
    if x-2 > 0:
        if table[x-2][y] == black:
            return 1
    if y+2 < size:
        if table[x][y+2] == black:
            return 1
    if y-2 > 0:
        if table[x][y-2] == black:
            return 1
    return 0


black = libtcod.black
white = libtcod.white

Table = [[0 for i in range(nTiles)] for i in range(nTiles)]

for x in range(nTiles):
    for y in range(nTiles):
        Table[x][y] = black
        libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
libtcod.console_flush()

Memory = []

CurrX = 1
CurrY = 1

Table[CurrX][CurrY] = white

end = 0
while end == 0:
    while Possible(CurrX, CurrY, Table, nTiles):
        Dir = randint(1, 4)
        while CheckDir(CurrX, CurrY, nTiles, Dir, Table) == 0:
            Dir = randint(1, 4)
        if Dir == 1:
            Table[CurrX][CurrY - 1] = white
            CurrY -= 2
            Table[CurrX][CurrY] = white
        elif Dir == 2:
            Table[CurrX + 1][CurrY] = white
            CurrX += 2
            Table[CurrX][CurrY] = white
        elif Dir == 3:
            Table[CurrX][CurrY + 1] = white
            CurrY += 2
            Table[CurrX][CurrY] = white
        elif Dir == 4:
            Table[CurrX - 1][CurrY] = white
            CurrX -= 2
            Table[CurrX][CurrY] = white
        Memory.append(Dir)

        # print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
        libtcod.console_flush()

    while Possible(CurrX, CurrY, Table, nTiles) == 0:
        MemorySize = len(Memory)
        Dir = Memory[MemorySize-1]
        if Dir == 1:
            CurrY += 2
        elif Dir == 2:
            CurrX -= 2
        elif Dir == 3:
            CurrY -= 2
        elif Dir == 4:
            CurrX += 2
        del Memory[MemorySize-1]
        if CurrX == 1 and CurrY == 1:
            end = 1
            break

        # print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
        libtcod.console_flush()

libtcod.console_wait_for_keypress(True)
    20.168675
    106
    0.496416
    0
    0
    0
    0
    0
    0
    0
    0
    48
    0.014337
    b978586a0e39802db346feaf3a0aa1c91c336f05
    3,011
    py
    Python
    source/tests/test_resources.py
    aws-solutions/maintaining-personalized-experiences-with-machine-learning
    3f6f1b0069df4828eae9b0835b717500189e4f71
    [ "Apache-2.0" ]
    6
    2021-09-23T16:33:24.000Z
    2022-03-31T11:45:13.000Z
    source/tests/test_resources.py
    aws-solutions/maintaining-personalized-experiences-with-machine-learning
    3f6f1b0069df4828eae9b0835b717500189e4f71
    [ "Apache-2.0" ]
    4
    2021-09-24T21:34:14.000Z
    2022-01-27T22:11:08.000Z
    source/tests/test_resources.py
    aws-solutions/maintaining-personalized-experiences-with-machine-learning
    3f6f1b0069df4828eae9b0835b717500189e4f71
    [ "Apache-2.0" ]
    9
    2021-09-23T23:24:46.000Z
    2022-02-12T04:53:16.000Z
# ######################################################################################################################
#  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
#  with the License. You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
#  on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
#  the specific language governing permissions and limitations under the License.
# ######################################################################################################################

import pytest

from shared.resource import (
    DatasetGroup,
    Schema,
    Dataset,
    DatasetImportJob,
    Solution,
    SolutionVersion,
    Campaign,
    EventTracker,
    BatchSegmentJob,
    BatchInferenceJob,
)


@pytest.mark.parametrize(
    "klass,camel,dash,snake",
    [
        (DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
        (Schema, "schema", "schema", "schema"),
        (Dataset, "dataset", "dataset", "dataset"),
        (
            DatasetImportJob,
            "datasetImportJob",
            "dataset-import-job",
            "dataset_import_job",
        ),
        (Solution, "solution", "solution", "solution"),
        (SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
        (Campaign, "campaign", "campaign", "campaign"),
        (EventTracker, "eventTracker", "event-tracker", "event_tracker"),
        (
            BatchInferenceJob,
            "batchInferenceJob",
            "batch-inference-job",
            "batch_inference_job",
        ),
        (BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
    ],
    ids=[
        "DatasetGroup",
        "Schema",
        "Dataset",
        "DatasetImportJob",
        "Solution",
        "SolutionVersion",
        "Campaign",
        "EventTracker",
        "BatchInferenceJob",
        "BatchSegmentJob,",
    ],
)
def test_resource_naming(klass, camel, dash, snake):
    assert klass().name.camel == camel
    assert klass().name.dash == dash
    assert klass().name.snake == snake
    42.408451
    120
    0.454334
    0
    0
    0
    0
    1,332
    0.442378
    0
    0
    2,027
    0.673198
    b9787b11fbcd5779df09a2f0f27e44e75ad576ac
    1,870
    py
    Python
    app_venv/Lib/site-packages/phonenumbers/data/region_AG.py
    orlandofv/sianna
    f07dd6dbc62a9604f31ab800e482e62f14fba766
    [ "MIT" ]
    null
    null
    null
    app_venv/Lib/site-packages/phonenumbers/data/region_AG.py
    orlandofv/sianna
    f07dd6dbc62a9604f31ab800e482e62f14fba766
    [ "MIT" ]
    null
    null
    null
    app_venv/Lib/site-packages/phonenumbers/data/region_AG.py
    orlandofv/sianna
    f07dd6dbc62a9604f31ab800e482e62f14fba766
    [ "MIT" ]
    null
    null
    null
    """Auto-generated file, do not edit by hand. AG metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_AG = PhoneMetadata(id='AG', country_code=1, international_prefix='011', general_desc=PhoneNumberDesc(national_number_pattern='(?:268|[58]\\d\\d|900)\\d{7}', possible_length=(10,), possible_length_local_only=(7,)), fixed_line=PhoneNumberDesc(national_number_pattern='268(?:4(?:6[0-38]|84)|56[0-2])\\d{4}', example_number='2684601234', possible_length=(10,), possible_length_local_only=(7,)), mobile=PhoneNumberDesc(national_number_pattern='268(?:464|7(?:1[3-9]|[28]\\d|3[0246]|64|7[0-689]))\\d{4}', example_number='2684641234', possible_length=(10,), possible_length_local_only=(7,)), toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)), premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)), personal_number=PhoneNumberDesc(national_number_pattern='52(?:355[0-46-9]|4(?:5(?:2[024-9]|5[0-46-9])|60[1-9]|9(?:2[0-5]|49)))\\d{4}|52(?:3(?:[2-46-9][02-9]|5[02-46-9])|4(?:[2-478][02-9]|5[034]|6[2-9]|9[05-9])|7[2-4]\\d)\\d{5}|52[34][2-9]1[02-9]\\d{4}|5(?:00|2[1256]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)), voip=PhoneNumberDesc(national_number_pattern='26848[01]\\d{4}', example_number='2684801234', possible_length=(10,), possible_length_local_only=(7,)), pager=PhoneNumberDesc(national_number_pattern='26840[69]\\d{4}', example_number='2684061234', possible_length=(10,), possible_length_local_only=(7,)), national_prefix='1', national_prefix_for_parsing='1|([457]\\d{6})$', national_prefix_transform_rule='268\\1', leading_digits='268', mobile_number_portable_region=True)
    103.888889
    352
    0.711765
    0
    0
    0
    0
    0
    0
    0
    0
    638
    0.341176
    b97884a1b2bbd76cce01bb9efe2744d31832af25
    2,182
    py
    Python
    gradefiles-send.py
    lapets/bu-gsubmit-grading
    69c40a763908be1c954dce3e5e5aab854ac379ff
    [ "MIT" ]
    3
    2016-10-03T15:29:20.000Z
    2019-06-28T17:33:06.000Z
    gradefiles-send.py
    lapets/bu-gsubmit-grading
    69c40a763908be1c954dce3e5e5aab854ac379ff
    [ "MIT" ]
    null
    null
    null
    gradefiles-send.py
    lapets/bu-gsubmit-grading
    69c40a763908be1c954dce3e5e5aab854ac379ff
    [ "MIT" ]
    null
    null
    null
#####################################################################
##
## gradefiles-send.py
##
##   Script to send grade files by email to enrolled students; the
##   input grade file names should correspond to the user names of
##   the students.
##
##

from email.mime.text import MIMEText     # For creating a message string.
from subprocess import Popen, PIPE       # For sending email on linux.
import sys                               # For command line arguments.
import os                                # For commands and file manipulation (walk, path, system).

#####################################################################
## Sending a simple email message.
##

def send(txt, courseNumber, task, sender, targets):
    msg = MIMEText(txt)
    msg["From"] = sender + "@bu.edu"
    msg["To"] = ",".join([target + "@bu.edu" for target in targets])
    msg["Cc"] = sender + "@bu.edu"
    msg["Subject"] = "CS " + courseNumber + " " + task + " grade"
    p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
    p.communicate(bytes(msg.as_string(), 'UTF-8'))

#####################################################################
## Process the command line parameters.
##

if len(sys.argv) == 6\
   and (int(sys.argv[1][0:3]) in range(100, 1000))\
   and sys.argv[2] in ['Fall', 'Spring']\
   and int(sys.argv[3]) in range(2000, 2100):
    courseNumber = sys.argv[1]  # Accepts course names like "591 X1."
    season = sys.argv[2]
    year = sys.argv[3]
    task = sys.argv[4]
    sender = sys.argv[5]
else:
    print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
    exit()

#####################################################################
## Check for list of files.
##

if not os.path.exists('./data'):
    print('No folder "data" containing grade files found. Exiting.')
    exit()

#####################################################################
## Send the grade files.
##

for curdir, dirs, files in os.walk('./data/'):
    for file in files:
        txt = open('./data/' + file, 'r').read()
        targets = file.split('.')[0].split("_")
        send(txt, courseNumber, task, sender, targets)
        print('Sent grade file to ' + str(targets) + '.')

#eof
    33.569231
    112
    0.519707
    0
    0
    0
    0
    0
    0
    0
    0
    1,177
    0.539413
    b9789c0f2981942a54633089abdf3245b58a73a3
    1,227
    py
    Python
    Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
    GalAster/16
    47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
    [ "Unlicense" ]
    3
    2019-10-03T01:51:38.000Z
    2019-10-04T16:15:43.000Z
    Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
    GalAster/16
    47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
    [ "Unlicense" ]
    null
    null
    null
    Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py
    GalAster/16
    47560a2132fbe4dda35a35dedfd7d8e6a8acc35a
    [ "Unlicense" ]
    1
    2020-03-17T12:58:52.000Z
    2020-03-17T12:58:52.000Z
import os
import pickle

import tensorflow as tf
import wolframclient.serializers as wxf

name = 'karras2018iclr-celebahq-1024x1024'
file = open(name + '.pkl', 'rb')
sess = tf.InteractiveSession()
G, D, Gs = pickle.load(file)

saver = tf.train.Saver()
save_path = "./target/" + name + "/"
model_name = 'model'
if not os.path.exists(save_path):
    os.makedirs(save_path)
save_path_full = os.path.join(save_path, model_name)
saver.save(sess, save_path_full)

ckpt = tf.train.get_checkpoint_state(save_path)
reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
all_variables = list(reader.get_variable_to_shape_map().keys())
npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))
wxf.export(npy, name + '.wxf', target_format='wxf')

# Save as protobuf
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess=sess,
        input_graph_def=sess.graph_def,
        # output_node_names=['G_paper_1/images_out']
        output_node_names=['G_paper_1/ToRGB_lod0/add']
    )
    with tf.gfile.GFile("./target/" + name + ".pb", "wb") as file:
        # Save the model (原文: 保存模型)
        file.write(output_graph_def.SerializeToString())  # serialize the output (原文: 序列化输出)
    34.083333
    74
    0.726976
    0
    0
    0
    0
    0
    0
    0
    0
    216
    0.173494
    b978dfcb152bc099b2de54896ed9a54dfbc29639
    6,890
    py
    Python
    src/moveGoogle.py
    Quanta-Robotics/Robot-Blueberry
    7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
    [ "MIT" ]
    25
    2021-06-08T07:09:30.000Z
    2021-12-30T06:28:35.000Z
    src/moveGoogle.py
    ICT-CoU/Robot-Blueberry
    d19fd1be037df9d67de64df57a87006d74cd6c43
    [ "MIT" ]
    2
    2021-05-23T12:54:51.000Z
    2021-06-07T17:47:56.000Z
    src/moveGoogle.py
    ICT-CoU/Robot-Blueberry
    d19fd1be037df9d67de64df57a87006d74cd6c43
    [ "MIT" ]
    14
    2021-06-08T13:02:28.000Z
    2021-12-30T20:07:18.000Z
    #!/usr/bin/env python import os import os.path import yaml import time import random import multiprocessing import RPi.GPIO as GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 = {'EN': 27, 'input1': 19, 'input2': 16} Motor2 = {'EN': 22, 'input1': 26, 'input2': 20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo == None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin) for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update = -update for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) 
changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r = select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in range(0,15): r = random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select) for i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print (' i got value of t is : ',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15) print ('Offline t value is : ',t) p1 = multiprocessing.Process(target=expression,args=[t]) p1.start() say(speech)
    25.330882
    154
    0.560377
    0
    0
    0
    0
    0
    0
    0
    0
    456
    0.066183
    b978fbbcd4002601ca1e2723cae4385002e671d8
    2,063
    py
    Python
    src/onegov/translator_directory/models/language.py
    politbuero-kampagnen/onegov-cloud
    20148bf321b71f617b64376fe7249b2b9b9c4aa9
    [ "MIT" ]
    null
    null
    null
    src/onegov/translator_directory/models/language.py
    politbuero-kampagnen/onegov-cloud
    20148bf321b71f617b64376fe7249b2b9b9c4aa9
    [ "MIT" ]
    null
    null
    null
    src/onegov/translator_directory/models/language.py
    politbuero-kampagnen/onegov-cloud
    20148bf321b71f617b64376fe7249b2b9b9c4aa9
    [ "MIT" ]
    null
    null
    null
from uuid import uuid4

from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session

from onegov.core.orm import Base
from onegov.core.orm.types import UUID


spoken_association_table = Table(
    'spoken_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False
    ),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)

written_association_table = Table(
    'written_lang_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False
    ),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)

mother_tongue_association_table = Table(
    'mother_tongue_association',
    Base.metadata,
    Column(
        'translator_id',
        UUID,
        ForeignKey('translators.id'),
        nullable=False
    ),
    Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)


class Language(Base):

    __tablename__ = 'languages'

    __table_args__ = (
        Index('unique_name', 'name', unique=True),
    )

    id = Column(UUID, primary_key=True, default=uuid4)
    name = Column(Text, nullable=False)

    @property
    def speakers_count(self):
        session = object_session(self)
        return session.query(
            spoken_association_table).filter_by(lang_id=self.id).count()

    @property
    def writers_count(self):
        session = object_session(self)
        return session.query(
            written_association_table).filter_by(lang_id=self.id).count()

    @property
    def native_speakers_count(self):
        """Having it as mother tongue..."""
        session = object_session(self)
        return session.query(
            mother_tongue_association_table).filter_by(lang_id=self.id).count()

    @property
    def deletable(self):
        return (
            self.speakers_count
            + self.writers_count
            + self.native_speakers_count
        ) == 0
    25.469136
    79
    0.650994
    1,028
    0.498303
    0
    0
    773
    0.374697
    0
    0
    305
    0.147843
    b97a0b2a9f0b601569ce8973596517ed7d8790ec
    3,588
    py
    Python
    tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
    djemeljanovs/tfjs
    ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
    [ "Apache-2.0" ]
    null
    null
    null
    tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
    djemeljanovs/tfjs
    ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
    [ "Apache-2.0" ]
    null
    null
    null
    tfjs-converter/python/tensorflowjs/converters/graph_rewrite_util.py
    djemeljanovs/tfjs
    ee4430cd7a04283ec09184a3fe9d3fb27496f1dc
    [ "Apache-2.0" ]
    null
    null
    null
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import re

from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util

# Custom op name for fused depthwise conv2d
FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'
# The grappler op name for fused MatMul which starts with '_'
FUSED_MATMUL = '_FusedMatMul'


def node_from_map(node_map, name):
  """Pulls a node def from a dictionary for a given name.

  Args:
    node_map: Dictionary containing an entry indexed by name for every node.
    name: Identifies the node we want to find.

  Returns:
    NodeDef of the node with the given name.

  Raises:
    ValueError: If the node isn't present in the dictionary.
  """
  stripped_name = node_name_from_input(name)
  if stripped_name not in node_map:
    raise ValueError("No node named '%s' found in map." % name)
  return node_map[stripped_name]


def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)

  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value


# Whether to scale by gamma after normalization.
def scale_after_normalization(node):
  if node.op == "BatchNormWithGlobalNormalization":
    return node.attr["scale_after_normalization"].b
  return True


def node_name_from_input(node_name):
  """Strips off ports and other decorations to get the underlying node name."""
  if node_name.startswith("^"):
    node_name = node_name[1:]
  m = re.search(r"(.*):\d+$", node_name)
  if m:
    node_name = m.group(1)
  return node_name


def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):
  """Clean up the graph def by removing the skipped nodes and clean up the
  nodes with inputs that have been removed.

  Args:
    input_graph_def: GraphDef object to be cleaned.
    node_to_skip: Dict with node names to be skipped.
    inputs_to_remove: List of nodes to be removed from inputs of all nodes.

  Returns:
    GraphDef that has been cleaned.
  """
  result_graph_def = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    if node.name in nodes_to_skip:
      continue
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(node)
    for value in inputs_to_remove:
      for i, input_node in enumerate(new_node.input):
        if input_node == value.name:
          new_node.input[i] = value.input[0]
    result_graph_def.node.extend([new_node])
  result_graph_def.library.CopyFrom(input_graph_def.library)
  result_graph_def.versions.CopyFrom(input_graph_def.versions)
  return result_graph_def
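A quick illustration of the name-stripping helper above. The import path and the node names are made up for the example; only the stripping behaviour comes from the code:

from graph_rewrite_util import node_name_from_input  # hypothetical import path for this module

# Control-edge markers ("^") and output ports (":0", ":1", ...) are removed.
assert node_name_from_input("^MobilenetV1/Conv2d_0/weights") == "MobilenetV1/Conv2d_0/weights"
assert node_name_from_input("MobilenetV1/Conv2d_0/BiasAdd:0") == "MobilenetV1/Conv2d_0/BiasAdd"
assert node_name_from_input("plain_name") == "plain_name"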
    33.849057
    80
    0.726031
    0
    0
    0
    0
    0
    0
    0
    0
    2,035
    0.567168
    b97af59ee4283114481f3e83dc8e3cf6244bb61c
    1,014
    py
    Python
    loss_fn/classification_loss_fns/binary_cross_entropy.py
    apple/ml-cvnets
    84d992f413e52c0468f86d23196efd9dad885e6f
    [ "AML" ]
    209
    2021-10-30T08:32:10.000Z
    2022-03-31T16:18:03.000Z
    loss_fn/classification_loss_fns/binary_cross_entropy.py
    apple/ml-cvnets
    84d992f413e52c0468f86d23196efd9dad885e6f
    [ "AML" ]
    12
    2021-12-04T10:47:11.000Z
    2022-03-31T15:39:40.000Z
    loss_fn/classification_loss_fns/binary_cross_entropy.py
    apple/ml-cvnets
    84d992f413e52c0468f86d23196efd9dad885e6f
    [ "AML" ]
    50
    2021-11-01T08:15:02.000Z
    2022-03-29T08:17:34.000Z
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#

from torch.nn import functional as F
from torch import Tensor
import argparse

from . import register_classification_loss_fn
from .. import BaseCriteria


@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
    """Binary CE for classification tasks"""

    def __init__(self, opts, *args, **kwargs) -> None:
        super().__init__()

    def forward(
        self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
    ) -> Tensor:
        if target.dim() != prediction.dim():
            target = F.one_hot(target, num_classes=prediction.shape[-1])

        return F.binary_cross_entropy_with_logits(
            input=prediction,
            target=target.to(prediction.dtype),
            weight=None,
            reduction="sum",
        )

    def __repr__(self) -> str:
        return "{}()".format(self.__class__.__name__)
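The core of the loss above is one-hot expansion of integer labels followed by binary cross entropy on the logits. A standalone sketch of the same computation with plain PyTorch; the shapes and values are made up for illustration:

import torch
from torch.nn import functional as F

logits = torch.randn(4, 10)           # batch of 4 samples, 10 classes (illustrative shapes)
target = torch.randint(0, 10, (4,))   # integer class labels

# Same steps as ClsBinaryCrossEntropy.forward: expand labels to one-hot, then summed BCE on logits.
one_hot = F.one_hot(target, num_classes=logits.shape[-1]).to(logits.dtype)
loss = F.binary_cross_entropy_with_logits(input=logits, target=one_hot, reduction="sum")
print(loss)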
    28.166667
    87
    0.667653
    691
    0.68146
    0
    0
    753
    0.742604
    0
    0
    173
    0.170611
    b97c7f15dd61f4851cffcb3982337f852b3b8da5
    576
    py
    Python
    Sorting/insertion_sort.py
    lakshyarawal/pythonPractice
    4b400342198a8270c5ac0c6306afb555f927c6c1
    [ "MIT" ]
    null
    null
    null
    Sorting/insertion_sort.py
    lakshyarawal/pythonPractice
    4b400342198a8270c5ac0c6306afb555f927c6c1
    [ "MIT" ]
    null
    null
    null
    Sorting/insertion_sort.py
    lakshyarawal/pythonPractice
    4b400342198a8270c5ac0c6306afb555f927c6c1
    [ "MIT" ]
    null
    null
    null
    """ Insertion Sort Algorithm:""" """Implementation""" def insertion_sort(arr) -> list: n = len(arr) for i in range(1, n): swap_index = i for j in range(i-1, -1, -1): if arr[swap_index] < arr[j]: arr[swap_index], arr[j] = arr[j], arr[swap_index] swap_index -= 1 else: break return arr def main(): arr_input = [10, 5, 30, 1, 2, 5, 10, 10] a2 = insertion_sort(arr_input) print(a2) # Using the special variable # __name__ if __name__ == "__main__": main()
    19.2
    65
    0.522569
    0
    0
    0
    0
    0
    0
    0
    0
    100
    0.173611
    b97c828450c34038ee92e089e3f2b951d2113017
    903
    py
    Python
    nipype/interfaces/spm/__init__.py
    felixsc1/nipype
    e722d6170593583f16ddfcb95473e5d30b5f1d7c
    [ "Apache-2.0" ]
    8
    2019-05-29T09:38:30.000Z
    2021-01-20T03:36:59.000Z
    nipype/interfaces/spm/__init__.py
    felixsc1/nipype
    e722d6170593583f16ddfcb95473e5d30b5f1d7c
    [ "Apache-2.0" ]
    12
    2021-03-09T03:01:16.000Z
    2022-03-11T23:59:36.000Z
    nipype/interfaces/spm/__init__.py
    felixsc1/nipype
    e722d6170593583f16ddfcb95473e5d30b5f1d7c
    [ "Apache-2.0" ]
    1
    2020-07-17T12:49:49.000Z
    2020-07-17T12:49:49.000Z
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""

from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
                   scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
                         Coregister, Normalize, Normalize12, Segment,
                         Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
                         CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
                    OneSampleTTestDesign, TwoSampleTTestDesign,
                    PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
                    ApplyInverseDeformation, ResliceToReference, DicomImport)
    53.117647
    77
    0.653378
    0
    0
    0
    0
    0
    0
    0
    0
    169
    0.187154
    b97cd7905f5c596cb6d79b67c2c80e83907421d9
    8,257
    py
    Python
    network.py
    tobloef/neural-network
    bd05a8b9eccc0f5a973782247d39f9b5aa33156c
    [ "MIT" ]
    3
    2018-01-06T22:27:58.000Z
    2018-08-12T20:29:51.000Z
    network.py
    tobloef/neural-network
    bd05a8b9eccc0f5a973782247d39f9b5aa33156c
    [ "MIT" ]
    1
    2018-03-31T18:49:56.000Z
    2018-04-19T04:52:33.000Z
    network.py
    tobloef/neural-network
    bd05a8b9eccc0f5a973782247d39f9b5aa33156c
    [ "MIT" ]
    null
    null
    null
    import numpy as np from mathUtils import * class Network(object): """ Model for a feedforward Neural Network that use backpropagation with stochastic gradient decent. """ def __init__(self, layerSizes, biasVectors, weightMatrices): """ Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer. """ self.layerSizes = layerSizes self.biasVectors = biasVectors self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes): """ Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0. """ biasVectors = [] """Generate biases for each neuron in each layer, except the input layer.""" for size in layerSizes[1:]: """ np.random.randn generates arrays of arrays of random numbers, based on the paramters. np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers. """ biasVectors.append(np.random.randn(size, 1)) """Generate weights for connections between layers.""" weightMatrices = [] for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self, inputs): """Return a vector of the network's outputs based on the given inputs, using feedforward.""" activations = inputs for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): """ For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. This is the activation vector for the current layer. """ zVector = np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector) return activations def train(self, data, epochs, batchSize, rate, testData=None): """ Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training are used to reduce the training time. The training date is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch. """ print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs)) for e in range(epochs): np.random.shuffle(data) batches = [] for i in range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in batches: self._tuneNetwork(batch, rate) if (testData): result = self._evaluate(testData) print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result)) else: print("Epoch #{} completed.".format(e)) def _tuneNetwork(self, batch, rate): """ Tune the weights and biases of the network by using backpropagation with gradient descend. """ """ Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training date. 
""" sumBiasVectors = [] for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch: """ Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data. """ deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices """ Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. Then use these as the new weights and biases. """ newBiasVectors = [] for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector) newWeightMatrices = [] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs, expected): """ Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data. """ deltaBiasVectors = [] for biasVector in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) """Store all activations for the entire network, starting with the input layer.""" activationVector = inputs activationVectors = [inputs] """Find the z-vector for layer in the network""" zVectors = [] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector) """ * Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias. * Then move onto each hidden layer and the input layer. 
""" deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData): """Test the network with the specified test data and return the number of correct guesses.""" correctGuesses = 0 for inputs, expected in testData: """Increment correct guesses if the most active output is the expected one.""" outputs = self.getOutputs(inputs) guess = np.argmax(outputs) if (guess == expected): correctGuesses += 1 return correctGuesses
    53.270968
    286
    0.657987
    8,213
    0.994671
    0
    0
    1,053
    0.127528
    0
    0
    3,275
    0.396633
    b97d4675d330154e0b12b91fbd601affd888ea29
    1,901
    py
    Python
    examples/airflow/dags/etl_orders_7_days.py
    phixMe/marquez
    06d71635369893b371a8a9c9e7023f11d7cbb1f8
    [ "Apache-2.0" ]
    null
    null
    null
    examples/airflow/dags/etl_orders_7_days.py
    phixMe/marquez
    06d71635369893b371a8a9c9e7023f11d7cbb1f8
    [ "Apache-2.0" ]
    null
    null
    null
    examples/airflow/dags/etl_orders_7_days.py
    phixMe/marquez
    06d71635369893b371a8a9c9e7023f11d7cbb1f8
    [ "Apache-2.0" ]
    null
    null
    null
from datetime import datetime

from marquez_airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago

default_args = {
    'owner': 'datascience',
    'depends_on_past': False,
    'start_date': days_ago(1),
    'email_on_failure': False,
    'email_on_retry': False,
    'email': ['[email protected]']
}

dag = DAG(
    'etl_orders_7_days',
    schedule_interval='@hourly',
    catchup=False,
    default_args=default_args,
    description='Loads newly placed orders weekly.'
)

t1 = PostgresOperator(
    task_id='if_not_exists',
    postgres_conn_id='food_delivery_db',
    sql='''
    CREATE TABLE IF NOT EXISTS orders_7_days (
      order_id      INTEGER REFERENCES orders(id),
      placed_on     TIMESTAMP NOT NULL,
      discount_id   INTEGER REFERENCES discounts(id),
      menu_id       INTEGER REFERENCES menus(id),
      restaurant_id INTEGER REFERENCES restaurants(id),
      menu_item_id  INTEGER REFERENCES menu_items(id),
      category_id   INTEGER REFERENCES categories(id)
    );''',
    dag=dag
)

t2 = PostgresOperator(
    task_id='tuncate',
    postgres_conn_id='food_delivery_db',
    sql='TRUNCATE TABLE orders_7_days;',
    dag=dag
)

t3 = PostgresOperator(
    task_id='insert',
    postgres_conn_id='food_delivery_db',
    sql='''
    INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)
    SELECT o.id AS order_id, o.placed_on, o.discount_id,
           m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id
      FROM orders AS o
     INNER JOIN menu_items AS mi ON mi.id = o.menu_item_id
     INNER JOIN categories AS c ON c.id = mi.category_id
     INNER JOIN menus AS m ON m.id = c.menu_id
     WHERE o.placed_on >= NOW() - interval '7 days'
    ''',
    dag=dag
)

t1 >> t2 >> t3
    29.246154
    135
    0.681746
    0
    0
    0
    0
    0
    0
    0
    0
    1,256
    0.660705
    b97deb7d2bd255cd9a3d9f169d969333b63452ec
    313
    py
    Python
    sample/pizza.py
    marianarmorgado/python-starter
    8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
    [ "MIT" ]
    null
    null
    null
    sample/pizza.py
    marianarmorgado/python-starter
    8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
    [ "MIT" ]
    null
    null
    null
    sample/pizza.py
    marianarmorgado/python-starter
    8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2
    [ "MIT" ]
    null
    null
    null
# store information about a pizza being ordered
pizza = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra vegan cheese']
}

# summarize the order
print("You ordered a " + pizza['crust'] + "-crust pizza" +
      "with the following toppings:")

for topping in pizza['toppings']:
    print("\t" + topping)
    26.083333
    59
    0.645367
    0
    0
    0
    0
    0
    0
    0
    0
    204
    0.651757
    b97e1419e0e45b84ecc462227c812c10beb92718
    181
    py
    Python
    YouTube/CursoEmVideo/python/ex012.py
    Fh-Shadow/Progamando
    f496d83c36e9a079ed06b4e7c34396c57f539de9
    [ "MIT" ]
    null
    null
    null
    YouTube/CursoEmVideo/python/ex012.py
    Fh-Shadow/Progamando
    f496d83c36e9a079ed06b4e7c34396c57f539de9
    [ "MIT" ]
    null
    null
    null
    YouTube/CursoEmVideo/python/ex012.py
    Fh-Shadow/Progamando
    f496d83c36e9a079ed06b4e7c34396c57f539de9
    [ "MIT" ]
    null
    null
    null
a = float(input('Qual é o preço do produto? R$'))
d = a - (a * 23 / 100)
print('O produto que custava R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}'.format(a, d))
    45.25
    106
    0.607735
    0
    0
    0
    0
    0
    0
    0
    0
    120
    0.648649
    b97e5feb1052b87d359d8e3d9f63ba930bff8e66
    15,038
    py
    Python
    dnnlib/submission/submit.py
    gperdrizet/gansformer
    c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
    [ "MIT" ]
    1,172
    2021-03-02T02:00:44.000Z
    2022-03-31T02:46:45.000Z
    dnnlib/submission/submit.py
    gperdrizet/gansformer
    c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
    [ "MIT" ]
    37
    2021-03-03T14:11:11.000Z
    2022-03-12T15:40:15.000Z
    dnnlib/submission/submit.py
    gperdrizet/gansformer
    c68ba623aa498c83d8df4c4f0a3b5e3f63c773a5
    [ "MIT" ]
    138
    2021-03-02T06:37:10.000Z
    2022-03-30T14:59:09.000Z
    # Submit a function to be run either locally or in a computing cluster. # Compared to original StyleGAN implementation, we extend the support for automatic training resumption, # and network recompilation. import copy import inspect import os import pathlib import pickle import platform import pprint import re import shutil import sys import time import traceback from enum import Enum from .. import util from ..util import EasyDict from . import internal class SubmitTarget(Enum): # The target where the function should be run # LOCAL: Run it locally LOCAL = 1 class PathType(Enum): # Determines in which format should a path be formatted # WINDOWS: Format with Windows style # LINUX: Format with Linux/Posix style # AUTO: Use current OS type to select either WINDOWS or LINUX WINDOWS = 1 LINUX = 2 AUTO = 3 class PlatformExtras: # A mixed bag of values used by dnnlib heuristics # Attributes: # data_reader_buffer_size: Used by DataReader to size internal shared memory buffers # data_reader_process_count: Number of worker processes to spawn (zero for single # thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count = 0 # single threaded default _user_name_override = None class SubmitConfig(util.EasyDict): # Strongly typed config dict needed to submit runs # Attributes: # run_dir_root: Path to the run dir root. Can be optionally templated with tags # Needs to always be run through get_path_from_template # run_desc: Description of the run. Will be used in the run dir and task name # run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir # run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will # be the src directory inside the run dir # submit_target: Submit target enum value. Used to select where the run is actually launched # num_gpus: Number of GPUs used/requested for the run # print_info: Whether to print debug information when submitting # local.do_not_copy_source_files: Do not copy source files from the working directory to the # run dir. # run_id: Automatically populated value during submit # run_name: Automatically populated value during submit # run_dir: Automatically populated value during submit # run_func_name: Automatically populated value during submit # run_func_kwargs: Automatically populated value during submit # user_name: Automatically populated value during submit. Can be set by the user which will then # override the automatic value # task_name: Automatically populated value during submit # host_name: Automatically populated value during submit # platform_extras: Automatically populated values during submit. 
Used by various dnnlib libraries # such as the DataReader class def __init__(self): super().__init__() # run (set these) self.run_dir_root = "" # should always be passed through get_path_from_template self.run_desc = "" self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode", "_cudacache"] self.run_dir_extra_files = [] # submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.nvprof = False self.local = internal.local.TargetOptions() self.datasets = [] # (automatically populated) self.run_id = None self.run_name = None self.run_dir = None self.run_func_name = None self.run_func_kwargs = None self.user_name = None self.task_name = None self.host_name = "localhost" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: # Replace tags in the given path template and return either Windows or Linux formatted path # automatically select path type depending on running OS if path_type == PathType.AUTO: if platform.system() == "Windows": path_type = PathType.WINDOWS elif platform.system() == "Linux": path_type = PathType.LINUX else: raise RuntimeError("Unknown platform") path_template = path_template.replace("<USERNAME>", get_user_name()) # return correctly formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError("Unknown platform") def get_template_from_path(path: str) -> str: # Convert a normal path back to its template representation path = path.replace("\\", "/") return path def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: # Convert a normal path to template and the convert it back to a normal path with given path type path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) -> None: # Set the global username override value global _user_name_override _user_name_override = name def get_user_name(): # Get the current user name if _user_name_override is not None: return _user_name_override elif platform.system() == "Windows": return os.getlogin() elif platform.system() == "Linux": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return "unknown" else: raise RuntimeError("Unknown platform") def make_run_dir_path(*paths): # Make a path/filename that resides under the current submit run_dir # Args: # *paths: Path components to be passed to os.path.join # Returns: # A file/dirname rooted at submit_config.run_dir. If there's no # submit_config or run_dir, the base directory is the current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))` import dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str: # Create a new run dir with increasing ID number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir) and create_new: raise RuntimeError("The run dir already exists! 
({0})".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: # Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id # Assumes IDs are numbers at the start of the directory names dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile("^\\d+") # match one or more digits at the start of the string run_id = 0 for dir_name in dir_names: m = r.match(dir_name) if m is not None: i = int(m.group()) run_id = max(run_id, i + 1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: # Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb")) with open(os.path.join(run_dir, "submit_config.txt"), "w") as f: pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert "." in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(".") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the actual run function call for handling logging, exceptions, typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True) else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh) logger = util.Logger(file_name = None, should_flush = True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False try: print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if "submit_config" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, "log.txt") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer 
sys.exit(1) to happen after we close the logs and create a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If we hit an error, get out of the script now and signal the error # to whatever process that started this script. if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = ".stylegan2-cache") return open(file_or_url, "rb") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding = "latin1") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False, resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None: # Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place. # create_newdir: enforces the creation of a new run directory # resume: resumes a prior experiment using its existing run directory # load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is not None # unknown target # Disallow submitting jobs with zero num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0): raise RuntimeError("submit_config.num_gpus must be set to a non-zero value") if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name) # Farm specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption, load_config = True to load the prior submit_config file from the directory # (so to maintain the original configuration of the experiment rather than the newly provided # command-line arguments. if load_config: config_file = os.path.join(host_run_dir, "submit_config.pkl") if os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config["run_id"] = old_submit_config["run_id"] submit_config["run_name"] = old_submit_config["run_name"] if "resume_pkl" in old_submit_config["run_func_kwargs"]: submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"] submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"] _populate_run_dir(submit_config, host_run_dir) return farm.submit(submit_config, host_run_dir)
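To make the API above concrete, a hedged usage sketch follows. Only SubmitConfig, SubmitTarget and submit_run come from the module; the training-function path, run directory, and run id/name values are invented, and in this variant the caller appears to be responsible for setting run_id and run_name before submitting.

# Hypothetical usage sketch of the dnnlib submission helpers shown above.
# "my_project.train.training_loop" and the run-dir values are made-up examples.
from dnnlib.submission.submit import SubmitConfig, SubmitTarget, submit_run

submit_config = SubmitConfig()
submit_config.run_dir_root = "results"        # passed through get_path_from_template internally
submit_config.run_desc = "example-run"
submit_config.submit_target = SubmitTarget.LOCAL
submit_config.num_gpus = 1

# This fork's _create_run_dir_local / task_name code reads these directly,
# so the caller (or the resume logic) has to populate them.
submit_config.run_id = 0
submit_config.run_name = "00000-example-run"

# Keyword arguments after run_func_name are forwarded to the run function.
submit_run(submit_config, "my_project.train.training_loop", total_kimg=25000)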
    43.337176
    238
    0.691847
    3,564
    0.237
    0
    0
    0
    0
    0
    0
    5,938
    0.394866
    b97f4f2077af2e6d4198d160e8fea133c49dee89
    4,187
    py
    Python
    pyecharts/custom/grid.py
    zilong305/pycharts
    6cf1bb7f17001a36da6a766615a78b1dbef5918f
    [ "MIT" ]
    null
    null
    null
    pyecharts/custom/grid.py
    zilong305/pycharts
    6cf1bb7f17001a36da6a766615a78b1dbef5918f
    [ "MIT" ]
    null
    null
    null
    pyecharts/custom/grid.py
    zilong305/pycharts
    6cf1bb7f17001a36da6a766615a78b1dbef5918f
    [ "MIT" ]
    null
    null
    null
    #!/usr/bin/env python # coding=utf-8 from pyecharts.option import grid class Grid(object): def __init__(self): self._chart = None self._js_dependencies = set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): """ :param chart: chart instance :param grid_width: Width of grid component. Adaptive by default. :param grid_height: Height of grid component. Adaptive by default. :param grid_top: Distance between grid component and the top side of the container. :param grid_bottom: Distance between grid component and the bottom side of the container. :param grid_left: Distance between grid component and the left side of the container. :param grid_right: Distance between grid component and the right side of the container. :return: """ if self._chart is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only identify for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): """ :param series: series data :return: """ _series, _xaxis, _yaxis, _legend, _title = series for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def render(self, path="render.html"): """ :param path: :return: """ self._chart.render(path) def render_embed(self): """ :return: """ return self._chart.render_embed() def show_config(self): """ :return: """ import pprint return pprint.pprint(self._chart._option) @property def chart(self): """ :return: """ return self._chart def _repr_html_(self): """ :return: """ return self._chart._repr_html_()
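For orientation, a hypothetical usage sketch in the legacy pyecharts style this fork targets; the Bar chart, its data, and the grid percentages are invented, and only Grid.add and Grid.render come from the file above.

# Hypothetical usage sketch (not from the repository).
from pyecharts import Bar                      # assumed charting class from the same package
from pyecharts.custom.grid import Grid         # import path assumed from the file location shown

bar = Bar("Sales")
bar.add("shop A", ["Mon", "Tue", "Wed"], [5, 20, 36])

grid = Grid()
grid.add(bar, grid_top="10%", grid_bottom="60%")   # grid_* values are illustrative
grid.render("grid.html")                           # writes an HTML file next to the script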
    31.961832
    100
    0.540482
    4,112
    0.982087
    0
    0
    99
    0.023645
    0
    0
    1,192
    0.284691
    b97f78c59a8296809ae879f2d6f8355b0f8c52d0
    4,588
    py
    Python
    smooch/conversations.py
    devinmcgloin/smooch
    c9561c3e7f1546efc58daa472b70f738d0d35e13
    [ "MIT" ]
    3
    2016-07-04T12:02:03.000Z
    2017-03-20T19:39:36.000Z
    smooch/conversations.py
    devinmcgloin/smooch
    c9561c3e7f1546efc58daa472b70f738d0d35e13
    [ "MIT" ]
    41
    2019-05-28T09:54:04.000Z
    2020-02-20T05:34:19.000Z
    smooch/conversations.py
    devinmcgloin/smooch
    c9561c3e7f1546efc58daa472b70f738d0d35e13
    [ "MIT" ]
    2
    2016-07-20T14:31:45.000Z
    2016-11-18T12:19:38.000Z
    import logging from .endpoint import ask def send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id, message): logging.warning("send message called with invalid args user_id={} message={}".format(user_id, message)) return logging.debug("Sending message: user_id={0} message={1} sent_by_maker={2}".format(user_id, message, sent_by_maker)) role = "appMaker" if not sent_by_maker: role = "appUser" data = {"text": message, "role": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if not user_id: logging.warning("get conversation called with invalid arg user_id={}".format(user_id)) return logging.debug("Get conversation: user_id={}".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message, options): """Note that amount is a integer which specifies the amount of cents in the transaction Smooch will default to the currency specified in your account settings.""" if not valid_args(user_id, message, options): logging.warning("request payment called with invalid args user_id={} message={} options={}" .format(user_id, message, options)) return role = "appMaker" buttons = [] for short_text, result in options: buttons.append({ "type": "buy", "text": short_text, "amount": result}) data = {"text": message, "role": role, "actions": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message, options): """Sends a series of links. The options field is a dictionary in which the keys are descriptions and values uris""" if not valid_args(user_id, message, options): logging.warning("send links called with invalid args user_id={} message={} options={}" .format(user_id, message, options)) return role = "appMaker" buttons = [] for short_text, result in options: buttons.append({ "type": "link", "text": short_text, "uri": result}) data = {"text": message, "role": role, "actions": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message, options): """Sends a series of options that you can listen for on your webhook. The options field is a dictionary in which the keys are descriptions and values the postback payload. 
You need to set up a webhook to listen for the postback.""" if not valid_args(user_id, message, options): logging.warning("send postback called with invalid args user_id={} message={} options={}" .format(user_id, message, options)) return role = "appMaker" buttons = [] for short_text, result in options: buttons.append({ "type": "postback", "text": short_text, "payload": result }) data = {"text": message, "role": role, "actions": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message, options): """Options is a list of tuples in which the first element is the type of the button, second the short text, and third the result for the specified type.""" if not valid_args(user_id, message, options): logging.warning("send buttons called with invalid args user_id={} message={} options={}" .format(user_id, message, options)) return role = "appMaker" buttons = [] for text, kind, result in options: buttons.append({ "type": kind, "text": text, "payload": result }) data = {"text": message, "role": role, "actions": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message, options=None): if options is not None: if user_id and message and options and type(options) is list: return True return False else: if user_id and message: return True return False
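As a quick orientation, a hypothetical usage sketch of the helpers above; the user id, texts, and option tuples are invented, the import path is assumed from the file location, and the underlying ask() endpoint must already be configured with Smooch credentials. Note that, despite the docstrings, the code iterates the options as tuples, so lists of tuples are used here.

# Hypothetical usage sketch (not from the repository).
from smooch import conversations   # import path assumed from smooch/conversations.py

conversations.send_message("user-123", "Hello from the app maker!")
conversations.send_links("user-123", "Useful links:",
                         [("Docs", "https://example.com/docs"),
                          ("Status", "https://example.com/status")])
# send_buttons unpacks (text, kind, result) per the code above.
conversations.send_buttons("user-123", "Pick one:",
                           [("Yes", "postback", "YES_PAYLOAD"),
                            ("No", "postback", "NO_PAYLOAD")])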
    30.586667
    129
    0.598954
    0
    0
    0
    0
    0
    0
    0
    0
    1,670
    0.363993
    b980ab008a2dab6e2778edec1d7d9e24b2315a73
    1,086
    py
    Python
    cifar/evalit.py
    Sharkbyteprojects/IRIS-ML_and_Deep-Learning
    f0e053cf7a0e69019bbba36e6da3e60d76105fe9
    [ "MIT" ]
    null
    null
    null
    cifar/evalit.py
    Sharkbyteprojects/IRIS-ML_and_Deep-Learning
    f0e053cf7a0e69019bbba36e6da3e60d76105fe9
    [ "MIT" ]
    null
    null
    null
    cifar/evalit.py
    Sharkbyteprojects/IRIS-ML_and_Deep-Learning
    f0e053cf7a0e69019bbba36e6da3e60d76105fe9
    [ "MIT" ]
    null
    null
    null
import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile

print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()

print("Load Model")
model = load_model("cifar-model.h5")
CIFAR_10_CLASSES = ["Plane", "Car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]

def calc(imname):
    test_image = Image.open("asset/" + imname)
    test_image = test_image.resize((32, 32), Image.ANTIALIAS)
    test_image = np.array(test_image, dtype="float32")
    test_image /= 255
    test_image = test_image.reshape(-1, 32, 32, 3)
    predictions = model.predict(test_image)
    index_max_pred = np.argmax(predictions)
    plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
    plt.imshow(test_image[0].reshape(32, 32, 3))
    print(predictions)
    plt.show()

print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
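A small portability note for the resize call above: Image.ANTIALIAS was removed in Pillow 10, where the equivalent filter is Image.LANCZOS, so on a current Pillow the line becomes roughly:

# On Pillow >= 10, where Image.ANTIALIAS no longer exists:
test_image = test_image.resize((32, 32), Image.LANCZOS)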
    27.15
    88
    0.710866
    0
    0
    0
    0
    0
    0
    0
    0
    270
    0.248619
    b980be1e0d2b8db749e25a4f49c35cdddbdca9d9
    1,650
    py
    Python
    tt/urls.py
    samiksha-patil/Knowledge-Sharing-Platform
    22e61a659d5ad63fe656fa639dc897cbdebad4fe
    [ "bzip2-1.0.6" ]
    1
    2021-05-09T08:18:49.000Z
    2021-05-09T08:18:49.000Z
    tt/urls.py
    samiksha-patil/Knowledge-Sharing-Platform
    22e61a659d5ad63fe656fa639dc897cbdebad4fe
    [ "bzip2-1.0.6" ]
    9
    2021-03-19T01:11:35.000Z
    2022-03-12T00:20:13.000Z
    tt/urls.py
    samiksha-patil/Knowledge-Sharing-Platform
    22e61a659d5ad63fe656fa639dc897cbdebad4fe
    [ "bzip2-1.0.6" ]
    null
    null
    null
    """ tt URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ # Uncomment next two lines to enable admin: from django.contrib import admin from django.urls import path, include from users import views as user_views from django.contrib.auth import views as auth_views from upload import views as upload_views from django.conf import settings from django.conf.urls.static import static urlpatterns = [ # Uncomment the next line to enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    35.869565
    100
    0.726061
    0
    0
    0
    0
    0
    0
    0
    0
    890
    0.539394
    b9814171798d1f2ddf5247c67182a7e7e032132e
    105
    py
    Python
    src/git/cmd.py
    danihodovic/dht
    636f54d70f8c6ca60ab48f2815b3e9e1a336d78f
    [ "MIT" ]
    2
    2021-01-21T15:04:32.000Z
    2021-01-21T16:23:32.000Z
    src/git/cmd.py
    danihodovic/dht
    636f54d70f8c6ca60ab48f2815b3e9e1a336d78f
    [ "MIT" ]
    2
    2020-12-30T20:34:51.000Z
    2021-01-17T20:02:02.000Z
    src/git/cmd.py
    danihodovic/dht
    636f54d70f8c6ca60ab48f2815b3e9e1a336d78f
    [ "MIT" ]
    null
    null
    null
import os

import click

os.environ["GIT_PYTHON_REFRESH"] = "quiet"


@click.group()
def git():
    pass
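The file only defines an empty click group; as a hypothetical sketch (not from the repository), a subcommand could be attached to it like this. The status command and the import path are assumptions.

# Hypothetical usage sketch: attach a subcommand to the `git` group defined above.
import click
from src.git.cmd import git   # import path assumed from the file location shown

@git.command()
@click.argument("path", default=".")
def status(path):
    """Print a short status message for PATH (illustrative only)."""
    click.echo(f"checking {path}")

if __name__ == "__main__":
    git()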
    9.545455
    42
    0.67619
    0
    0
    0
    0
    34
    0.32381
    0
    0
    27
    0.257143
    b98238142a5e4442e3c9fdd220f6bde9274299de
    570
    py
    Python
    TwitterImage2JPG.py
    Tymec/Playground
    5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
    [ "MIT" ]
    null
    null
    null
    TwitterImage2JPG.py
    Tymec/Playground
    5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
    [ "MIT" ]
    null
    null
    null
    TwitterImage2JPG.py
    Tymec/Playground
    5a4aaa4a88e084d8d31803485b1ec521ad49a3d1
    [ "MIT" ]
    1
    2019-02-19T10:32:07.000Z
    2019-02-19T10:32:07.000Z
import glob
import os


def main():
    os.chdir("F:/Downloads")
    extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]
    file_list = list()
    for extension in extensions:
        file_list = file_list + glob.glob(extension)
    for file in file_list:
        for extension in extensions:
            new_extension = extension.replace('*', '')
            if file.endswith(new_extension):
                new_name = file.replace(new_extension, '') + ".jpg"
                os.rename(file, new_name)
    print("Done!")


# The original guard compared __name__ to itself (always true); the standard
# main guard is clearly what was intended.
if __name__ == "__main__":
    main()
    22.8
    67
    0.585965
    0
    0
    0
    0
    0
    0
    0
    0
    72
    0.126316
    b982943f0b8c226209550f8c7f62a0e03d0b5ff5
    6,405
    py
    Python
    Data Analysis/classification.py
    Riccardo95Facchini/DIL-2019
    febeda55fd647943a1b8c49b3c5192fcd69fdaf5
    [ "MIT" ]
    null
    null
    null
    Data Analysis/classification.py
    Riccardo95Facchini/DIL-2019
    febeda55fd647943a1b8c49b3c5192fcd69fdaf5
    [ "MIT" ]
    null
    null
    null
    Data Analysis/classification.py
    Riccardo95Facchini/DIL-2019
    febeda55fd647943a1b8c49b3c5192fcd69fdaf5
    [ "MIT" ]
    null
    null
    null
    import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics import classification_report #EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header = 0) dataset.head() #DELETE NEXT CALLS DATA dataset = dataset.drop("contact", axis=1) dataset = dataset.drop("day", axis=1) dataset = dataset.drop("month", axis=1) dataset = dataset.drop("duration", axis=1) dataset = dataset.drop("campaign", axis=1) dataset = dataset.drop("pdays", axis=1) dataset = dataset.drop("previous", axis=1) dataset = dataset.drop("poutcome", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums = {"marital": {"married": 1, "single": 0, "divorced":-1}, "education": {"primary": 1, "secondary": 2, "tertiary": 3}, "default": {"yes": 1, "no": 0}, "housing": {"yes": 1, "no": 0}, "loan": {"yes": 1, "no": 0}, "y": {"yes": 1, "no": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7] y = dataset.iloc[:, 7] X = pd.get_dummies(X, columns=["job"], prefix=["job"]) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) #DECISION TREE from sklearn import tree from sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy as np import matplotlib.pyplot as plt import pandas as pd # TRAINING - TEST from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # SCALING from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(X_train, y_train) # PREDICTION y_pred = classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample dataset_sample = pd.get_dummies(dataset, columns=["job"], prefix=["job"]) #SPLIT FEATURE AND TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size = 0.25, random_state = 0) X = pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES not_sub = X[X.y==0] sub = X[X.y==1] not_sub_downsampled = resample(not_sub, replace = False, n_samples = len(sub), random_state = 27) # COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE from imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from imblearn.over_sampling import SMOTE y = dataset_sample.y X = dataset_sample.drop('y', axis=1) # setting up testing and training sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='lower right')
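One compatibility note for the SMOTE sections above: newer imbalanced-learn releases renamed the ratio argument to sampling_strategy and fit_sample to fit_resample, so running the script against a current install needs roughly this adjustment (behaviour is otherwise the same):

# Equivalent calls on recent imbalanced-learn versions:
from imblearn.over_sampling import SMOTE

sm = SMOTE(random_state=27, sampling_strategy=1.0)    # 'ratio' was renamed
X_train, y_train = sm.fit_resample(X_train, y_train)  # 'fit_sample' was renamed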
    25.722892
    98
    0.721624
    0
    0
    0
    0
    0
    0
    0
    0
    1,210
    0.188915
    b982c2b4e976b723dfa3208c1bc1e4ea51b77ac9
    5,562
    py
    Python
    tools/c7n_azure/tests/test_route_table.py
    anastasiia-zolochevska/cloud-custodian
    f25315a01bec808c16ab0e2d433d6151cf5769e4
    [ "Apache-2.0" ]
    2
    2020-01-20T19:46:28.000Z
    2020-08-19T14:20:27.000Z
    tools/c7n_azure/tests/test_route_table.py
    anastasiia-zolochevska/cloud-custodian
    f25315a01bec808c16ab0e2d433d6151cf5769e4
    [ "Apache-2.0" ]
    79
    2019-03-20T12:27:06.000Z
    2019-08-14T14:07:04.000Z
    tools/c7n_azure/tests/test_route_table.py
    anastasiia-zolochevska/cloud-custodian
    f25315a01bec808c16ab0e2d433d6151cf5769e4
    [ "Apache-2.0" ]
    2
    2019-04-22T15:20:23.000Z
    2019-08-27T12:37:51.000Z
    # Copyright 2015-2018 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from azure_common import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ] }) resources = p.run() self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, { 'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1 } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, "Only one route table should be found") route_table = resources[0] 
self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), "The wrong route table was found") properties = route_table.get('properties') self.assertIsNotNone(properties, "Missing properties") subnets = properties.get('subnets') self.assertIsNotNone(subnets, "Missing subnets") self.assertEqual(1, len(subnets), "There should only be one subnet") subnet = subnets[0] self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
    35.426752
    95
    0.538655
    4,920
    0.884574
    0
    0
    3,668
    0.659475
    0
    0
    1,900
    0.341604
    b98531b0567b9e4719006397ec461d3fa4999e4b
    11,730
    py
    Python
    proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
    pkthein/sparts_all_fam
    ff162e4ea8c3919a197dc0cc13fde6b32da113c7
    [ "Apache-2.0" ]
    1
    2019-04-03T18:31:36.000Z
    2019-04-03T18:31:36.000Z
    proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
    pkthein/sparts_all_fam
    ff162e4ea8c3919a197dc0cc13fde6b32da113c7
    [ "Apache-2.0" ]
    null
    null
    null
    proto/tp_artifact_1.0/build/lib/sawtooth_artifact/processor/handler.py
    pkthein/sparts_all_fam
    ff162e4ea8c3919a197dc0cc13fde6b32da113c7
    [ "Apache-2.0" ]
    null
    null
    null
    # Copyright 2016 Intel Corporation # Copyright 2017 Wind River # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib import logging import json from collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: """ Class for handling the Transaction Family : Artifact Attributes: namespace_prefix (str): The namespace prefix of the transaction family """ def __init__(self, namespace_prefix): """ Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace prefix of the transaction family """ self._namespace_prefix = namespace_prefix @property def family_name(self): """ type: str Returns the family name of the handler object. """ return "artifact" @property def family_versions(self): """ type: list of str Returns the family version of the handler object. """ return ["1.0"] @property def encodings(self): """ type: list of str Returns the encoding scheme used for the data for the handler object. """ return ["csv-utf8"] @property def namespaces(self): """ type: list of str Returns the namespaces associating with the handler object. """ return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def apply(self, transaction, context): """ Applys the payload from transaction onto the state storage. Args: transaction (Transaction): The transaction pertaining the payload context (State): The current state of the ledger Returns: type: State The new state of the ledger, which includes the data from the transaction, is returned to be stored on the state storage. Raises: InvalidTransaction: * If deserialization for payload from transaction failed * If "create" was called on non-unique uuid * If "amend" was called on non-existing uuid * If "Add..." 
were called on non-existing uuid * If invalid operation was called InternalError: * If deserialization of State.data failed """ # Parsing required fields from transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id = payload["uuid"] artifact_alias = payload["alias"] artifact_name = payload["name"] artifact_type = payload["content_type"] artifact_checksum = payload["checksum"] artifact_label = payload["label"] artifact_openchain = payload["openchain"] action = payload["action"] prev = payload["prev_block"] cur = payload["cur_block"] timestamp = payload["timestamp"] artifact_list = payload["artifact_list"] uri_list = payload["uri_list"] except ValueError: raise InvalidTransaction("Invalid payload serialization") # Soft sanity check and loading required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard sanity check before creating final payload for the state storage if len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact["uuid"] except ValueError: raise InternalError("Failed to deserialize data.") else: stored_artifact_id = stored_artifact = None if action == "create" and stored_artifact_id is not None: raise InvalidTransaction("Invalid Action-artifact already exists.") elif action == "create": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif action == "amend" and stored_artifact_id is not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif action == "AddArtifact" or action == "AddURI": if stored_artifact_id is None: raise InvalidTransaction( "Invalid Action-requires an existing artifact." ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) # Adding the final payload to the state storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): """ Constructs the payload to be stored in the state storage. 
Args: artifact_uuid (str): The uuid of the artifact artifact_alias (str): The alias of the artifact artifact_name (str): The name of the artifact artifact_type (str): The type of the artifact artifact_checksum (str): The checksum of the artifact artifact_label (str): The label of the artifact artifact_openchain (str): The openchain of the artifact prev (str): The previous block id of the transaction (default "0") cur (str): the current block id of the transaction timestamp (str): The UTC time for when the transaction was submitted artifact_list (list of dict): The list of the artifact uuid associated with the artifact (default []) uri_list (list of dict): The list of the uri associated with the artifact (default []) Returns: type: dict The dictionary pertaining all the param is created and returned to be stored on the state storage. """ return { "uuid" : artifact_id, "alias" : artifact_alias, "name" : artifact_name, "content_type" : artifact_type, "checksum" : artifact_checksum, "label" : artifact_label, "openchain" : artifact_openchain, "prev_block" : prev, "cur_block" : cur, "timestamp" : timestamp, "artifact_list" : artifact_list, "uri_list" : uri_list } def validate_transaction(artifact_id, action): """ Performs soft sanity check in order to improve runtime by eliminating the obvious exception errors. Args: artifact_id (str): The uuid of the artifact action (str): The command to be performed Raises: InvalidTransaction: If the uuid or the action are not passed in or the action is not a valid action. """ if not artifact_id: raise InvalidTransaction("Artifact ID is required") if not action: raise InvalidTransaction("Action is required") if action not in ("AddArtifact", "create", "AddURI", "amend"): raise InvalidTransaction("Invalid action: {}".format(action)) def make_artifact_address(namespace_prefix, artifact_id): """ Creates an artifact address which will be used to recover the associated UUID if the artifact already exists in the state storage; or, used as a key to store the new data into the state storage. Args: namespace_prefix (str): The prefix associating with the transaction family artifact_id (str): The uuid of the artifact Returns: type: str The address-to-be, which associates the uuid and the namespace prefix. """ return namespace_prefix + \ hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64] def _display(msg): """ Logs the message to the debug logger. Args: msg (str): The message that is to be logged into the debug logger """ n = msg.count("\n") if n > 0: msg = msg.split("\n") length = max(len(line) for line in msg) else: length = len(msg) msg = [msg] LOGGER.debug("+" + (length + 2) * "-" + "+") for line in msg: LOGGER.debug("+ " + line.center(length) + " +") LOGGER.debug("+" + (length + 2) * "-" + "+") ################################################################################ # # ################################################################################
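For orientation, a hedged sketch of the payload shape that apply() parses for a "create" action, and of how the state address is derived. Every value below is made up, the namespace prefix is illustrative, and the import path is assumed from the file location shown.

# Illustrative sketch (not from the repository).
import json

from sawtooth_artifact.processor.handler import make_artifact_address  # assumed import path

payload = {
    "uuid": "7709ca8d-01f4-4de2-69ed-16b7ebae8127",   # made-up artifact uuid
    "alias": "zlib",
    "name": "zlib compression library",
    "content_type": "source archive",
    "checksum": "abc123",
    "label": "zlib-1.2.11",
    "openchain": "true",
    "action": "create",
    "prev_block": "0",
    "cur_block": "1",
    "timestamp": "2019-04-03T18:31:36Z",
    "artifact_list": [],
    "uri_list": [],
}
encoded = json.dumps(payload).encode()                       # becomes transaction.payload
address = make_artifact_address("5f4d76", payload["uuid"])   # prefix value is illustrative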
    39.897959
    82
    0.521313
    5,899
    0.502899
    0
    0
    749
    0.063853
    0
    0
    6,596
    0.562319
    b9877d896f97460bc5a35787da6277925368bc9f
    764
    py
    Python
    ReviewsCollector.py
    fsandx/moodybooks
    5c13fe43849e4fa861a163c74411e9f796518bc9
    [ "MIT" ]
    null
    null
    null
    ReviewsCollector.py
    fsandx/moodybooks
    5c13fe43849e4fa861a163c74411e9f796518bc9
    [ "MIT" ]
    null
    null
    null
    ReviewsCollector.py
    fsandx/moodybooks
    5c13fe43849e4fa861a163c74411e9f796518bc9
    [ "MIT" ]
    null
    null
    null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json files and downloads the html files to local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json


class ReviewsCollector(scrapy.Spider):
    # Scrapy requires a spider name; the original file omitted it, and the
    # value chosen here is arbitrary.
    name = "reviews"

    def start_requests(self):
        with open("data/books.json") as f:
            self.data = json.load(f)
        for item in self.data:
            if item['url'] is not None:
                yield scrapy.Request(url=item['url'],
                                     headers={'Referer': 'http://www.google.com/'},
                                     callback=self.parse)

    def parse(self, response):
        filename = response.url.split("/")[-1] + '.html'
        with open('data/reviews/' + filename, 'wb+') as f:
            f.write(response.body)
    29.384615
    124
    0.611257
    539
    0.705497
    311
    0.407068
    0
    0
    0
    0
    284
    0.371728
    b9887b38cf06939bc8dd710e9861e2366862482a
    3,120
    py
    Python
    firelight/interfaces/light.py
    roshie548/firelight
    3a5af5e2a1e5784127baebcf1517ffddcaff4062
    [ "MIT" ]
    16
    2021-11-29T03:05:31.000Z
    2022-01-19T05:32:45.000Z
    firelight/interfaces/light.py
    roshie548/firelight
    3a5af5e2a1e5784127baebcf1517ffddcaff4062
    [ "MIT" ]
    null
    null
    null
    firelight/interfaces/light.py
    roshie548/firelight
    3a5af5e2a1e5784127baebcf1517ffddcaff4062
    [ "MIT" ]
    null
    null
    null
from abc import ABC, abstractmethod

from .color import Color


class LightSystem(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'discover_lights')
                and callable(subclass.discover_lights)
                and hasattr(subclass, 'set_color_all_lights')
                and callable(subclass.set_color_all_lights))

    @abstractmethod
    def discover_lights(self):
        """Discover the lights and groups in this LightSystem."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of all the lights in the LightSystem."""
        raise NotImplementedError


class LightGroup(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)
                and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)
                and hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'set_color') and callable(subclass.set_color))

    @abstractmethod
    def turn_on(self):
        """Turn on the lights in this group."""
        raise NotImplementedError

    @abstractmethod
    def turn_off(self):
        """Turn off the lights in this group."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of this light."""
        raise NotImplementedError


class LightDevice(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)
                and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)
                and hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'set_color') and callable(subclass.set_color))

    @abstractmethod
    def turn_on(self):
        """Turn on this light."""
        raise NotImplementedError

    @abstractmethod
    def turn_off(self):
        """Turn off the light."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of this light."""
        raise NotImplementedError
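The three ABCs above all follow the same shape; as a concrete illustration, here is a minimal, hypothetical LightDevice implementation (not part of firelight) that simply logs to the console.

# Hypothetical sketch: a console-backed LightDevice, assuming the classes above are importable.
class ConsoleLight(LightDevice):
    def __init__(self, name: str):
        self._name = name
        self._transition_ms = 0

    def turn_on(self):
        print(f"{self._name}: on")

    def turn_off(self):
        print(f"{self._name}: off")

    def set_transition_time(self, transition_time: int):
        self._transition_ms = transition_time

    def set_color(self, color):
        # `color` is expected to be a firelight Color; only its repr is used here.
        print(f"{self._name}: color -> {color!r} over {self._transition_ms} ms")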
    32.842105
    77
    0.641026
    3,050
    0.977564
    0
    0
    2,901
    0.929808
    0
    0
    680
    0.217949
    b98b6f0b6e5f35ef44fd272ec1f3a99b4d72acf0
    1,293
    py
    Python
    PolymorphismPYTHON/Polypy.py
    cadeng23/oop-cjgustafson
    cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
    [ "MIT" ]
    null
    null
    null
    PolymorphismPYTHON/Polypy.py
    cadeng23/oop-cjgustafson
    cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
    [ "MIT" ]
    null
    null
    null
    PolymorphismPYTHON/Polypy.py
    cadeng23/oop-cjgustafson
    cd3e5ca0e37f8b00a80516c6c8d5d6789a77d9a8
    [ "MIT" ]
    null
    null
    null
import random


class Family:
    def __init__(self, first, last, hair):
        self.first = first
        self.last = last
        self.hair = hair

    def fullname(self):
        return '{} {}'.format(self.first, self.last)

    def eyefind(self):
        temp = random.choice([1, 2])
        # Using the Punnett square from genetics, a blue-eyed donor and a
        # brown-eyed donor give 50/50 odds that the child's eyes will be
        # brown or blue.
        if temp == 1:
            self.EYES = "Brown"
        else:
            self.EYES = "Blue"
        return self.EYES

    def Apply_eyes(self):
        self.eyes = self.EYES


Daughter = Family('Ashley', 'Smith', 'Brown')
Son = Family('Kevin', 'Smith', 'Brown')
# The original printed Daughter.eyes / Son.eyes at this point, but no eye
# colour has been assigned yet, so that would raise AttributeError.

# With the kids being born, inheritance defines what colour hair and eyes
# they may randomly get.
class Kids(Family):
    pass


# Eyes are unknown for now; brown is used as the dominant hair colour.
Daughter = Kids('Danielle', 'Smith', 'Brown')
Son = Kids('Kevin', 'Smith', 'Brown')

# Determine the eye colour, then apply it before printing.
Daughter.eyefind()
Son.eyefind()
Daughter.Apply_eyes()
Son.Apply_eyes()
print(Daughter.eyes)
print(Son.eyes)
    23.089286
    66
    0.618716
    1,027
    0.794277
    0
    0
    0
    0
    0
    0
    493
    0.381284
    b98c3a1636cff18e5244db1f52b8e6e89e2c99b5
    1,494
    py
    Python
    homeassistant/components/device_tracker/owntracks.py
    evancohen/home-assistant
    dafc0ced6b07025c03417d8e7a2c0133b4c622fc
    [ "MIT" ]
    14
    2015-11-10T07:57:43.000Z
    2021-08-29T13:45:26.000Z
    homeassistant/components/device_tracker/owntracks.py
    evancohen/home-assistant
    dafc0ced6b07025c03417d8e7a2c0133b4c622fc
    [ "MIT" ]
    null
    null
    null
    homeassistant/components/device_tracker/owntracks.py
    evancohen/home-assistant
    dafc0ced6b07025c03417d8e7a2c0133b4c622fc
    [ "MIT" ]
    8
    2015-11-14T16:40:41.000Z
    2020-02-17T19:48:08.000Z
    """ homeassistant.components.device_tracker.owntracks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OwnTracks platform for the device tracker. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ """ import json import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see): """ Set up a OwnTracksks tracker. """ def owntracks_location_update(topic, payload, qos): """ MQTT message received. """ # Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload as JSON: %s', payload) return if not isinstance(data, dict) or data.get('_type') != 'location': return parts = topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if 'acc' in data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1) return True
    27.666667
    74
    0.582999
    0
    0
    0
    0
    0
    0
    0
    0
    617
    0.412985
    b98c6a6e2a07073f4614093d6ae5d6469afd6835
    48,027
    py
    Python
    src/models/end_to_end_event_coreference.py
    luyaojie/E3C
    4b2f33da4629211fd6a3738077794f821c7f7c8b
    [ "MIT" ]
    2
    2022-02-20T15:13:11.000Z
    2022-03-22T03:47:21.000Z
    src/models/end_to_end_event_coreference.py
    luyaojie/E3C
    4b2f33da4629211fd6a3738077794f821c7f7c8b
    [ "MIT" ]
    null
    null
    null
    src/models/end_to_end_event_coreference.py
    luyaojie/E3C
    4b2f33da4629211fd6a3738077794f821c7f7c8b
    [ "MIT" ]
    null
    null
    null
    #!/usr/bin/env python # -*- coding:utf-8 -*- # Created by Roger on 2019-09-10 # Mostly by AllenNLP import logging import math from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from overrides import overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register("end-to-end-event-coreference") class End2EndEventCoreferenceResolver(Model): """ This ``Model`` implements the coreference resolution model described "End-to-end Neural Coreference Resolution" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The basic outline of this model is to get an embedded representation of each span in the document. These span representations are scored and used to prune away spans that are unlikely to occur in a coreference cluster. For the remaining spans, the model decides which antecedent span (if any) they are coreferent with. The resulting coreference links, after applying transitivity, imply a clustering of the spans in the document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get as input to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for each word in the document. mention_feedforward : ``FeedForward`` This feedforward network is applied to the span representations which is then scored by a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network is applied to pairs of span representation, along with any pairwise features, which is then scored by a linear layer. feature_size: ``int`` The embedding size for all the embedded features, such as distances or span widths. max_span_width: ``int`` The maximum width of candidate spans. spans_per_word: float, required. A multiplier between zero and one which controls what percentage of candidate mention spans we retain with respect to the number of words in the document. max_antecedents: int, required. For each mention which survives the pruning stage, we consider this many antecedents. lexical_dropout: ``int`` The probability of dropping out dimensions of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate the regularization penalty during training. 
""" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width: int = 1, spans_per_word: float = 0.1, max_antecedents: int = 50, lexical_dropout: float = 0.2, pretrain_ed: bool = False, pretrain_coref: bool = False, coref_loss_weight: float = 1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float = None, local_window_size: int = 10, attention_type: str = 'dot', decoding: str = 'type-guided', type_threshold: float = -1., type_refine: bool = True, type_match_in_eval: bool = True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type = attention_type self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, "labels")) if context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination="x,y", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination="x,y", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not 
None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None # NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10 possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x initializer(self) def _get_event_embedding(self, span_mask): """ :param span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) """ event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): """ :param top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) """ event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob = 
torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device < 0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, # type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ """ Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required. The output of a ``TextField`` representing the text of the document. spans : ``torch.IntTensor``, required. A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of indices into the text of the document. coref_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the cluster ids of each span, or -1 for those which do not appear in any clusters. event_type_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the event label of the specific span. realis_labels : ``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size, num_spans), representing the realis label of the specific span. metadata : ``List[Dict[str, Any]]``, optional (default = None). A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys from this dictionary, which respectively have the original text and the annotated gold coreference clusters for that instance. Returns ------- An output dictionary consisting of: top_spans : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing the start and end word indices of the top spans that survived the pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span the index (with respect to top_spans) of the possible antecedents the model considered. predicted_antecedents : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the index (with respect to antecedent_indices) of the most likely antecedent. -1 means there was no predicted link. loss : ``torch.FloatTensor``, optional A scalar loss to be optimised. 
""" # Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields return -1 when they are used as padding. As we do # some comparisons based on span widths when we attend over the # span representations that we generate from these indices, we # need them to be <= 0. This is only relevant in edge cases where # the number of spans we consider after the pruning stage is >= the # total number of spans, because in this case, it is possible we might # consider a masked span. # Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) # torch.index_select only accepts 1D indices, but here # we need to select spans for each element in the batch. # This reformats the indices to take into account their # index into the batch. We precompute this here to make # the multiple calls to util.batched_index_select below more efficient. 
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for which spans to consider as mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for antecedent spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now that we have our variables in terms of num_spans_to_keep, we need to # compare span pairs to decide each span's antecedent. Each span can only # have prior spans as antecedents, and we only consider up to max_antecedents # prior spans. So the first thing we do is construct a matrix mapping a span's # index to the indices of its allowed antecedents. Note that this is independent # of the batch dimension - it's just a function of the span's position in # top_spans. The spans are in document order, so we can just use the relative # index of the spans to know which other spans are allowed antecedents. # Once we have this matrix, we reformat our variables again to get embeddings # for all valid antecedents for each span. This gives us variables with shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we can use to make coreference decisions between valid span pairs. # Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to the antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. 
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for each span which survived the pruning stage, # a predicted antecedent. This implies a clustering if we group # mentions which refer to each other in a chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract one here because index 0 is the "no antecedent" class, # so this makes the indices line up with actual spans if the prediction # is greater than -1. predicted_antecedents -= 1 output_dict = {"top_spans": top_spans, "antecedent_indices": valid_antecedent_indices, "predicted_antecedents": predicted_antecedents, "coreference_scores": coreference_scores, } if coref_labels is not None and event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for the spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight # Now, compute the loss using the negative marginal log-likelihood. # This is equal to the log of the sum of the probabilities of all antecedent predictions # that would be consistent with the data, in the sense that we are minimising, for a # given span, the negative marginal log likelihood of all antecedents which are in the # same gold cluster as the span we are currently considering. Each span i predicts a # single antecedent j, but there might be several prior mentions k in the same # coreference cluster that would be valid antecedents. Our loss is the sum of the # probability assigned to all valid antecedents. This is a valid objective for # clustering as we don't mind which antecedent is predicted, so long as they are in # the same coreference cluster. 
if self._pretrain_ed: # All antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict["loss"] = coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not None: output_dict["document"] = [x["original_text"] for x in metadata] output_dict["offset"] = [x["token_offset"] for x in metadata] output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata] return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): """ Converts the list of spans and predicted antecedent indices into clusters of spans for each element in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on an instance or batch of instances. Returns ------- The same output dictionary, but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance in the batch, the list of clusters, which are in turn comprised of a list of (start, end) inclusive spans into the original document. """ return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool = False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {"c_p": coref_precision, "c_r": coref_recall, "c_f1": coref_f1, "m_p": mention_result['precision'], "m_r": mention_result['recall'], "m_f1": mention_result['f1-score'], "nil": self._nil_label_metric.get_metric(reset), "type": self._type_label_metric.get_metric(reset), "coref": self._coref_label_metric.get_metric(reset), "t_l": self._type_loss_metric.get_metric(reset), "c_l": self._coref_loss_metric.get_metric(reset), "a_f1": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): """ event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the event types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent spans we are considering for each top span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). 
return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) """ event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): """ Computes an embedding representation of pairs of spans for the pairwise scoring function to consider. This includes both the original span representations, the element-wise similarity of the span representations, and an embedding representation of the distance between the two spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of the top spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent spans we are considering for each top span. Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets between each top span and its antecedent spans in terms of spans we are considering. Has shape (1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the pair of spans to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) """ # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): """ Generates a binary indicator for every pair of spans. This label is one if and only if the pair of spans belong to the same cluster. The labels are augmented with a dummy antecedent at the zeroth position, which represents the prediction that a span does not have any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The cluster id label for every span. 
The id is arbitrary, as we just care about the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster id label for every antecedent span. The id is arbitrary, as we just care about the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing whether a given pair of spans belong to the same cluster in the gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents + 1). """ # Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: """ Computes scores for every pair of spans. Additionally, a dummy label is included, representing the decision that the span is not coreferent with anything. For the dummy label, the score is always zero. For the true antecedent spans, the score consists of the pairwise antecedent score and the unary mention scores for the span and its antecedent. The factoring allows the model to blame many of the absent links on bad spans, enabling the pruning strategy used in the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of the mask for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing the unormalised score for each (span, antecedent) pair we considered. 
""" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: """ This method generates possible antecedents per span which survived the pruning stage. This procedure is `generic across the batch`. The reason this is the case is that each span in a batch can be coreferent with any previous span, but here we are computing the possible `indices` of these spans. So, regardless of the batch, the 1st span _cannot_ have any antecedents, because there are none to select from. Similarly, each element can only predict previous spans, so this returns a matrix of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to (i - 1) - j if j <= i, or zero otherwise. Parameters ---------- num_spans_to_keep : ``int``, required. The number of spans that were kept while pruning. max_antecedents : ``int``, required. The maximum number of antecedent spans to consider for every span. device: ``int``, required. The CUDA device to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices of every antecedent to consider with respect to the top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between the span and each of its antecedents in terms of the number of considered spans (i.e not the word distance between the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing whether each antecedent span is valid. Required since different spans have different numbers of valid antecedents. For example, the first span in the document should have no valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. """ # Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets # In our matrix of indices, the upper triangular part will be negative # because the offsets will be > the target indices. We want to mask these, # because these are exactly the indices which we don't want to predict, per span. # We're generating a logspace mask here because we will eventually create a # distribution over these indices, so we need the 0 elements of the mask to be -inf # in order to not mess up the normalisation of the distribution. 
# Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
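The antecedent index bookkeeping described in the `_generate_valid_antecedents` docstring above can be restated without the AllenNLP utilities. The sketch below is illustrative only: the function name and the use of `torch.arange` are not part of the file above, but the shapes and the log-space masking follow the docstring directly.

import torch

def valid_antecedents_sketch(num_spans_to_keep: int, max_antecedents: int):
    # Illustrative re-statement of _generate_valid_antecedents above, without
    # allennlp.nn.util. Shapes follow the docstring:
    #   indices:  (num_spans_to_keep, max_antecedents)
    #   offsets:  (1, max_antecedents)
    #   log_mask: (1, num_spans_to_keep, max_antecedents)
    target_indices = torch.arange(num_spans_to_keep).unsqueeze(1)   # (k, 1)
    offsets = (torch.arange(max_antecedents) + 1).unsqueeze(0)      # (1, a)
    raw_indices = target_indices - offsets                          # (k, a)
    # Negative entries are antecedents "before the document starts"; their mask
    # becomes log(0) = -inf so a later softmax assigns them zero probability.
    log_mask = (raw_indices >= 0).float().unsqueeze(0).log()        # (1, k, a)
    indices = raw_indices.clamp(min=0)                              # (k, a)
    return indices, offsets, log_mask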
    54.514188
    134
    0.629271
    43,414
    0.90395
    0
    0
    43,462
    0.904949
    0
    0
    19,498
    0.40598
    b98ccbb0c859fdccad6b30924e5845122d497aa5
    1,964
    py
    Python
    week2/7litersProblem.py
    vietanhtran2710/ArtificialIntelligenceHomework
    f4da761016d67477b50856cadf1e2560230d3f79
    [ "MIT" ]
    3
    2021-09-20T08:32:23.000Z
    2021-09-25T08:11:48.000Z
    week2/7litersProblem.py
    vietanhtran2710/ArtificialIntelligenceHomework
    f4da761016d67477b50856cadf1e2560230d3f79
    [ "MIT" ]
    null
    null
    null
    week2/7litersProblem.py
    vietanhtran2710/ArtificialIntelligenceHomework
    f4da761016d67477b50856cadf1e2560230d3f79
    [ "MIT" ]
    null
    null
    null
    """ Given 3 bottles of capacities 3, 5, and 9 liters, count number of all possible solutions to get 7 liters """ current_path = [[0, 0, 0]] CAPACITIES = (3, 5, 9) solutions_count = 0 def move_to_new_state(current_state): global solutions_count, current_path if 7 in current_state: solutions_count += 1 else: # Empty bottle for i in range(3): if current_state[i] != 0: new_state = list(current_state) new_state[i] = 0 if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle for i in range(3): if current_state[i] != CAPACITIES[i]: new_state = list(current_state) new_state[i] = CAPACITIES[i] if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from one bottle to another for i in range(3): for j in range(3): if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]: new_state = list(current_state) liters_change = min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] += liters_change new_state[i] -= liters_change if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ == "__main__": try: current_state = [0, 0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result: at least 44900799 solution
    35.709091
    91
    0.548371
    0
    0
    0
    0
    0
    0
    0
    0
    227
    0.11558
    b98d02f62eca1818cb1fb297d1c8644dd35ff288
    8,263
    py
    Python
    st2common/st2common/bootstrap/rulesregistrar.py
    avezraj/st2
    519c7f6819e52fb289c440bb7d1df7b558bb9ed7
    [ "Apache-2.0" ]
    null
    null
    null
    st2common/st2common/bootstrap/rulesregistrar.py
    avezraj/st2
    519c7f6819e52fb289c440bb7d1df7b558bb9ed7
    [ "Apache-2.0" ]
    null
    null
    null
    st2common/st2common/bootstrap/rulesregistrar.py
    avezraj/st2
    519c7f6819e52fb289c440bb7d1df7b558bb9ed7
    [ "Apache-2.0" ]
    null
    null
    null
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import os

import six

from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.rule import RuleAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.rule import Rule
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
from st2common.exceptions.db import coditationDBObjectNotFoundError
import st2common.content.utils as content_utils

__all__ = [
    'RulesRegistrar',
    'register_rules'
]

LOG = logging.getLogger(__name__)


class RulesRegistrar(ResourceRegistrar):
    ALLOWED_EXTENSIONS = ALLOWED_EXTS

    def register_from_packs(self, base_dirs):
        """
        :return: Number of rules registered.
        :rtype: ``int``
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)

        registered_count = 0
        content = self._pack_loader.get_content(base_dirs=base_dirs,
                                                content_type='rules')

        for pack, rules_dir in six.iteritems(content):
            if not rules_dir:
                LOG.debug('Pack %s does not contain rules.', pack)
                continue
            try:
                LOG.debug('Registering rules from pack: %s', pack)
                rules = self._get_rules_from_pack(rules_dir)
                count = self._register_rules_from_pack(pack, rules)
                registered_count += count
            except Exception as e:
                if self._fail_on_failure:
                    raise e

                LOG.exception('Failed registering all rules from pack: %s', rules_dir)

        return registered_count

    def register_from_pack(self, pack_dir):
        """
        Register all the rules from the provided pack.

        :return: Number of rules registered.
        :rtype: ``int``
        """
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack = os.path.split(pack_dir)
        rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
                                                            content_type='rules')

        # Register pack first
        self.register_pack(pack_name=pack, pack_dir=pack_dir)

        registered_count = 0
        if not rules_dir:
            return registered_count

        LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)

        try:
            rules = self._get_rules_from_pack(rules_dir=rules_dir)
            registered_count = self._register_rules_from_pack(pack=pack, rules=rules)
        except Exception as e:
            if self._fail_on_failure:
                raise e

            LOG.exception('Failed registering all rules from pack: %s', rules_dir)

        return registered_count

    def _get_rules_from_pack(self, rules_dir):
        return self.get_resources_from_pack(resources_dir=rules_dir)

    def _register_rules_from_pack(self, pack, rules):
        registered_count = 0

        # TODO: Refactor this monstrosity
        for rule in rules:
            LOG.debug('Loading rule from %s.', rule)
            try:
                content = self._meta_loader.load(rule)
                pack_field = content.get('pack', None)
                if not pack_field:
                    content['pack'] = pack
                    pack_field = pack
                if pack_field != pack:
                    raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
                                    (pack, pack_field))

                metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,
                                                                             file_path=rule,
                                                                             use_pack_cache=True)
                content['metadata_file'] = metadata_file

                rule_api = RuleAPI(**content)
                rule_api.validate()
                rule_db = RuleAPI.to_model(rule_api)

                # Migration from rule without pack to rule with pack.
                # There might be a rule with same name but in pack `default`
                # generated in migration script. In this case, we want to
                # delete so we don't have duplicates.
                if pack_field != DEFAULT_PACK_NAME:
                    try:
                        rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                         pack=DEFAULT_PACK_NAME)
                        LOG.debug('Looking for rule %s in pack %s', content['name'],
                                  DEFAULT_PACK_NAME)
                        existing = Rule.get_by_ref(rule_ref)
                        LOG.debug('Existing = %s', existing)
                        if existing:
                            LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)
                            Rule.delete(existing)
                    except:
                        LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)

                try:
                    rule_ref = ResourceReference.to_string_reference(name=content['name'],
                                                                     pack=content['pack'])
                    existing = Rule.get_by_ref(rule_ref)
                    if existing:
                        rule_db.id = existing.id
                        LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)
                except coditationDBObjectNotFoundError:
                    LOG.debug('Rule %s not found. Creating new one.', rule)

                try:
                    rule_db = Rule.add_or_update(rule_db)
                    increment_trigger_ref_count(rule_api=rule_api)
                    extra = {'rule_db': rule_db}
                    LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)
                except Exception:
                    LOG.exception('Failed to create rule %s.', rule_api.name)

                # If there was an existing rule then the ref count was updated in
                # to_model so it needs to be adjusted down here. Also, update could
                # lead to removal of a Trigger so now is a good time for book-keeping.
                if existing:
                    cleanup_trigger_db_for_rule(existing)
            except Exception as e:
                if self._fail_on_failure:
                    msg = ('Failed to register rule "%s" from pack "%s": %s' %
                           (rule, pack, six.text_type(e)))
                    raise ValueError(msg)

                LOG.exception('Failed registering rule from %s.', rule)
            else:
                registered_count += 1

        return registered_count


def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                   fail_on_failure=False):
    if packs_base_paths:
        assert isinstance(packs_base_paths, list)

    if not packs_base_paths:
        packs_base_paths = content_utils.get_packs_base_paths()

    registrar = RulesRegistrar(use_pack_cache=use_pack_cache,
                               fail_on_failure=fail_on_failure)

    if pack_dir:
        result = registrar.register_from_pack(pack_dir=pack_dir)
    else:
        result = registrar.register_from_packs(base_dirs=packs_base_paths)

    return result
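A hedged usage sketch for the module above: the pack directory path is purely illustrative, and a configured StackStorm database connection is assumed to already exist before `register_rules` is called.

from st2common.bootstrap.rulesregistrar import register_rules

# '/opt/stackstorm/packs/examples' is a hypothetical pack directory; replace it
# with a real pack path. fail_on_failure makes registration errors raise.
count = register_rules(pack_dir='/opt/stackstorm/packs/examples',
                       fail_on_failure=True)
print('Registered %d rules' % count)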
    41.109453
    98
    0.57824
    6,351
    0.768607
    0
    0
    0
    0
    0
    0
    2,013
    0.243616
    b9912797a8155d6800745fe804b93206d95de8ac
    91,819
    py
    Python
    sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
    aiven/azure-sdk-for-python
    8764dc07423beca46ed0b51212d81289d9e52c60
    [ "MIT" ]
    1
    2021-09-07T18:43:20.000Z
    2021-09-07T18:43:20.000Z
    sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
    aiven/azure-sdk-for-python
    8764dc07423beca46ed0b51212d81289d9e52c60
    [ "MIT" ]
    2
    2021-11-03T06:10:36.000Z
    2021-12-01T06:29:39.000Z
    sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/models/_models_py3.py
    msyyc/azure-sdk-for-python
    e2dba75181f8b4336ae57e75aa391322c12c3123
    [ "MIT" ]
    1
    2021-05-19T02:55:10.000Z
    2021-05-19T02:55:10.000Z
    # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): """The Resource model definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.tags = None class Alert(Resource): """An individual alert. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source: Source of alert. Possible values include: "Preset", "User". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status. Possible values include: "None", "Active", "Overridden", "Resolved", "Dismissed". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type creation_time: str :param close_time: dateTime in which alert was closed. :type close_time: str :param modification_time: dateTime in which alert was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the alert status was last modified. 
:type status_modification_time: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional["AlertPropertiesDefinition"] = None, description: Optional[str] = None, source: Optional[Union[str, "AlertSource"]] = None, details: Optional["AlertPropertiesDetails"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, "AlertStatus"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition self.description = description self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): """defines the type of alert. :param type: type of alert. Possible values include: "Budget", "Invoice", "Credit", "Quota", "General", "xCloud", "BudgetForecast". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values include: "Cost", "Usage", "Billing", "System". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert. Possible values include: "CostThresholdExceeded", "UsageThresholdExceeded", "CreditThresholdApproaching", "CreditThresholdReached", "QuotaThresholdApproaching", "QuotaThresholdReached", "MultiCurrency", "ForecastCostThresholdExceeded", "ForecastUsageThresholdExceeded", "InvoiceDueDateApproaching", "InvoiceDueDateReached", "CrossCloudNewDataAvailable", "CrossCloudCollectionError", "GeneralThresholdError". 
:type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, "AlertType"]] = None, category: Optional[Union[str, "AlertCategory"]] = None, criteria: Optional[Union[str, "AlertCriteria"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category = category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): """Alert details. :param time_grain_type: Type of timegrain cadence. Possible values include: "None", "Monthly", "Quarterly", "Annually", "BillingMonth", "BillingQuarter", "BillingAnnual". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type period_start_date: str :param triggered_by: notificationId that triggered this alert. :type triggered_by: str :param resource_group_filter: array of resourceGroups to filter by. :type resource_group_filter: list[object] :param resource_filter: array of resources to filter by. :type resource_filter: list[object] :param meter_filter: array of meters to filter by. :type meter_filter: list[object] :param tag_filter: tags to filter by. :type tag_filter: object :param threshold: notification threshold percentage as a decimal which activated this alert. :type threshold: float :param operator: operator used to compare currentSpend with amount. Possible values include: "None", "EqualTo", "GreaterThan", "GreaterThanOrEqualTo", "LessThan", "LessThanOrEqualTo". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type amount: float :param unit: unit of currency being used. :type unit: str :param current_spend: current spend. :type current_spend: float :param contact_emails: list of emails to contact. :type contact_emails: list[str] :param contact_groups: list of action groups to broadcast to. :type contact_groups: list[str] :param contact_roles: list of contact roles. :type contact_roles: list[str] :param overriding_alert: overriding alert. 
:type overriding_alert: str """ _attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def __init__( self, *, time_grain_type: Optional[Union[str, "AlertTimeGrainType"]] = None, period_start_date: Optional[str] = None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] = None, threshold: Optional[float] = None, operator: Optional[Union[str, "AlertOperator"]] = None, amount: Optional[float] = None, unit: Optional[str] = None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold = threshold self.operator = operator self.amount = amount self.unit = unit self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): """Result of alerts. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the next set of alerts results if there are any. :vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link = None class CommonExportProperties(msrest.serialization.Model): """The common properties of the export. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the export. 
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime """ _validation = { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self, *, delivery_info: "ExportDeliveryInfo", definition: "ExportDefinition", format: Optional[Union[str, "FormatType"]] = None, run_history: Optional["ExportExecutionListResult"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource): """Dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :ivar description: Dimension description. :vartype description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data: :type data: list[str] :ivar total: Total number of data for the dimension. :vartype total: int :ivar category: Dimension category. :vartype category: str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link (url) to the next page of results. 
:vartype next_link: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self, *, data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled = None self.data = data self.total = None self.category = None self.usage_start = None self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model): """Result of listing dimensions. It contains a list of available dimensions. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): """The request payload to update an alert. :param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source: Source of alert. Possible values include: "Preset", "User". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status. Possible values include: "None", "Active", "Overridden", "Resolved", "Dismissed". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type creation_time: str :param close_time: dateTime in which alert was closed. :type close_time: str :param modification_time: dateTime in which alert was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the alert status was last modified. 
:type status_modification_time: str """ _attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional["AlertPropertiesDefinition"] = None, description: Optional[str] = None, source: Optional[Union[str, "AlertSource"]] = None, details: Optional["AlertPropertiesDetails"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, "AlertStatus"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): """The details of the error. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: Error code. :vartype code: str :ivar message: Error message indicating why the operation failed. :vartype message: str """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message = None class ErrorResponse(msrest.serialization.Model): """Error response indicates that the service is not able to process the incoming request. The reason is provided in the error message. Some Error responses: * 429 TooManyRequests - Request is throttled. Retry after waiting for the time specified in the "x-ms-ratelimit-microsoft.consumption-retry-after" header. * 503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for the time specified in the "Retry-After" header. :param error: The details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self, *, error: Optional["ErrorDetails"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): """The Resource model definition. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. 
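# ---------------------------------------------------------------------------
# Illustrative sketch: the DismissAlertPayload above only needs its status
# changed to dismiss an alert. The operations call in the trailing comment is
# an assumption about the client surface, not something defined in this file.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import DismissAlertPayload

dismiss_payload = DismissAlertPayload(status="Dismissed")
# Typically handed to the alerts operations group, e.g. (assumed call shape):
# client.alerts.dismiss(scope, alert_id, dismiss_payload)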
:vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, } def __init__( self, *, e_tag: Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.e_tag = e_tag class Export(ProxyResource): """An export resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, e_tag: Optional[str] = None, format: Optional[Union[str, "FormatType"]] = None, delivery_info: Optional["ExportDeliveryInfo"] = None, definition: Optional["ExportDefinition"] = None, run_history: Optional["ExportExecutionListResult"] = None, schedule: Optional["ExportSchedule"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None self.schedule = schedule class ExportDataset(msrest.serialization.Model): """The definition for data in the export. :param granularity: The granularity of rows in the export. Currently only 'Daily' is supported. 
Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration """ _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["ExportDatasetConfiguration"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): """The export dataset configuration. Allows columns to be selected for the export. If not provided then the export will include all available columns. :param columns: Array of column names to be included in the export. If not provided then the export will include all available columns. The available columns can vary by customer channel (see examples). :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): """The definition of an export. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the export. Note that 'Usage' is equivalent to 'ActualCost' and is applicable to exports that do not yet provide data for charges or amortization for service reservations. Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling data for the export. If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__( self, *, type: Union[str, "ExportType"], timeframe: Union[str, "TimeframeType"], time_period: Optional["ExportTimePeriod"] = None, data_set: Optional["ExportDataset"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): """The destination information for the delivery of the export. To allow access to a storage account, you must register the account's subscription with the Microsoft.CostManagementExports resource provider. This is required once per subscription. When creating an export in the Azure portal, it is done automatically, however API users need to register the subscription. 
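# ---------------------------------------------------------------------------
# Illustrative sketch: how ExportDefinition, ExportDataset and
# ExportDatasetConfiguration compose. The column names are placeholders; as
# the docstring notes, the available columns vary by customer channel.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    ExportDataset,
    ExportDatasetConfiguration,
    ExportDefinition,
)

export_definition = ExportDefinition(
    type="ActualCost",            # 'Usage' is equivalent to 'ActualCost'
    timeframe="MonthToDate",      # time_period only needed when 'Custom'
    data_set=ExportDataset(
        granularity="Daily",
        configuration=ExportDatasetConfiguration(
            columns=["Date", "ResourceGroup", "Cost"],  # placeholder columns
        ),
    ),
)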
    For more information see
    https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The resource id of the storage account where exports will be
     delivered.
    :type resource_id: str
    :param container: Required. The name of the container where exports will be uploaded.
    :type container: str
    :param root_folder_path: The name of the directory where exports will be uploaded.
    :type root_folder_path: str
    """

    _validation = {
        'resource_id': {'required': True},
        'container': {'required': True},
    }

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'container': {'key': 'container', 'type': 'str'},
        'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        resource_id: str,
        container: str,
        root_folder_path: Optional[str] = None,
        **kwargs
    ):
        super(ExportDeliveryDestination, self).__init__(**kwargs)
        self.resource_id = resource_id
        self.container = container
        self.root_folder_path = root_folder_path


class ExportDeliveryInfo(msrest.serialization.Model):
    """The delivery information associated with an export.

    All required parameters must be populated in order to send to Azure.

    :param destination: Required. Has destination for the export being delivered.
    :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination
    """

    _validation = {
        'destination': {'required': True},
    }

    _attribute_map = {
        'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'},
    }

    def __init__(
        self,
        *,
        destination: "ExportDeliveryDestination",
        **kwargs
    ):
        super(ExportDeliveryInfo, self).__init__(**kwargs)
        self.destination = destination


class ExportExecution(Resource):
    """An export execution.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    :param execution_type: The type of the export execution. Possible values include: "OnDemand",
     "Scheduled".
    :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType
    :param status: The last known status of the export execution. Possible values include:
     "Queued", "InProgress", "Completed", "Failed", "Timeout", "NewDataNotAvailable",
     "DataNotAvailable".
    :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus
    :param submitted_by: The identifier for the entity that executed the export. For OnDemand
     executions it is the user email. For scheduled executions it is 'System'.
    :type submitted_by: str
    :param submitted_time: The time when export was queued to be executed.
    :type submitted_time: ~datetime.datetime
    :param processing_start_time: The time when export was picked up to be executed.
    :type processing_start_time: ~datetime.datetime
    :param processing_end_time: The time when the export execution finished.
    :type processing_end_time: ~datetime.datetime
    :param file_name: The name of the exported file.
    :type file_name: str
    :param run_settings: The export settings that were in effect for this execution.
    :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties
    :param error: The details of any error.
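# ---------------------------------------------------------------------------
# Illustrative sketch: delivery settings for an export. The storage account
# resource id and container are placeholders; the subscription that owns the
# account must be registered with Microsoft.CostManagementExports, as the
# ExportDeliveryDestination docstring above explains.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    ExportDeliveryDestination,
    ExportDeliveryInfo,
)

export_delivery_info = ExportDeliveryInfo(
    destination=ExportDeliveryDestination(
        resource_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.Storage"
            "/storageAccounts/examplestore"
        ),
        container="exports",
        root_folder_path="costmanagement",
    ),
)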
:type error: ~azure.mgmt.costmanagement.models.ErrorDetails """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__( self, *, execution_type: Optional[Union[str, "ExecutionType"]] = None, status: Optional[Union[str, "ExecutionStatus"]] = None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] = None, run_settings: Optional["CommonExportProperties"] = None, error: Optional["ErrorDetails"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status = status self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings = run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): """Result of listing the execution history of an export. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: A list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[ExportExecution]'}, } def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): """Result of listing exports. It contains a list of available exports in the scope provided. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] """ _validation = { 'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Export]'}, } def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): """The properties of the export. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param format: The format of the export being delivered. Currently only 'Csv' is supported. Possible values include: "Csv". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the export. 
:type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule, provides an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule """ _validation = { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, delivery_info: "ExportDeliveryInfo", definition: "ExportDefinition", format: Optional[Union[str, "FormatType"]] = None, run_history: Optional["ExportExecutionListResult"] = None, schedule: Optional["ExportSchedule"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): """The start and end date for recurrence schedule. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date of recurrence. :type from_property: ~datetime.datetime :param to: The end date of recurrence. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ExportSchedule(msrest.serialization.Model): """The schedule associated with the export. All required parameters must be populated in order to send to Azure. :param status: The status of the export's schedule. If 'Inactive', the export's schedule is paused. Possible values include: "Active", "Inactive". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence. Possible values include: "Daily", "Weekly", "Monthly", "Annually". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end date of the recurrence. The start date must be in future. If present, the end date must be greater than start date. 
:type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod """ _validation = { 'recurrence': {'required': True}, } _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence: Union[str, "RecurrenceType"], status: Optional[Union[str, "StatusType"]] = None, recurrence_period: Optional["ExportRecurrencePeriod"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence = recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): """The date range for data in the export. This should only be specified with timeFrame set to 'Custom'. The maximum date range is 3 months. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date for export data. :type from_property: ~datetime.datetime :param to: Required. The end date for export data. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ForecastDataset(msrest.serialization.Model): """The definition of data present in the forecast. :param granularity: The granularity of rows in the forecast. Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data in the export. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the forecast. The key of each item in the dictionary is the alias for the aggregated column. forecast can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter """ _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["QueryDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "QueryAggregation"]] = None, filter: Optional["QueryFilter"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): """The definition of a forecast. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the forecast. Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame for pulling data for the forecast. 
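# ---------------------------------------------------------------------------
# Illustrative sketch: assembling a scheduled Export from the pieces sketched
# above (export_definition and export_delivery_info). The recurrence window
# starts in the future, as the ExportSchedule docstring requires; the dates
# are placeholders.
# ---------------------------------------------------------------------------
import datetime

from azure.mgmt.costmanagement.models import (
    Export,
    ExportRecurrencePeriod,
    ExportSchedule,
)

monthly_schedule = ExportSchedule(
    recurrence="Monthly",
    status="Active",
    recurrence_period=ExportRecurrencePeriod(
        from_property=datetime.datetime(2031, 1, 1),
        to=datetime.datetime(2031, 12, 31),
    ),
)

scheduled_export = Export(
    definition=export_definition,
    delivery_info=export_delivery_info,
    schedule=monthly_schedule,
)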
If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for pulling data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if actualCost will be included. :type include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included. :type include_fresh_partial_cost: bool """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self, *, type: Union[str, "ForecastType"], timeframe: Union[str, "ForecastTimeframeType"], time_period: Optional["QueryTimePeriod"] = None, dataset: Optional["ForecastDataset"] = None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): """Each KPI must contain a 'type' and 'enabled' key. :param type: KPI type (Forecast, Budget). Possible values include: "Forecast", "Budget". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related to metric (budget). :type id: str :param enabled: show the KPI in the UI?. :type enabled: bool """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__( self, *, type: Optional[Union[str, "KpiType"]] = None, id: Optional[str] = None, enabled: Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id = id self.enabled = enabled class Operation(msrest.serialization.Model): """A Cost management REST API operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :param display: The object that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay """ _validation = { 'name': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, *, display: Optional["OperationDisplay"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display = display class OperationDisplay(msrest.serialization.Model): """The object that represents the operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar provider: Service provider: Microsoft.CostManagement. 
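# ---------------------------------------------------------------------------
# Illustrative sketch: a ForecastDefinition that aggregates daily actual cost
# for the current month. The aggregated column name "Cost" is an assumption
# about the underlying dataset, not something fixed by these models.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    ForecastDataset,
    ForecastDefinition,
    QueryAggregation,
)

forecast_definition = ForecastDefinition(
    type="ActualCost",
    timeframe="MonthToDate",
    dataset=ForecastDataset(
        granularity="Daily",
        aggregation={"totalCost": QueryAggregation(name="Cost", function="Sum")},
    ),
    include_actual_cost=True,
    include_fresh_partial_cost=False,
)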
:vartype provider: str :ivar resource: Resource on which the operation is performed: Dimensions, Query. :vartype resource: str :ivar operation: Operation type: Read, write, delete, etc. :vartype operation: str """ _validation = { 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, } _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None self.operation = None class OperationListResult(msrest.serialization.Model): """Result of listing cost management operations. It contains a list of operations and a URL link to get the next set of results. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: List of cost management operations supported by the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the next set of operation list results if there are any. :vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None class PivotProperties(msrest.serialization.Model): """Each pivot must contain a 'type' and 'name'. :param type: Data type to show in view. Possible values include: "Dimension", "TagKey". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show in view. :type name: str """ _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, "PivotType"]] = None, name: Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name = name class QueryAggregation(msrest.serialization.Model): """The aggregation expression to be used in the query. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the column to aggregate. :type name: str :param function: Required. The name of the aggregation function to use. Possible values include: "Sum". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType """ _validation = { 'name': {'required': True}, 'function': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__( self, *, name: str, function: Union[str, "FunctionType"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function = function class QueryColumn(msrest.serialization.Model): """QueryColumn. :param name: The name of column. :type name: str :param type: The type of column. :type type: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, *, name: Optional[str] = None, type: Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type = type class QueryComparisonExpression(msrest.serialization.Model): """The comparison expression to be used in the query. All required parameters must be populated in order to send to Azure. 
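# ---------------------------------------------------------------------------
# Illustrative sketch: KPI and pivot settings of the kind the View model
# (defined later in this module) accepts. The budget resource id, dimension
# names and tag key are placeholders / assumptions.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import KpiProperties, PivotProperties

view_kpis = [
    KpiProperties(type="Forecast", enabled=True),
    KpiProperties(
        type="Budget",
        id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Consumption/budgets/example-budget"
        ),
        enabled=True,
    ),
]

view_pivots = [
    PivotProperties(type="Dimension", name="ServiceName"),
    PivotProperties(type="Dimension", name="ResourceGroupName"),
    PivotProperties(type="TagKey", name="environment"),
]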
:param name: Required. The name of the column to use in comparison. :type name: str :param operator: Required. The operator to use for comparison. Possible values include: "In", "Contains". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use for comparison. :type values: list[str] """ _validation = { 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__( self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values = values class QueryDataset(msrest.serialization.Model): """The definition of data present in the query. :param granularity: The granularity of rows in the query. Possible values include: "Daily". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data in the export. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the query. The key of each item in the dictionary is the alias for the aggregated column. Query can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by expression to use in the query. Query can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "GranularityType"]] = None, configuration: Optional["QueryDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "QueryAggregation"]] = None, grouping: Optional[List["QueryGrouping"]] = None, filter: Optional["QueryFilter"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): """The configuration of dataset in the query. :param columns: Array of column names to be included in the query. Any valid query column name is allowed. If not provided, then query includes all columns. :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): """The definition of a query. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the query. 
Possible values include: "Usage", "ActualCost", "AmortizedCost". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling data for the query. If custom, then a specific time period must be provided. Possible values include: "MonthToDate", "BillingMonthToDate", "TheLastMonth", "TheLastBillingMonth", "WeekToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def __init__( self, *, type: Union[str, "ExportType"], timeframe: Union[str, "TimeframeType"], time_period: Optional["QueryTimePeriod"] = None, dataset: Optional["QueryDataset"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): """The filter expression to be used in the export. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["QueryFilter"]] = None, or_property: Optional[List["QueryFilter"]] = None, not_property: Optional["QueryFilter"] = None, dimension: Optional["QueryComparisonExpression"] = None, tag: Optional["QueryComparisonExpression"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): """The group by expression to be used in the query. All required parameters must be populated in order to send to Azure. :param type: Required. Has type of the column to group. Possible values include: "Tag", "Dimension". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of the column to group. 
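# ---------------------------------------------------------------------------
# Illustrative sketch: a QueryDefinition that returns daily cost for the
# current month, grouped by resource group and filtered to one service. The
# column and dimension names are illustrative assumptions.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    QueryAggregation,
    QueryComparisonExpression,
    QueryDataset,
    QueryDefinition,
    QueryFilter,
    QueryGrouping,
)

cost_query = QueryDefinition(
    type="ActualCost",
    timeframe="MonthToDate",
    dataset=QueryDataset(
        granularity="Daily",
        aggregation={"totalCost": QueryAggregation(name="Cost", function="Sum")},
        grouping=[QueryGrouping(type="Dimension", name="ResourceGroupName")],
        filter=QueryFilter(
            dimension=QueryComparisonExpression(
                name="ServiceName",
                operator="In",
                values=["Virtual Machines"],
            ),
        ),
    ),
)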
:type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Union[str, "QueryColumnType"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name = name class QueryResult(Resource): """Result of query. It contains all columns listed under groupings and aggregation. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :ivar tags: A set of tags. Resource tags. :vartype tags: dict[str, str] :param next_link: The link (url) to the next page of results. :type next_link: str :param columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type rows: list[list[object]] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def __init__( self, *, next_link: Optional[str] = None, columns: Optional[List["QueryColumn"]] = None, rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns = columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): """The start and end date for pulling data for the query. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date to pull data from. :type from_property: ~datetime.datetime :param to: Required. The end date to pull data to. :type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): """The aggregation expression to be used in the report. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the column to aggregate. :type name: str :param function: Required. The name of the aggregation function to use. Possible values include: "Sum". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType """ _validation = { 'name': {'required': True}, 'function': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__( self, *, name: str, function: Union[str, "FunctionType"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): """The comparison expression to be used in the report. All required parameters must be populated in order to send to Azure. :param name: Required. 
The name of the column to use in comparison. :type name: str :param operator: Required. The operator to use for comparison. Possible values include: "In", "Contains". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use for comparison. :type values: list[str] """ _validation = { 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__( self, *, name: str, operator: Union[str, "OperatorType"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values = values class ReportConfigDataset(msrest.serialization.Model): """The definition of data present in the report. :param granularity: The granularity of rows in the report. Possible values include: "Daily", "Monthly". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data in the report. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the report. The key of each item in the dictionary is the alias for the aggregated column. Report can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to use in the report. Report can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__( self, *, granularity: Optional[Union[str, "ReportGranularityType"]] = None, configuration: Optional["ReportConfigDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None, grouping: Optional[List["ReportConfigGrouping"]] = None, sorting: Optional[List["ReportConfigSorting"]] = None, filter: Optional["ReportConfigFilter"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): """The definition of data present in the report. :param granularity: The granularity of rows in the report. Possible values include: "Daily", "Monthly". 
:type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data in the report. The configuration will be ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in the report. The key of each item in the dictionary is the alias for the aggregated column. Report can have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to use in the report. Report can have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated """ _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity: Optional[Union[str, "ReportGranularityType"]] = None, configuration: Optional["ReportConfigDatasetConfiguration"] = None, aggregation: Optional[Dict[str, "ReportConfigAggregation"]] = None, grouping: Optional[List["ReportConfigGrouping"]] = None, sorting: Optional[List["ReportConfigSorting"]] = None, filter: Optional["ReportConfigFilterAutoGenerated"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): """The configuration of dataset in the report. :param columns: Array of column names to be included in the report. Any valid report column name is allowed. If not provided, then report includes all columns. :type columns: list[str] """ _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): """The definition of a report config. All required parameters must be populated in order to send to Azure. :param type: Required. The type of the report. Usage represents actual usage, forecast represents forecasted data and UsageAndForecast represents both usage and forecasted data. Actual usage and forecasted data can be differentiated based on dates. Possible values include: "Usage". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame for pulling data for the report. If custom, then a specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate", "YearToDate", "Custom". 
:type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated """ _validation = { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type: Union[str, "ReportType"], timeframe: Union[str, "ReportTimeframeType"], time_period: Optional["ReportConfigTimePeriod"] = None, dataset: Optional["ReportConfigDatasetAutoGenerated"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): """The filter expression to be used in the report. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["ReportConfigFilter"]] = None, or_property: Optional[List["ReportConfigFilter"]] = None, not_property: Optional["ReportConfigFilter"] = None, dimension: Optional["ReportConfigComparisonExpression"] = None, tag: Optional["ReportConfigComparisonExpression"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): """The filter expression to be used in the report. :param and_property: The logical "AND" expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical "OR" expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical "NOT" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for a dimension. 
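# ---------------------------------------------------------------------------
# Illustrative sketch: a minimal month-to-date ReportConfigDefinition. The
# aggregated column name "PreTaxCost" is an assumption about the underlying
# dataset.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    ReportConfigAggregation,
    ReportConfigDatasetAutoGenerated,
    ReportConfigDefinition,
)

report_config = ReportConfigDefinition(
    type="Usage",
    timeframe="MonthToDate",
    dataset=ReportConfigDatasetAutoGenerated(
        granularity="Monthly",
        aggregation={
            "totalCost": ReportConfigAggregation(name="PreTaxCost", function="Sum"),
        },
    ),
)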
:type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression """ _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None, or_property: Optional[List["ReportConfigFilterAutoGenerated"]] = None, not_property: Optional["ReportConfigFilterAutoGenerated"] = None, dimension: Optional["ReportConfigComparisonExpression"] = None, tag: Optional["ReportConfigComparisonExpression"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): """The group by expression to be used in the report. All required parameters must be populated in order to send to Azure. :param type: Required. Has type of the column to group. Possible values include: "Tag", "Dimension". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of the column to group. This version supports subscription lowest possible grain. :type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, type: Union[str, "ReportConfigColumnType"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name = name class ReportConfigSorting(msrest.serialization.Model): """The order by expression to be used in the report. All required parameters must be populated in order to send to Azure. :param direction: Direction of sort. Possible values include: "Ascending", "Descending". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of the column to sort. :type name: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *, name: str, direction: Optional[Union[str, "ReportConfigSortingDirection"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): """The start and end date for pulling data for the report. All required parameters must be populated in order to send to Azure. :param from_property: Required. The start date to pull data from. :type from_property: ~datetime.datetime :param to: Required. The end date to pull data to. 
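# ---------------------------------------------------------------------------
# Illustrative sketch: a ReportConfigDataset of the shape the View model
# (below) accepts for its query - grouped by resource group, sorted by cost,
# and filtered on a tag. Column, dimension and tag names are assumptions.
# ---------------------------------------------------------------------------
from azure.mgmt.costmanagement.models import (
    ReportConfigComparisonExpression,
    ReportConfigDataset,
    ReportConfigFilter,
    ReportConfigGrouping,
    ReportConfigSorting,
)

view_dataset = ReportConfigDataset(
    granularity="Daily",
    grouping=[ReportConfigGrouping(type="Dimension", name="ResourceGroupName")],
    sorting=[ReportConfigSorting(name="PreTaxCost", direction="Descending")],
    filter=ReportConfigFilter(
        tag=ReportConfigComparisonExpression(
            name="environment",
            operator="In",
            values=["production"],
        ),
    ),
)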
:type to: ~datetime.datetime """ _validation = { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class View(ProxyResource): """States and configurations of Cost Analysis. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id. :vartype id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param e_tag: eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. :type e_tag: str :param display_name: User input name of the view. Required. :type display_name: str :param scope: Cost Management scope to save the view on. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str :ivar created_on: Date the user created this view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date when the user last modified this view. :vartype modified_on: ~datetime.datetime :param chart: Chart type of the main view in Cost Analysis. Required. Possible values include: "Area", "Line", "StackedColumn", "GroupedColumn", "Table". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over time. Possible values include: "true", "false". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when displaying costs. Possible values include: "ActualCost", "AmortizedCost", "AHUB". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to show in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views in the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the report. Usage represents actual usage, forecast represents forecasted data and UsageAndForecast represents both usage and forecasted data. Actual usage and forecasted data can be differentiated based on dates. Possible values include: "Usage". 
:type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for pulling data for the report. If custom, then a specific time period must be provided. Possible values include: "WeekToDate", "MonthToDate", "YearToDate", "Custom". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self, *, e_tag: Optional[str] = None, display_name: Optional[str] = None, scope: Optional[str] = None, chart: Optional[Union[str, "ChartType"]] = None, accumulated: Optional[Union[str, "AccumulatedType"]] = None, metric: Optional[Union[str, "MetricType"]] = None, kpis: Optional[List["KpiProperties"]] = None, pivots: Optional[List["PivotProperties"]] = None, type_properties_query_type: Optional[Union[str, "ReportType"]] = None, timeframe: Optional[Union[str, "ReportTimeframeType"]] = None, time_period: Optional["ReportConfigTimePeriod"] = None, dataset: Optional["ReportConfigDataset"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope = scope self.created_on = None self.modified_on = None self.chart = chart self.accumulated = accumulated self.metric = metric self.kpis = kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): """Result of listing views. It contains a list of available views. Variables are only populated by the server, and will be ignored when sending a request. :ivar value: The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to the next page of results. 
:vartype next_link: str """ _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None self.next_link = None
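The keyword-only constructors above are enough to assemble a report configuration by hand. A minimal sketch (not from the source) building a custom time period, a definition, a grouping and a sorting; the string values follow the "Possible values include" notes in the docstrings, while "ResourceGroup" and "UsageDate" are placeholder column names, not values taken from the SDK.

import datetime

time_period = ReportConfigTimePeriod(
    from_property=datetime.datetime(2021, 1, 1),
    to=datetime.datetime(2021, 1, 31),
)
definition = ReportConfigDefinition(
    type="Usage",          # ReportType
    timeframe="Custom",    # a custom timeframe is the case that needs time_period
    time_period=time_period,
)
grouping = ReportConfigGrouping(type="Dimension", name="ResourceGroup")  # placeholder name
sorting = ReportConfigSorting(name="UsageDate", direction="Ascending")   # placeholder column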
    38.16251
    498
    0.649005
    90,996
    0.991037
    0
    0
    0
    0
    0
    0
    57,015
    0.62095
    b9921ebf7fdd9b5fb1dd763092a97ae1888e730f
    3,860
    py
    Python
    test/test_simple_compression.py
    jayvdb/brotlipy
    ffddf2ea5adc584c8c353d246bb1077b7e781b63
    [ "MIT" ]
    null
    null
    null
    test/test_simple_compression.py
    jayvdb/brotlipy
    ffddf2ea5adc584c8c353d246bb1077b7e781b63
    [ "MIT" ]
    null
    null
    null
    test/test_simple_compression.py
    jayvdb/brotlipy
    ffddf2ea5adc584c8c353d246bb1077b7e781b63
    [ "MIT" ]
    null
    null
    null
    # -*- coding: utf-8 -*- """ test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks. """ import brotli import pytest from hypothesis import given from hypothesis.strategies import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): """ Roundtripping data through the compressor works correctly. """ with open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): """ Confirm that the streaming compressor works as expected. """ compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): """ Confirm that the streaming compressor works as expected, including flushes after each chunk. """ compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( "params", [ {"mode": 52}, {"quality": 52}, {"lgwin": 52}, {"lgblock": 52}, ] ) @pytest.mark.parametrize("exception_cls", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls): with pytest.raises(exception_cls): brotli.Compressor(**params)
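For quick reference, a minimal round-trip sketch using the same brotlipy API the tests above exercise (brotli.compress / brotli.decompress plus the streaming Compressor); the sample data is arbitrary.

import brotli

data = b"hello brotli " * 512
assert brotli.decompress(brotli.compress(data)) == data

# streaming variant, mirroring test_streaming_compression above
c = brotli.Compressor(quality=5)
chunks = [c.compress(data[i:i + 256]) for i in range(0, len(data), 256)]
chunks.append(c.finish())
assert brotli.decompress(b"".join(chunks)) == data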
    29.692308
    78
    0.615803
    0
    0
    0
    0
    3,251
    0.842228
    0
    0
    459
    0.118912
    b992a4ec960bcf3e39ba5a1bb6a8cd2e68be293e
    1,987
    py
    Python
    wexapi/models/ticker.py
    madmis/wexapi
    f5b1b9b566f767bca7d8fad1f08c3d1bca42355a
    [ "MIT" ]
    3
    2018-06-08T12:45:04.000Z
    2018-08-02T11:09:11.000Z
    wexapi/models/ticker.py
    madmis/wexapi
    f5b1b9b566f767bca7d8fad1f08c3d1bca42355a
    [ "MIT" ]
    null
    null
    null
    wexapi/models/ticker.py
    madmis/wexapi
    f5b1b9b566f767bca7d8fad1f08c3d1bca42355a
    [ "MIT" ]
    null
    null
    null
    from decimal import Decimal class Ticker(object): def __init__( self, high: float, low: float, avg: float, vol: float, vol_cur: int, last: float, buy: float, sell: float, updated: int, ): self.high = high self.low = low self.avg = avg self.vol = vol self.vol_cur = vol_cur self.last = last self.buy = buy self.sell = sell self.updated = updated @property def high(self) -> Decimal: return self._high @high.setter def high(self, value: float): self._high = Decimal(value) @property def low(self) -> Decimal: return self._low @low.setter def low(self, value: float): self._low = Decimal(value) @property def avg(self) -> Decimal: return self._avg @avg.setter def avg(self, value: float): self._avg = Decimal(value) @property def vol(self) -> Decimal: return self._vol @vol.setter def vol(self, value: float): self._vol = Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def last(self) -> Decimal: return self._last @last.setter def last(self, value: float): self._last = Decimal(value) @property def buy(self) -> Decimal: return self._buy @buy.setter def buy(self, value: float): self._buy = Decimal(value) @property def sell(self) -> Decimal: return self._sell @sell.setter def sell(self, value: float): self._sell = Decimal(value) @property def updated(self) -> int: return self._updated @updated.setter def updated(self, value: int): self._updated = int(value)
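A short usage sketch (not part of the source file): the property setters above convert whatever numeric value is passed in, so attribute access always yields Decimal (or int for updated). The sample figures are arbitrary.

from decimal import Decimal

ticker = Ticker(
    high=0.07, low=0.06, avg=0.065,
    vol=1000.0, vol_cur=15,
    last=0.066, buy=0.067, sell=0.065,
    updated=1530000000,
)
assert isinstance(ticker.high, Decimal)   # converted by the setter
assert isinstance(ticker.updated, int)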
    20.27551
    38
    0.545546
    1,956
    0.984399
    0
    0
    1,332
    0.670357
    0
    0
    0
    0
    b99506d26f9716e398b3a3724d393185a9900942
    1,216
    py
    Python
    hard-gists/98bb452dc14e8c40e403/snippet.py
    jjhenkel/dockerizeme
    eaa4fe5366f6b9adf74399eab01c712cacaeb279
    [ "Apache-2.0" ]
    21
    2019-07-08T08:26:45.000Z
    2022-01-24T23:53:25.000Z
    hard-gists/98bb452dc14e8c40e403/snippet.py
    jjhenkel/dockerizeme
    eaa4fe5366f6b9adf74399eab01c712cacaeb279
    [ "Apache-2.0" ]
    5
    2019-06-15T14:47:47.000Z
    2022-02-26T05:02:56.000Z
    hard-gists/98bb452dc14e8c40e403/snippet.py
    jjhenkel/dockerizeme
    eaa4fe5366f6b9adf74399eab01c712cacaeb279
    [ "Apache-2.0" ]
    17
    2019-05-16T03:50:34.000Z
    2021-01-14T14:35:12.000Z
from scryptos import *

p1 = 32581479300404876772405716877547
p2 = 27038194053540661979045656526063
p3 = 26440615366395242196516853423447
n = p1*p2*p3
e = 3

c = int(open("flag.enc", "rb").read().encode("hex"), 16)

# from User's Guide to PARI/GP, nth_root function
sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error("Impossible case in sqrtn"));if(type(x)=="t_INTMOD"||type(x)=="t_PADIC",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}'

c1 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p1)]))
c2 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p2)]))
c3 = eval(parigp([sqrtnall, "Vec(liftall(sqrtnall(Mod(%d, %d), 3)))" % (c, p3)]))

"""
c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629]
c2 = [19616973567618515464515107624812]
c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946]
"""

for x in c1:
    for y in c2:
        for z in c3:
            crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)])
            d = hex(crt, 2)[2:].decode("hex")
            if "0ctf" in d:
                print d[d.find("0ctf"):].strip()
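The decryption above (a Python 2 script) leans on scryptos' chinese_remainder_theorem helper. As an illustration only, not the library code, the same step can be written with the standard constructive CRT; pow(x, -1, mod) needs Python 3.8 or later.

from functools import reduce

def chinese_remainder(pairs):
    # pairs: list of (residue, modulus) with pairwise coprime moduli
    product = reduce(lambda a, b: a * b, (m for _, m in pairs))
    total = 0
    for residue, modulus in pairs:
        partial = product // modulus
        total += residue * partial * pow(partial, -1, modulus)
    return total % product

# x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7)  ->  23
assert chinese_remainder([(2, 3), (3, 5), (2, 7)]) == 23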
    39.225806
    224
    0.663651
    0
    0
    0
    0
    0
    0
    0
    0
    678
    0.557566
    b9954284c404c9a5aed225965d5006c8735af349
    1,717
    py
    Python
    musa/migrations/0001_initial.py
    ccsreenidhin/Music-Web-Django
    9b8286914f9099b9ed56c712c7ca384846f189d1
    [ "MIT" ]
    null
    null
    null
    musa/migrations/0001_initial.py
    ccsreenidhin/Music-Web-Django
    9b8286914f9099b9ed56c712c7ca384846f189d1
    [ "MIT" ]
    null
    null
    null
    musa/migrations/0001_initial.py
    ccsreenidhin/Music-Web-Django
    9b8286914f9099b9ed56c712c7ca384846f189d1
    [ "MIT" ]
    null
    null
    null
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-29 06:43
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import musa.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MusicCollection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=70, null=True)),
                ('document', models.FileField(upload_to=musa.models.get_upload_path)),
                ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fullname', models.CharField(blank=True, max_length=70)),
                ('favourite_music', models.CharField(blank=True, max_length=70)),
                ('about', models.TextField(blank=True, max_length=300)),
                ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
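The migration pins down the shape of the original models. A hypothetical musa/models.py that would generate it (field options are read straight from the CreateModel operations; get_upload_path is a stand-in for the real helper the migration references):

from django.conf import settings
from django.db import models

def get_upload_path(instance, filename):
    # placeholder for the real upload-path callable in musa.models
    return filename

class MusicCollection(models.Model):
    title = models.CharField(blank=True, max_length=70, null=True)
    document = models.FileField(upload_to=get_upload_path)
    uploaded_at = models.DateTimeField(auto_now_add=True, null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

class UserProfile(models.Model):
    fullname = models.CharField(blank=True, max_length=70)
    favourite_music = models.CharField(blank=True, max_length=70)
    about = models.TextField(blank=True, max_length=300)
    picture = models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)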
    40.880952
    121
    0.633663
    1,476
    0.859639
    0
    0
    0
    0
    0
    0
    245
    0.142691
    b9957182927ee0480e35dd837a4d9ee2d8587462
    3,207
    py
    Python
    nuitka/codegen/LoopCodes.py
    RESP3CT88/Nuitka
    0fcc25d9f00c4fc78c79a863c4b7987f573962e1
    [ "Apache-2.0" ]
    1
    2021-05-25T12:48:28.000Z
    2021-05-25T12:48:28.000Z
    venv/Lib/site-packages/nuitka/codegen/LoopCodes.py
    matthijsvanvliet/raytracing-python
    73d692b47330ab94eedde579a51063e3a907e92b
    [ "MIT" ]
    null
    null
    null
    venv/Lib/site-packages/nuitka/codegen/LoopCodes.py
    matthijsvanvliet/raytracing-python
    73d692b47330ab94eedde579a51063e3a907e92b
    [ "MIT" ]
    null
    null
    null
    # Copyright 2021, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Loop codes. Code generation for loops, breaking them, or continuing them. In Nuitka, there are no for-loops or while-loops at this point. They have been re-formulated in a simpler loop without a condition, and statements there-in that break under certain conditions. See Developer Manual for how the CPython loops are mapped to these nodes. """ from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions used for generation all accept statement, but this one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): # Functions used for generation all accept statement, but this one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel("loop_start") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel("loop_end") else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition="CONSIDER_THREADING() == false", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not None: getLabelCode(loop_end_label, emit)
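As the module docstring notes, Nuitka has no for-loops or while-loops at this level: every loop becomes an unconditional loop whose body breaks when the negated condition fires. A tiny Python-level illustration of that re-formulation (not Nuitka source):

def countdown(n):
    # original CPython form would be:  while n > 0: n -= 1
    while True:
        if not (n > 0):
            break
        n -= 1
    return n

assert countdown(3) == 0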
    34.858696
    111
    0.752728
    0
    0
    0
    0
    0
    0
    0
    0
    1,505
    0.469286
    b995831c9a98c5b05882c5bbcc4b241cd51503bd
    4,837
    py
    Python
    3_module/C_BloomFilter.py
    L4mborg1n1-D14610/Algoritms_and_DataStructure
    f61b7434dbc600da02e8ec38648fa84beb160f17
    [ "Xnet", "X11", "CECILL-B" ]
    null
    null
    null
    3_module/C_BloomFilter.py
    L4mborg1n1-D14610/Algoritms_and_DataStructure
    f61b7434dbc600da02e8ec38648fa84beb160f17
    [ "Xnet", "X11", "CECILL-B" ]
    null
    null
    null
    3_module/C_BloomFilter.py
    L4mborg1n1-D14610/Algoritms_and_DataStructure
    f61b7434dbc600da02e8ec38648fa84beb160f17
    [ "Xnet", "X11", "CECILL-B" ]
    null
    null
    null
import math
from sys import exit

# n is the approximate number of elements and P the false-positive probability; the
# structure size is then m = -(n * log2 P) / ln 2 and the number of hash functions is -log2 P.
# The hash functions have the form (((i + 1) * x + p_(i+1)) mod M) mod m, where x is the key,
# i is the hash-function index, p_i is the i-th prime, and M is the 31st Mersenne number,
# M = 2^31 - 1 = 2 147 483 647, which is prime.
# Computing the hashes requires the first k primes; they are computed once in the BloomFilter
# constructor and kept in the data structure.
# A bit array of size m is also needed; Python has no built-in bit array, so a bytearray is
# used, wrapped for convenience in a small helper class whose methods set a given bit to 1,
# check whether a given bit is 1, and print (return) the array itself.

Mersen_31 = 2147483647


class BitArray:
    def __init__(self, size):
        self.__array = bytearray(int(math.ceil(size / 8)))
        self.__size = size

    def add_bit(self, i):
        # bit i lives in byte i // 8 at position i % 8
        self.__array[i // 8] |= 2 ** (7 - (i % 8))

    def check_bit(self, i):
        if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:
            return False
        else:
            return True

    def print(self):
        array_str = ""
        for byte in self.__array:
            _line = str(bin(byte))[2:]
            if len(_line) != 8:
                _line = '0' * (8 - len(_line)) + _line
            array_str += _line
        return array_str[:self.__size]


class BloomFilter:
    def __init__(self, n: int, p: float):
        self.size = int(-round(n * math.log2(p) / math.log(2)))
        self.hash_numbers = int(-round(math.log2(p)))
        self.__prime_numbers = list()
        self.__get_prime(self.hash_numbers + 1)
        self.__bitarray = BitArray(self.size)

    def __get_prime(self, prime_size):
        # checking every number for primality is slow; simplify it a little:
        # step by 2 starting from 3, and test each candidate for divisibility by the
        # primes already found (skipping 2, since only odd candidates are considered)
        if prime_size == 1:
            self.__prime_numbers.append(2)
            return
        self.__prime_numbers.append(2)
        i = 3
        while len(self.__prime_numbers) < prime_size:
            j = 1
            prime_flag = True
            while j < len(self.__prime_numbers):
                if (i % self.__prime_numbers[j]) == 0:
                    prime_flag = False
                    break
                j += 1
            if prime_flag:
                self.__prime_numbers.append(i)
            i += 2

    def __get_hash(self, x, i):
        return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size

    def add(self, key: int):
        i = 0
        while i < self.hash_numbers:
            self.__bitarray.add_bit(self.__get_hash(key, i))
            i += 1

    def search(self, key: int):
        i = 0
        while i < self.hash_numbers:
            if not self.__bitarray.check_bit(self.__get_hash(key, i)):
                return False
            i += 1
        return True

    def print(self):
        return self.__bitarray.print()


bloom_filter = 0
while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        else:
            if line[0] == "set":
                try:
                    elements_number = int(line[1])
                    probability = float(line[2])
                    if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
                        print("error")
                        continue
                    bloom_filter = BloomFilter(elements_number, probability)
                    if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
                        print("error")
                        continue
                    break
                except TypeError:
                    print("error")
                    continue
            else:
                print("error")
                continue
    except EOFError:
        exit()

print(bloom_filter.size, bloom_filter.hash_numbers)

while True:
    try:
        line = input().split()
        if len(line) == 0:
            continue
        elif line[0] == "print":
            print(bloom_filter.print())
        elif (line[0] == "add") & (line[1].isnumeric()):
            bloom_filter.add(int(line[1]))
        elif (line[0] == "search") & (line[1].isnumeric()):
            print(int(bloom_filter.search(int(line[1]))))
        else:
            print("error")
    except EOFError:
        break
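Bypassing the stdin-driven command loop, the classes above can also be used directly; the constructor derives the bit-array size and hash count from the formulas in the comments. A rough usage sketch (not from the source):

bf = BloomFilter(100, 0.01)
print(bf.size, bf.hash_numbers)   # derived bit-array size and hash-function count
bf.add(42)
print(bf.search(42))              # True
print(bf.search(43))              # False, barring a false positive (probability about p)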
    34.798561
    116
    0.551995
    2,671
    0.46975
    0
    0
    0
    0
    0
    0
    2,126
    0.373901
    b996ad8d5f407e5b1769d9b50ca7be5705a211e8
    1,937
    py
    Python
    pyzmq/examples/pubsub/subscriber.py
    Surfndez/source-publish
    c3838b303c1a0806f21cd4e8d8c207015b3ce9c8
    [ "Intel" ]
    null
    null
    null
    pyzmq/examples/pubsub/subscriber.py
    Surfndez/source-publish
    c3838b303c1a0806f21cd4e8d8c207015b3ce9c8
    [ "Intel" ]
    1
    2021-01-21T17:43:33.000Z
    2021-01-21T17:43:33.000Z
    pyzmq/examples/pubsub/subscriber.py
    Surfndez/source-publish
    c3838b303c1a0806f21cd4e8d8c207015b3ce9c8
    [ "Intel" ]
    null
    null
    null
    """A test that subscribes to NumPy arrays. Uses REQ/REP (on PUB/SUB socket + 1) to synchronize """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy def sync(connect_to): # use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv) != 3: print 'usage: subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to = sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must be integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print "Receiving arrays..." for i in range(array_count): a = s.recv_pyobj() print " Done." end = time.clock() elapsed = (end - start) * 1000000 if elapsed == 0: elapsed = 1 throughput = (1000000.0 * float (array_count)) / float (elapsed) message_size = a.nbytes megabits = float (throughput * message_size * 8) / 1000000 print "message size: %.0f [B]" % (message_size, ) print "array count: %.0f" % (array_count, ) print "mean throughput: %.0f [msg/s]" % (throughput, ) print "mean throughput: %.3f [Mb/s]" % (megabits, ) time.sleep(1.0) if __name__ == "__main__": main()
    25.826667
    78
    0.545173
    0
    0
    0
    0
    0
    0
    0
    0
    694
    0.358286
    b997c70668ace413cc27502883f737e007e56239
    1,006
    py
    Python
    Doc/includes/sqlite3/load_extension.py
    livioso/cpython
    077061a7b24917aaf31057885c69919c5a553c88
    [ "PSF-2.0" ]
    36
    2019-06-07T20:44:06.000Z
    2022-03-23T06:19:43.000Z
    Doc/includes/sqlite3/load_extension.py
    livioso/cpython
    077061a7b24917aaf31057885c69919c5a553c88
    [ "PSF-2.0" ]
    49
    2016-02-29T17:59:52.000Z
    2019-05-05T04:59:26.000Z
    Doc/includes/sqlite3/load_extension.py
    livioso/cpython
    077061a7b24917aaf31057885c69919c5a553c88
    [ "PSF-2.0" ]
    28
    2019-06-27T04:11:27.000Z
    2022-03-11T06:27:44.000Z
import sqlite3

con = sqlite3.connect(":memory:")

# enable extension loading
con.enable_load_extension(True)

# Load the fulltext search extension
con.execute("select load_extension('./fts3.so')")

# alternatively you can load the extension using an API call:
# con.load_extension("./fts3.so")

# disable extension loading again
con.enable_load_extension(False)

# example from SQLite wiki
con.execute("create virtual table recipe using fts3(name, ingredients)")
con.executescript("""
    insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');
    insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');
    insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');
    insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');
    """)
for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"):
    print(row)
    37.259259
    104
    0.744533
    0
    0
    0
    0
    0
    0
    0
    0
    801
    0.796223
    b9982b7f935a0931c3a9dc4e8ec48b12b5523acb
    22,060
    py
    Python
    lingvo/core/inference_graph_exporter.py
    RunzheYang/lingvo
    1291e29812f9ee9836f9cacbb05db9ec6b095234
    [ "Apache-2.0" ]
    1
    2021-09-02T18:04:13.000Z
    2021-09-02T18:04:13.000Z
    lingvo/core/inference_graph_exporter.py
    RunzheYang/lingvo
    1291e29812f9ee9836f9cacbb05db9ec6b095234
    [ "Apache-2.0" ]
    null
    null
    null
    lingvo/core/inference_graph_exporter.py
    RunzheYang/lingvo
    1291e29812f9ee9836f9cacbb05db9ec6b095234
    [ "Apache-2.0" ]
    null
    null
    null
    # Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility for exporting an InferenceGraph proto from model params.""" import collections import contextlib import re import lingvo.compat as tf from lingvo.core import base_model from lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core import py_utils import six from google.protobuf import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options to configure inference on the device. # device: Device to infer on. # retain_device_placement: If true, the specified device in the generated # inference graph nodes will be retained. Otherwise, the specified device # will be cleared, so that the runtime can choose automatically. # var_options: Options on handling variables. For TPUs, variables can be # either placed on device through 'ON_DEVICE' option, or treated as # constants with AS_CONSTANTS. # gen_init_op: Whether to serialize initialization ops for the device. For TPUs, # servers can be initialized globally once, in which case this should be # turned off to avoid tripping initialization checks. # dtype_override: Whether to override the dtype to use for activations and # weights in the model. Options supported are None or tf.bfloat16. 
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): """Disallow const gauranteeing variable with-in scope.""" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): """Treats all variables under this scope as constants.""" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): """Returns a dictionary with name -> tf.Variable() mapping.""" vars_dict = {} for v in variables: vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): """Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph. """ # Build the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1] # Rewrite fetches and feeds to map to their tensor name instead of # Tensor instance. named_fetches = {k: v.name for k, v in fetches.items() if v is not None} named_feeds = {k: v.name for k, v in feeds.items() if v is not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): """Gets output op names from an inference graph. Args: graph: The tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an optional list of subgraph names. If provided, only output ops from these subgraphs are preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes: a Python bool, default to True. Preserves nodes colocating with the closure of output ops in the returned array. preserve_saver_restore_nodes: a Python bool, default to False. 
Preserves nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra op names to preserve as long as they present in the graph. Returns: Array of tf op names that should be preserved in the graph. """ output_op_names = set() def _GetOpName(tensor_or_op_name): """Returns the op name of the given node name.""" # Tensor names have format <op_name>:<output_index>. Some inference # graphs put tensors and others put ops in the feeds/fetches (depends # on how it is used). We differentiate here. We still do the lookup in # the graph to sanity check (versus relying on the text manipulation). # If this logic ever breaks, TensorFlow will raise a ValueError with # a description of the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes feeds aren't connected to any outputs but keep them in the graph # anyways to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for restoring is preserved. saver_def.save_tensor_name is # skipped because it's only used for saving. saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name) if not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) # We also need to preserve any nodes that are used for colocation. # E.g., a node may have this attr: # attr { # key: "_class" # value { # list { # s: "loc:@inference/embedding_lookup/Read/ReadVariableOp" # } # } # } # # In this case, we need to make sure the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc): It's possible that it's better to fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node in graph_def.node] for node in graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not in reachable_vars: # Skip nodes that cannot be reached from the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): """Tests whether param_name is contained in param_obj.""" if not param_obj: return for k, _ in param_obj.IterParams(): if k == param_name: return True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): """Freezes a graph from a checkpoint. Args: graph: tf.Graph. saver: The tf.Saver to use for restoration. checkpoint: The checkpoint to restore. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. 
""" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): """Default initializes a graph and freezes it. Args: graph: tf.Graph. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. """ with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: """Class for exporting inference graphs.""" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): """Exports a InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts to False unless user explicitly sets it to True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and multi-core inference on TPUs work properly. Args: model_cfg: a Params instance as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task to generate an inference graph for. Should be None for single-task models. device_options: Device options for the accelerator used for serving. freeze_checkpoint: The checkpoint to load. Loads and freezes the model if given. freeze_defaults: Default initializes the graph and freeze. Useful for early testing of downstream tools without having a checkpoint. export_path: If not None, write the inference graph in ASCII to this path. subgraph_filter: A string or a list of subgraph names. If not None or empty, export only this list of inference subgraphs. random_seed: Fixes the random seed in the exported inference graph. disable_packed_input: Disable packed input for inference writing purposes. Returns: InferenceGraph proto. Raises: ValueError: if the model does not support the listed subgraphs. """ assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] # Disable assertions unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work out how much we need to specify here in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. 
model_cfg.random_seed = random_seed model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line in model_cfg.ToText().split('\n'): tf.logging.debug('%s', line) # Instantiate the graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do not specify devices for variables if we are marking them as # constants. device = '' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace variables with tensors using tf.identity in theta before # freezing to avoid the graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad, however this seems to be the # easiest way to get this assets registered from # TextFileInitializer. 
assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type == 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] # Add a table init op and global variable init op to the graph. # Tables can be declared anywhere in the graph, so this op has to be # added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for op in graph.get_operations()]) # Collection defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs ' 'since operating in eager mode.') # Freezing. if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to just the parts we need. # To support restoring, we have to not prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear the device so that the runtime can choose. tf.logging.info('Clearing device placement for: %s', device_options.device) for node in graph_def.node: node.ClearField('device') for function in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): """Sets cluster params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. 
""" def Update(p): """Update cluster params `p`.""" p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica = 1 if IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls, device_options): return IsTpu(device_options)
    38.100173
    116
    0.694334
    11,523
    0.522348
    958
    0.043427
    12,475
    0.565503
    0
    0
    8,165
    0.370127
    b9982e3e4e7a4b4799e5780bd7629d5235cc1b40
    1,836
    py
    Python
    src/preprocessing/annual_hc_by_crime_loc.py
    VijayKalmath/USCrimeAnalysis
    14c96aae52547a4f7ea140395c62a621a97def50
    [ "MIT" ]
    null
    null
    null
    src/preprocessing/annual_hc_by_crime_loc.py
    VijayKalmath/USCrimeAnalysis
    14c96aae52547a4f7ea140395c62a621a97def50
    [ "MIT" ]
    null
    null
    null
    src/preprocessing/annual_hc_by_crime_loc.py
    VijayKalmath/USCrimeAnalysis
    14c96aae52547a4f7ea140395c62a621a97def50
    [ "MIT" ]
    null
    null
    null
    #! usr/env/bin python import glob import numpy as np import pandas as pd from tqdm import tqdm def main(): # Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year file_paths.sort(key = lambda x: int(x[-8:-4])) # Create a result dataframe to store the data df_res = get_place_crime_count(file_paths[0]) # Iterate over the rest of the files for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on = "Place", how = "left") # Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: """ Function to return """ # Extracting the table name from and year from the given file path t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_")) t_year = path[path.index(".xls")-4:path.index(".xls")] try: # Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the start and end indices of the interested datapoints start = df.index[df[t_name] == "Total"][0] + 1 end = df.index[df[t_name] == "Multiple locations"][0] # Slice the dataset df = df.iloc[start:end,0:2] # Reset the index for the reduced dataframe df.reset_index(drop = True, inplace = True) # Rename the columns df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace = True) # Return the value return df except: # If there is no such data return an empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__ == '__main__': main()
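Toy illustration (not from the source) of the accumulation pattern in main(): per-year frames keyed on "Place" are left-merged one after another into a single wide table.

import pandas as pd

df_res = pd.DataFrame({"Place": ["Residence", "School"], "2015": [10, 3]})
df_2016 = pd.DataFrame({"Place": ["Residence", "School"], "2016": [12, 5]})
df_res = pd.merge(df_res, df_2016, on="Place", how="left")
print(df_res)   # columns: Place, 2015, 2016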
    33.381818
    84
    0.6378
    0
    0
    0
    0
    0
    0
    0
    0
    720
    0.392157
    b998534e368ce74be309448b790e384f839c6d4a
    1,672
    py
    Python
    allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
    ethanjperez/allennlp
    e520993f16f0da7e2c40f6e44b8dc56338f46b57
    [ "Apache-2.0" ]
    24
    2019-09-16T00:10:54.000Z
    2021-09-08T19:31:51.000Z
    allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
    ethanjperez/allennlp
    e520993f16f0da7e2c40f6e44b8dc56338f46b57
    [ "Apache-2.0" ]
    null
    null
    null
    allennlp/tests/modules/token_embedders/bag_of_word_counts_token_embedder_test.py
    ethanjperez/allennlp
    e520993f16f0da7e2c40f6e44b8dc56338f46b57
    [ "Apache-2.0" ]
    7
    2019-09-16T02:37:31.000Z
    2021-09-01T06:06:17.000Z
    # pylint: disable=no-self-use,invalid-name import numpy as np from numpy.testing import assert_almost_equal import torch from allennlp.common import Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace("1") self.vocab.add_token_to_namespace("2") self.vocab.add_token_to_namespace("3") self.vocab.add_token_to_namespace("4") def test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({"projection_dim": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert embedder_output.shape[1] == 50
    45.189189
    93
    0.70634
    1,349
    0.806818
    0
    0
    0
    0
    0
    0
    79
    0.047249
    b998e92d411833a80bc4657adf0243c90d5c6084
    5,457
    py
    Python
    demo/demo_shapenet.py
    hengkaiz/meshrcnn
    eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
    [ "BSD-3-Clause" ]
    null
    null
    null
    demo/demo_shapenet.py
    hengkaiz/meshrcnn
    eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
    [ "BSD-3-Clause" ]
    null
    null
    null
    demo/demo_shapenet.py
    hengkaiz/meshrcnn
    eb5b5bc0639a33e48f0fc1e0834106798cd1e3d8
    [ "BSD-3-Clause" ]
    null
    null
    null
import argparse
import logging
import multiprocessing as mp
import logging
import os
from detectron2.evaluation import inference_context
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from fvcore.common.file_io import PathManager
from pathlib import Path
from pytorch3d.io import save_obj
from shapenet.config.config import get_shapenet_cfg
from shapenet.data.utils import imagenet_preprocess
from shapenet.modeling.heads import voxel_head
from shapenet.modeling.mesh_arch import build_model
from shapenet.utils.checkpoint import clean_state_dict
import torchvision.transforms as T
import glob
from PIL import Image
import trimesh
import pyvista as pv
import pyacvd
import numpy as np

logger = logging.getLogger('demo')


def setup_cfgs(args):
    cfg = get_shapenet_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg


def get_parser():
    parser = argparse.ArgumentParser(description="MeshRCNN Demo")
    parser.add_argument(
        "--config-file",
        default="configs/shapenet/voxmesh_R50.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--input", help="A path to an input main folder")
    # parser.add_argument("--output", help="A directory to save output visualizations")
    parser.add_argument(
        "--focal-length", type=float, default=20.0, help="Focal length for the image"
    )
    parser.add_argument(
        "--onlyhighest", action="store_true", help="will return only the highest scoring detection"
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser


def resample_mesh(mesh, count=2466):
    pv_mesh = pv.wrap(mesh)
    # logger.info('Original mesh:')
    # print(pv_mesh)
    clus = pyacvd.Clustering(pv_mesh)
    clus.subdivide(3)
    clus.cluster(count)

    # remesh
    remesh = clus.create_mesh()
    # verts = remesh.points
    # faces = remesh.faces.reshape((-1, 4))[:, 1:]
    return remesh


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    device = torch.device("cuda:%d" % 0)

    logger = setup_logger(name="demo shapenet")
    logger.info("Arguments: " + str(args))

    cfg = setup_cfgs(args)

    # load checkpoint and build model
    if cfg.MODEL.CHECKPOINT == "":
        raise ValueError("Invalid checkpoint provided")
    logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
    cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
    state_dict = clean_state_dict(cp["best_states"]["model"])
    model = build_model(cfg)
    model.load_state_dict(state_dict)
    logger.info("Model loaded")
    model.to(device)

    sub_dir = sorted(os.listdir(args.input))
    for sd in sub_dir:
        curr_path = os.path.join(args.input, sd)
        images = glob.glob(curr_path + "/*.png")
        for img_dir in images:
            # load image
            transform = [T.ToTensor()]
            transform.append(imagenet_preprocess())
            transform = T.Compose(transform)
            im_name = img_dir.split("/")[-1].split(".")[0]
            with PathManager.open(img_dir, "rb") as f:
                img = Image.open(f).convert("RGB")
            img = transform(img)
            img = img[None, :, :, :]
            img = img.to(device)

            with inference_context(model):
                img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)

            # Save voxel_score
            voxel_odir = os.path.join(curr_path, "voxel_score")
            if not Path(voxel_odir).is_dir():
                os.mkdir(voxel_odir)
            voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name))
            torch.save(voxel_scores, voxel_file)

            # Save image features
            imgfeat_odir = os.path.join(curr_path, "img_feat")
            if not Path(imgfeat_odir).is_dir():
                os.mkdir(imgfeat_odir)
            img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name))
            torch.save(img_feats, img_feat_file)

            # Save P
            p_odir = os.path.join(curr_path, "P")
            if not Path(p_odir).is_dir():
                os.mkdir(p_odir)
            p_file = os.path.join(p_odir, "%s.pt" % (im_name))
            torch.save(P, p_file)

            # Save cubified mesh
            cmesh_odir = os.path.join(curr_path, "cube_mesh")
            if not Path(cmesh_odir).is_dir():
                os.mkdir(cmesh_odir)
            cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name))
            c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)
            save_obj(cube_mesh_file, c_verts, c_faces)

            # Save predicted mesh
            mesh_odir = os.path.join(curr_path, "final_mesh")
            if not Path(mesh_odir).is_dir():
                os.mkdir(mesh_odir)
            save_file = os.path.join(mesh_odir, "%s.obj" % (im_name))
            verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)
            save_obj(save_file, verts, faces)
            logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
    31.912281
    99
    0.637713
    0
    0
    0
    0
    0
    0
    0
    0
    956
    0.175188
    b998f6994cf6e83702b501cd661bb37f91b59317
    7,854
    py
    Python
    proglearn/voters.py
    jshin13/progressive-learning
    dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
    [ "Apache-2.0" ]
    null
    null
    null
    proglearn/voters.py
    jshin13/progressive-learning
    dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
    [ "Apache-2.0" ]
    null
    null
    null
    proglearn/voters.py
    jshin13/progressive-learning
    dccc70fe5f6a03d2c53c2b01fd2122d7fd2798dc
    [ "Apache-2.0" ]
    null
    null
    null
import numpy as np

# from sklearn.ensemble import BaggingClassifier
# from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.validation import (
    check_X_y,
    check_array,
    NotFittedError,
)
from sklearn.utils.multiclass import check_classification_targets, type_of_target

from .base import BaseVoter

from tensorflow import keras
from keras import layers


class TreeClassificationVoter(BaseVoter):
    def __init__(self, finite_sample_correction=False):
        """
        Doc strings here.
        """
        self.finite_sample_correction = finite_sample_correction
        self._is_fitted = False
        self.multilabel = False

    def fit(self, X, y):
        """
        Doc strings here.
        """
        check_classification_targets(y)

        if type_of_target(y) == 'multilabel-indicator':
            # Fit multilabel binary task.
            self.multilabel = True
            return self.fit_multilabel(X, y)

        num_classes = len(np.unique(y))
        self.uniform_posterior = np.ones(num_classes) / num_classes

        self.leaf_to_posterior = {}

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            class_counts = [
                len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)
            ]
            posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))

            if self.finite_sample_correction:
                posteriors = self._finite_sample_correction(
                    posteriors, len(idxs_in_leaf), len(np.unique(y))
                )

            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True

        return self

    def fit_multilabel(self, X, y):
        num_labels = y.shape[1]
        self.uniform_posterior = y.sum(axis=0) / len(y)

        # Each posterior is now a num_labels size vector or binary probabilities.
        self.leaf_to_posterior = {}

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            label_counts = [
                len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)
            ]
            posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))

            # TODO: multilabel finite sample correction.
            self.leaf_to_posterior[leaf_id] = posteriors

        self._is_fitted = True

        return self

    def vote(self, X):
        """
        Doc strings here.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        votes_per_example = []
        for x in X:
            if x in list(self.leaf_to_posterior.keys()):
                votes_per_example.append(self.leaf_to_posterior[x])
            else:
                votes_per_example.append(self.uniform_posterior)
        return np.array(votes_per_example)

    def is_fitted(self):
        """
        Doc strings here.
        """
        return self._is_fitted

    # Declared as a staticmethod so that the call above,
    # self._finite_sample_correction(posteriors, ...), passes exactly the three
    # arguments this function expects.
    @staticmethod
    def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):
        """
        encourage posteriors to approach uniform when there is low data
        """
        correction_constant = 1 / (num_classes * num_points_in_partition)

        zero_posterior_idxs = np.where(posteriors == 0)[0]
        posteriors[zero_posterior_idxs] = correction_constant

        posteriors /= sum(posteriors)

        return posteriors


class KNNClassificationVoter(BaseVoter):
    def __init__(self, k, kwargs={}):
        """
        Doc strings here.
        """
        self._is_fitted = False
        self.k = k
        self.kwargs = kwargs

    def fit(self, X, y):
        """
        Doc strings here.
        """
        X, y = check_X_y(X, y)
        self.knn = KNeighborsClassifier(self.k, **self.kwargs)
        self.knn.fit(X, y)
        self._is_fitted = True

        return self

    def vote(self, X):
        """
        Doc strings here.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        X = check_array(X)
        return self.knn.predict_proba(X)

    def is_fitted(self):
        """
        Doc strings here.
        """
        return self._is_fitted


class NeuralRegressionVoter(BaseVoter):
    def __init__(
        self,
        validation_split=0.25,
        loss="mse",
        epochs=100,
        lr=1e-4,
        verbose=False,
    ):
        """
        Doc strings here.
        """
        self.validation_split = validation_split
        self.loss = loss
        self.epochs = epochs
        self.lr = lr
        self.verbose = verbose

        self._is_fitted = False

    def fit(self, X, y):
        """
        Doc strings here.
        """
        X, y = check_X_y(X, y)

        self.voter = keras.Sequential()
        self.voter.add(
            layers.Dense(
                1,
                activation="linear",
                input_shape=(X.shape[1],),
                name="transform_to_vote",
            )
        )
        self.voter.compile(
            loss=self.loss, metrics=["mae"], optimizer=keras.optimizers.Adam(self.lr)
        )
        self.voter.fit(
            X,
            y,
            epochs=self.epochs,
            callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor="val_loss")],
            verbose=self.verbose,
            validation_split=self.validation_split,
            shuffle=True,
        )
        self._is_fitted = True

        return self

    def vote(self, X):
        """
        Doc strings here.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this transformer."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        X = check_array(X)
        return self.voter.predict(X)

    def is_fitted(self):
        """
        Doc strings here.
        """
        return self._is_fitted


class TreeRegressionVoter(BaseVoter):
    def __init__(self):
        """
        Doc strings here.
        """
        self._is_fitted = False

    def fit(self, X, y):
        """
        Doc strings here.
        """
        self.leaf_to_yhat = {}
        self.global_yhat = np.mean(y)

        for leaf_id in np.unique(X):
            idxs_in_leaf = np.where(X == leaf_id)[0]
            # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)]
            self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))

        self._is_fitted = True

        return self

    def vote(self, X):
        """
        Doc strings here.
        """
        if not self.is_fitted():
            msg = (
                "This %(name)s instance is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this voter."
            )
            raise NotFittedError(msg % {"name": type(self).__name__})

        votes_per_example = []
        for x in X:
            if x in list(self.leaf_to_yhat.keys()):
                votes_per_example.append(self.leaf_to_yhat[x])
            else:
                votes_per_example.append(self.global_yhat)
        return np.array(votes_per_example)

    def is_fitted(self):
        """
        Doc strings here.
        """
        return self._is_fitted
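The voters above map transformer outputs (tree leaf ids or features) to class posteriors. A minimal usage sketch of TreeClassificationVoter, assuming the package layout in the path above (proglearn/voters.py) and with made-up leaf ids standing in for the output of an upstream tree transformer:

import numpy as np
from proglearn.voters import TreeClassificationVoter

# Hypothetical leaf ids assigned to six samples by a fitted tree transformer,
# plus their class labels.
leaf_ids = np.array([0, 0, 1, 1, 2, 2])
labels = np.array([0, 0, 1, 1, 1, 0])

voter = TreeClassificationVoter(finite_sample_correction=True)
voter.fit(leaf_ids, labels)

# vote() returns one posterior per sample; a leaf id never seen during fit
# falls back to the uniform posterior.
print(voter.vote(np.array([0, 2, 7])))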
    26.805461
    99
    0.556277
    7,412
    0.943723
    0
    0
    0
    0
    0
    0
    1,610
    0.204991
    b999024320e50c940c8f273e6f0536039450c829
    1,949
    py
    Python
    config.py
    jhattat/photoBooth
    f6fe3ab418bb917792e10349597401ed34078766
    [ "MIT" ]
    null
    null
    null
    config.py
    jhattat/photoBooth
    f6fe3ab418bb917792e10349597401ed34078766
    [ "MIT" ]
    null
    null
    null
    config.py
    jhattat/photoBooth
    f6fe3ab418bb917792e10349597401ed34078766
    [ "MIT" ]
    null
    null
    null
# Tumblr Setup
# Replace the values with your information
# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info
consumer_key='ShbOqI5zErQXOL7Qnd5XduXpY9XQUlBgJDpCLeq1OYqnY2KzSt' #replace with your key
consumer_secret='ulZradkbJGksjpl2MMlshAfJgEW6TNeSdZucykqeTp8jvwgnhu' #replace with your secret code
oath_token='uUcBuvJx8yhk4HJIZ39sfcYo0W4VoqcvUetR2EwcI5Sn8SLgNt' #replace with your oath token
oath_secret='iNJlqQJI6dwhAGmdNbMtD9u7VazmX2Rk5uW0fuIozIEjk97lz4' #replace with your oath secret code
tumblr_blog = 'soniaetjeremie' # replace with your tumblr account name without .tumblr.com
tagsForTumblr = "photobooth" # change to tags you want, separated with commas

#Config settings to change behavior of photo booth
monitor_w = 800    # width of the display monitor
monitor_h = 480    # height of the display monitor
file_path = '/home/pi/photobooth/pics/' # path to save images
clear_on_startup = False # True will clear previously stored photos as the program launches. False will leave all previous photos.
debounce = 0.3 # how long to debounce the button. Add more time if the button triggers too many times.
post_online = True # True to upload images. False to store locally only.
capture_count_pics = True # if true, show a photo count between taking photos. If false, do not. False is faster.
make_gifs = True # True to make an animated gif. False to post 4 jpgs into one post.
hi_res_pics = False # True to save high res pics from camera.
# If also uploading, the program will also convert each image to a smaller image before making the gif.
# False to first capture low res pics. False is faster.
# Careful, each photo costs against your daily Tumblr upload max.
camera_iso = 400    # adjust for lighting issues. Normal is 100 or 200. Sort of dark is 400. Dark is 800 max.
# available options: 100, 200, 320, 400, 500, 640, 800
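The photo booth presumably imports this module and reads the values at runtime. A hypothetical sketch of consuming the config with the pytumblr client (whether this repo actually uses pytumblr is an assumption; the file name photo_1.jpg is made up):

import pytumblr
import config  # the module above

# Build a Tumblr client from the OAuth values defined in config.py.
client = pytumblr.TumblrRestClient(
    config.consumer_key,
    config.consumer_secret,
    config.oath_token,
    config.oath_secret,
)

if config.post_online:
    # Upload one captured photo to the configured blog with the configured tag.
    client.create_photo(
        config.tumblr_blog,
        state="published",
        tags=[config.tagsForTumblr],
        data=config.file_path + "photo_1.jpg",  # hypothetical file name
    )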
    77.96
    130
    0.758338
    0
    0
    0
    0
    0
    0
    0
    0
    1,561
    0.800924
    b9991711cbe60fa3459b0fb4cb64d023132610e8
    896
    py
    Python
    accounts/admin.py
    GuilhemN/site-interludes
    69873810d5b0168aa57277ba51805117e6c53874
    [ "MIT" ]
    null
    null
    null
    accounts/admin.py
    GuilhemN/site-interludes
    69873810d5b0168aa57277ba51805117e6c53874
    [ "MIT" ]
    1
    2022-03-24T10:41:10.000Z
    2022-03-24T12:39:30.000Z
    accounts/admin.py
    GuilhemN/site-interludes
    69873810d5b0168aa57277ba51805117e6c53874
    [ "MIT" ]
    1
    2022-03-23T22:30:12.000Z
    2022-03-23T22:30:12.000Z
from django.contrib import admin
from django.contrib.auth.models import Group

from accounts.models import EmailUser
from shared.admin import ExportCsvMixin

# no need for groups - we only have regular users and superusers
admin.site.unregister(Group)


@admin.register(EmailUser)
class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):
    """display options for activities in the django admin view"""
    filename = "export_utilisateurs.csv"
    list_display = ("email", "last_name", "first_name", "is_superuser", "is_active", "email_confirmed",)
    list_filter = ("is_superuser", "is_active", "email_confirmed",)
    fields = (
        "email", "last_name", "first_name", "is_superuser", "is_staff",
        "is_active", "email_confirmed",
        ("date_joined", "last_login",),
    )
    ordering = ("last_name", "first_name")
    readonly_fields = ("date_joined", "last_login",)
    list_per_page = 200
    csv_export_exclude = ["password"]
    37.333333
    106
    0.753348
    616
    0.686734
    0
    0
    643
    0.716834
    0
    0
    432
    0.481605
    b9993aa0d134cc4869bfe49fd1ecd6dc8c6b0b96
    23,640
    py
    Python
    rotkehlchen/exchanges/coinbase.py
    vnavascues/rotki
    8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
    [ "BSD-3-Clause" ]
    null
    null
    null
    rotkehlchen/exchanges/coinbase.py
    vnavascues/rotki
    8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
    [ "BSD-3-Clause" ]
    null
    null
    null
    rotkehlchen/exchanges/coinbase.py
    vnavascues/rotki
    8675bdb02bf84bfccb5d59362e3ae2b7138fcd8f
    [ "BSD-3-Clause" ]
    null
    null
    null
    import hashlib import hmac import logging import time from json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from urllib.parse import urlencode import requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: """Turns a coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not a trade related transaction returns None Throws: - UnknownAsset due to Asset instantiation - DeserializationError due to unexpected format of dict entries - KeyError due to dict entires missing an expected entry """ if raw_trade['status'] != 'completed': # We only want to deal with completed trades return None if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate is how much you get/give in quotecurrency if you buy/sell 1 unit of base currency rate = Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def 
first_connection(self) -> None: self.first_connection_made = True def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error = str(e) if 'transactions' in method_str: permission = 'wallet:transactions:read' elif 'buys' in method_str: permission = 'wallet:buys:read' elif 'sells' in method_str: permission = 'wallet:sells:read' elif 'deposits' in method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif 'trades' in method_str: permission = 'wallet:trades:read' # the accounts elif should be at the end since the word appears # in other endpoints elif 'accounts' in method_str: permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method {method_str} at API key validation', ) msg = ( f'Provided Coinbase API key needs to have {permission} permission activated. ' f'Please log into your coinbase account and set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None, msg except RemoteError as e: error = str(e) if 'invalid signature' in error: return None, 'Failed to authenticate with the Provided API key/secret' elif 'invalid api key' in error: return None, 'Provided API Key is invalid' else: # any other remote error return None, error return result, '' def validate_api_key(self) -> Tuple[bool, str]: """Validates that the Coinbase API key is good for usage in Rotki Makes sure that the following permissions are given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read """ result, msg = self._validate_single_api_key_action('accounts') if result is None: return False, msg # now get the account ids account_ids = self._get_account_ids(result) if len(account_ids) != 0: # and now try to get all transactions of an account to see if that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all buys of an account to see if that's possible method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all sells of an account to see if that's possible method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all deposits of an account to see if that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg # and now try to get all withdrawals of an account to see if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result is None: return False, msg return True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: """Gets the account ids out of the accounts response""" account_ids = [] for account_data in accounts: if 'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without an id key. Skipping it. 
', ) continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry with a non string id: ' f'{account_data["id"]}. Skipping it. ', ) continue account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint: str, options: Optional[Dict[str, Any]] = None, pagination_next_uri: str = None, ignore_pagination: bool = False, ) -> List[Any]: """Performs a coinbase API Query for endpoint You can optionally provide extra arguments to the endpoint via the options argument. If this is an ongoing paginating call then provide pagination_next_uri. If you want just the first results then set ignore_pagination to True. """ request_verb = "GET" if pagination_next_uri: request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp = str(int(time.time())) message = timestamp + request_verb + request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed to guarantee the up to the given date # API version response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri + request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request failed due to {str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API key does not have permission for {endpoint}') if response.status_code != 200: raise RemoteError( f'Coinbase query {full_url} responded with error status code: ' f'{response.status_code} and text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if 'data' not in json_ret: raise RemoteError(f'Coinbase json response does not contain data: {response.text}') final_data = json_ret['data'] # If we got pagination and this is the first query, gather all the subsequent queries if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json response contained no "next_uri" key') next_uri = json_ret['pagination']['next_uri'] if not next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an empty next_uri we are done return final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp = self._api_query('accounts') except RemoteError as e: msg = ( 'Coinbase API request failed. Could not reach coinbase due ' 'to {}'.format(e) ) log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str, Any]] = {} for account in resp: try: if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. 
Coinbase returns zero balances for everything # a user does not own if amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry due to inability to ' f'query USD price: {str(e)}. Skipping balance entry', ) continue if asset in returned_balances: amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account balance. Check logs ' 'for details. Ignoring it.', ) log.error( 'Error processing a coinbase account balance', account_balance=account, error=msg, ) continue return returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts') # now get the account ids and for each one query buys/sells # Looking at coinbase's API no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would need to return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades = [] for raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check logs ' 'for details. 
Ignoring it.', ) log.error( 'Error processing a coinbase trade', trade=raw_trade, error=msg, ) continue # limit coinbase trades in the requested time range here since there # is no argument in the API call if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: """Processes a single deposit/withdrawal from coinbase and deserializes it Can log error/warning and return None if something went wrong at deserialization """ try: if raw_data['status'] != 'completed': return None payout_date = raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get address/transaction id for "send" type of transactions address = None transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: # Then this should be a "send" which is the way Coinbase uses to send # crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non "send" type found in coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the fee being charged from the "send" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in the docs but from an experiment of sending ETH # to an address from coinbase there is the network fee in the response fee = Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee: # Since this is a withdrawal the fee should be the same as the moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set ZERO fee and ignore log.error( f'In a coinbase withdrawal of {asset.identifier} the fee' f'is denoted in {raw_fee["currency"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of a coinbase ' 'asset movement. 
Check logs for details and open a bug report.', ) log.error( f'Unexpected data encountered during deserialization of coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}', ) return None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get the "sends", which in Coinbase is the # way to send Crypto out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type' not in tx: continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements = [] for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the requested time range # here since there is no argument in the API call if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts: movements.append(movement) return movements
    40.688468
    98
    0.592047
    20,182
    0.853723
    0
    0
    2,906
    0.122927
    0
    0
    7,542
    0.319036
    b9994eb6b47f29e07dc9f474ab82878fdc8ae029
    3,533
    py
    Python
    lib/python3.7/site-packages/ldap/controls/deref.py
    aonrobot/MSC-thug-auth-provider
    aef37ef5a000586b8502cc536244f31e08b9c2db
    [ "Apache-2.0" ]
    1
    2019-06-21T11:51:26.000Z
    2019-06-21T11:51:26.000Z
    lib/python3.7/site-packages/ldap/controls/deref.py
    aonrobot/MSC-thug-auth-provider
    aef37ef5a000586b8502cc536244f31e08b9c2db
    [ "Apache-2.0" ]
    13
    2019-07-03T21:28:31.000Z
    2022-02-26T10:42:05.000Z
    lib/python3.7/site-packages/ldap/controls/deref.py
    aonrobot/MSC-thug-auth-provider
    aef37ef5a000586b8502cc536244f31e08b9c2db
    [ "Apache-2.0" ]
    2
    2020-02-11T09:34:39.000Z
    2020-11-10T14:41:32.000Z
# -*- coding: utf-8 -*-
"""
ldap.controls.deref - classes for
(see https://tools.ietf.org/html/draft-masarati-ldap-deref)

See https://www.python-ldap.org/ for project details.
"""

__all__ = [
    'DEREF_CONTROL_OID',
    'DereferenceControl',
]

import ldap.controls
from ldap.controls import LDAPControl, KNOWN_RESPONSE_CONTROLS

import pyasn1_modules.rfc2251
from pyasn1.type import namedtype, univ, tag
from pyasn1.codec.ber import encoder, decoder
from pyasn1_modules.rfc2251 import LDAPDN, AttributeDescription, AttributeDescriptionList, AttributeValue

DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'

# Request types
#---------------------------------------------------------------------------

# For compatibility with ASN.1 declaration in I-D
AttributeList = AttributeDescriptionList

class DerefSpec(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType(
            'derefAttr',
            AttributeDescription()
        ),
        namedtype.NamedType(
            'attributes',
            AttributeList()
        ),
    )

class DerefSpecs(univ.SequenceOf):
    componentType = DerefSpec()

# Response types
#---------------------------------------------------------------------------

class AttributeValues(univ.SetOf):
    componentType = AttributeValue()

class PartialAttribute(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeDescription()),
        namedtype.NamedType('vals', AttributeValues()),
    )

class PartialAttributeList(univ.SequenceOf):
    componentType = PartialAttribute()
    tagSet = univ.Sequence.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
    )

class DerefRes(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('derefAttr', AttributeDescription()),
        namedtype.NamedType('derefVal', LDAPDN()),
        namedtype.OptionalNamedType('attrVals', PartialAttributeList()),
    )

class DerefResultControlValue(univ.SequenceOf):
    componentType = DerefRes()

class DereferenceControl(LDAPControl):
    controlType = DEREF_CONTROL_OID

    def __init__(self, criticality=False, derefSpecs=None):
        LDAPControl.__init__(self, self.controlType, criticality)
        self.derefSpecs = derefSpecs or {}

    def _derefSpecs(self):
        deref_specs = DerefSpecs()
        i = 0
        for deref_attr, deref_attribute_names in self.derefSpecs.items():
            deref_spec = DerefSpec()
            deref_attributes = AttributeList()
            for j in range(len(deref_attribute_names)):
                deref_attributes.setComponentByPosition(j, deref_attribute_names[j])
            deref_spec.setComponentByName('derefAttr', AttributeDescription(deref_attr))
            deref_spec.setComponentByName('attributes', deref_attributes)
            deref_specs.setComponentByPosition(i, deref_spec)
            i += 1
        return deref_specs

    def encodeControlValue(self):
        return encoder.encode(self._derefSpecs())

    def decodeControlValue(self, encodedControlValue):
        decodedValue, _ = decoder.decode(encodedControlValue, asn1Spec=DerefResultControlValue())
        self.derefRes = {}
        for deref_res in decodedValue:
            deref_attr, deref_val, deref_vals = deref_res[0], deref_res[1], deref_res[2]
            partial_attrs_dict = {
                str(tv[0]): [str(v) for v in tv[1]]
                for tv in deref_vals or []
            }
            try:
                self.derefRes[str(deref_attr)].append((str(deref_val), partial_attrs_dict))
            except KeyError:
                self.derefRes[str(deref_attr)] = [(str(deref_val), partial_attrs_dict)]

KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl
    29.441667
    102
    0.711577
    2,552
    0.722332
    0
    0
    0
    0
    0
    0
    566
    0.160204
    b999aec7c34874ef90e0f30812ac97217ce90cca
    3,145
    py
    Python
    emoji.py
    notagoat/Deepmoji
    1ab922306c3647f9c7ea98caa2660a53b18fe4b6
    [ "MIT" ]
    1
    2020-03-19T20:09:00.000Z
    2020-03-19T20:09:00.000Z
    emoji.py
    notagoat/Deepmoji
    1ab922306c3647f9c7ea98caa2660a53b18fe4b6
    [ "MIT" ]
    null
    null
    null
    emoji.py
    notagoat/Deepmoji
    1ab922306c3647f9c7ea98caa2660a53b18fe4b6
    [ "MIT" ]
    null
    null
    null
import requests
import urllib.request
import os.path
import shutil
import csv

def main():
    with open("data.csv") as i: #Open the data.csv file
        instances = i.readlines() #Write them into memory
    instances = [x.strip() for x in instances] #Strip any weird issues from writing
    instances.sort() #Sort them alphabetically
    setup(instances) #Run setup to create all the necessary files and subfolders
    count = len(instances) #Get the count just for fun
    i = 0
    try:
        for name in instances:
            try:
                i += 1
                print("-----!"+name+"!-----")
                print(str(i) +" of " + str(count) + " remaining!")
                fetch(name) #Run the fetching code
            except Exception as e:
                print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc
                pass #Don't stop the beat
    except Exception as e:
        print("Instance Error")
        print(e)
        pass
    clone(instances) #Clone all of them into one big folder for ease of access

def fetch(name):
    r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data
    path = "emoji/%s/" % name #Because of the clone function we know all of these folders will exist
    try:
        for emoji in r.json(): #Emoji = the json code from the request
            try:
                if os.path.isfile(path+emoji['shortcode']+".png"): #Check to see if it exists.
                    pass
                else:
                    if "ms_" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them).
                        #Mutant standard is huge and common
                        #print(emoji['shortcode'] + " found!")
                        emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json
                        open(path + emoji['shortcode']+".png",'wb').write(emojiimage.content) #Now save it as an image in the filesystem
            except Exception as e:
                print("Did not get: " + emoji['url']) #If somethings fucky throw a nice error then keep going.
                print(e)
                pass
    except Exception as e:
        print(e)

def setup(instances):
    if (os.path.isdir("emoji/")): #Check to see if emoji/ exists
        pass
    else:
        os.mkdir("emoji/") #make it if it doesnt
    for name in instances:
        if (os.path.isdir("emoji/%s/"%name)):
            pass
        else:
            os.mkdir("emoji/%s/"%name)
    if (os.path.isdir("emoji/all")):
        pass
    else:
        os.mkdir("emoji/all")

def clone(instances):
    for name in instances:
        print("Copying emoji for: %s"% name)
        path = "emoji/%s/" % name
        files = os.listdir(path)
        for name in files: #This gets alll files
            try:
                shutil.copyfile(path+name,"emoji/all/"+name) #Then copies them into the all folder
            except Exception as e:
                print(e)
                pass

if __name__ == '__main__':
    main()
    37.440476
    151
    0.574245
    0
    0
    0
    0
    0
    0
    0
    0
    1,256
    0.399364
    b99add86778172fa08bc930ed29f8f26a88ec4d3
    943
    py
    Python
    String/640.One Edit Distance/Solution_DP.py
    Zhenye-Na/LxxxCode
    afd79d790d0a7495d75e6650f80adaa99bd0ff07
    [ "MIT" ]
    12
    2019-05-04T04:21:27.000Z
    2022-03-02T07:06:57.000Z
    String/640.One Edit Distance/Solution_DP.py
    Zhenye-Na/LxxxCode
    afd79d790d0a7495d75e6650f80adaa99bd0ff07
    [ "MIT" ]
    1
    2019-07-24T18:43:53.000Z
    2019-07-24T18:43:53.000Z
    String/640.One Edit Distance/Solution_DP.py
    Zhenye-Na/LxxxCode
    afd79d790d0a7495d75e6650f80adaa99bd0ff07
    [ "MIT" ]
    10
    2019-07-01T04:03:04.000Z
    2022-03-09T03:57:37.000Z
class Solution:
    """
    @param s: a string
    @param t: a string
    @return: true if they are both one edit distance apart or false
    """

    def isOneEditDistance(self, s, t):
        # write your code here
        if s == t:
            return False
        if abs(len(s) - len(t)) > 1:
            return False

        n, m = len(s), len(t)
        f = [[0] * (m + 1) for _ in range(2)]

        for j in range(m + 1):
            f[0][j] = j

        for i in range(1, n + 1):
            f[i % 2][0] = i
            for j in range(1, m + 1):
                if s[i - 1] == t[j - 1]:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1],
                                      f[(i - 1) % 2][j] + 1,
                                      f[i % 2][j - 1] + 1)
                else:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,
                                      f[(i - 1) % 2][j] + 1,
                                      f[i % 2][j - 1] + 1)

        return f[n % 2][m] == 1
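The solution keeps only two rows of the edit-distance table (hence the i % 2 indexing) and finally checks whether the full distance is exactly 1. A quick sanity check with illustrative inputs:

sol = Solution()
print(sol.isOneEditDistance("abc", "abxc"))  # True: one insertion apart
print(sol.isOneEditDistance("abc", "abc"))   # False: identical strings
print(sol.isOneEditDistance("abc", "axyc"))  # False: two edits needed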
    29.46875
    81
    0.341463
    942
    0.99894
    0
    0
    0
    0
    0
    0
    147
    0.155885
    b99b1d1ec6004cbeeb91e19410dbbb1e2216c45e
    1,478
    py
    Python
    nsq/__init__.py
    jehiah/pynsq
    899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96
    [ "MIT" ]
    1
    2015-05-25T00:23:53.000Z
    2015-05-25T00:23:53.000Z
    nsq/__init__.py
    barkinet/pynsq
    899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96
    [ "MIT" ]
    null
    null
    null
    nsq/__init__.py
    barkinet/pynsq
    899b60a8ce77ed6c8ab899fbdfd7adbc1b450c96
    [ "MIT" ]
    null
    null
    null
from __future__ import absolute_import

import signal
import tornado.ioloop
import logging

from .protocol import (
    Error,
    unpack_response,
    decode_message,
    valid_topic_name,
    valid_channel_name,
    identify,
    subscribe,
    ready,
    finish,
    touch,
    requeue,
    nop,
    pub,
    mpub,
    FRAME_TYPE_RESPONSE,
    FRAME_TYPE_ERROR,
    FRAME_TYPE_MESSAGE,
)
from .message import Message
from .backoff_timer import BackoffTimer
from .sync import SyncConn
from .async import AsyncConn
from .reader import Reader
from .legacy_reader import LegacyReader
from .writer import Writer
from .version import __version__  # NOQA


def _handle_term_signal(sig_num, frame):
    logging.getLogger(__name__).info(
        'TERM Signal handler called with signal %r', sig_num)
    tornado.ioloop.IOLoop.instance().stop()


def run():
    """
    Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`
    """
    signal.signal(signal.SIGTERM, _handle_term_signal)
    tornado.ioloop.IOLoop.instance().start()


__author__ = "Matt Reiferson <[email protected]>"
__all__ = ["Reader", "Writer", "run", "BackoffTimer", "Message", "Error", "LegacyReader",
           "SyncConn", "AsyncConn", "unpack_response", "decode_message",
           "identify", "subscribe", "ready", "finish", "touch", "requeue", "nop", "pub", "mpub",
           "valid_topic_name", "valid_channel_name",
           "FRAME_TYPE_RESPONSE", "FRAME_TYPE_ERROR", "FRAME_TYPE_MESSAGE"]
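run() only installs a SIGTERM handler and spins the shared Tornado IOLoop, so a consumer just instantiates a Reader before calling it. A minimal sketch following the usual pynsq pattern (the address, topic, and channel names are placeholders):

import nsq

def handler(message):
    # Returning True finishes the message; returning False requeues it.
    print(message.body)
    return True

r = nsq.Reader(
    message_handler=handler,
    lookupd_http_addresses=['http://127.0.0.1:4161'],  # placeholder nsqlookupd address
    topic='test_topic',
    channel='test_channel',
    lookupd_poll_interval=15,
)
nsq.run()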
    26.392857
    96
    0.696211
    0
    0
    0
    0
    0
    0
    0
    0
    450
    0.304465
    b99b2da4f2ac2ca37d2ded7c72545cef1cab4228
    5,356
    py
    Python
    scripts/summaryPlot.py
    Hespian/ParFastKer
    5ddf1685c0652e73c889cfc64c7ec1fd827f905c
    [ "BSD-3-Clause", "MIT" ]
    3
    2019-08-10T08:24:19.000Z
    2019-08-12T07:16:03.000Z
    scripts/summaryPlot.py
    Hespian/ParFastKer
    5ddf1685c0652e73c889cfc64c7ec1fd827f905c
    [ "BSD-3-Clause", "MIT" ]
    null
    null
    null
    scripts/summaryPlot.py
    Hespian/ParFastKer
    5ddf1685c0652e73c889cfc64c7ec1fd827f905c
    [ "BSD-3-Clause", "MIT" ]
    null
    null
    null
import get_data_ours
import get_data_akiba
import get_data_NearLinear
import get_data_LinearTime
import os
import matplotlib.pyplot as plt

# graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"]
graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"]

linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs"
partitioningDir = "../../LinearTimeKernels/partitions"
ourTimeDir = "../../results/LinearTimeKernelsScalingAll"
nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear"
akibaDir = "../../akiba_vertex_cover/results"

def getOurTimeAndSizeSequential(graph):
    res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
    result = dict()
    result["time"] = res["sequential_quasikernel_time"] + res["lineartime_time"]
    result["size"] = res["sequential_quasikernel_size"]
    return result

def getOurTimeAndSizeParallel(graph):
    res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
    result = dict()
    result["time"] = res["parallel_quasikernel_time"] + res["lineartime_time"] + res["partitioning_time"]
    result["size"] = res["parallel_quasikernel_size"]
    return result

def getAkibaTimeAndSize(graph):
    return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)

def getNearLinearTimeAndSize(graph):
    return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)

def getLinearTimeTimeAndSize(graph):
    return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)

def minProperty(graph, prop):
    oursequential = getOurTimeAndSizeSequential(graph)[prop]
    ourparallel = getOurTimeAndSizeParallel(graph)[prop]
    akiba = getAkibaTimeAndSize(graph)[prop]
    nearLinear = getNearLinearTimeAndSize(graph)[prop]
    linearTime = getLinearTimeTimeAndSize(graph)[prop]
    data = [oursequential, ourparallel, akiba, nearLinear, linearTime]
    # data = [oursequential, ourparallel, akiba, nearLinear]
    data = filter(lambda x: x >= 0, data)
    minimum = min(data)
    if minimum == 0:
        return 1
    return minimum

oursizeSequential = []
ourtimeSequential = []
oursizeParallel = []
ourtimeParallel = []
akibasize = []
akibatime = []
nearlinearsize = []
nearlineartime = []
lineartimesize = []
lineartimetime = []
for graph in graphs:
    minsize = getAkibaTimeAndSize(graph)["size"]
    mintime = getAkibaTimeAndSize(graph)["time"]
    oss = getOurTimeAndSizeSequential(graph)["size"] / minsize
    # print(graph + "(sequential): " + str(getOurTimeAndSizeSequential(graph)["size"]))
    ots = getOurTimeAndSizeSequential(graph)["time"] / mintime
    if oss > 0 and ots > 0:
        oursizeSequential.append(oss)
        ourtimeSequential.append(ots)
    osp = getOurTimeAndSizeParallel(graph)["size"] / minsize
    # print(graph + "(parallel): " + str(getOurTimeAndSizeParallel(graph)["size"]))
    otp = getOurTimeAndSizeParallel(graph)["time"] / mintime
    if osp > 0 and otp > 0:
        oursizeParallel.append(osp)
        ourtimeParallel.append(otp)
    aks = getAkibaTimeAndSize(graph)["size"] / minsize
    akt = getAkibaTimeAndSize(graph)["time"] / mintime
    if aks > 0 and akt > 0:
        akibasize.append(aks)
        akibatime.append(akt)
    nls = getNearLinearTimeAndSize(graph)["size"] / minsize
    nlt = getNearLinearTimeAndSize(graph)["time"] / mintime
    if nls > 0 and nlt > 0:
        nearlinearsize.append(nls)
        nearlineartime.append(nlt)
    lts = getLinearTimeTimeAndSize(graph)["size"] / minsize
    ltt = getLinearTimeTimeAndSize(graph)["time"] / mintime
    if nls > 0 and nlt > 0:  # note: re-checks the NearLinear values; checking lts/ltt was probably intended
        lineartimesize.append(lts)
        lineartimetime.append(ltt)

# print("We")
# print(oursizeSequential)
# print(ourtimeSequential)
# print("We (parallel)")
# print(oursizeParallel)
# print(ourtimeParallel)
# print("Akiba")
# print(akibasize)
# print(akibatime)
# print("NearLinear")
# print(nearlinearsize)
# print(nearlineartime)
# print("LinearTime")
# print(lineartimesize)
# print(lineartimetime)

plt.rc('font', size=14)
fig = plt.figure(figsize=(3.2, 2.4))
ax = fig.add_subplot(1, 1, 1)
plt.title("Summary", fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green")
ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black")
# ax.scatter(akibatime, akibasize, label="VCSolver", marker="^", edgecolors="blue", facecolors="none")
ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none")
ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none")
plt.xlabel("time / VCSolver time")
plt.ylabel("size / VCSolver size")
plt.xticks([0.0001, 0.01, 1])
ax.legend(bbox_to_anchor=(0.35, -0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand")
plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight")
# plt.show()
    39.094891
    234
    0.720127
    0
    0
    0
    0
    0
    0
    0
    0
    1,795
    0.335138
    b99c2305beceab596bedee8ad399b6faa3216070
    3,587
    py
    Python
    bouncer/cli/base.py
    lrnt/git-bouncer
    3015e11a5d2c90986124de73bf1fd0f5a8563360
    [ "MIT" ]
    null
    null
    null
    bouncer/cli/base.py
    lrnt/git-bouncer
    3015e11a5d2c90986124de73bf1fd0f5a8563360
    [ "MIT" ]
    null
    null
    null
    bouncer/cli/base.py
    lrnt/git-bouncer
    3015e11a5d2c90986124de73bf1fd0f5a8563360
    [ "MIT" ]
    null
    null
    null
import configparser
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter


def opt(*args, **kwargs):
    def decorator(method):
        if not hasattr(method, 'options'):
            method.options = []
        method.options.append((args, kwargs))
        return method
    return decorator


def noopts(method):
    method.options = []
    return method


class HelpMixin(object):
    def help(self):
        print('available commands:')
        for name, command in self.commands.items():
            description = str(command.__doc__ or '').strip('\n')
            print(' ', name.ljust(10), description)
        return 1


class SubParser(HelpMixin):
    def __init__(self, commands):
        self.commands = self._commands(commands)

    def _commands(self, commands):
        prog = sys.argv[0]
        result = {}
        for cmd in commands:
            name = getattr(cmd, '_name', None)
            if not name:
                continue
            cmd.prog = prog
            result[name] = cmd
        return result

    def run(self):
        args = sys.argv[1:]
        for index, arg in enumerate(args):
            if arg in self.commands.keys():
                args.pop(index)
                return self.commands[arg](args)
        return self.help()


class Command(HelpMixin):
    def __init__(self):
        self.global_options = []
        self.commands = self._methods_with_opts()

    def _methods_with_opts(self):
        result = {}
        for name in dir(self):
            if name.startswith('__'):
                continue
            method = getattr(self, name)
            if not hasattr(method, 'options'):
                continue
            result[name] = method
        return result

    def _parse_args(self, method, args):
        prog = '{} {} {}'.format(self.prog, self._name, method.__name__)
        parser = ArgumentParser(
            prog=prog,
            description=(method.__doc__ or ''),
            formatter_class=RawDescriptionHelpFormatter
        )
        for opt in method.options + self.global_options:
            parser.add_argument(*opt[0], **opt[1])
        return vars(parser.parse_args(args))

    def _call_method(self, method, args):
        # Find out which arguments the method expects
        expected_args, _, _, _ = inspect.getargspec(method)
        expected_args.remove('self')

        self_args = self._parse_args(method, args)
        method_args = {}

        # Get the expected method arguments from the parsed args, ignore rest
        # (the original checked the raw argument list, but list.pop() does not
        # accept a name, so the parsed-args dict is used here instead)
        for name in expected_args:
            if name in self_args:
                method_args[name] = self_args.pop(name)

        # Put rest of the arguments in self
        for name, value in self_args.items():
            setattr(self, name, value)

        self.pre_command()
        return method(**method_args)

    def __call__(self, args):
        for index, arg in enumerate(args):
            if arg in self.commands.keys():
                args.pop(index)
                return self._call_method(self.commands[arg], args)
        return self.help()

    def opt(self, *args, **kwargs):
        self.global_options.append((args, kwargs))

    def pre_command(self):
        pass


class BaseCommand(Command):
    def __init__(self):
        super(BaseCommand, self).__init__()
        self.opt(
            '-c',
            dest='config_path',
            help='Configuration file',
            default='~/.test.conf'
        )

    def pre_command(self):
        config = configparser.ConfigParser()
        config.read(self.config_path)
        print(config.sections())
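How these pieces compose is not shown in this file; a hypothetical sketch, assuming a subclass defines _name and decorates its methods with the module-level opt helper:

# Hypothetical command built on the classes above.
class HelloCommand(BaseCommand):
    """Say hello"""
    _name = 'hello'

    @opt('--name', dest='name', default='world', help='Who to greet')
    def greet(self, name):
        """Print a greeting"""
        print('hello', name)


if __name__ == '__main__':
    # Dispatches e.g. `prog hello greet --name Alice` to HelloCommand.greet;
    # the global -c option ends up on self.config_path via pre_command().
    SubParser([HelloCommand()]).run()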
    27.381679
    72
    0.578199
    3,180
    0.886535
    0
    0
    0
    0
    0
    0
    257
    0.071648
    b99c4d9fb380e0635cac67dff2a6820b500bf34f
    13,728
    py
    Python
    Examples/ExampleCodes_ssccoorriinngg.py
    MahdadJafarzadeh/ssccoorriinngg
    63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
    [ "MIT" ]
    2
    2020-04-28T12:50:26.000Z
    2020-05-13T08:52:42.000Z
    Examples/ExampleCodes_ssccoorriinngg.py
    MahdadJafarzadeh/ssccoorriinngg
    63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
    [ "MIT" ]
    null
    null
    null
    Examples/ExampleCodes_ssccoorriinngg.py
    MahdadJafarzadeh/ssccoorriinngg
    63c726e9e7d0f6d13032415c76b8c3bb1ff2bee3
    [ "MIT" ]
    1
    2020-07-14T13:48:56.000Z
    2020-07-14T13:48:56.000Z
    #%% Import libs import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score import h5py import time from ssccoorriinngg import ssccoorriinngg import numpy as np from sklearn.model_selection import cross_validate #%% Picking featureset of interest and apply classification Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30) path = 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels' # Train set X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels) # Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels) # Define the scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score), 'precision' : make_scorer(precision_score), 'recall' : make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)} # Cross-validation using logistic Random Forests y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, cv = 10 , max_depth=3, learning_rate=.1) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures # Defien required metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric in Metrics: #RF r1 = results_RF[metric].mean() std1 = results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}') # xgb r2 = results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM r3 = results_SVM[metric].mean() std3 = results_SVM[metric].std() print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4 = results_LR[metric].mean() std4 = results_LR[metric].std() print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}') #%% Applying Randomized grid search to find the best config. 
# ... of RF
BestParams_RandomSearch, Bestsocre_RandomSearch, means, stds, params = Object.RandomSearchRF(
    X, y,
    estimator=RandomForestClassifier(),
    scoring=scoring,
    n_estimators=[int(x) for x in np.arange(10, 500, 20)],
    max_features=['log2', 'sqrt'],
    max_depth=[int(x) for x in np.arange(10, 100, 30)],
    min_samples_split=[2, 5, 10],
    min_samples_leaf=[1, 2, 4],
    bootstrap=[True, False],
    n_iter=100,
    cv=10,
)

#%% Test feature selection methods
# PCA
PCA_out = Object.FeatSelect_PCA(X, y, n_components=5)
# Boruta
ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth=7)
# Lasso
Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C=1)
# ANOVA
Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X, y, k=80)
# Recursive
ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k=20)

#### NOW TEST CLASSIFIERS WITH SELECTED FEATS
results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring=scoring, n_estimators=200, cv=10)

#%% Example: save a feature set
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
Object.SaveFeatureSet(X, y, path=path, filename='feat42_N3')

#%% Example: load features
X, y = Object.LoadFeatureSet(
    path='P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',
    fname='feat42_N3_fp2-M1',
    feats='featureset',
    labels='labels',
)

#%% Combining some REM and SWS epochs
Object.CombineEpochs(
    directory='P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',
    ch='fp1-M2',
    N3_fname='tr90_N3_fp1-M2_fp2-M1',
    REM_fname='tr90_fp1-M2_fp2-M1',
    saving=True,
    fname_save='tr90_N3&REM_fp1-M2',
)

#%% How to save some results?
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File(directory + fname + '.h5', 'w') as wf:
    # Accuracies
    dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data=results_SVM['test_accuracy'])
    dset = wf.create_dataset('acc_LR', results_LR['test_accuracy'].shape, data=results_LR['test_accuracy'])
    dset = wf.create_dataset('acc_RF', results_RF['test_accuracy'].shape, data=results_RF['test_accuracy'])
    dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data=results_xgb['test_accuracy'])
    # Precision
    dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data=results_SVM['test_precision'])
    dset = wf.create_dataset('prec_LR', results_LR['test_precision'].shape, data=results_LR['test_precision'])
    dset = wf.create_dataset('prec_RF', results_RF['test_precision'].shape, data=results_RF['test_precision'])
    dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data=results_xgb['test_precision'])
    # Recall
    dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data=results_SVM['test_recall'])
    dset = wf.create_dataset('rec_LR', results_LR['test_recall'].shape, data=results_LR['test_recall'])
    dset = wf.create_dataset('rec_RF', results_RF['test_recall'].shape, data=results_RF['test_recall'])
    dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data=results_xgb['test_recall'])
    # f1-score
    dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data=results_SVM['test_f1_score'])
    dset = wf.create_dataset('f1_LR', results_LR['test_f1_score'].shape, data=results_LR['test_f1_score'])
    dset = wf.create_dataset('f1_RF', results_RF['test_f1_score'].shape, data=results_RF['test_f1_score'])
    dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data=results_xgb['test_f1_score'])

#%% Extracting features from more than one channel
tic = time.time()

########### Central electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
fname_C_N3 = main_path + "tr90_N3_C3-M2_C4-M1.h5"
fname_C_REM = main_path + "tr90_REM_C3-M2_C4-M1.h5"
ch_C4 = 'C4-M1'
ch_C3 = 'C3-M2'

Object_C3_REM = ML_Depression(filename=fname_C_REM, channel=ch_C3, fs=200, T=30)
X_C3_REM, y_C3_REM = Object_C3_REM.FeatureExtraction()
Object_C3_REM.SaveFeatureSet(X=X_C3_REM, y=y_C3_REM, path=save_path, filename='feat42_C3_REM')

Object_C4_REM = ML_Depression(filename=fname_C_REM, channel=ch_C4, fs=200, T=30)
X_C4_REM, y_C4_REM = Object_C4_REM.FeatureExtraction()
Object_C4_REM.SaveFeatureSet(X=X_C4_REM, y=y_C4_REM, path=save_path, filename='feat42_C4_REM')

Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel=ch_C3, fs=200, T=30)
X_C3_N3, y_C3_N3 = Object_C3_N3.FeatureExtraction()
Object_C3_N3.SaveFeatureSet(X=X_C3_N3, y=y_C3_N3, path=save_path, filename='feat42_C3_N3')

Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel=ch_C4, fs=200, T=30)
X_C4_N3, y_C4_N3 = Object_C4_N3.FeatureExtraction()
Object_C4_N3.SaveFeatureSet(X=X_C4_N3, y=y_C4_N3, path=save_path, filename='feat42_C4_N3')

########### Occipital electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_O_N3 = main_path + "tr90_N3_O1-M2_O2-M1.h5"
fname_O_REM = main_path + "tr90_REM_O1-M2_O2-M1.h5"
ch_O2 = 'O2-M1'
ch_O1 = 'O1-M2'

Object_O1_REM = ML_Depression(filename=fname_O_REM, channel=ch_O1, fs=200, T=30)
X_O1_REM, y_O1_REM = Object_O1_REM.FeatureExtraction()
Object_O1_REM.SaveFeatureSet(X=X_O1_REM, y=y_O1_REM, path=save_path, filename='feat42_O1_REM')

Object_O2_REM = ML_Depression(filename=fname_O_REM, channel=ch_O2, fs=200, T=30)
X_O2_REM, y_O2_REM = Object_O2_REM.FeatureExtraction()
Object_O2_REM.SaveFeatureSet(X=X_O2_REM, y=y_O2_REM, path=save_path, filename='feat42_O2_REM')

Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel=ch_O1, fs=200, T=30)
X_O1_N3, y_O1_N3 = Object_O1_N3.FeatureExtraction()
Object_O1_N3.SaveFeatureSet(X=X_O1_N3, y=y_O1_N3, path=save_path, filename='feat42_O1_N3')

Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel=ch_O2, fs=200, T=30)
X_O2_N3, y_O2_N3 = Object_O2_N3.FeatureExtraction()
Object_O2_N3.SaveFeatureSet(X=X_O2_N3, y=y_O2_N3, path=save_path, filename='feat42_O2_N3')

########### Fp electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_fp_N3 = main_path + "tr90_N3_fp1-M2_fp2-M1.h5"
fname_fp_REM = main_path + "tr90_REM_fp1-M2_fp2-M1.h5"
ch_fp2 = 'fp2-M1'
ch_fp1 = 'fp1-M2'

Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel=ch_fp1, fs=200, T=30)
X_fp1_REM, y_fp1_REM = Object_fp1_REM.FeatureExtraction()
Object_fp1_REM.SaveFeatureSet(X=X_fp1_REM, y=y_fp1_REM, path=save_path, filename='feat42_fp1_REM')

Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel=ch_fp2, fs=200, T=30)
X_fp2_REM, y_fp2_REM = Object_fp2_REM.FeatureExtraction()
Object_fp2_REM.SaveFeatureSet(X=X_fp2_REM, y=y_fp2_REM, path=save_path, filename='feat42_fp2_REM')

Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel=ch_fp1, fs=200, T=30)
X_fp1_N3, y_fp1_N3 = Object_fp1_N3.FeatureExtraction()
Object_fp1_N3.SaveFeatureSet(X=X_fp1_N3, y=y_fp1_N3, path=save_path, filename='feat42_fp1_N3')

Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel=ch_fp2, fs=200, T=30)
X_fp2_N3, y_fp2_N3 = Object_fp2_N3.FeatureExtraction()
Object_fp2_N3.SaveFeatureSet(X=X_fp2_N3, y=y_fp2_N3, path=save_path, filename='feat42_fp2_N3')

toc = time.time()
print(f'time taken: {toc - tic}')

########## Concatenate all features #########
# RIGHT hemisphere - REM
X_rh_REM = np.column_stack((X_fp2_REM, X_C4_REM))
X_rh_REM = np.column_stack((X_rh_REM, X_O2_REM))
# RIGHT hemisphere - N3
X_rh_N3 = np.column_stack((X_fp2_N3, X_C4_N3))
X_rh_N3 = np.column_stack((X_rh_N3, X_O2_N3))
# LEFT hemisphere - REM
X_lh_REM = np.column_stack((X_fp1_REM, X_C3_REM))
X_lh_REM = np.column_stack((X_lh_REM, X_O1_REM))
# LEFT hemisphere - N3
X_lh_N3 = np.column_stack((X_fp1_N3, X_C3_N3))
X_lh_N3 = np.column_stack((X_lh_N3, X_O1_N3))
# Both sides - REM
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Both sides - N3
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
# Combine SWS and REM
X_SWS_REM = np.row_stack((X_N3, X_REM))
y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))

# SAVE ALL COMBINATIONS
Object = ML_Depression(filename='', channel='', fs=200, T=30)
# One hemisphere
Object.SaveFeatureSet(X=X_rh_REM, y=y_fp2_REM, path=save_path, filename='feat42_rh_REM')
Object.SaveFeatureSet(X=X_lh_REM, y=y_fp2_REM, path=save_path, filename='feat42_lh_REM')
Object.SaveFeatureSet(X=X_rh_N3, y=y_fp2_N3, path=save_path, filename='feat42_rh_N3')
Object.SaveFeatureSet(X=X_lh_N3, y=y_fp2_N3, path=save_path, filename='feat42_lh_N3')
# Both hemispheres
Object.SaveFeatureSet(X=X_N3, y=y_fp2_N3, path=save_path, filename='feat42_l&rh_N3')
Object.SaveFeatureSet(X=X_REM, y=y_fp2_N3, path=save_path, filename='feat42_l&rh_REM')
# Both hemispheres - SWS & REM combination
Object.SaveFeatureSet(X=X_SWS_REM, y=y_SWS_REM, path=save_path, filename='feat42_l&rh_N3&REM')

#%% Load features from different brain regions / sleep stages and combine them
Object = ML_Depression(filename='', channel='', fs=200, T=30)
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
feats = 'featureset'
labels = 'labels'
# Pick right hemisphere N3
fname_rh_N3 = 'feat42_rh_N3'
X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)
# Pick left hemisphere N3
fname_lh_N3 = 'feat42_lh_N3'
X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)
# Pick right hemisphere REM
fname_rh_REM = 'feat42_rh_REM'
X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)
# Pick left hemisphere REM
fname_lh_REM = 'feat42_lh_REM'
X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)
# Combine them
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Save the combination
Object.SaveFeatureSet(X=X_N3, y=y_lh_N3, path=save_path, filename='feat42_l&rh_N3')
Object.SaveFeatureSet(X=X_REM, y=y_lh_REM, path=save_path, filename='feat42_l&rh_REM')
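A minimal sketch of reading the saved cross-validation scores back, assuming the same results file name and dataset keys used in the h5py-saving cell above:

import h5py
import numpy as np

# Hypothetical read-back of the results file written above (same directory/fname assumed)
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File(directory + fname + '.h5', 'r') as rf:
    for key in ('acc_SVM', 'acc_LR', 'acc_RF', 'acc_xgb'):
        scores = rf[key][:]  # per-fold test accuracies
        print(key, np.mean(scores), np.std(scores))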
    53.209302
    127
    0.682984
    0
    0
    0
    0
    0
    0
    0
    0
    3,709
    0.270178
    b99d08420cae81be117acdda96af821aba38eea2
    6,891
    py
    Python
    igibson/examples/behavior/behavior_demo_collection.py
    suresh-guttikonda/iGibson
    a69e623058180146466cd52d4bb3c00d1facdacf
    [ "MIT" ]
    null
    null
    null
    igibson/examples/behavior/behavior_demo_collection.py
    suresh-guttikonda/iGibson
    a69e623058180146466cd52d4bb3c00d1facdacf
    [ "MIT" ]
    null
    null
    null
    igibson/examples/behavior/behavior_demo_collection.py
    suresh-guttikonda/iGibson
    a69e623058180146466cd52d4bb3c00d1facdacf
    [ "MIT" ]
    null
    null
    null
    """ Main BEHAVIOR demo collection entrypoint """ import argparse import copy import datetime import os import bddl import numpy as np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices = [ "Beechwood_0_int", "Beechwood_1_int", "Benevolence_0_int", "Benevolence_1_int", "Benevolence_2_int", "Ihlen_0_int", "Ihlen_1_int", "Merom_0_int", "Merom_1_int", "Pomaria_0_int", "Pomaria_1_int", "Pomaria_2_int", "Rs_int", "Wainscott_0_int", "Wainscott_1_int", ] task_id_choices = [0, 1] parser = argparse.ArgumentParser(description="Run and collect an ATUS demo") parser.add_argument( "--task", type=str, required=True, nargs="?", help="Name of ATUS activity matching parent folder in bddl." ) parser.add_argument( "--task_id", type=int, required=True, choices=task_id_choices, nargs="?", help="BDDL integer ID, matching suffix of bddl.", ) parser.add_argument("--vr_log_path", type=str, help="Path (and filename) of vr log") parser.add_argument( "--scene", type=str, choices=scene_choices, nargs="?", help="Scene name/ID matching iGibson interactive scenes." ) parser.add_argument("--disable_save", action="store_true", help="Whether to disable saving logfiles.") parser.add_argument( "--disable_scene_cache", action="store_true", help="Whether to disable using pre-initialized scene caches." ) parser.add_argument("--profile", action="store_true", help="Whether to print profiling data.") parser.add_argument( "--no_vr", action="store_true", help="Whether to turn off VR recording and save random actions." 
) parser.add_argument("--max_steps", type=int, default=-1, help="Maximum number of steps to record before stopping.") return parser.parse_args() def main(): args = parse_args() bddl.set_backend("iGibson") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr") hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png" ) background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg") # VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system settings mode = "headless" if no_vr else "vr" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling = True if not disable_scene_cache: scene_kwargs = { "urdf_file": "{}_task_{}_{}_0_fixed_furniture".format(scene, task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if not disable_save: timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if vr_log_path is None: vr_log_path = "{}_{}_{}_{}.hdf5".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps < 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps < 2: action = np.zeros((28,)) action[19] = 1 action[27] = 1 else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event("right_controller", "overlay_toggle"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event("left_controller", "overlay_toggle"): vr_cs.toggle_show_state() if log_writer and not disable_save: 
log_writer.process_frame() if task_done: post_task_steps -= 1 if post_task_steps == 0: break steps += 1 if log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if __name__ == "__main__": main()
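A typical invocation of the collection script above, inferred from its argparse options; the activity name is a placeholder, and whether it is launched as a module or a plain script depends on how iGibson is installed:

# headless run (no VR), hypothetical activity name and step budget
python -m igibson.examples.behavior.behavior_demo_collection --task cleaning_windows --task_id 0 --scene Rs_int --no_vr --max_steps 1000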
    31.465753
    120
    0.652881
    0
    0
    0
    0
    0
    0
    0
    0
    1,308
    0.189813
    b99e3b0ee335439a781ae231769595415a1dc6ec
    546
    py
    Python
    wagtail/wagtailadmin/menu.py
    digitalmarmalade/wagtail
    ac4d23172ff3f42746625630583b17d243fb9822
    [ "BSD-3-Clause" ]
    1
    2015-11-05T18:02:04.000Z
    2015-11-05T18:02:04.000Z
    wagtail/wagtailadmin/menu.py
    digitalmarmalade/wagtail
    ac4d23172ff3f42746625630583b17d243fb9822
    [ "BSD-3-Clause" ]
    null
    null
    null
    wagtail/wagtailadmin/menu.py
    digitalmarmalade/wagtail
    ac4d23172ff3f42746625630583b17d243fb9822
    [ "BSD-3-Clause" ]
    null
    null
    null
from django.utils.text import slugify
from django.utils.html import format_html


class MenuItem(object):
    def __init__(self, label, url, name=None, classnames='', order=1000):
        self.label = label
        self.url = url
        self.classnames = classnames
        self.name = (name or slugify(unicode(label)))
        self.order = order

    def render_html(self):
        return format_html(
            u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
            self.name, self.url, self.classnames, self.label)
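A brief usage sketch, assuming the Python 2 era Django environment this file targets (it relies on the `unicode` builtin); the label, URL and class names below are made-up values:

# Hypothetical example: build a menu entry and render it
item = MenuItem('Explorer', '/admin/explorer/', classnames='icon icon-folder', order=200)
print(item.name)           # 'explorer' (slugified from the label, since no name was given)
print(item.render_html())  # <li class="menu-explorer"><a href="/admin/explorer/" class="icon icon-folder">Explorer</a></li>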
    32.117647
    79
    0.611722
    463
    0.847985
    0
    0
    0
    0
    0
    0
    68
    0.124542
    b99ee5dfe9849188796ff8d2b024b524adedb8d2
    1,950
    py
    Python
    django_mfa/migrations/0001_initial.py
    timgates42/django-mfa
    89eeb83f7da3ea24f205b40b13c7f9d33ea15b99
    [ "MIT" ]
    null
    null
    null
    django_mfa/migrations/0001_initial.py
    timgates42/django-mfa
    89eeb83f7da3ea24f205b40b13c7f9d33ea15b99
    [ "MIT" ]
    null
    null
    null
    django_mfa/migrations/0001_initial.py
    timgates42/django-mfa
    89eeb83f7da3ea24f205b40b13c7f9d33ea15b99
    [ "MIT" ]
    null
    null
    null
# Generated by Django 2.1.5 on 2019-03-26 11:35

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='U2FKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_used_at', models.DateTimeField(null=True)),
                ('public_key', models.TextField(unique=True)),
                ('key_handle', models.TextField()),
                ('app_id', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserOTP',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
                ('secret_key', models.CharField(blank=True, max_length=100)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserRecoveryCodes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('secret_code', models.CharField(max_length=10)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
            ],
        ),
    ]
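A rough reconstruction of what the corresponding models.py declarations would look like, using only the field names and options visible in the migration above; the class layout is an assumption, and U2FKey (omitted) would follow the same pattern:

# Hypothetical models.py sketch derived from the migration fields (not the package's actual source)
from django.conf import settings
from django.db import models


class UserOTP(models.Model):
    OTP_TYPES = (('HOTP', 'hotp'), ('TOTP', 'totp'))
    otp_type = models.CharField(max_length=20, choices=OTP_TYPES)
    secret_key = models.CharField(max_length=100, blank=True)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)


class UserRecoveryCodes(models.Model):
    secret_code = models.CharField(max_length=10)
    user = models.ForeignKey(UserOTP, on_delete=models.CASCADE)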
    41.489362
    143
    0.598974
    1,791
    0.918462
    0
    0
    0
    0
    0
    0
    272
    0.139487
    b99f21827c3ba7ccbcab4806c878cdacfa139e20
    317
    py
    Python
    app/logger_example/main.py
    khanh-nguyen-code/my-collection
    31581ef0b1dae67aafb1f4e64b9973a38cc01edf
    [ "MIT" ]
    null
    null
    null
    app/logger_example/main.py
    khanh-nguyen-code/my-collection
    31581ef0b1dae67aafb1f4e64b9973a38cc01edf
    [ "MIT" ]
    null
    null
    null
    app/logger_example/main.py
    khanh-nguyen-code/my-collection
    31581ef0b1dae67aafb1f4e64b9973a38cc01edf
    [ "MIT" ]
    null
    null
    null
from my_collection import logger

if __name__ == "__main__":
    logger.now().debug("debug1")
    logger.now().debug("debug2")
    logger.now().info("hello1")
    logger.now().info("hello2")
    logger.now().with_field("key", "val").error("with field1")
    logger.now().with_field("key", "val").error("with field2")
    31.7
    62
    0.646688
    0
    0
    0
    0
    0
    0
    0
    0
    88
    0.277603
    b9a14f8cda479b51cbe9296c63d8ae7397078bc7
    760
    py
    Python
    robotframework_iperf3/__main__.py
    scathaig/robotframework-iperf3
    cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
    [ "Apache-2.0" ]
    null
    null
    null
    robotframework_iperf3/__main__.py
    scathaig/robotframework-iperf3
    cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
    [ "Apache-2.0" ]
    null
    null
    null
    robotframework_iperf3/__main__.py
    scathaig/robotframework-iperf3
    cfeeb3e265777403d7eb06fcfa6d69650f2a5e67
    [ "Apache-2.0" ]
    null
    null
    null
import argparse

from robotremoteserver import RobotRemoteServer

from .iperf3 import Iperf3

if __name__ == '__main__':
    # create commandline parser
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.prog = 'python3 -m robotframework_iperf3'

    # add parser options
    parser.add_argument(
        "-a", "--address",
        type=str,
        help="server listen address",
        default='0.0.0.0')

    parser.add_argument(
        "-p", "--port",
        type=int,
        help="server listen port",
        default=8270)

    args = parser.parse_args()

    server = RobotRemoteServer(
        Iperf3(),
        host=args.address,
        port=args.port
    )

    server.serve()
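How the server above would typically be started, using the defaults wired into the parser; the Robot Framework side is shown only as an assumption about the intended use of a robotremoteserver-based keyword server:

# start the remote keyword server (defaults: 0.0.0.0:8270)
#   python3 -m robotframework_iperf3 --address 0.0.0.0 --port 8270
# then, in a Robot Framework suite, it would be imported as a remote library:
#   Library    Remote    http://127.0.0.1:8270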
    21.111111
    92
    0.619737
    0
    0
    0
    0
    0
    0
    0
    0
    170
    0.223684
    b9a1ae11b40a499e6f6854e1a273c2ff226ef650
    692
    py
    Python
    h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
    ahmedengu/h2o-3
    ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
    [ "Apache-2.0" ]
    6,098
    2015-05-22T02:46:12.000Z
    2022-03-31T16:54:51.000Z
    h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
    ahmedengu/h2o-3
    ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
    [ "Apache-2.0" ]
    2,517
    2015-05-23T02:10:54.000Z
    2022-03-30T17:03:39.000Z
    h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
    ahmedengu/h2o-3
    ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
    [ "Apache-2.0" ]
    2,199
    2015-05-22T04:09:55.000Z
    2022-03-28T22:20:45.000Z
df8.cbind(df9)

#     A       B       C       D      A0      B0      C0     D0
#  -----  ------  ------  ------  ------  -----  -----  -----
#  -0.09   0.944   0.160   0.271  -0.351   1.66  -2.32  -0.86
#  -0.95   0.669   0.664   1.535  -0.633  -1.78   0.32   1.27
#   0.17   0.657   0.970  -0.419  -1.413  -0.51   0.64  -1.25
#   0.58  -0.516  -1.598  -1.346   0.711   1.09   0.05   0.63
#   1.04  -0.281  -0.411   0.959  -0.009  -0.47   0.41  -0.52
#   0.49   0.170   0.124  -0.170  -0.722  -0.79  -0.91  -2.09
#   1.42  -0.409  -0.525   2.155  -0.841  -0.19   0.13   0.63
#   0.94   1.192  -1.075   0.017   0.167   0.54   0.52   1.42
#  -0.53   0.777  -1.090  -2.237  -0.693   0.24  -0.56   1.45
#   0.34  -0.456  -1.220  -0.456  -0.315   1.10   1.38  -0.05
#
# [100 rows x 8 columns]
    43.25
    54
    0.460983
    0
    0
    0
    0
    0
    0
    0
    0
    663
    0.958092
    b9a1dbb5125acea57356714e95e66c8e3a612e30
    1,101
    py
    Python
    FluentPython/dynamic_attr_and_prop/frozen_json.py
    xu6148152/Binea_Python_Project
    d943eb5f4685d08f080b372dcf1a7cbd5d63efed
    [ "MIT" ]
    null
    null
    null
    FluentPython/dynamic_attr_and_prop/frozen_json.py
    xu6148152/Binea_Python_Project
    d943eb5f4685d08f080b372dcf1a7cbd5d63efed
    [ "MIT" ]
    null
    null
    null
    FluentPython/dynamic_attr_and_prop/frozen_json.py
    xu6148152/Binea_Python_Project
    d943eb5f4685d08f080b372dcf1a7cbd5d63efed
    [ "MIT" ]
    null
    null
    null
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from collections import abc
from keyword import iskeyword


class FronzenJSON:
    def __init__(self, mapping):
        self._data = {}
        for key, value in mapping.items():
            if iskeyword(key):
                key += '_'
            # self._data = dict(mapping)
            self._data[key] = value

    def __getattr__(self, name):
        if hasattr(self._data, name):
            return getattr(self._data, name)
        else:
            # return FronzenJSON.build(self._data[name])
            return FronzenJSON(self._data[name])

    @classmethod
    def build(cls, obj):
        if isinstance(obj, abc.Mapping):
            return cls(obj)
        elif isinstance(obj, abc.MutableSequence):  # lists are converted element by element
            return [cls.build(item) for item in obj]
        else:
            return obj

    def __new__(cls, arg):
        if isinstance(arg, abc.Mapping):
            return super().__new__(cls)
        elif isinstance(arg, abc.MutableSequence):
            return [cls(item) for item in arg]
        else:
            return arg
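A small usage sketch, assuming the class above is importable as written; the sample payload is made up, and it shows nested attribute access plus the keyword mangling of 'class' into 'class_':

# Hypothetical usage of FronzenJSON with a nested dict
raw = {
    "name": "PyCon",
    "class": "conference",    # Python keyword -> accessible as .class_
    "speakers": [{"name": "Ada"}, {"name": "Grace"}],
}

feed = FronzenJSON(raw)
print(feed.name)              # 'PyCon'
print(feed.class_)            # 'conference'
print(feed.speakers[0].name)  # 'Ada'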
    27.525
    56
    0.561308
    990
    0.899183
    0
    0
    246
    0.223433
    0
    0
    122
    0.110808
    b9a20089dfb3f5c8a3472d1f3be189af236d4d44
    4,062
    py
    Python
    pomdp_problems/tag/models/transition_model.py
    Semanti1/pomdp_findit
    b96c1c06aab4b485fa005654cf6438ff63718083
    [ "MIT" ]
    null
    null
    null
    pomdp_problems/tag/models/transition_model.py
    Semanti1/pomdp_findit
    b96c1c06aab4b485fa005654cf6438ff63718083
    [ "MIT" ]
    null
    null
    null
    pomdp_problems/tag/models/transition_model.py
    Semanti1/pomdp_findit
    b96c1c06aab4b485fa005654cf6438ff63718083
    [ "MIT" ]
    null
    null
    null
    """The Tag problem. Implemented according to the paper `Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically. The target's movement depends on the robot; With Pr=0.8 the target moves away from the robot, and with Pr=0.2, the target stays at the same place. The target never moves closer to the robot. """ import copy import pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction): dx, dy = action.motion next_position = (position[0] + dx, position[1] + dy) if grid_map.valid_pose(next_position): return next_position return position def probability(self, next_state, state, action, **kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON else: return constants.EPSILON else: if next_state.target_found: return constants.EPSILON else: return 1.0 - constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False): # Robot motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action if isinstance(action, TagAction): if not state.target_found: if state.robot_position == state.target_position: next_state.target_found = True return next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self, state, action, **kwargs): return self.sample(state, action, argmax=True)
    45.640449
    103
    0.537912
    3,466
    0.853274
    0
    0
    359
    0.08838
    0
    0
    494
    0.121615
    b9a21ff5a8c4fcb07930580d031f6847ecfaed43
    4,731
    py
    Python
    packit/fedpkg.py
    bocekm/packit
    b5da23c0fa3f205537551b9ed212d8f77d00d705
    [ "MIT" ]
    null
    null
    null
    packit/fedpkg.py
    bocekm/packit
    b5da23c0fa3f205537551b9ed212d8f77d00d705
    [ "MIT" ]
    null
    null
    null
    packit/fedpkg.py
    bocekm/packit
    b5da23c0fa3f205537551b9ed212d8f77d00d705
    [ "MIT" ]
    null
    null
    null
    # MIT License # # Copyright (c) 2019 Red Hat, Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from pathlib import Path from typing import Optional from packit.exceptions import PackitCommandFailedError from packit.utils import commands # so we can mock utils from packit.utils.logging import logger class FedPKG: """ Part of the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py """ def __init__( self, fas_username: str = None, directory: str = None, stage: bool = False ): self.fas_username = fas_username self.directory = directory self.stage = stage self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg" def __repr__(self): return ( "FedPKG(" f"fas_username='{self.fas_username}', " f"directory='{self.directory}', " f"stage='{self.stage}')" ) def new_sources(self, sources="", fail=True): if not Path(self.directory).is_dir(): raise Exception("Cannot access fedpkg repository:") return commands.run_command_remote( cmd=[self.fedpkg_exec, "new-sources", sources], cwd=self.directory, error_message="Adding new sources failed:", fail=fail, ) def build( self, scratch: bool = False, nowait: bool = False, koji_target: Optional[str] = None, srpm_path: Optional[Path] = None, ): """ build in koji :param scratch: scratch (temporary) build or not? :param nowait: False == wait for the build to finish :param koji_target: koji target to build in (`koji list-targets`) :param srpm_path: use selected SRPM for build, not dist-git repo & ref :return: """ cmd = [self.fedpkg_exec, "build"] if scratch: cmd.append("--scratch") if nowait: cmd.append("--nowait") if koji_target: cmd += ["--target", koji_target] if srpm_path: cmd += ["--srpm", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message="Submission of build to koji failed.", fail=True, ) except PackitCommandFailedError as ex: # fail on the fedpkg side, the build is triggered if ( "watch_tasks() got an unexpected keyword argument 'ki_handler'" in ex.stderr_output ): logger.info( "The 'fedpkg build' command crashed which is a known issue: " "the build is submitted in koji anyway." 
) logger.debug(ex.stdout_output) else: raise def clone(self, package_name: str, target_path: str, anonymous: bool = False): """ clone a dist-git repo; this has to be done in current env b/c we don't have the keytab in sandbox """ cmd = [self.fedpkg_exec] if self.fas_username: cmd += ["--user", self.fas_username] cmd += ["-q", "clone"] if anonymous: cmd += ["-a"] cmd += [package_name, target_path] error_msg = ( f"Packit failed to clone the repository {package_name}; " "please make sure that you are authorized to clone repositories " "from Fedora dist-git - this may require SSH keys set up or " "Kerberos ticket being active." ) commands.run_command(cmd=cmd, error_message=error_msg)
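A brief usage sketch of the FedPKG class above, exercising only the method signatures visible in the file; the package name, paths and tarball are placeholders:

# Hypothetical usage of FedPKG (placeholders throughout)
fedpkg = FedPKG(fas_username="someuser", directory="/tmp/python-foo", stage=False)
fedpkg.clone("python-foo", "/tmp/python-foo", anonymous=True)   # clone the dist-git repo
fedpkg.new_sources(sources="foo-1.0.tar.gz")                    # upload a new source tarball
fedpkg.build(scratch=True, nowait=True)                         # submit a scratch build to koji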
    35.044444
    82
    0.609808
    3,412
    0.721201
    0
    0
    0
    0
    0
    0
    2,434
    0.514479
    b9a3c97262cf3c50a695832e8941374463a78067
    901
    py
    Python
    tests/test_MaskedArrayCollection.py
    ahaldane/NDducktype_tests
    4876416e5fbff7ba0d85445c0eeae432d6e80014
    [ "BSD-3-Clause" ]
    3
    2020-06-18T14:18:39.000Z
    2021-07-22T18:05:52.000Z
    tests/test_MaskedArrayCollection.py
    ahaldane/NDducktype_tests
    4876416e5fbff7ba0d85445c0eeae432d6e80014
    [ "BSD-3-Clause" ]
    2
    2020-07-19T15:44:09.000Z
    2020-07-28T23:22:21.000Z
    tests/test_MaskedArrayCollection.py
    ahaldane/NDducktype_tests
    4876416e5fbff7ba0d85445c0eeae432d6e80014
    [ "BSD-3-Clause" ]
    2
    2019-06-20T00:20:13.000Z
    2020-09-20T21:42:52.000Z
#!/usr/bin/env python
from ndarray_ducktypes.ArrayCollection import ArrayCollection
from ndarray_ducktypes.MaskedArray import MaskedArray
from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection
import numpy as np

# Tests for Masked ArrayCollections.
#
# First try: simply make an ArrayCollection of MaskedArrays. Downside: this
# strategy does not give a "filled" method. Probably to get a masked
# ArrayCollection we should really subclass ArrayCollection to have a
# fill_value and a filled() method.

#a = MaskedArray(np.arange(10), np.arange(10)%3)
#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)
#c = ArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))

## second try: subclass of ArrayCollection
#c = MaskedArrayCollection([('age', a), ('weight', b)])
#print(repr(c))
#c['age'] += 100
#print(repr(c))
#print(repr(c.filled()))
    31.068966
    75
    0.738069
    0
    0
    0
    0
    0
    0
    0
    0
    668
    0.741398
    b9a4cbf5401cd86949e3f94c13bc464c4725fcee
    192,704
    py
    Python
    rpc/gen/core_pb2.py
    jasonjoo2010/core
    7c05ddbdac2e05a3d96db28f8bdfacf661907b82
    [ "MIT" ]
    null
    null
    null
    rpc/gen/core_pb2.py
    jasonjoo2010/core
    7c05ddbdac2e05a3d96db28f8bdfacf661907b82
    [ "MIT" ]
    null
    null
    null
    rpc/gen/core_pb2.py
    jasonjoo2010/core
    7c05ddbdac2e05a3d96db28f8bdfacf661907b82
    [ "MIT" ]
    null
    null
    null
    # Generated by the protocol buffer compiler. DO NOT EDIT! # source: core.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='core.proto', package='pb', syntax='proto3', serialized_pb=_b('\n\ncore.proto\x12\x02pb\"\x07\n\x05\x45mpty\"\xb4\x01\n\x15ListContainersOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x35\n\x06labels\x18\x04 \x03(\x0b\x32%.pb.ListContainersOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"L\n\x13\x44\x65ployStatusOptions\x12\x0f\n\x07\x61ppname\x18\x01 \x01(\t\x12\x12\n\nentrypoint\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\"v\n\x13\x44\x65ployStatusMessage\x12\x0e\n\x06\x61\x63tion\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\n\n\x02id\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"0\n\x03Pod\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x03 \x01(\t\"\x1d\n\x04Pods\x12\x15\n\x04pods\x18\x01 \x03(\x0b\x32\x07.pb.Pod\"\xfc\x02\n\x0bPodResource\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x03\x63pu\x18\x02 \x03(\x0b\x32\x18.pb.PodResource.CpuEntry\x12+\n\x06memory\x18\x03 \x03(\x0b\x32\x1b.pb.PodResource.MemoryEntry\x12\'\n\x04\x64iff\x18\x04 \x03(\x0b\x32\x19.pb.PodResource.DiffEntry\x12+\n\x06\x64\x65tail\x18\x05 \x03(\x0b\x32\x1b.pb.PodResource.DetailEntry\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a-\n\x0bMemoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a+\n\tDiffEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x1a-\n\x0b\x44\x65tailEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"5\n\x12ListNetworkOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0e\n\x06\x64river\x18\x02 \x01(\t\"(\n\x07Network\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07subnets\x18\x02 \x03(\t\")\n\x08Networks\x12\x1d\n\x08networks\x18\x01 \x03(\x0b\x32\x0b.pb.Network\"\x9e\x03\n\x04Node\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x1e\n\x03\x63pu\x18\x04 \x03(\x0b\x32\x11.pb.Node.CpuEntry\x12\x10\n\x08\x63pu_used\x18\x05 \x01(\x01\x12\x0e\n\x06memory\x18\x06 \x01(\x03\x12\x13\n\x0bmemory_used\x18\x07 \x01(\x03\x12\x11\n\tavailable\x18\x08 \x01(\x08\x12$\n\x06labels\x18\t \x03(\x0b\x32\x14.pb.Node.LabelsEntry\x12\x13\n\x0binit_memory\x18\n \x01(\x03\x12\'\n\x08init_cpu\x18\x0b \x03(\x0b\x32\x15.pb.Node.InitCpuEntry\x12\x0c\n\x04info\x18\x0c \x01(\t\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cInitCpuEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\" \n\x05Nodes\x12\x17\n\x05nodes\x18\x01 
\x03(\x0b\x32\x08.pb.Node\"E\n\rNodeAvailable\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x11\n\tavailable\x18\x03 \x01(\x08\"\xb8\x03\n\tContainer\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12#\n\x03\x63pu\x18\x05 \x03(\x0b\x32\x16.pb.Container.CpuEntry\x12\r\n\x05quota\x18\x06 \x01(\x01\x12\x0e\n\x06memory\x18\x07 \x01(\x03\x12\x12\n\nprivileged\x18\x08 \x01(\x08\x12)\n\x06labels\x18\t \x03(\x0b\x32\x19.pb.Container.LabelsEntry\x12+\n\x07publish\x18\n \x03(\x0b\x32\x1a.pb.Container.PublishEntry\x12\r\n\x05image\x18\x0b \x01(\t\x12\x0f\n\x07inspect\x18\x0c \x01(\x0c\x12\x13\n\x0bstatus_data\x18\r \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"k\n\x18\x43ontainerDeployedOptions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppname\x18\x02 \x01(\t\x12\x12\n\nentrypoint\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\"/\n\nContainers\x12!\n\ncontainers\x18\x01 \x03(\x0b\x32\r.pb.Container\"\x19\n\x0b\x43ontainerID\x12\n\n\x02id\x18\x01 \x01(\t\"\x1b\n\x0c\x43ontainerIDs\x12\x0b\n\x03ids\x18\x01 \x03(\t\"4\n\x16RemoveContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\"7\n\x0eReallocOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0b\n\x03\x63pu\x18\x02 \x01(\x01\x12\x0b\n\x03mem\x18\x03 \x01(\x03\":\n\rAddPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x66\x61vor\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x03 \x01(\t\" \n\x10RemovePodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\rGetPodOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xf7\x01\n\x0e\x41\x64\x64NodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x02 \x01(\t\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\n\n\x02\x63\x61\x18\x04 \x01(\t\x12\x0c\n\x04\x63\x65rt\x18\x05 \x01(\t\x12\x0b\n\x03key\x18\x06 \x01(\t\x12\x0b\n\x03\x63pu\x18\x07 \x01(\x05\x12\r\n\x05share\x18\x08 \x01(\x05\x12\x0e\n\x06memory\x18\t \x01(\x03\x12.\n\x06labels\x18\n \x03(\x0b\x32\x1e.pb.AddNodeOptions.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"6\n\x11RemoveNodeOptions\x12\x10\n\x08nodename\x18\x01 \x01(\t\x12\x0f\n\x07podname\x18\x02 \x01(\t\"3\n\x0eGetNodeOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\"0\n\x10ListNodesOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x0b\n\x03\x61ll\x18\x02 \x01(\x08\"\x8e\x04\n\x05\x42uild\x12\x0c\n\x04\x62\x61se\x18\x01 \x01(\t\x12\x0c\n\x04repo\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x11\n\tsubmodule\x18\x05 \x01(\x08\x12\x10\n\x08\x63ommands\x18\x06 \x03(\t\x12!\n\x04\x65nvs\x18\x07 \x03(\x0b\x32\x13.pb.Build.EnvsEntry\x12!\n\x04\x61rgs\x18\x08 \x03(\x0b\x32\x13.pb.Build.ArgsEntry\x12%\n\x06labels\x18\t \x03(\x0b\x32\x15.pb.Build.LabelsEntry\x12+\n\tartifacts\x18\n \x03(\x0b\x32\x18.pb.Build.ArtifactsEntry\x12#\n\x05\x63\x61\x63he\x18\x0b \x03(\x0b\x32\x14.pb.Build.CacheEntry\x1a+\n\tEnvsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x30\n\x0e\x41rtifactsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a,\n\nCacheEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"z\n\x06\x42uilds\x12\x0e\n\x06stages\x18\x01 \x03(\t\x12&\n\x06\x62uilds\x18\x02 \x03(\x0b\x32\x16.pb.Builds.BuildsEntry\x1a\x38\n\x0b\x42uildsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.pb.Build:\x02\x38\x01\"s\n\x11\x42uildImageOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x0b\n\x03uid\x18\x03 \x01(\x05\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12\x1a\n\x06\x62uilds\x18\x05 \x01(\x0b\x32\n.pb.Builds\x12\x0b\n\x03tar\x18\x06 \x01(\x0c\"F\n\x0bHookOptions\x12\x13\n\x0b\x61\x66ter_start\x18\x01 \x03(\t\x12\x13\n\x0b\x62\x65\x66ore_stop\x18\x02 \x03(\t\x12\r\n\x05\x66orce\x18\x03 \x01(\x08\"U\n\x12HealthCheckOptions\x12\x11\n\ttcp_ports\x18\x01 \x03(\t\x12\x11\n\thttp_port\x18\x02 \x01(\t\x12\x0b\n\x03url\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\x05\"u\n\nLogOptions\x12\x0c\n\x04type\x18\x01 \x01(\t\x12*\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x1a.pb.LogOptions.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xca\x02\n\x11\x45ntrypointOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x12\n\nprivileged\x18\x03 \x01(\x08\x12\x0b\n\x03\x64ir\x18\x04 \x01(\t\x12\x1b\n\x03log\x18\x05 \x01(\x0b\x32\x0e.pb.LogOptions\x12\x0f\n\x07publish\x18\x06 \x03(\t\x12+\n\x0bhealthcheck\x18\x07 \x01(\x0b\x32\x16.pb.HealthCheckOptions\x12\x1d\n\x04hook\x18\x08 \x01(\x0b\x32\x0f.pb.HookOptions\x12\x16\n\x0erestart_policy\x18\t \x01(\t\x12\x33\n\x07sysctls\x18\n \x03(\x0b\x32\".pb.EntrypointOptions.SysctlsEntry\x1a.\n\x0cSysctlsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x06\n\rDeployOptions\x12\x0c\n\x04name\x18\x01 \x01(\t\x12)\n\nentrypoint\x18\x02 \x01(\x0b\x32\x15.pb.EntrypointOptions\x12\x0f\n\x07podname\x18\x03 \x01(\t\x12\x10\n\x08nodename\x18\x04 \x01(\t\x12\r\n\x05image\x18\x05 \x01(\t\x12\x12\n\nextra_args\x18\x06 \x01(\t\x12\x11\n\tcpu_quota\x18\x07 \x01(\x01\x12\x0e\n\x06memory\x18\x08 \x01(\x03\x12\r\n\x05\x63ount\x18\t \x01(\x05\x12\x0b\n\x03\x65nv\x18\n \x03(\t\x12\x0b\n\x03\x64ns\x18\x0b \x03(\t\x12\x13\n\x0b\x65xtra_hosts\x18\x0c \x03(\t\x12\x0f\n\x07volumes\x18\r \x03(\t\x12\x31\n\x08networks\x18\x0e \x03(\x0b\x32\x1f.pb.DeployOptions.NetworksEntry\x12\x13\n\x0bnetworkmode\x18\x0f \x01(\t\x12\x0c\n\x04user\x18\x10 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x11 \x01(\x08\x12\x11\n\topenStdin\x18\x12 \x01(\x08\x12-\n\x06labels\x18\x13 \x03(\x0b\x32\x1d.pb.DeployOptions.LabelsEntry\x12\x35\n\nnodelabels\x18\x14 \x03(\x0b\x32!.pb.DeployOptions.NodelabelsEntry\x12\x15\n\rdeploy_method\x18\x15 \x01(\t\x12)\n\x04\x64\x61ta\x18\x16 \x03(\x0b\x32\x1b.pb.DeployOptions.DataEntry\x12\x11\n\tsoftlimit\x18\x17 \x01(\x08\x12\x13\n\x0bnodes_limit\x18\x18 \x01(\x05\x1a/\n\rNetworksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fNodelabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x0c:\x02\x38\x01\"\xb5\x02\n\x0eReplaceOptions\x12$\n\tdeployOpt\x18\x01 \x01(\x0b\x32\x11.pb.DeployOptions\x12\r\n\x05\x66orce\x18\x02 \x01(\x08\x12;\n\rfilter_labels\x18\x03 \x03(\x0b\x32$.pb.ReplaceOptions.FilterLabelsEntry\x12*\n\x04\x63opy\x18\x04 \x03(\x0b\x32\x1c.pb.ReplaceOptions.CopyEntry\x12\x0b\n\x03ids\x18\x05 \x03(\t\x12\x16\n\x0enetworkinherit\x18\x06 \x01(\x08\x1a\x33\n\x11\x46ilterLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a+\n\tCopyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"T\n\x11\x43\x61\x63heImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\"d\n\x12RemoveImageOptions\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\x0e\n\x06images\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\x12\r\n\x05prune\x18\x05 \x01(\x08\"\x1a\n\tCopyPaths\x12\r\n\x05paths\x18\x01 \x03(\t\"{\n\x0b\x43opyOptions\x12-\n\x07targets\x18\x01 \x03(\x0b\x32\x1c.pb.CopyOptions.TargetsEntry\x1a=\n\x0cTargetsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.pb.CopyPaths:\x02\x38\x01\",\n\x0b\x45rrorDetail\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x87\x01\n\x11\x42uildImageMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x10\n\x08progress\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\x12\x0e\n\x06stream\x18\x05 \x01(\t\x12%\n\x0c\x65rror_detail\x18\x06 \x01(\x0b\x32\x0f.pb.ErrorDetail\"\xea\x02\n\x16\x43reateContainerMessage\x12\x0f\n\x07podname\x18\x01 \x01(\t\x12\x10\n\x08nodename\x18\x02 \x01(\t\x12\n\n\x02id\x18\x03 \x01(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0f\n\x07success\x18\x06 \x01(\x08\x12\x30\n\x03\x63pu\x18\x07 \x03(\x0b\x32#.pb.CreateContainerMessage.CpuEntry\x12\r\n\x05quota\x18\x08 \x01(\x01\x12\x0e\n\x06memory\x18\t \x01(\x03\x12\x38\n\x07publish\x18\n \x03(\x0b\x32\'.pb.CreateContainerMessage.PublishEntry\x12\x0c\n\x04hook\x18\x0b \x01(\x0c\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a.\n\x0cPublishEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x80\x01\n\x17ReplaceContainerMessage\x12*\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x1a.pb.CreateContainerMessage\x12*\n\x06remove\x18\x02 \x01(\x0b\x32\x1a.pb.RemoveContainerMessage\x12\r\n\x05\x65rror\x18\x03 \x01(\t\"7\n\x11RunAndWaitMessage\x12\x14\n\x0c\x63ontainer_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"V\n\x11\x43\x61\x63heImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08nodename\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"F\n\x12RemoveImageMessage\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x10\n\x08messages\x18\x03 \x03(\t\"C\n\x16RemoveContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0c\n\x04hook\x18\x03 \x01(\t\"5\n\x16ReallocResourceMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\"b\n\x0b\x43opyMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04path\x18\x04 \x01(\t\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x06 \x01(\x0c\"J\n\x11RunAndWaitOptions\x12(\n\rDeployOptions\x18\x01 
\x01(\x0b\x32\x11.pb.DeployOptions\x12\x0b\n\x03\x43md\x18\x02 \x01(\x0c\"4\n\x17\x43ontrolContainerOptions\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"B\n\x17\x43ontrolContainerMessage\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0c\n\x04hook\x18\x03 \x01(\x0c\x32\xcb\x0c\n\x07\x43oreRPC\x12!\n\x08ListPods\x12\t.pb.Empty\x1a\x08.pb.Pods\"\x00\x12&\n\x06\x41\x64\x64Pod\x12\x11.pb.AddPodOptions\x1a\x07.pb.Pod\"\x00\x12.\n\tRemovePod\x12\x14.pb.RemovePodOptions\x1a\t.pb.Empty\"\x00\x12&\n\x06GetPod\x12\x11.pb.GetPodOptions\x1a\x07.pb.Pod\"\x00\x12\x36\n\x0eGetPodResource\x12\x11.pb.GetPodOptions\x1a\x0f.pb.PodResource\"\x00\x12)\n\x07\x41\x64\x64Node\x12\x12.pb.AddNodeOptions\x1a\x08.pb.Node\"\x00\x12.\n\nRemoveNode\x12\x15.pb.RemoveNodeOptions\x1a\x07.pb.Pod\"\x00\x12\x31\n\x10SetNodeAvailable\x12\x11.pb.NodeAvailable\x1a\x08.pb.Node\"\x00\x12)\n\x07GetNode\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x30\n\x0cGetContainer\x12\x0f.pb.ContainerID\x1a\r.pb.Container\"\x00\x12\x33\n\rGetContainers\x12\x10.pb.ContainerIDs\x1a\x0e.pb.Containers\"\x00\x12/\n\rGetNodeByName\x12\x12.pb.GetNodeOptions\x1a\x08.pb.Node\"\x00\x12\x31\n\x0cListPodNodes\x12\x14.pb.ListNodesOptions\x1a\t.pb.Nodes\"\x00\x12\x36\n\x0cListNetworks\x12\x16.pb.ListNetworkOptions\x1a\x0c.pb.Networks\"\x00\x12=\n\x0eListContainers\x12\x19.pb.ListContainersOptions\x1a\x0e.pb.Containers\"\x00\x12:\n\x12ListNodeContainers\x12\x12.pb.GetNodeOptions\x1a\x0e.pb.Containers\"\x00\x12>\n\x11\x43ontainerDeployed\x12\x1c.pb.ContainerDeployedOptions\x1a\t.pb.Empty\"\x00\x12,\n\x04\x43opy\x12\x0f.pb.CopyOptions\x1a\x0f.pb.CopyMessage\"\x00\x30\x01\x12>\n\nBuildImage\x12\x15.pb.BuildImageOptions\x1a\x15.pb.BuildImageMessage\"\x00\x30\x01\x12>\n\nCacheImage\x12\x15.pb.CacheImageOptions\x1a\x15.pb.CacheImageMessage\"\x00\x30\x01\x12\x41\n\x0bRemoveImage\x12\x16.pb.RemoveImageOptions\x1a\x16.pb.RemoveImageMessage\"\x00\x30\x01\x12\x44\n\x0c\x44\x65ployStatus\x12\x17.pb.DeployStatusOptions\x1a\x17.pb.DeployStatusMessage\"\x00\x30\x01\x12@\n\nRunAndWait\x12\x15.pb.RunAndWaitOptions\x1a\x15.pb.RunAndWaitMessage\"\x00(\x01\x30\x01\x12\x44\n\x0f\x43reateContainer\x12\x11.pb.DeployOptions\x1a\x1a.pb.CreateContainerMessage\"\x00\x30\x01\x12G\n\x10ReplaceContainer\x12\x12.pb.ReplaceOptions\x1a\x1b.pb.ReplaceContainerMessage\"\x00\x30\x01\x12M\n\x0fRemoveContainer\x12\x1a.pb.RemoveContainerOptions\x1a\x1a.pb.RemoveContainerMessage\"\x00\x30\x01\x12P\n\x10\x43ontrolContainer\x12\x1b.pb.ControlContainerOptions\x1a\x1b.pb.ControlContainerMessage\"\x00\x30\x01\x12\x45\n\x0fReallocResource\x12\x12.pb.ReallocOptions\x1a\x1a.pb.ReallocResourceMessage\"\x00\x30\x01\x62\x06proto3') ) _EMPTY = _descriptor.Descriptor( name='Empty', full_name='pb.Empty', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=18, serialized_end=25, ) _LISTCONTAINERSOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.ListContainersOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.ListContainersOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ListContainersOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _LISTCONTAINERSOPTIONS = _descriptor.Descriptor( name='ListContainersOptions', full_name='pb.ListContainersOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='appname', full_name='pb.ListContainersOptions.appname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.ListContainersOptions.entrypoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.ListContainersOptions.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.ListContainersOptions.labels', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_LISTCONTAINERSOPTIONS_LABELSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=28, serialized_end=208, ) _DEPLOYSTATUSOPTIONS = _descriptor.Descriptor( name='DeployStatusOptions', full_name='pb.DeployStatusOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='appname', full_name='pb.DeployStatusOptions.appname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployStatusOptions.entrypoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.DeployStatusOptions.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=210, serialized_end=286, ) _DEPLOYSTATUSMESSAGE = _descriptor.Descriptor( name='DeployStatusMessage', full_name='pb.DeployStatusMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='action', full_name='pb.DeployStatusMessage.action', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='appname', full_name='pb.DeployStatusMessage.appname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployStatusMessage.entrypoint', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.DeployStatusMessage.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='id', full_name='pb.DeployStatusMessage.id', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.DeployStatusMessage.data', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=288, serialized_end=406, ) _POD = _descriptor.Descriptor( name='Pod', full_name='pb.Pod', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Pod.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='desc', full_name='pb.Pod.desc', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='favor', full_name='pb.Pod.favor', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=408, serialized_end=456, ) _PODS = _descriptor.Descriptor( name='Pods', full_name='pb.Pods', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pods', full_name='pb.Pods.pods', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=458, serialized_end=487, ) _PODRESOURCE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.PodResource.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.CpuEntry.value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=689, serialized_end=731, ) _PODRESOURCE_MEMORYENTRY = _descriptor.Descriptor( name='MemoryEntry', full_name='pb.PodResource.MemoryEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.MemoryEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.MemoryEntry.value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=733, serialized_end=778, ) _PODRESOURCE_DIFFENTRY = _descriptor.Descriptor( name='DiffEntry', full_name='pb.PodResource.DiffEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.DiffEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.DiffEntry.value', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=780, serialized_end=823, ) _PODRESOURCE_DETAILENTRY = _descriptor.Descriptor( name='DetailEntry', full_name='pb.PodResource.DetailEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.PodResource.DetailEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.PodResource.DetailEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=825, serialized_end=870, ) _PODRESOURCE = _descriptor.Descriptor( name='PodResource', full_name='pb.PodResource', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.PodResource.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.PodResource.cpu', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.PodResource.memory', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='diff', full_name='pb.PodResource.diff', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='detail', full_name='pb.PodResource.detail', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_PODRESOURCE_CPUENTRY, _PODRESOURCE_MEMORYENTRY, _PODRESOURCE_DIFFENTRY, _PODRESOURCE_DETAILENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=490, serialized_end=870, ) _LISTNETWORKOPTIONS = _descriptor.Descriptor( name='ListNetworkOptions', full_name='pb.ListNetworkOptions', filename=None, file=DESCRIPTOR, containing_type=None, 
fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.ListNetworkOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='driver', full_name='pb.ListNetworkOptions.driver', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=872, serialized_end=925, ) _NETWORK = _descriptor.Descriptor( name='Network', full_name='pb.Network', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Network.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='subnets', full_name='pb.Network.subnets', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=927, serialized_end=967, ) _NETWORKS = _descriptor.Descriptor( name='Networks', full_name='pb.Networks', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='networks', full_name='pb.Networks.networks', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=969, serialized_end=1010, ) _NODE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.Node.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _NODE_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Node.LabelsEntry', filename=None, file=DESCRIPTOR, 
containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _NODE_INITCPUENTRY = _descriptor.Descriptor( name='InitCpuEntry', full_name='pb.Node.InitCpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Node.InitCpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Node.InitCpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1381, serialized_end=1427, ) _NODE = _descriptor.Descriptor( name='Node', full_name='pb.Node', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.Node.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='endpoint', full_name='pb.Node.endpoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.Node.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.Node.cpu', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu_used', full_name='pb.Node.cpu_used', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.Node.memory', index=5, number=6, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory_used', full_name='pb.Node.memory_used', index=6, number=7, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='available', full_name='pb.Node.available', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Node.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='init_memory', full_name='pb.Node.init_memory', index=9, number=10, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='init_cpu', full_name='pb.Node.init_cpu', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='info', full_name='pb.Node.info', index=11, number=12, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_NODE_CPUENTRY, _NODE_LABELSENTRY, _NODE_INITCPUENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1013, serialized_end=1427, ) _NODES = _descriptor.Descriptor( name='Nodes', full_name='pb.Nodes', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodes', full_name='pb.Nodes.nodes', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1429, serialized_end=1461, ) _NODEAVAILABLE = _descriptor.Descriptor( name='NodeAvailable', full_name='pb.NodeAvailable', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.NodeAvailable.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='podname', full_name='pb.NodeAvailable.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='available', full_name='pb.NodeAvailable.available', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1463, serialized_end=1532, ) _CONTAINER_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.Container.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _CONTAINER_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Container.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _CONTAINER_PUBLISHENTRY = _descriptor.Descriptor( name='PublishEntry', full_name='pb.Container.PublishEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Container.PublishEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Container.PublishEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1929, serialized_end=1975, ) _CONTAINER = _descriptor.Descriptor( name='Container', full_name='pb.Container', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.Container.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.Container.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.Container.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.Container.name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.Container.cpu', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='quota', full_name='pb.Container.quota', index=5, number=6, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.Container.memory', index=6, number=7, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='privileged', full_name='pb.Container.privileged', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Container.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.Container.publish', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='image', full_name='pb.Container.image', index=10, number=11, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='inspect', full_name='pb.Container.inspect', index=11, number=12, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status_data', full_name='pb.Container.status_data', index=12, number=13, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_CONTAINER_CPUENTRY, _CONTAINER_LABELSENTRY, _CONTAINER_PUBLISHENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1535, serialized_end=1975, ) _CONTAINERDEPLOYEDOPTIONS = _descriptor.Descriptor( name='ContainerDeployedOptions', full_name='pb.ContainerDeployedOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ContainerDeployedOptions.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='appname', full_name='pb.ContainerDeployedOptions.appname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.ContainerDeployedOptions.entrypoint', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.ContainerDeployedOptions.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.ContainerDeployedOptions.data', index=4, number=5, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1977, serialized_end=2084, ) _CONTAINERS = _descriptor.Descriptor( name='Containers', full_name='pb.Containers', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='containers', 
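# Illustrative sketch, under the same core_pb2 assumption as above: the
# pb.Container descriptor maps to a message whose scalar fields (id, podname,
# nodename, name, quota, memory, privileged, image) take proto3 defaults, and
# whose cpu/labels/publish map fields behave like Python dicts:
#
#   c = core_pb2.Container(id='abc123', podname='dev', nodename='node-1',
#                          image='registry/app:1.0', memory=512 * 1024 * 1024)
#   c.labels['role'] = 'web'          # map<string, string>
#   c.cpu['0'] = 100                  # map<string, int32>
#   print(c.privileged)               # unset proto3 bool reads as False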
full_name='pb.Containers.containers', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2086, serialized_end=2133, ) _CONTAINERID = _descriptor.Descriptor( name='ContainerID', full_name='pb.ContainerID', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ContainerID.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2135, serialized_end=2160, ) _CONTAINERIDS = _descriptor.Descriptor( name='ContainerIDs', full_name='pb.ContainerIDs', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ContainerIDs.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2162, serialized_end=2189, ) _REMOVECONTAINEROPTIONS = _descriptor.Descriptor( name='RemoveContainerOptions', full_name='pb.RemoveContainerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.RemoveContainerOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.RemoveContainerOptions.force', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2191, serialized_end=2243, ) _REALLOCOPTIONS = _descriptor.Descriptor( name='ReallocOptions', full_name='pb.ReallocOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ReallocOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.ReallocOptions.cpu', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mem', full_name='pb.ReallocOptions.mem', index=2, 
number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2245, serialized_end=2300, ) _ADDPODOPTIONS = _descriptor.Descriptor( name='AddPodOptions', full_name='pb.AddPodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.AddPodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='favor', full_name='pb.AddPodOptions.favor', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='desc', full_name='pb.AddPodOptions.desc', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2302, serialized_end=2360, ) _REMOVEPODOPTIONS = _descriptor.Descriptor( name='RemovePodOptions', full_name='pb.RemovePodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.RemovePodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2362, serialized_end=2394, ) _GETPODOPTIONS = _descriptor.Descriptor( name='GetPodOptions', full_name='pb.GetPodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.GetPodOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2396, serialized_end=2425, ) _ADDNODEOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.AddNodeOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.AddNodeOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='value', full_name='pb.AddNodeOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _ADDNODEOPTIONS = _descriptor.Descriptor( name='AddNodeOptions', full_name='pb.AddNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.AddNodeOptions.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='endpoint', full_name='pb.AddNodeOptions.endpoint', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.AddNodeOptions.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ca', full_name='pb.AddNodeOptions.ca', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cert', full_name='pb.AddNodeOptions.cert', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='pb.AddNodeOptions.key', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.AddNodeOptions.cpu', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='share', full_name='pb.AddNodeOptions.share', index=7, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.AddNodeOptions.memory', index=8, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.AddNodeOptions.labels', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ADDNODEOPTIONS_LABELSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2428, serialized_end=2675, ) _REMOVENODEOPTIONS = _descriptor.Descriptor( name='RemoveNodeOptions', full_name='pb.RemoveNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='nodename', full_name='pb.RemoveNodeOptions.nodename', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.RemoveNodeOptions.podname', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2677, serialized_end=2731, ) _GETNODEOPTIONS = _descriptor.Descriptor( name='GetNodeOptions', full_name='pb.GetNodeOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.GetNodeOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.GetNodeOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2733, serialized_end=2784, ) _LISTNODESOPTIONS = _descriptor.Descriptor( name='ListNodesOptions', full_name='pb.ListNodesOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.ListNodesOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='all', full_name='pb.ListNodesOptions.all', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2786, serialized_end=2834, ) _BUILD_ENVSENTRY 
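# Illustrative sketch (same core_pb2 assumption): pb.AddNodeOptions carries the
# node endpoint plus optional TLS material (ca/cert/key) and a labels map:
#
#   opts = core_pb2.AddNodeOptions(nodename='node-1',
#                                  endpoint='tcp://10.0.0.1:2376',
#                                  podname='dev', cpu=8, share=100,
#                                  memory=8 * 1024 ** 3)
#   opts.labels['zone'] = 'c1'        # map<string, string>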
= _descriptor.Descriptor( name='EnvsEntry', full_name='pb.Build.EnvsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.EnvsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.EnvsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3132, serialized_end=3175, ) _BUILD_ARGSENTRY = _descriptor.Descriptor( name='ArgsEntry', full_name='pb.Build.ArgsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.ArgsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.ArgsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3177, serialized_end=3220, ) _BUILD_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.Build.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _BUILD_ARTIFACTSENTRY = _descriptor.Descriptor( name='ArtifactsEntry', full_name='pb.Build.ArtifactsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.ArtifactsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.ArtifactsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3269, serialized_end=3317, ) _BUILD_CACHEENTRY = _descriptor.Descriptor( name='CacheEntry', full_name='pb.Build.CacheEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Build.CacheEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Build.CacheEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3319, serialized_end=3363, ) _BUILD = _descriptor.Descriptor( name='Build', full_name='pb.Build', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='base', full_name='pb.Build.base', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='repo', full_name='pb.Build.repo', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='pb.Build.version', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dir', full_name='pb.Build.dir', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='submodule', full_name='pb.Build.submodule', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='commands', full_name='pb.Build.commands', index=5, number=6, type=9, cpp_type=9, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='envs', full_name='pb.Build.envs', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='args', full_name='pb.Build.args', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.Build.labels', index=8, number=9, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='artifacts', full_name='pb.Build.artifacts', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cache', full_name='pb.Build.cache', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_BUILD_ENVSENTRY, _BUILD_ARGSENTRY, _BUILD_LABELSENTRY, _BUILD_ARTIFACTSENTRY, _BUILD_CACHEENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2837, serialized_end=3363, ) _BUILDS_BUILDSENTRY = _descriptor.Descriptor( name='BuildsEntry', full_name='pb.Builds.BuildsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.Builds.BuildsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.Builds.BuildsEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3431, serialized_end=3487, ) _BUILDS = _descriptor.Descriptor( name='Builds', full_name='pb.Builds', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='stages', full_name='pb.Builds.stages', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='builds', full_name='pb.Builds.builds', index=1, number=2, type=11, cpp_type=10, label=3, 
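# Illustrative sketch (same core_pb2 assumption): pb.Builds holds ordered stage
# names plus a map<string, Build>; message-valued map entries are created on
# first access and mutated in place rather than assigned directly:
#
#   b = core_pb2.Builds(stages=['compile', 'pack'])
#   b.builds['compile'].base = 'python:3.6'
#   b.builds['compile'].commands.extend(['make build'])
#   b.builds['pack'].base = 'alpine:3.8'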
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_BUILDS_BUILDSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3365, serialized_end=3487, ) _BUILDIMAGEOPTIONS = _descriptor.Descriptor( name='BuildImageOptions', full_name='pb.BuildImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.BuildImageOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='user', full_name='pb.BuildImageOptions.user', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='uid', full_name='pb.BuildImageOptions.uid', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tags', full_name='pb.BuildImageOptions.tags', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='builds', full_name='pb.BuildImageOptions.builds', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='tar', full_name='pb.BuildImageOptions.tar', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3489, serialized_end=3604, ) _HOOKOPTIONS = _descriptor.Descriptor( name='HookOptions', full_name='pb.HookOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='after_start', full_name='pb.HookOptions.after_start', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='before_stop', full_name='pb.HookOptions.before_stop', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.HookOptions.force', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3606, serialized_end=3676, ) _HEALTHCHECKOPTIONS = _descriptor.Descriptor( name='HealthCheckOptions', full_name='pb.HealthCheckOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='tcp_ports', full_name='pb.HealthCheckOptions.tcp_ports', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='http_port', full_name='pb.HealthCheckOptions.http_port', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='url', full_name='pb.HealthCheckOptions.url', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='code', full_name='pb.HealthCheckOptions.code', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3678, serialized_end=3763, ) _LOGOPTIONS_CONFIGENTRY = _descriptor.Descriptor( name='ConfigEntry', full_name='pb.LogOptions.ConfigEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.LogOptions.ConfigEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.LogOptions.ConfigEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3837, serialized_end=3882, ) _LOGOPTIONS = _descriptor.Descriptor( name='LogOptions', full_name='pb.LogOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='type', full_name='pb.LogOptions.type', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='config', 
full_name='pb.LogOptions.config', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_LOGOPTIONS_CONFIGENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3765, serialized_end=3882, ) _ENTRYPOINTOPTIONS_SYSCTLSENTRY = _descriptor.Descriptor( name='SysctlsEntry', full_name='pb.EntrypointOptions.SysctlsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.EntrypointOptions.SysctlsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.EntrypointOptions.SysctlsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4169, serialized_end=4215, ) _ENTRYPOINTOPTIONS = _descriptor.Descriptor( name='EntrypointOptions', full_name='pb.EntrypointOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.EntrypointOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='command', full_name='pb.EntrypointOptions.command', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='privileged', full_name='pb.EntrypointOptions.privileged', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dir', full_name='pb.EntrypointOptions.dir', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='log', full_name='pb.EntrypointOptions.log', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.EntrypointOptions.publish', index=5, number=6, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='healthcheck', full_name='pb.EntrypointOptions.healthcheck', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.EntrypointOptions.hook', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='restart_policy', full_name='pb.EntrypointOptions.restart_policy', index=8, number=9, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sysctls', full_name='pb.EntrypointOptions.sysctls', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_ENTRYPOINTOPTIONS_SYSCTLSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=3885, serialized_end=4215, ) _DEPLOYOPTIONS_NETWORKSENTRY = _descriptor.Descriptor( name='NetworksEntry', full_name='pb.DeployOptions.NetworksEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.NetworksEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.NetworksEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4804, serialized_end=4851, ) _DEPLOYOPTIONS_LABELSENTRY = _descriptor.Descriptor( name='LabelsEntry', full_name='pb.DeployOptions.LabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.LabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.LabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], 
nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=163, serialized_end=208, ) _DEPLOYOPTIONS_NODELABELSENTRY = _descriptor.Descriptor( name='NodelabelsEntry', full_name='pb.DeployOptions.NodelabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.NodelabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.NodelabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4900, serialized_end=4949, ) _DEPLOYOPTIONS_DATAENTRY = _descriptor.Descriptor( name='DataEntry', full_name='pb.DeployOptions.DataEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.DeployOptions.DataEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.DeployOptions.DataEntry.value', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4951, serialized_end=4994, ) _DEPLOYOPTIONS = _descriptor.Descriptor( name='DeployOptions', full_name='pb.DeployOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='pb.DeployOptions.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entrypoint', full_name='pb.DeployOptions.entrypoint', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='podname', full_name='pb.DeployOptions.podname', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( 
name='nodename', full_name='pb.DeployOptions.nodename', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='image', full_name='pb.DeployOptions.image', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='extra_args', full_name='pb.DeployOptions.extra_args', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu_quota', full_name='pb.DeployOptions.cpu_quota', index=6, number=7, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.DeployOptions.memory', index=7, number=8, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='count', full_name='pb.DeployOptions.count', index=8, number=9, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='env', full_name='pb.DeployOptions.env', index=9, number=10, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dns', full_name='pb.DeployOptions.dns', index=10, number=11, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='extra_hosts', full_name='pb.DeployOptions.extra_hosts', index=11, number=12, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='volumes', full_name='pb.DeployOptions.volumes', index=12, number=13, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networks', full_name='pb.DeployOptions.networks', index=13, number=14, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networkmode', full_name='pb.DeployOptions.networkmode', index=14, number=15, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='user', full_name='pb.DeployOptions.user', index=15, number=16, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='debug', full_name='pb.DeployOptions.debug', index=16, number=17, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='openStdin', full_name='pb.DeployOptions.openStdin', index=17, number=18, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='pb.DeployOptions.labels', index=18, number=19, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodelabels', full_name='pb.DeployOptions.nodelabels', index=19, number=20, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='deploy_method', full_name='pb.DeployOptions.deploy_method', index=20, number=21, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.DeployOptions.data', index=21, number=22, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='softlimit', full_name='pb.DeployOptions.softlimit', index=22, number=23, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodes_limit', full_name='pb.DeployOptions.nodes_limit', index=23, number=24, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_DEPLOYOPTIONS_NETWORKSENTRY, _DEPLOYOPTIONS_LABELSENTRY, _DEPLOYOPTIONS_NODELABELSENTRY, _DEPLOYOPTIONS_DATAENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4218, serialized_end=4994, ) _REPLACEOPTIONS_FILTERLABELSENTRY = _descriptor.Descriptor( name='FilterLabelsEntry', full_name='pb.ReplaceOptions.FilterLabelsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='key', full_name='pb.ReplaceOptions.FilterLabelsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ReplaceOptions.FilterLabelsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5210, serialized_end=5261, ) _REPLACEOPTIONS_COPYENTRY = _descriptor.Descriptor( name='CopyEntry', full_name='pb.ReplaceOptions.CopyEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.ReplaceOptions.CopyEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.ReplaceOptions.CopyEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5263, serialized_end=5306, ) _REPLACEOPTIONS = _descriptor.Descriptor( name='ReplaceOptions', full_name='pb.ReplaceOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='deployOpt', full_name='pb.ReplaceOptions.deployOpt', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='force', full_name='pb.ReplaceOptions.force', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filter_labels', full_name='pb.ReplaceOptions.filter_labels', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='copy', full_name='pb.ReplaceOptions.copy', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ids', full_name='pb.ReplaceOptions.ids', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='networkinherit', full_name='pb.ReplaceOptions.networkinherit', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_REPLACEOPTIONS_FILTERLABELSENTRY, _REPLACEOPTIONS_COPYENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=4997, serialized_end=5306, ) _CACHEIMAGEOPTIONS = _descriptor.Descriptor( name='CacheImageOptions', full_name='pb.CacheImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.CacheImageOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CacheImageOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='images', full_name='pb.CacheImageOptions.images', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='step', full_name='pb.CacheImageOptions.step', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5308, serialized_end=5392, ) _REMOVEIMAGEOPTIONS = _descriptor.Descriptor( name='RemoveImageOptions', full_name='pb.RemoveImageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.RemoveImageOptions.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.RemoveImageOptions.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='images', full_name='pb.RemoveImageOptions.images', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='step', full_name='pb.RemoveImageOptions.step', index=3, 
number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='prune', full_name='pb.RemoveImageOptions.prune', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5394, serialized_end=5494, ) _COPYPATHS = _descriptor.Descriptor( name='CopyPaths', full_name='pb.CopyPaths', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='paths', full_name='pb.CopyPaths.paths', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5496, serialized_end=5522, ) _COPYOPTIONS_TARGETSENTRY = _descriptor.Descriptor( name='TargetsEntry', full_name='pb.CopyOptions.TargetsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CopyOptions.TargetsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CopyOptions.TargetsEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5586, serialized_end=5647, ) _COPYOPTIONS = _descriptor.Descriptor( name='CopyOptions', full_name='pb.CopyOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='targets', full_name='pb.CopyOptions.targets', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_COPYOPTIONS_TARGETSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5524, serialized_end=5647, ) _ERRORDETAIL = _descriptor.Descriptor( name='ErrorDetail', full_name='pb.ErrorDetail', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='pb.ErrorDetail.code', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='message', 
full_name='pb.ErrorDetail.message', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5649, serialized_end=5693, ) _BUILDIMAGEMESSAGE = _descriptor.Descriptor( name='BuildImageMessage', full_name='pb.BuildImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.BuildImageMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status', full_name='pb.BuildImageMessage.status', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='progress', full_name='pb.BuildImageMessage.progress', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.BuildImageMessage.error', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='stream', full_name='pb.BuildImageMessage.stream', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error_detail', full_name='pb.BuildImageMessage.error_detail', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5696, serialized_end=5831, ) _CREATECONTAINERMESSAGE_CPUENTRY = _descriptor.Descriptor( name='CpuEntry', full_name='pb.CreateContainerMessage.CpuEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CreateContainerMessage.CpuEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CreateContainerMessage.CpuEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1290, serialized_end=1332, ) _CREATECONTAINERMESSAGE_PUBLISHENTRY = _descriptor.Descriptor( name='PublishEntry', full_name='pb.CreateContainerMessage.PublishEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='pb.CreateContainerMessage.PublishEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='pb.CreateContainerMessage.PublishEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1929, serialized_end=1975, ) _CREATECONTAINERMESSAGE = _descriptor.Descriptor( name='CreateContainerMessage', full_name='pb.CreateContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='podname', full_name='pb.CreateContainerMessage.podname', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CreateContainerMessage.nodename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='id', full_name='pb.CreateContainerMessage.id', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.CreateContainerMessage.name', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.CreateContainerMessage.error', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.CreateContainerMessage.success', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cpu', full_name='pb.CreateContainerMessage.cpu', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='quota', full_name='pb.CreateContainerMessage.quota', index=7, number=8, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='pb.CreateContainerMessage.memory', index=8, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='publish', full_name='pb.CreateContainerMessage.publish', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.CreateContainerMessage.hook', index=10, number=11, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_CREATECONTAINERMESSAGE_CPUENTRY, _CREATECONTAINERMESSAGE_PUBLISHENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=5834, serialized_end=6196, ) _REPLACECONTAINERMESSAGE = _descriptor.Descriptor( name='ReplaceContainerMessage', full_name='pb.ReplaceContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='create', full_name='pb.ReplaceContainerMessage.create', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='remove', full_name='pb.ReplaceContainerMessage.remove', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.ReplaceContainerMessage.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6199, serialized_end=6327, ) _RUNANDWAITMESSAGE = _descriptor.Descriptor( name='RunAndWaitMessage', full_name='pb.RunAndWaitMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='container_id', full_name='pb.RunAndWaitMessage.container_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.RunAndWaitMessage.data', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6329, serialized_end=6384, ) _CACHEIMAGEMESSAGE = _descriptor.Descriptor( name='CacheImageMessage', full_name='pb.CacheImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='pb.CacheImageMessage.image', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.CacheImageMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='nodename', full_name='pb.CacheImageMessage.nodename', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='message', full_name='pb.CacheImageMessage.message', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6386, serialized_end=6472, ) _REMOVEIMAGEMESSAGE = _descriptor.Descriptor( name='RemoveImageMessage', full_name='pb.RemoveImageMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='pb.RemoveImageMessage.image', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.RemoveImageMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='messages', full_name='pb.RemoveImageMessage.messages', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], 
oneofs=[ ], serialized_start=6474, serialized_end=6544, ) _REMOVECONTAINERMESSAGE = _descriptor.Descriptor( name='RemoveContainerMessage', full_name='pb.RemoveContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.RemoveContainerMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.RemoveContainerMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.RemoveContainerMessage.hook', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6546, serialized_end=6613, ) _REALLOCRESOURCEMESSAGE = _descriptor.Descriptor( name='ReallocResourceMessage', full_name='pb.ReallocResourceMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ReallocResourceMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='success', full_name='pb.ReallocResourceMessage.success', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6615, serialized_end=6668, ) _COPYMESSAGE = _descriptor.Descriptor( name='CopyMessage', full_name='pb.CopyMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.CopyMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status', full_name='pb.CopyMessage.status', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='pb.CopyMessage.name', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='path', 
full_name='pb.CopyMessage.path', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.CopyMessage.error', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='pb.CopyMessage.data', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6670, serialized_end=6768, ) _RUNANDWAITOPTIONS = _descriptor.Descriptor( name='RunAndWaitOptions', full_name='pb.RunAndWaitOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='DeployOptions', full_name='pb.RunAndWaitOptions.DeployOptions', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='Cmd', full_name='pb.RunAndWaitOptions.Cmd', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6770, serialized_end=6844, ) _CONTROLCONTAINEROPTIONS = _descriptor.Descriptor( name='ControlContainerOptions', full_name='pb.ControlContainerOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ids', full_name='pb.ControlContainerOptions.ids', index=0, number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='pb.ControlContainerOptions.type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6846, serialized_end=6898, ) _CONTROLCONTAINERMESSAGE = _descriptor.Descriptor( name='ControlContainerMessage', full_name='pb.ControlContainerMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='pb.ControlContainerMessage.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='pb.ControlContainerMessage.error', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hook', full_name='pb.ControlContainerMessage.hook', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=6900, serialized_end=6966, ) _LISTCONTAINERSOPTIONS_LABELSENTRY.containing_type = _LISTCONTAINERSOPTIONS _LISTCONTAINERSOPTIONS.fields_by_name['labels'].message_type = _LISTCONTAINERSOPTIONS_LABELSENTRY _PODS.fields_by_name['pods'].message_type = _POD _PODRESOURCE_CPUENTRY.containing_type = _PODRESOURCE _PODRESOURCE_MEMORYENTRY.containing_type = _PODRESOURCE _PODRESOURCE_DIFFENTRY.containing_type = _PODRESOURCE _PODRESOURCE_DETAILENTRY.containing_type = _PODRESOURCE _PODRESOURCE.fields_by_name['cpu'].message_type = _PODRESOURCE_CPUENTRY _PODRESOURCE.fields_by_name['memory'].message_type = _PODRESOURCE_MEMORYENTRY _PODRESOURCE.fields_by_name['diff'].message_type = _PODRESOURCE_DIFFENTRY _PODRESOURCE.fields_by_name['detail'].message_type = _PODRESOURCE_DETAILENTRY _NETWORKS.fields_by_name['networks'].message_type = _NETWORK _NODE_CPUENTRY.containing_type = _NODE _NODE_LABELSENTRY.containing_type = _NODE _NODE_INITCPUENTRY.containing_type = _NODE _NODE.fields_by_name['cpu'].message_type = _NODE_CPUENTRY _NODE.fields_by_name['labels'].message_type = _NODE_LABELSENTRY _NODE.fields_by_name['init_cpu'].message_type = _NODE_INITCPUENTRY _NODES.fields_by_name['nodes'].message_type = _NODE _CONTAINER_CPUENTRY.containing_type = _CONTAINER _CONTAINER_LABELSENTRY.containing_type = _CONTAINER _CONTAINER_PUBLISHENTRY.containing_type = _CONTAINER _CONTAINER.fields_by_name['cpu'].message_type = _CONTAINER_CPUENTRY _CONTAINER.fields_by_name['labels'].message_type = _CONTAINER_LABELSENTRY _CONTAINER.fields_by_name['publish'].message_type = _CONTAINER_PUBLISHENTRY _CONTAINERS.fields_by_name['containers'].message_type = _CONTAINER _ADDNODEOPTIONS_LABELSENTRY.containing_type = _ADDNODEOPTIONS _ADDNODEOPTIONS.fields_by_name['labels'].message_type = _ADDNODEOPTIONS_LABELSENTRY _BUILD_ENVSENTRY.containing_type = _BUILD _BUILD_ARGSENTRY.containing_type = _BUILD _BUILD_LABELSENTRY.containing_type = _BUILD _BUILD_ARTIFACTSENTRY.containing_type = _BUILD _BUILD_CACHEENTRY.containing_type = _BUILD _BUILD.fields_by_name['envs'].message_type = _BUILD_ENVSENTRY _BUILD.fields_by_name['args'].message_type = _BUILD_ARGSENTRY _BUILD.fields_by_name['labels'].message_type = _BUILD_LABELSENTRY _BUILD.fields_by_name['artifacts'].message_type = _BUILD_ARTIFACTSENTRY _BUILD.fields_by_name['cache'].message_type = _BUILD_CACHEENTRY _BUILDS_BUILDSENTRY.fields_by_name['value'].message_type = _BUILD _BUILDS_BUILDSENTRY.containing_type = _BUILDS _BUILDS.fields_by_name['builds'].message_type = _BUILDS_BUILDSENTRY _BUILDIMAGEOPTIONS.fields_by_name['builds'].message_type = _BUILDS _LOGOPTIONS_CONFIGENTRY.containing_type = _LOGOPTIONS _LOGOPTIONS.fields_by_name['config'].message_type = _LOGOPTIONS_CONFIGENTRY 
_ENTRYPOINTOPTIONS_SYSCTLSENTRY.containing_type = _ENTRYPOINTOPTIONS _ENTRYPOINTOPTIONS.fields_by_name['log'].message_type = _LOGOPTIONS _ENTRYPOINTOPTIONS.fields_by_name['healthcheck'].message_type = _HEALTHCHECKOPTIONS _ENTRYPOINTOPTIONS.fields_by_name['hook'].message_type = _HOOKOPTIONS _ENTRYPOINTOPTIONS.fields_by_name['sysctls'].message_type = _ENTRYPOINTOPTIONS_SYSCTLSENTRY _DEPLOYOPTIONS_NETWORKSENTRY.containing_type = _DEPLOYOPTIONS _DEPLOYOPTIONS_LABELSENTRY.containing_type = _DEPLOYOPTIONS _DEPLOYOPTIONS_NODELABELSENTRY.containing_type = _DEPLOYOPTIONS _DEPLOYOPTIONS_DATAENTRY.containing_type = _DEPLOYOPTIONS _DEPLOYOPTIONS.fields_by_name['entrypoint'].message_type = _ENTRYPOINTOPTIONS _DEPLOYOPTIONS.fields_by_name['networks'].message_type = _DEPLOYOPTIONS_NETWORKSENTRY _DEPLOYOPTIONS.fields_by_name['labels'].message_type = _DEPLOYOPTIONS_LABELSENTRY _DEPLOYOPTIONS.fields_by_name['nodelabels'].message_type = _DEPLOYOPTIONS_NODELABELSENTRY _DEPLOYOPTIONS.fields_by_name['data'].message_type = _DEPLOYOPTIONS_DATAENTRY _REPLACEOPTIONS_FILTERLABELSENTRY.containing_type = _REPLACEOPTIONS _REPLACEOPTIONS_COPYENTRY.containing_type = _REPLACEOPTIONS _REPLACEOPTIONS.fields_by_name['deployOpt'].message_type = _DEPLOYOPTIONS _REPLACEOPTIONS.fields_by_name['filter_labels'].message_type = _REPLACEOPTIONS_FILTERLABELSENTRY _REPLACEOPTIONS.fields_by_name['copy'].message_type = _REPLACEOPTIONS_COPYENTRY _COPYOPTIONS_TARGETSENTRY.fields_by_name['value'].message_type = _COPYPATHS _COPYOPTIONS_TARGETSENTRY.containing_type = _COPYOPTIONS _COPYOPTIONS.fields_by_name['targets'].message_type = _COPYOPTIONS_TARGETSENTRY _BUILDIMAGEMESSAGE.fields_by_name['error_detail'].message_type = _ERRORDETAIL _CREATECONTAINERMESSAGE_CPUENTRY.containing_type = _CREATECONTAINERMESSAGE _CREATECONTAINERMESSAGE_PUBLISHENTRY.containing_type = _CREATECONTAINERMESSAGE _CREATECONTAINERMESSAGE.fields_by_name['cpu'].message_type = _CREATECONTAINERMESSAGE_CPUENTRY _CREATECONTAINERMESSAGE.fields_by_name['publish'].message_type = _CREATECONTAINERMESSAGE_PUBLISHENTRY _REPLACECONTAINERMESSAGE.fields_by_name['create'].message_type = _CREATECONTAINERMESSAGE _REPLACECONTAINERMESSAGE.fields_by_name['remove'].message_type = _REMOVECONTAINERMESSAGE _RUNANDWAITOPTIONS.fields_by_name['DeployOptions'].message_type = _DEPLOYOPTIONS DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY DESCRIPTOR.message_types_by_name['ListContainersOptions'] = _LISTCONTAINERSOPTIONS DESCRIPTOR.message_types_by_name['DeployStatusOptions'] = _DEPLOYSTATUSOPTIONS DESCRIPTOR.message_types_by_name['DeployStatusMessage'] = _DEPLOYSTATUSMESSAGE DESCRIPTOR.message_types_by_name['Pod'] = _POD DESCRIPTOR.message_types_by_name['Pods'] = _PODS DESCRIPTOR.message_types_by_name['PodResource'] = _PODRESOURCE DESCRIPTOR.message_types_by_name['ListNetworkOptions'] = _LISTNETWORKOPTIONS DESCRIPTOR.message_types_by_name['Network'] = _NETWORK DESCRIPTOR.message_types_by_name['Networks'] = _NETWORKS DESCRIPTOR.message_types_by_name['Node'] = _NODE DESCRIPTOR.message_types_by_name['Nodes'] = _NODES DESCRIPTOR.message_types_by_name['NodeAvailable'] = _NODEAVAILABLE DESCRIPTOR.message_types_by_name['Container'] = _CONTAINER DESCRIPTOR.message_types_by_name['ContainerDeployedOptions'] = _CONTAINERDEPLOYEDOPTIONS DESCRIPTOR.message_types_by_name['Containers'] = _CONTAINERS DESCRIPTOR.message_types_by_name['ContainerID'] = _CONTAINERID DESCRIPTOR.message_types_by_name['ContainerIDs'] = _CONTAINERIDS DESCRIPTOR.message_types_by_name['RemoveContainerOptions'] = _REMOVECONTAINEROPTIONS 
DESCRIPTOR.message_types_by_name['ReallocOptions'] = _REALLOCOPTIONS DESCRIPTOR.message_types_by_name['AddPodOptions'] = _ADDPODOPTIONS DESCRIPTOR.message_types_by_name['RemovePodOptions'] = _REMOVEPODOPTIONS DESCRIPTOR.message_types_by_name['GetPodOptions'] = _GETPODOPTIONS DESCRIPTOR.message_types_by_name['AddNodeOptions'] = _ADDNODEOPTIONS DESCRIPTOR.message_types_by_name['RemoveNodeOptions'] = _REMOVENODEOPTIONS DESCRIPTOR.message_types_by_name['GetNodeOptions'] = _GETNODEOPTIONS DESCRIPTOR.message_types_by_name['ListNodesOptions'] = _LISTNODESOPTIONS DESCRIPTOR.message_types_by_name['Build'] = _BUILD DESCRIPTOR.message_types_by_name['Builds'] = _BUILDS DESCRIPTOR.message_types_by_name['BuildImageOptions'] = _BUILDIMAGEOPTIONS DESCRIPTOR.message_types_by_name['HookOptions'] = _HOOKOPTIONS DESCRIPTOR.message_types_by_name['HealthCheckOptions'] = _HEALTHCHECKOPTIONS DESCRIPTOR.message_types_by_name['LogOptions'] = _LOGOPTIONS DESCRIPTOR.message_types_by_name['EntrypointOptions'] = _ENTRYPOINTOPTIONS DESCRIPTOR.message_types_by_name['DeployOptions'] = _DEPLOYOPTIONS DESCRIPTOR.message_types_by_name['ReplaceOptions'] = _REPLACEOPTIONS DESCRIPTOR.message_types_by_name['CacheImageOptions'] = _CACHEIMAGEOPTIONS DESCRIPTOR.message_types_by_name['RemoveImageOptions'] = _REMOVEIMAGEOPTIONS DESCRIPTOR.message_types_by_name['CopyPaths'] = _COPYPATHS DESCRIPTOR.message_types_by_name['CopyOptions'] = _COPYOPTIONS DESCRIPTOR.message_types_by_name['ErrorDetail'] = _ERRORDETAIL DESCRIPTOR.message_types_by_name['BuildImageMessage'] = _BUILDIMAGEMESSAGE DESCRIPTOR.message_types_by_name['CreateContainerMessage'] = _CREATECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['ReplaceContainerMessage'] = _REPLACECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['RunAndWaitMessage'] = _RUNANDWAITMESSAGE DESCRIPTOR.message_types_by_name['CacheImageMessage'] = _CACHEIMAGEMESSAGE DESCRIPTOR.message_types_by_name['RemoveImageMessage'] = _REMOVEIMAGEMESSAGE DESCRIPTOR.message_types_by_name['RemoveContainerMessage'] = _REMOVECONTAINERMESSAGE DESCRIPTOR.message_types_by_name['ReallocResourceMessage'] = _REALLOCRESOURCEMESSAGE DESCRIPTOR.message_types_by_name['CopyMessage'] = _COPYMESSAGE DESCRIPTOR.message_types_by_name['RunAndWaitOptions'] = _RUNANDWAITOPTIONS DESCRIPTOR.message_types_by_name['ControlContainerOptions'] = _CONTROLCONTAINEROPTIONS DESCRIPTOR.message_types_by_name['ControlContainerMessage'] = _CONTROLCONTAINERMESSAGE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( DESCRIPTOR = _EMPTY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Empty) )) _sym_db.RegisterMessage(Empty) ListContainersOptions = _reflection.GeneratedProtocolMessageType('ListContainersOptions', (_message.Message,), dict( LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _LISTCONTAINERSOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListContainersOptions.LabelsEntry) )) , DESCRIPTOR = _LISTCONTAINERSOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListContainersOptions) )) _sym_db.RegisterMessage(ListContainersOptions) _sym_db.RegisterMessage(ListContainersOptions.LabelsEntry) DeployStatusOptions = _reflection.GeneratedProtocolMessageType('DeployStatusOptions', (_message.Message,), dict( DESCRIPTOR = _DEPLOYSTATUSOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployStatusOptions) )) 
_sym_db.RegisterMessage(DeployStatusOptions) DeployStatusMessage = _reflection.GeneratedProtocolMessageType('DeployStatusMessage', (_message.Message,), dict( DESCRIPTOR = _DEPLOYSTATUSMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployStatusMessage) )) _sym_db.RegisterMessage(DeployStatusMessage) Pod = _reflection.GeneratedProtocolMessageType('Pod', (_message.Message,), dict( DESCRIPTOR = _POD, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Pod) )) _sym_db.RegisterMessage(Pod) Pods = _reflection.GeneratedProtocolMessageType('Pods', (_message.Message,), dict( DESCRIPTOR = _PODS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Pods) )) _sym_db.RegisterMessage(Pods) PodResource = _reflection.GeneratedProtocolMessageType('PodResource', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.CpuEntry) )) , MemoryEntry = _reflection.GeneratedProtocolMessageType('MemoryEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_MEMORYENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.MemoryEntry) )) , DiffEntry = _reflection.GeneratedProtocolMessageType('DiffEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_DIFFENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.DiffEntry) )) , DetailEntry = _reflection.GeneratedProtocolMessageType('DetailEntry', (_message.Message,), dict( DESCRIPTOR = _PODRESOURCE_DETAILENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource.DetailEntry) )) , DESCRIPTOR = _PODRESOURCE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.PodResource) )) _sym_db.RegisterMessage(PodResource) _sym_db.RegisterMessage(PodResource.CpuEntry) _sym_db.RegisterMessage(PodResource.MemoryEntry) _sym_db.RegisterMessage(PodResource.DiffEntry) _sym_db.RegisterMessage(PodResource.DetailEntry) ListNetworkOptions = _reflection.GeneratedProtocolMessageType('ListNetworkOptions', (_message.Message,), dict( DESCRIPTOR = _LISTNETWORKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListNetworkOptions) )) _sym_db.RegisterMessage(ListNetworkOptions) Network = _reflection.GeneratedProtocolMessageType('Network', (_message.Message,), dict( DESCRIPTOR = _NETWORK, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Network) )) _sym_db.RegisterMessage(Network) Networks = _reflection.GeneratedProtocolMessageType('Networks', (_message.Message,), dict( DESCRIPTOR = _NETWORKS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Networks) )) _sym_db.RegisterMessage(Networks) Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.CpuEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.LabelsEntry) )) , InitCpuEntry = _reflection.GeneratedProtocolMessageType('InitCpuEntry', (_message.Message,), dict( DESCRIPTOR = _NODE_INITCPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node.InitCpuEntry) )) , DESCRIPTOR = 
_NODE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Node) )) _sym_db.RegisterMessage(Node) _sym_db.RegisterMessage(Node.CpuEntry) _sym_db.RegisterMessage(Node.LabelsEntry) _sym_db.RegisterMessage(Node.InitCpuEntry) Nodes = _reflection.GeneratedProtocolMessageType('Nodes', (_message.Message,), dict( DESCRIPTOR = _NODES, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Nodes) )) _sym_db.RegisterMessage(Nodes) NodeAvailable = _reflection.GeneratedProtocolMessageType('NodeAvailable', (_message.Message,), dict( DESCRIPTOR = _NODEAVAILABLE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.NodeAvailable) )) _sym_db.RegisterMessage(NodeAvailable) Container = _reflection.GeneratedProtocolMessageType('Container', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.CpuEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.LabelsEntry) )) , PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict( DESCRIPTOR = _CONTAINER_PUBLISHENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container.PublishEntry) )) , DESCRIPTOR = _CONTAINER, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Container) )) _sym_db.RegisterMessage(Container) _sym_db.RegisterMessage(Container.CpuEntry) _sym_db.RegisterMessage(Container.LabelsEntry) _sym_db.RegisterMessage(Container.PublishEntry) ContainerDeployedOptions = _reflection.GeneratedProtocolMessageType('ContainerDeployedOptions', (_message.Message,), dict( DESCRIPTOR = _CONTAINERDEPLOYEDOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerDeployedOptions) )) _sym_db.RegisterMessage(ContainerDeployedOptions) Containers = _reflection.GeneratedProtocolMessageType('Containers', (_message.Message,), dict( DESCRIPTOR = _CONTAINERS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Containers) )) _sym_db.RegisterMessage(Containers) ContainerID = _reflection.GeneratedProtocolMessageType('ContainerID', (_message.Message,), dict( DESCRIPTOR = _CONTAINERID, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerID) )) _sym_db.RegisterMessage(ContainerID) ContainerIDs = _reflection.GeneratedProtocolMessageType('ContainerIDs', (_message.Message,), dict( DESCRIPTOR = _CONTAINERIDS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ContainerIDs) )) _sym_db.RegisterMessage(ContainerIDs) RemoveContainerOptions = _reflection.GeneratedProtocolMessageType('RemoveContainerOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVECONTAINEROPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveContainerOptions) )) _sym_db.RegisterMessage(RemoveContainerOptions) ReallocOptions = _reflection.GeneratedProtocolMessageType('ReallocOptions', (_message.Message,), dict( DESCRIPTOR = _REALLOCOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReallocOptions) )) _sym_db.RegisterMessage(ReallocOptions) AddPodOptions = _reflection.GeneratedProtocolMessageType('AddPodOptions', (_message.Message,), dict( DESCRIPTOR = _ADDPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddPodOptions) 
)) _sym_db.RegisterMessage(AddPodOptions) RemovePodOptions = _reflection.GeneratedProtocolMessageType('RemovePodOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVEPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemovePodOptions) )) _sym_db.RegisterMessage(RemovePodOptions) GetPodOptions = _reflection.GeneratedProtocolMessageType('GetPodOptions', (_message.Message,), dict( DESCRIPTOR = _GETPODOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.GetPodOptions) )) _sym_db.RegisterMessage(GetPodOptions) AddNodeOptions = _reflection.GeneratedProtocolMessageType('AddNodeOptions', (_message.Message,), dict( LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _ADDNODEOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddNodeOptions.LabelsEntry) )) , DESCRIPTOR = _ADDNODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.AddNodeOptions) )) _sym_db.RegisterMessage(AddNodeOptions) _sym_db.RegisterMessage(AddNodeOptions.LabelsEntry) RemoveNodeOptions = _reflection.GeneratedProtocolMessageType('RemoveNodeOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVENODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveNodeOptions) )) _sym_db.RegisterMessage(RemoveNodeOptions) GetNodeOptions = _reflection.GeneratedProtocolMessageType('GetNodeOptions', (_message.Message,), dict( DESCRIPTOR = _GETNODEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.GetNodeOptions) )) _sym_db.RegisterMessage(GetNodeOptions) ListNodesOptions = _reflection.GeneratedProtocolMessageType('ListNodesOptions', (_message.Message,), dict( DESCRIPTOR = _LISTNODESOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ListNodesOptions) )) _sym_db.RegisterMessage(ListNodesOptions) Build = _reflection.GeneratedProtocolMessageType('Build', (_message.Message,), dict( EnvsEntry = _reflection.GeneratedProtocolMessageType('EnvsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ENVSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.EnvsEntry) )) , ArgsEntry = _reflection.GeneratedProtocolMessageType('ArgsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ARGSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.ArgsEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.LabelsEntry) )) , ArtifactsEntry = _reflection.GeneratedProtocolMessageType('ArtifactsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_ARTIFACTSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.ArtifactsEntry) )) , CacheEntry = _reflection.GeneratedProtocolMessageType('CacheEntry', (_message.Message,), dict( DESCRIPTOR = _BUILD_CACHEENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build.CacheEntry) )) , DESCRIPTOR = _BUILD, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Build) )) _sym_db.RegisterMessage(Build) _sym_db.RegisterMessage(Build.EnvsEntry) _sym_db.RegisterMessage(Build.ArgsEntry) _sym_db.RegisterMessage(Build.LabelsEntry) _sym_db.RegisterMessage(Build.ArtifactsEntry) _sym_db.RegisterMessage(Build.CacheEntry) Builds = _reflection.GeneratedProtocolMessageType('Builds', (_message.Message,), dict( BuildsEntry = 
_reflection.GeneratedProtocolMessageType('BuildsEntry', (_message.Message,), dict( DESCRIPTOR = _BUILDS_BUILDSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Builds.BuildsEntry) )) , DESCRIPTOR = _BUILDS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.Builds) )) _sym_db.RegisterMessage(Builds) _sym_db.RegisterMessage(Builds.BuildsEntry) BuildImageOptions = _reflection.GeneratedProtocolMessageType('BuildImageOptions', (_message.Message,), dict( DESCRIPTOR = _BUILDIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.BuildImageOptions) )) _sym_db.RegisterMessage(BuildImageOptions) HookOptions = _reflection.GeneratedProtocolMessageType('HookOptions', (_message.Message,), dict( DESCRIPTOR = _HOOKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.HookOptions) )) _sym_db.RegisterMessage(HookOptions) HealthCheckOptions = _reflection.GeneratedProtocolMessageType('HealthCheckOptions', (_message.Message,), dict( DESCRIPTOR = _HEALTHCHECKOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.HealthCheckOptions) )) _sym_db.RegisterMessage(HealthCheckOptions) LogOptions = _reflection.GeneratedProtocolMessageType('LogOptions', (_message.Message,), dict( ConfigEntry = _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), dict( DESCRIPTOR = _LOGOPTIONS_CONFIGENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.LogOptions.ConfigEntry) )) , DESCRIPTOR = _LOGOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.LogOptions) )) _sym_db.RegisterMessage(LogOptions) _sym_db.RegisterMessage(LogOptions.ConfigEntry) EntrypointOptions = _reflection.GeneratedProtocolMessageType('EntrypointOptions', (_message.Message,), dict( SysctlsEntry = _reflection.GeneratedProtocolMessageType('SysctlsEntry', (_message.Message,), dict( DESCRIPTOR = _ENTRYPOINTOPTIONS_SYSCTLSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.EntrypointOptions.SysctlsEntry) )) , DESCRIPTOR = _ENTRYPOINTOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.EntrypointOptions) )) _sym_db.RegisterMessage(EntrypointOptions) _sym_db.RegisterMessage(EntrypointOptions.SysctlsEntry) DeployOptions = _reflection.GeneratedProtocolMessageType('DeployOptions', (_message.Message,), dict( NetworksEntry = _reflection.GeneratedProtocolMessageType('NetworksEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_NETWORKSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.NetworksEntry) )) , LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_LABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.LabelsEntry) )) , NodelabelsEntry = _reflection.GeneratedProtocolMessageType('NodelabelsEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_NODELABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.NodelabelsEntry) )) , DataEntry = _reflection.GeneratedProtocolMessageType('DataEntry', (_message.Message,), dict( DESCRIPTOR = _DEPLOYOPTIONS_DATAENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions.DataEntry) )) , DESCRIPTOR = _DEPLOYOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.DeployOptions) )) _sym_db.RegisterMessage(DeployOptions) _sym_db.RegisterMessage(DeployOptions.NetworksEntry) 
_sym_db.RegisterMessage(DeployOptions.LabelsEntry) _sym_db.RegisterMessage(DeployOptions.NodelabelsEntry) _sym_db.RegisterMessage(DeployOptions.DataEntry) ReplaceOptions = _reflection.GeneratedProtocolMessageType('ReplaceOptions', (_message.Message,), dict( FilterLabelsEntry = _reflection.GeneratedProtocolMessageType('FilterLabelsEntry', (_message.Message,), dict( DESCRIPTOR = _REPLACEOPTIONS_FILTERLABELSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions.FilterLabelsEntry) )) , CopyEntry = _reflection.GeneratedProtocolMessageType('CopyEntry', (_message.Message,), dict( DESCRIPTOR = _REPLACEOPTIONS_COPYENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions.CopyEntry) )) , DESCRIPTOR = _REPLACEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceOptions) )) _sym_db.RegisterMessage(ReplaceOptions) _sym_db.RegisterMessage(ReplaceOptions.FilterLabelsEntry) _sym_db.RegisterMessage(ReplaceOptions.CopyEntry) CacheImageOptions = _reflection.GeneratedProtocolMessageType('CacheImageOptions', (_message.Message,), dict( DESCRIPTOR = _CACHEIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CacheImageOptions) )) _sym_db.RegisterMessage(CacheImageOptions) RemoveImageOptions = _reflection.GeneratedProtocolMessageType('RemoveImageOptions', (_message.Message,), dict( DESCRIPTOR = _REMOVEIMAGEOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveImageOptions) )) _sym_db.RegisterMessage(RemoveImageOptions) CopyPaths = _reflection.GeneratedProtocolMessageType('CopyPaths', (_message.Message,), dict( DESCRIPTOR = _COPYPATHS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyPaths) )) _sym_db.RegisterMessage(CopyPaths) CopyOptions = _reflection.GeneratedProtocolMessageType('CopyOptions', (_message.Message,), dict( TargetsEntry = _reflection.GeneratedProtocolMessageType('TargetsEntry', (_message.Message,), dict( DESCRIPTOR = _COPYOPTIONS_TARGETSENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyOptions.TargetsEntry) )) , DESCRIPTOR = _COPYOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyOptions) )) _sym_db.RegisterMessage(CopyOptions) _sym_db.RegisterMessage(CopyOptions.TargetsEntry) ErrorDetail = _reflection.GeneratedProtocolMessageType('ErrorDetail', (_message.Message,), dict( DESCRIPTOR = _ERRORDETAIL, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ErrorDetail) )) _sym_db.RegisterMessage(ErrorDetail) BuildImageMessage = _reflection.GeneratedProtocolMessageType('BuildImageMessage', (_message.Message,), dict( DESCRIPTOR = _BUILDIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.BuildImageMessage) )) _sym_db.RegisterMessage(BuildImageMessage) CreateContainerMessage = _reflection.GeneratedProtocolMessageType('CreateContainerMessage', (_message.Message,), dict( CpuEntry = _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), dict( DESCRIPTOR = _CREATECONTAINERMESSAGE_CPUENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.CpuEntry) )) , PublishEntry = _reflection.GeneratedProtocolMessageType('PublishEntry', (_message.Message,), dict( DESCRIPTOR = _CREATECONTAINERMESSAGE_PUBLISHENTRY, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CreateContainerMessage.PublishEntry) )) , DESCRIPTOR = _CREATECONTAINERMESSAGE, __module__ = 'core_pb2' # 
@@protoc_insertion_point(class_scope:pb.CreateContainerMessage) )) _sym_db.RegisterMessage(CreateContainerMessage) _sym_db.RegisterMessage(CreateContainerMessage.CpuEntry) _sym_db.RegisterMessage(CreateContainerMessage.PublishEntry) ReplaceContainerMessage = _reflection.GeneratedProtocolMessageType('ReplaceContainerMessage', (_message.Message,), dict( DESCRIPTOR = _REPLACECONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReplaceContainerMessage) )) _sym_db.RegisterMessage(ReplaceContainerMessage) RunAndWaitMessage = _reflection.GeneratedProtocolMessageType('RunAndWaitMessage', (_message.Message,), dict( DESCRIPTOR = _RUNANDWAITMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RunAndWaitMessage) )) _sym_db.RegisterMessage(RunAndWaitMessage) CacheImageMessage = _reflection.GeneratedProtocolMessageType('CacheImageMessage', (_message.Message,), dict( DESCRIPTOR = _CACHEIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CacheImageMessage) )) _sym_db.RegisterMessage(CacheImageMessage) RemoveImageMessage = _reflection.GeneratedProtocolMessageType('RemoveImageMessage', (_message.Message,), dict( DESCRIPTOR = _REMOVEIMAGEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveImageMessage) )) _sym_db.RegisterMessage(RemoveImageMessage) RemoveContainerMessage = _reflection.GeneratedProtocolMessageType('RemoveContainerMessage', (_message.Message,), dict( DESCRIPTOR = _REMOVECONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RemoveContainerMessage) )) _sym_db.RegisterMessage(RemoveContainerMessage) ReallocResourceMessage = _reflection.GeneratedProtocolMessageType('ReallocResourceMessage', (_message.Message,), dict( DESCRIPTOR = _REALLOCRESOURCEMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ReallocResourceMessage) )) _sym_db.RegisterMessage(ReallocResourceMessage) CopyMessage = _reflection.GeneratedProtocolMessageType('CopyMessage', (_message.Message,), dict( DESCRIPTOR = _COPYMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.CopyMessage) )) _sym_db.RegisterMessage(CopyMessage) RunAndWaitOptions = _reflection.GeneratedProtocolMessageType('RunAndWaitOptions', (_message.Message,), dict( DESCRIPTOR = _RUNANDWAITOPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.RunAndWaitOptions) )) _sym_db.RegisterMessage(RunAndWaitOptions) ControlContainerOptions = _reflection.GeneratedProtocolMessageType('ControlContainerOptions', (_message.Message,), dict( DESCRIPTOR = _CONTROLCONTAINEROPTIONS, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ControlContainerOptions) )) _sym_db.RegisterMessage(ControlContainerOptions) ControlContainerMessage = _reflection.GeneratedProtocolMessageType('ControlContainerMessage', (_message.Message,), dict( DESCRIPTOR = _CONTROLCONTAINERMESSAGE, __module__ = 'core_pb2' # @@protoc_insertion_point(class_scope:pb.ControlContainerMessage) )) _sym_db.RegisterMessage(ControlContainerMessage) _LISTCONTAINERSOPTIONS_LABELSENTRY.has_options = True _LISTCONTAINERSOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_CPUENTRY.has_options = True _PODRESOURCE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_MEMORYENTRY.has_options = True _PODRESOURCE_MEMORYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) 
_PODRESOURCE_DIFFENTRY.has_options = True _PODRESOURCE_DIFFENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _PODRESOURCE_DETAILENTRY.has_options = True _PODRESOURCE_DETAILENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_CPUENTRY.has_options = True _NODE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_LABELSENTRY.has_options = True _NODE_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _NODE_INITCPUENTRY.has_options = True _NODE_INITCPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_CPUENTRY.has_options = True _CONTAINER_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_LABELSENTRY.has_options = True _CONTAINER_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CONTAINER_PUBLISHENTRY.has_options = True _CONTAINER_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _ADDNODEOPTIONS_LABELSENTRY.has_options = True _ADDNODEOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ENVSENTRY.has_options = True _BUILD_ENVSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ARGSENTRY.has_options = True _BUILD_ARGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_LABELSENTRY.has_options = True _BUILD_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_ARTIFACTSENTRY.has_options = True _BUILD_ARTIFACTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILD_CACHEENTRY.has_options = True _BUILD_CACHEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _BUILDS_BUILDSENTRY.has_options = True _BUILDS_BUILDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _LOGOPTIONS_CONFIGENTRY.has_options = True _LOGOPTIONS_CONFIGENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _ENTRYPOINTOPTIONS_SYSCTLSENTRY.has_options = True _ENTRYPOINTOPTIONS_SYSCTLSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_NETWORKSENTRY.has_options = True _DEPLOYOPTIONS_NETWORKSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_LABELSENTRY.has_options = True _DEPLOYOPTIONS_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_NODELABELSENTRY.has_options = True _DEPLOYOPTIONS_NODELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _DEPLOYOPTIONS_DATAENTRY.has_options = True _DEPLOYOPTIONS_DATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _REPLACEOPTIONS_FILTERLABELSENTRY.has_options = True _REPLACEOPTIONS_FILTERLABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _REPLACEOPTIONS_COPYENTRY.has_options = True _REPLACEOPTIONS_COPYENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _COPYOPTIONS_TARGETSENTRY.has_options = True _COPYOPTIONS_TARGETSENTRY._options = 
_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CREATECONTAINERMESSAGE_CPUENTRY.has_options = True _CREATECONTAINERMESSAGE_CPUENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CREATECONTAINERMESSAGE_PUBLISHENTRY.has_options = True _CREATECONTAINERMESSAGE_PUBLISHENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _CORERPC = _descriptor.ServiceDescriptor( name='CoreRPC', full_name='pb.CoreRPC', file=DESCRIPTOR, index=0, options=None, serialized_start=6969, serialized_end=8580, methods=[ _descriptor.MethodDescriptor( name='ListPods', full_name='pb.CoreRPC.ListPods', index=0, containing_service=None, input_type=_EMPTY, output_type=_PODS, options=None, ), _descriptor.MethodDescriptor( name='AddPod', full_name='pb.CoreRPC.AddPod', index=1, containing_service=None, input_type=_ADDPODOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='RemovePod', full_name='pb.CoreRPC.RemovePod', index=2, containing_service=None, input_type=_REMOVEPODOPTIONS, output_type=_EMPTY, options=None, ), _descriptor.MethodDescriptor( name='GetPod', full_name='pb.CoreRPC.GetPod', index=3, containing_service=None, input_type=_GETPODOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='GetPodResource', full_name='pb.CoreRPC.GetPodResource', index=4, containing_service=None, input_type=_GETPODOPTIONS, output_type=_PODRESOURCE, options=None, ), _descriptor.MethodDescriptor( name='AddNode', full_name='pb.CoreRPC.AddNode', index=5, containing_service=None, input_type=_ADDNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='RemoveNode', full_name='pb.CoreRPC.RemoveNode', index=6, containing_service=None, input_type=_REMOVENODEOPTIONS, output_type=_POD, options=None, ), _descriptor.MethodDescriptor( name='SetNodeAvailable', full_name='pb.CoreRPC.SetNodeAvailable', index=7, containing_service=None, input_type=_NODEAVAILABLE, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='GetNode', full_name='pb.CoreRPC.GetNode', index=8, containing_service=None, input_type=_GETNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='GetContainer', full_name='pb.CoreRPC.GetContainer', index=9, containing_service=None, input_type=_CONTAINERID, output_type=_CONTAINER, options=None, ), _descriptor.MethodDescriptor( name='GetContainers', full_name='pb.CoreRPC.GetContainers', index=10, containing_service=None, input_type=_CONTAINERIDS, output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='GetNodeByName', full_name='pb.CoreRPC.GetNodeByName', index=11, containing_service=None, input_type=_GETNODEOPTIONS, output_type=_NODE, options=None, ), _descriptor.MethodDescriptor( name='ListPodNodes', full_name='pb.CoreRPC.ListPodNodes', index=12, containing_service=None, input_type=_LISTNODESOPTIONS, output_type=_NODES, options=None, ), _descriptor.MethodDescriptor( name='ListNetworks', full_name='pb.CoreRPC.ListNetworks', index=13, containing_service=None, input_type=_LISTNETWORKOPTIONS, output_type=_NETWORKS, options=None, ), _descriptor.MethodDescriptor( name='ListContainers', full_name='pb.CoreRPC.ListContainers', index=14, containing_service=None, input_type=_LISTCONTAINERSOPTIONS, output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='ListNodeContainers', full_name='pb.CoreRPC.ListNodeContainers', index=15, containing_service=None, input_type=_GETNODEOPTIONS, 
output_type=_CONTAINERS, options=None, ), _descriptor.MethodDescriptor( name='ContainerDeployed', full_name='pb.CoreRPC.ContainerDeployed', index=16, containing_service=None, input_type=_CONTAINERDEPLOYEDOPTIONS, output_type=_EMPTY, options=None, ), _descriptor.MethodDescriptor( name='Copy', full_name='pb.CoreRPC.Copy', index=17, containing_service=None, input_type=_COPYOPTIONS, output_type=_COPYMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='BuildImage', full_name='pb.CoreRPC.BuildImage', index=18, containing_service=None, input_type=_BUILDIMAGEOPTIONS, output_type=_BUILDIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='CacheImage', full_name='pb.CoreRPC.CacheImage', index=19, containing_service=None, input_type=_CACHEIMAGEOPTIONS, output_type=_CACHEIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RemoveImage', full_name='pb.CoreRPC.RemoveImage', index=20, containing_service=None, input_type=_REMOVEIMAGEOPTIONS, output_type=_REMOVEIMAGEMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='DeployStatus', full_name='pb.CoreRPC.DeployStatus', index=21, containing_service=None, input_type=_DEPLOYSTATUSOPTIONS, output_type=_DEPLOYSTATUSMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RunAndWait', full_name='pb.CoreRPC.RunAndWait', index=22, containing_service=None, input_type=_RUNANDWAITOPTIONS, output_type=_RUNANDWAITMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='CreateContainer', full_name='pb.CoreRPC.CreateContainer', index=23, containing_service=None, input_type=_DEPLOYOPTIONS, output_type=_CREATECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ReplaceContainer', full_name='pb.CoreRPC.ReplaceContainer', index=24, containing_service=None, input_type=_REPLACEOPTIONS, output_type=_REPLACECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='RemoveContainer', full_name='pb.CoreRPC.RemoveContainer', index=25, containing_service=None, input_type=_REMOVECONTAINEROPTIONS, output_type=_REMOVECONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ControlContainer', full_name='pb.CoreRPC.ControlContainer', index=26, containing_service=None, input_type=_CONTROLCONTAINEROPTIONS, output_type=_CONTROLCONTAINERMESSAGE, options=None, ), _descriptor.MethodDescriptor( name='ReallocResource', full_name='pb.CoreRPC.ReallocResource', index=27, containing_service=None, input_type=_REALLOCOPTIONS, output_type=_REALLOCRESOURCEMESSAGE, options=None, ), ]) _sym_db.RegisterServiceDescriptor(_CORERPC) DESCRIPTOR.services_by_name['CoreRPC'] = _CORERPC # @@protoc_insertion_point(module_scope)
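The generated module above registers every message class (Node, Container, Build, DeployOptions, and so on) with the default symbol database and installs the CoreRPC service descriptor, with its 28 methods from ListPods through ReallocResource, under DESCRIPTOR.services_by_name['CoreRPC']. A minimal introspection sketch, assuming the file is importable under its declared module name core_pb2:

import core_pb2  # the generated module shown above

# The service descriptor registered at the bottom of the generated file.
core_rpc = core_pb2.DESCRIPTOR.services_by_name['CoreRPC']

# Each MethodDescriptor records the request and response message types,
# e.g. "AddPod: AddPodOptions -> Pod".
for method in core_rpc.methods:
    print(f"{method.name}: {method.input_type.name} -> {method.output_type.name}")

# The registered message classes are available as module attributes,
# e.g. core_pb2.AddPodOptions().

The actual RPC stubs (for example a CoreRPCStub class) would live in a separately generated core_pb2_grpc module, which is not part of this file.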
    38.875126
    16,249
    0.731339
    0
    0
    0
    0
    0
    0
    0
    0
    40,959
    0.212549
    b9a524c2d76717a70aa199aeb8c04e4579e1a276
    2,217
    py
    Python
    src/models/text_node.py
    moevm/nosql1h19-text-graph
    410f156ad4f232f8aa060d43692ab020610ddfd4
    [ "MIT" ]
    null
    null
    null
    src/models/text_node.py
    moevm/nosql1h19-text-graph
    410f156ad4f232f8aa060d43692ab020610ddfd4
    [ "MIT" ]
    null
    null
    null
    src/models/text_node.py
    moevm/nosql1h19-text-graph
    410f156ad4f232f8aa060d43692ab020610ddfd4
    [ "MIT" ]
    null
    null
    null
from neomodel import StructuredNode, StringProperty, JSONProperty, \
    Relationship, IntegerProperty
import numpy as np
import re

from models.text_relation import TextRelation

__all__ = ['TextNode']


class TextNode(StructuredNode):
    order_id = IntegerProperty(required=True, unique_index=True)
    label = StringProperty(required=True)
    text = StringProperty(required=True)
    alg_results = JSONProperty()
    link = Relationship('TextNode', 'ALG', model=TextRelation)

    def short(self):
        res = ''.join([word.strip() + ' '
                       for word in re.split(r'[\n ]', self.text, 5)[:5]])
        return res

    def describe(self):
        return f"""
            <h1>Фрагмент: {self.order_id} </h1>
            <table border="1" width=100%>
                <caption>
                    Информация о вершине
                </caption>
                <tr>
                    <th>Количество символов</th>
                    <td>{self.character_num()}</td>
                </tr>
                <tr>
                    <th>Количество слов</th>
                    <td>{self.words_num()}</td>
                </tr>
                <tr>
                    <th>Количество предложений</th>
                    <td>{self.sentences_num()}</td>
                </tr>
                <tr>
                    <th>Количество связей</th>
                    <td>{len(self.link)}</td>
                </tr>
            </table>
        """

    def preview(self, frag_num=0):
        leading = 3
        if frag_num > 0:
            leading = int(np.floor(np.log10(frag_num))) + 1
        if str(self.order_id) != str(self.label):
            return f"{str(self.order_id).zfill(leading)}: " \
                   + f"[{self.label}] {self.short()}..."
        else:
            return f"{str(self.order_id).zfill(leading)}: " \
                   + f"[{self.label}] {self.short()}..."
        return f"[{self.label}] {self.short()}..."

    def words_num(self):
        return len(self.text.split())

    def character_num(self):
        return len(self.text)

    def sentences_num(self):
        return len([s for s in self.text.split('.') if len(s) > 2])
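A hedged usage sketch of the model above: the property names (order_id, label, text) are taken from the class itself, while the Neo4j connection URL is purely a placeholder assumption and TextRelation's own properties are not shown in this file.

from neomodel import config

from models.text_node import TextNode

# Assumed connection string; point this at a real Neo4j instance.
config.DATABASE_URL = 'bolt://neo4j:password@localhost:7687'

node = TextNode(order_id=1, label='1', text='Sample fragment text.').save()
print(node.preview())        # "001: [1] Sample fragment text. ..."
print(node.words_num())      # 3
print(node.character_num())  # 21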
    31.671429
    73
    0.488498
    2,088
    0.903114
    0
    0
    0
    0
    0
    0
    1,105
    0.477941
    b9a529f9f36fb2cce0a38f16148b6bc2117ab033
    2,655
    py
    Python
    tests/test_bishop_generate.py
    otaviocarvalho/chess-negamax
    21f1066611e581dac3257d3f46c71ca2b09b5964
    [ "MIT" ]
    6
    2015-04-04T15:58:29.000Z
    2019-04-07T11:45:02.000Z
    tests/test_bishop_generate.py
    otaviocarvalho/chess-negamax
    21f1066611e581dac3257d3f46c71ca2b09b5964
    [ "MIT" ]
    1
    2015-04-27T19:02:06.000Z
    2015-04-27T19:02:06.000Z
    tests/test_bishop_generate.py
    otaviocarvalho/chess-negamax
    21f1066611e581dac3257d3f46c71ca2b09b5964
    [ "MIT" ]
    3
    2015-10-04T00:22:17.000Z
    2019-04-07T11:44:56.000Z
import unittest

from .helpers import StubBoard, StubPiece, C, WHITE, BLACK


class TestBishopGenerate(unittest.TestCase):
    def get_bishop(self, board, team, position):
        from chess.models import Bishop
        return Bishop(board, team, position)

    def compare_list(self, expected, results):
        compared = []
        for e in expected:
            for r in results:
                if e[0] == r[0] and e[1] == r[1]:
                    compared.append(True)
                    break
            else:
                compared.append(False)
        return compared

    def test_generate_topright(self):
        board = StubBoard()
        board[C('h7')] = StubPiece(board, BLACK, C('h7'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()

        expected = [C('f5'), C('g6'), C('h7')]
        correct = self.compare_list(expected, results)
        self.assertTrue(all(correct))

    def test_generate_topleft(self):
        board = StubBoard()
        board[C('c6')] = StubPiece(board, WHITE, C('c6'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()

        expected = [C('d5')]
        correct = self.compare_list(expected, results)
        self.assertTrue(all(correct))

        expected = [C('c6')]
        correct = self.compare_list(expected, results)
        self.assertFalse(any(correct))

    def test_generate_bottomleft(self):
        board = StubBoard()
        board[C('c2')] = StubPiece(board, BLACK, C('c2'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()

        expected = [C('d3'), C('c2')]
        correct = self.compare_list(expected, results)
        self.assertTrue(all(correct))

        expected = [C('b1')]
        correct = self.compare_list(expected, results)
        self.assertFalse(any(correct))

    def test_generate_bottomright(self):
        board = StubBoard()
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()

        expected = [C('f3'), C('g2'), C('h1')]
        correct = self.compare_list(expected, results)
        self.assertTrue(all(correct))

    def test_generate_amount(self):
        board = StubBoard()
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()
        self.assertEqual(len(results), 13)

        board = StubBoard()
        board[C('c6')] = StubPiece(board, WHITE, C('c6'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()
        self.assertEqual(len(results), 10)


if __name__ == '__main__':
    unittest.main()
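The stub classes StubBoard and StubPiece and the coordinate helper C come from tests/helpers.py, which is not included in this row. The one subtle piece is compare_list: its for/else only appends False when the inner loop finishes without hitting break, that is, when an expected square never shows up among the generated moves. A standalone illustration, with plain (file, rank) tuples standing in for the C(...) coordinates (an assumption about their shape, made only for this sketch):

def compare_list(expected, results):
    compared = []
    for e in expected:
        for r in results:
            if e[0] == r[0] and e[1] == r[1]:
                compared.append(True)
                break
        else:  # no break: this expected square was not generated
            compared.append(False)
    return compared

# (5, 4) and (6, 5) are present in results, (0, 0) is not.
print(compare_list([(5, 4), (6, 5), (0, 0)], [(5, 4), (6, 5), (7, 6)]))
# -> [True, True, False]

Assuming the package layout implied by the file path (a tests/ package alongside the chess/ module), the suite can be run from the repository root with python -m unittest tests.test_bishop_generate.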
    30.872093
    58
    0.581921
    2,530
    0.952919
    0
    0
    0
    0
    0
    0
    110
    0.041431