content:
    """

    print("[SYSTEM] Creating the TaskResults.html")
    f.write(P1)
    Linkindex = 1

    for x in r:
        spliter = x.split(",")

        P2_2 = "\" target=\"_blank\">Check out Link Number: " + str(Linkindex) + " (" + spliter[1] + "% Relevancy)"

        FullHtml = P2_1 + spliter[0] + P2_2 + P2_3
        Linkindex += 1
        f.write(FullHtml)

    f.write(P3)
    f.close()

    print("[SYSTEM] Deploying the TaskResults.html")

    url = '4_RefferenceResultGen\TestFolder\TaskResults.html'
    webbrowser.open(url, new=2)  # open in new tab

if __name__ == "__main__":
    main()
size: 5141

max_stars_repo_path: src/example_fuzzy_1.py
max_stars_repo_name: RichardOkubo/PythonScripts
max_stars_count: 0
id: 2170457
content:
# import numpy as np
# import skfuzzy as fuzz
#
# from skfuzzy import control as ctrl
#
# design = ctrl.Antecedent(np.arange(1, 6), "design")
# potencia = ctrl.Antecedent(np.arange(1, 6), "potencia")
# economia = ctrl.Antecedent(np.arange(1, 6), "economia")
# preco = ctrl.Antecedent(np.arange(1, 6), "preco")
# espaco = ctrl.Antecedent(np.arange(1, 6), "espaco")
#
# percepcao = ctrl.Consequent(np.arange(1, 6), "percepcao")
#
# # Design
# design["feio"] = fuzz.trapmf(design.universe, [1, 1, 2, 3])
# design["razoável"] = fuzz.trapmf(design.universe, [2, 3, 3, 4])
# design["belo"] = fuzz.trapmf(design.universe, [3, 4, 5, 5])
#
# # Potência
# potencia["baixa"] = fuzz.trapmf(potencia.universe, [1, 1, 2, 3])
# potencia["média"] = fuzz.trapmf(potencia.universe, [2, 3, 3, 4])
# potencia["alta"] = fuzz.trapmf(potencia.universe, [3, 4, 5, 5])
#
# # Economia
# economia["baixa"] = fuzz.trapmf(economia.universe, [1, 1, 2, 3])
# economia["média"] = fuzz.trapmf(economia.universe, [2, 3, 3, 4])
# economia["alta"] = fuzz.trapmf(economia.universe, [3, 4, 5, 5])
#
# # Preço
# preco["elevado"] = fuzz.trapmf(preco.universe, [1, 1, 2, 3])
# preco["coerente"] = fuzz.trapmf(preco.universe, [2, 3, 3, 4])
# preco["barato"] = fuzz.trapmf(preco.universe, [3, 4, 5, 5])
#
# # Espaço Interno
# espaco["apertado"] = fuzz.trapmf(espaco.universe, [1, 1, 2, 3])
# espaco["médio"] = fuzz.trapmf(espaco.universe, [2, 3, 3, 4])
# espaco["espaçoso"] = fuzz.trapmf(espaco.universe, [3, 4, 5, 5])
#
# # Percepção
# percepcao["dispensável"] = fuzz.trapmf(percepcao.universe, [1, 1, 2, 3])
# percepcao["importante"] = fuzz.trapmf(percepcao.universe, [2, 3, 3, 4])
# percepcao["crucial"] = fuzz.trapmf(percepcao.universe, [3, 4, 5, 5])

from pprint import pprint


def multiplica(mat_A: "matriz", mat_B: "matriz") -> "matriz":
    """Multiply matrices A and B element-wise."""
    assert len(mat_A) == len(mat_B) and len(mat_A[0]) == len(mat_B[0])
    matriz = [None] * len(mat_A)
    for linha in range(len(mat_A)):
        matriz[linha] = [None] * len(mat_A[0])
        for coluna in range(len(mat_A[0])):
            matriz[linha][coluna] = mat_A[linha][coluna] * mat_B[linha][coluna]
    return matriz


def reduz(matriz: "matriz") -> int:
    """Reduce the matrix to a single number: the sum of all of its elements."""
    soma = 0
    for linha in matriz:
        soma += sum(linha)
    return soma


def transposta(matriz: "matriz") -> "matriz":
    """Build the transpose of the matrix given by the user."""
    matriz_transposta = []
    for coluna in range(len(matriz[0])):
        nova_coluna = []
        for linha in range(len(matriz)):
            nova_coluna.append(matriz[linha][coluna])
        matriz_transposta.append(nova_coluna)
    return matriz_transposta


# ------------------------------------------------------------------------------
def oferta_demanda(oferta: list, demanda: list) -> list:
    """Cross-match supply (oferta) against demand (demanda)."""
    resultado = []
    for oferta_ in oferta:
        resultado.append(reduz(multiplica(oferta_, demanda)))
    return resultado


def normalizador(matriz: list) -> list:
    """Normalize the values of the given matrix."""
    matriz_normalizada = []
    maximizante_da_matriz = matriz[-1]
    for linha in matriz:
        nova_coluna = []
        for i, coluna in enumerate(linha):
            nova_coluna.append(coluna / maximizante_da_matriz[i])
        matriz_normalizada.append(nova_coluna)
    return matriz_normalizada


def resolve(oferta: list, demanda: (list, "matriz"), geral=False) -> "matriz":
    """Solve for either a single client or a list of clients."""
    resultado_parcial = []

    if not geral:
        resultado_parcial.append(oferta_demanda(oferta, demanda))
    else:
        for i in range(len(demanda)):
            resultado_parcial.append(oferta_demanda(oferta, demanda[i]))

    resultado_final = normalizador(transposta(resultado_parcial))
    return resultado_final


# ------------------------------------------------------------------------------
# CARS
produto_A = [
    [1, 1, 2, 3],  # design - feio
    [1, 1, 2, 3],  # potência - baixa
    [3, 4, 5, 5],  # economia - alta
    [3, 4, 5, 5],  # preço - barato
    [1, 1, 2, 3],  # espaço - apertado
]
produto_B = [[3, 4, 5, 5], [3, 4, 5, 5], [1, 1, 2, 3], [1, 1, 2, 3], [3, 4, 5, 5]]
produto_C = [[2, 3, 3, 4], [2, 3, 3, 4], [2, 3, 3, 4], [2, 3, 3, 4], [3, 4, 5, 5]]
produto_D = [[3, 4, 5, 5], [3, 4, 5, 5], [1, 1, 2, 3], [1, 1, 2, 3], [2, 3, 3, 4]]

# MAXIMIZER
maximizante = [  # Maximum possible values for each product attribute
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
]

# CLIENTS
cliente_A = [
    [2, 3, 3, 4],  # design - importante
    [3, 4, 5, 5],  # potência - crucial
    [2, 3, 3, 4],  # economia - importante
    [2, 3, 3, 4],  # preço - importante
    [3, 4, 5, 5],  # espaço - crucial
]
cliente_B = [
    [2, 3, 3, 4],  # design - importante
    [3, 4, 5, 5],  # potência - crucial
    [1, 1, 2, 3],  # economia - dispensável
    [1, 1, 2, 3],  # preço - dispensável
    [1, 1, 2, 3],  # espaço - dispensável
]
cliente_C = [
    [1, 1, 2, 3],  # design - dispensável
    [2, 3, 3, 4],  # potência - importante
    [2, 3, 3, 4],  # economia - importante
    [2, 3, 3, 4],  # preço - importante
    [3, 4, 5, 5],  # espaço - crucial
]
cliente_D = [
    [1, 1, 2, 3],  # design - dispensável
    [1, 1, 2, 3],  # potência - dispensável
    [3, 4, 5, 5],  # economia - crucial
    [3, 4, 5, 5],  # preço - crucial
    [1, 1, 2, 3],  # espaço - dispensável
]

produtos = [produto_A, produto_B, produto_C, produto_D, maximizante]
clientes = [cliente_A, cliente_B, cliente_C, cliente_D]

if __name__ == "__main__":

    pprint(resolve(oferta=produtos, demanda=cliente_A, geral=False))
size: 5791

max_stars_repo_path: dev/scripts/extHelperMOD.py
max_stars_repo_name: bforest-ariadne/touchdesigner-tox-prep-for-release
max_stars_count: 15
id: 2169913
content:
import os
import subprocess
import platform
import sys


def Check_dep():
    '''
        This is a sample method.

        This sample method is intended to help illustrate what method docstrings should look like.

        Notes
        ---------------
        'self' does not need to be included in the Args section.

        Args
        ---------------
        None

        Returns
        ---------------
        None
    '''

    Dep_path = '{}/dep/python/'.format(project.folder)

    if Dep_path in sys.path:
        pass

    else:
        sys.path.insert(0, Dep_path)

    for each in sys.path:
        print(each)

    return


def Check_dep_path():
    '''
        This method checks for and creates a dep path.

        More here shortly.

        Notes
        ---------------
        'self' does not need to be included in the Args section.

        Args
        ---------------
        None

        Returns
        ---------------
        None
    '''

    dep_path = '{}/dep'.format(project.folder)
    python_path = '{}/dep/python'.format(project.folder)
    scripts_reqs_path = '{proj}/dep/{name}'.format(proj=project.folder, name=parent().par.Name)
    requirements = '{}/requirements.txt'.format(scripts_reqs_path)
    reqs_dat = op('reqs')
    phue_path = '{}/dep/python/phue.py'.format(project.folder)
    win_py_dep = '{}/update-dep-python-windows.cmd'.format(scripts_reqs_path)
    mac_py_dep = '{}/update-dep-python-mac.sh'.format(scripts_reqs_path)

    # check to see if /dep is in the project folder
    if os.path.isdir(dep_path):
        pass
    # create the directory if it's not there
    else:
        os.mkdir(dep_path)

    # check to see if /python is in the project folder
    if os.path.isdir(python_path):
        pass
    # create the directory if it's not there
    else:
        os.mkdir(python_path)

    # check to see if there's a scripts and requirements folder
    if os.path.isdir(scripts_reqs_path):
        pass
    # create the directory if it's not there
    else:
        os.mkdir(scripts_reqs_path)

    # check to see if the requirements txt is in place
    if os.path.isfile(requirements):
        pass
    else:
        reqs_file = open(requirements, 'w')
        reqs_file.write(reqs_dat.text)
        reqs_file.close()

    # check to see if our auto-generated scripts are in place
    has_win_py = os.path.isfile(win_py_dep)
    has_mac_py = os.path.isfile(mac_py_dep)

    win_py_txt = me.mod.extHelperMOD.win_dep(scripts_reqs_path, python_path)
    mac_py_txt = me.mod.extHelperMOD.mac_dep(scripts_reqs_path, python_path)

    # identify platform
    osPlatform = platform.system()

    # on windows
    if osPlatform == "Windows":
        # create the script to handle grabbing our dependencies
        req_file = open(win_py_dep, 'w')
        req_file.write(win_py_txt)
        req_file.close()

        # check to see if there is anything in the python dep dir
        # for now we'll assume that if there are files here we
        # successfully installed our python dependencies
        if len(os.listdir(python_path)) == 0:
            subprocess.Popen([win_py_dep])
        else:
            pass
    # on mac
    elif osPlatform == "Darwin":
        # create the script to handle grabbing our dependencies
        mac_file = open(mac_py_dep, 'w')
        mac_file.write(mac_py_txt)
        mac_file.close()

        # check to see if there is anything in the python dep dir
        # for now we'll assume that if there are files here we
        # successfully installed our python dependencies
        if len(os.listdir(python_path)) == 0:
            subprocess.Popen([mac_py_dep])
        else:
            pass

    else:
        pass

    return


def win_dep(requirementsPath, targetPath):
    win_txt = ''':: Update dependencies

:: make sure pip is up to date
python -m pip install --upgrade pip

:: install requirements
pip install -r {reqs}/requirements.txt --target="{target}"'''

    formatted_win_txt = win_txt.format(reqs=requirementsPath, target=targetPath)

    return formatted_win_txt


def mac_dep(requirementsPath, targetPath):
    mac_txt = '''
#!/bin/bash

dep=$(dirname "$0")
pythonDir=/python

# change current directory to where the script is run from
cd "$(dirname "$(readlink -f "$0")")"

# permission to run the file
sudo chmod 755 update-dep-python-mac.sh

# fix up pip with python3
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3 get-pip.py

# Update dependencies

# make sure pip is up to date
python3 -m pip install --upgrade pip

# install requirements
python3 -m pip install -r {reqs}/requirements.txt --target={target}'''
    formatted_mac_txt = mac_txt.format(reqs=requirementsPath, target=targetPath)
    return formatted_mac_txt
size: 4324

max_stars_repo_path: api/alembic/versions/28ba6976c1bc_set_groupid_as_foreign_key.py
max_stars_repo_name: mwath/Integration
max_stars_count: 0
id: 2170986
content:
"""Set groupId as foreign key

Revision ID: 28ba6976c1bc
Revises: 7db537b8750a
Create Date: 2021-11-10 11:52:26.341746

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '28ba6976c1bc'
down_revision = '7db537b8750a'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_foreign_key(None, 'devices', 'groups', ['groupId'], ['id'])
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'devices', type_='foreignkey')
    # ### end Alembic commands ###
size: 668

max_stars_repo_path: sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/sweetie_bot_follow_head_pose_smart.py
max_stars_repo_name: sweetie-bot-project/sweetie_bot_flexbe_behaviors
max_stars_count: 9
id: 2171015
content:
#!/usr/bin/env python
import math
import numpy
import rospy
import tf

from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyTransformListener, ProxyServiceCaller

from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from std_msgs.msg import Header
from geometry_msgs.msg import Point, PointStamped, PoseStamped
from sensor_msgs.msg import JointState

from proto2.head_ik import HeadIK

class SweetieBotFollowHeadPoseSmart(EventState):
    '''
    SweetieBot follows an object with head and eyes. The object is specified by a PoseStamped on pose_topic.
    The robot tries to keep a comfortable distance between the object and its head. If that is not possible,
    the corresponding outcome may be triggered.

    If the distance between the head and the object is smaller than `distance_uncomfortable`, set joint51 to `neck_angle_uncomfortable`.
    If it is greater than `distance_comfortable`, set joint51 to `neck_angle_comfortable`.

    -- pose_topic                       string      geometry_msgs.msg.PoseStamped topic where the object pose is published.
    -- follow_joint_state_controller    string      FollowJointState controller name without prefix.
    -- discomfort_time                  float       If the distance between head and object is less than `distance_uncomfortable` for `discomfort_time` seconds, the `too_close` outcome is triggered.
    -- neck_control_parameteres         float[]     [ neck_angle_comfortable, distance_comfortable, neck_angle_uncomfortable, distance_uncomfortable ]
    -- deactivate                       boolean     Deactivate controller on exit state.
    -- controlled_chains                string[]    List of controlled kinematic chains; may contain 'head', 'eyes'.

    <= failed       Unable to activate state (controller is unavailable etc.)
    <= too_close    Object is too close to head.

    '''

    def __init__(self, pose_topic, follow_joint_state_controller = 'joint_state_head', discomfort_time = 1000.0,
            neck_control_parameteres = [ -0.13, 0.3, 0.20, 0.2], deactivate = True, controlled_chains = ['head', 'eyes']):
        super(SweetieBotFollowHeadPoseSmart, self).__init__(outcomes = ['failed', 'too_close'])

        # store state parameter for later use.
        self._pose_topic = pose_topic
        if len(neck_control_parameteres) != 4:
            raise TypeError('SweetieBotFollowHeadPoseSmart: neck_control_parameteres must be float[4]')
        self._neck_params = neck_control_parameteres
        self._discomfort_time = discomfort_time
        self._controller = 'motion/controller/' + follow_joint_state_controller
        self._deactivate = deactivate
        self._control_head = 'head' in controlled_chains
        self._control_eyes = 'eyes' in controlled_chains

        # setup proxies
        self._set_operational_caller = ProxyServiceCaller({ self._controller + '/set_operational': SetBool })
        self._pose_subscriber = ProxySubscriberCached({ self._pose_topic: PoseStamped })
        self._joints_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })

        # head inverse kinematics
        self._ik = HeadIK()

        # state
        self._neck_angle = None
        self._comfortable_stamp = None

        # error in enter hook
        self._error = False

    def on_enter(self, userdata):
        self._error = False

        # activate head controller
        try:
            res = self._set_operational_caller.call(self._controller + '/set_operational', True)
        except Exception as e:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to activate `' + self._controller + '` controller:\n%s' % str(e))
            self._error = True
            return

        if not res.success:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to activate `' + self._controller + '` controller (SetBoolResponse: success = false).')
            self._error = True
            return

        # set default value
        self._neck_angle = self._neck_params[0]
        self._comfortable_stamp = rospy.Time.now()

        Logger.loginfo('SweetieBotFollowHeadPoseSmart: controller `{0}` is active.'.format(self._controller))

    def execute(self, userdata):
        if self._error:
            return 'failed'

        # check if new message is available
        if self._pose_subscriber.has_msg(self._pose_topic):
            # get object position
            pose = self._pose_subscriber.get_last_msg(self._pose_topic)
            # convert to PointStamped
            focus_point = PointStamped()
            focus_point.header = Header(frame_id = pose.header.frame_id)
            focus_point.point = pose.pose.position

            head_joints_msg = None
            eyes_joints_msg = None
            # CALCULATE HEAD POSITION
            if self._control_head:
                # calculate comfort neck_angle
                try:
                    # convert point coordinates to bone54 frame
                    fp = self._ik._tf.transformPoint('bone54', focus_point).point
                    # distance and direction angle
                    dist = math.sqrt(fp.x**2 + fp.y**2 + fp.z**2)
                    angle = math.acos(fp.x / dist)
                    # Logger.loginfo('SweetieBotFollowHeadPoseSmart: dist: %s, angle: %s' % (str(dist), str(angle)))
                    # check comfort distance
                    if angle < math.pi/4:
                        if dist > self._neck_params[1]:
                            self._neck_angle = self._neck_params[0]
                            self._comfortable_stamp = rospy.Time.now()
                        elif dist < self._neck_params[3]:
                            self._neck_angle = self._neck_params[2]
                            # check if the discomfort timer has elapsed
                            if (rospy.Time.now() - self._comfortable_stamp).to_sec() > self._discomfort_time:
                                return 'too_close'
                        else:
                            self._comfortable_stamp = rospy.Time.now()
                    else:
                        self._comfortable_stamp = rospy.Time.now()
                except tf.Exception as e:
                    Logger.logwarn('SweetieBotFollowHeadPoseSmart: Cannot transform to bone54:\n%s' % str(e))
                    self._neck_angle = self._neck_params[0]
                # calculate head pose for given angle
                head_joints_msg = self._ik.pointDirectionToHeadPose(focus_point, self._neck_angle, 0.0)

            # CALCULATE EYES POSE
            if self._control_eyes:
                eyes_joints_msg = self._ik.pointDirectionToEyesPose(focus_point)

            # PUBLISH POSE
            if head_joints_msg:
                if eyes_joints_msg:
                    # join head and eyes pose
                    head_joints_msg.name += eyes_joints_msg.name
                    head_joints_msg.position += eyes_joints_msg.position
                # publish pose
                self._joints_publisher.publish(self._controller + '/in_joints_ref', head_joints_msg)
            elif eyes_joints_msg:
                # publish pose
                self._joints_publisher.publish(self._controller + '/in_joints_ref', eyes_joints_msg)

    def on_exit(self, userdata):
        if self._deactivate:
            self.on_stop()

    def on_stop(self):
        try:
            res = self._set_operational_caller.call(self._controller + '/set_operational', False)
        except Exception as e:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
        Logger.loginfo('SweetieBotFollowHeadPoseSmart: controller `{0}` is deactivated.'.format(self._controller))
size: 7921

max_stars_repo_path: ichnaea/constants.py
max_stars_repo_name: crankycoder/ichnaea
max_stars_count: 348
id: 2170999
content:
# We return position and accuracy values rounded to 7
# :term:`decimal degrees`, mostly to make the resulting JSON look
# prettier. 1E-7 degrees =~ 1.1cm at the equator, so clients of our
# external APIs will see that as our spatial resolution, though in
# practice we are always in the multiple of tens of meters range.
DEGREE_DECIMAL_PLACES = 7

MAX_LAT = 85.051  # Maximum latitude in :term:`Web Mercator` projection.
MIN_LAT = -85.051  # Minimum latitude in :term:`Web Mercator` projection.

MAX_LON = 180.0  # Maximum unrestricted longitude in :term:`WGS84`.
MIN_LON = -180.0  # Minimum unrestricted longitude in :term:`WGS84`.
size: 633

max_stars_repo_path: api/app.py
max_stars_repo_name: ca2315/PlasmoCount
max_stars_count: 0
id: 2170580
content:
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_cors import CORS, cross_origin

from pathlib import Path
import matplotlib
import warnings
import json
import time

matplotlib.use('Agg')
warnings.filterwarnings('ignore')

from programs.model import Model
from programs.result import Result
from programs.summarize import summarize

app = Flask(__name__, static_folder='../build', static_url_path='/')
app.config.from_object('config')
UPLOAD_FOLDER = app.config['UPLOAD_FOLDER']
EXAMPLE_FOLDER = app.config['EXAMPLE_FOLDER']
CORS(app, support_credentials=True)


@app.errorhandler(404)
def not_found(e):
    return app.send_static_file('index.html')


@app.route('/')
def index():
    return app.send_static_file('index.html')


@app.route('/api/uploads/<filename>')
def download_file(filename):
    return send_from_directory(UPLOAD_FOLDER, filename)


@app.route('/api/example/<filename>')
def download_example(filename):
    return send_from_directory(EXAMPLE_FOLDER, filename)


@app.route('/api/model', methods=['POST'])
def run(upload_folder=UPLOAD_FOLDER):
    job = {
        'id': request.form.get('id'),
        'date': request.form.get('date'),
        'email-address': request.form.get('email-address'),
        'has-gams': request.form.get('has-gams') == 'true',
        'data-contrib': request.form.get('data-contrib') == 'true',
        'cut-offs': [1.5, 2.5]
    }

    upload_folder = Path(upload_folder)
    job_folder = upload_folder / job['id']
    job_folder.mkdir(exist_ok=True)

    # get files
    files = request.files

    # load model
    model = Model(has_gams=job['has-gams'])
    results = []
    for i in files:
        # load result
        img = model.load_image(files[i])
        pred = model.predict()
        result = Result(i, files[i].filename, img, pred)
        result.run(upload_folder=job_folder)
        results.append(result.to_output())

    output = {
        'data': {
            'summary': summarize(results),
            'results': results
        },
        'statusOK': True
    }

    with open(job_folder / 'output.json', 'w') as f:
        json.dump(output, f)
    return output


@app.route('/api/result', methods=['POST'])
def return_result(upload_folder=UPLOAD_FOLDER, example_folder=EXAMPLE_FOLDER):
    job_id = request.get_json()['id']
    if job_id == 'example':
        result_dir = Path(example_folder)
    else:
        result_dir = Path(upload_folder) / job_id
    result_path = result_dir / 'output.json'
    if result_path.exists():
        with open(result_path) as f:
            return json.load(f)
    else:
        return {'statusOK': False}


if __name__ == '__main__':
    app.run(debug=True)
size: 2718

max_stars_repo_path: corehq/apps/hqpillow_retry/urls.py
max_stars_repo_name: kkrampa/commcare-hq
max_stars_count: 1
id: 2170713
content:
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url
from corehq.apps.hqpillow_retry.views import EditPillowError

urlpatterns = [
    url(r'^edit_errors/$', EditPillowError.as_view(), name=EditPillowError.urlname),
]
size: 277

max_stars_repo_path: library/source2/resource_types/vwrld/world.py
max_stars_repo_name: anderlli0053/SourceIO
max_stars_count: 199
id: 2169882
content:
from ..resource import ValveCompiledResource


class ValveCompiledWorld(ValveCompiledResource):
    pass
size: 104

max_stars_repo_path: util/stringutil.py
max_stars_repo_name: sharkbound/adventofcode2020
max_stars_count: 0
id: 2170972
content:
from typing import Iterable
import re

RE_ALL_INTS = re.compile(r'([+-]?\d+)')


def striplines(lines: Iterable[str]):
    return map(str.strip, lines)


def striplines_aslist(lines: Iterable[str]):
    return list(striplines(lines))


def find_all_ints(string: str):
    return [int(x) for x in RE_ALL_INTS.findall(string)]
size: 325

max_stars_repo_path: main.py
max_stars_repo_name: Viniciuuz/redditIF-bot-publico
max_stars_count: 0
id: 2170987
content:
import praw
from praw.reddit import Subreddit

from PIL import Image
import imagehash

import requests
import shutil

import os
from time import sleep

reddit = praw.Reddit(client_id = "",
                     client_secret = "",
                     username = "",
                     password = "",
                     user_agent = "IF-moderator")

subreddit = reddit.subreddit("IFFans")


def timer():
    while True:
        main()
        sleep(10)


def remove_post(post_id):
    ac_post = reddit.submission(post_id)
    ac_post.mod.remove()
    reddit.redditor(str(ac_post.author)).message('Seu post foi removido',
                    f"""o seu [post](https://reddit.com/{post_id}) em r/IFFans foi removido por ir contra as nossas regras.
                    (Esta ação foi feita por um bot, se você acha que foi um engano, fale com o criador do bot: u/_3DWaffle_)""",
                    from_subreddit="IFFans")


def compare(image):
    l = os.listdir('memes/')

    # hash the submitted image once before comparing it against each stored meme
    post = imagehash.average_hash(Image.open(image))

    for i in l:
        meme = imagehash.average_hash(Image.open("memes/" + i))
        if (meme - post) >= 50:
            return True


def download_image(url, name):
    r = requests.get(url, stream = True)
    if r.status_code == 200:
        r.raw.decode_content = True

        with open(name, 'wb') as f:
            shutil.copyfileobj(r.raw, f)

        print('imagem baixada: ', name)


def main():
    for submission in subreddit.new(limit=1):
        # for submission in subreddit.stream.submissions():
        if not submission.stickied:
            if submission.url.endswith((".jpg", ".png")):

                print(f"começando analise do post: {submission.title} - {submission.author}")

                download_image(submission.url, f"{submission.title.lower()}.{submission.url.lower()[-3:]}")
                print("comparando imagens...")

                if compare(f"{submission.title.lower()}.{submission.url.lower()[-3:]}"):
                    print("semelhança de imagem confere, apagando post...")
                    remove_post(submission)
                    print("post apagado")

                else:
                    print('semelhança de imagem baixa')
                    sleep(5)
                    os.system("cls")
            else:
                print('post não pode ser analisado por não ser uma imagem')
                print(f"Post: {submission.title}, Por: {submission.author}")

main()
size: 2671

max_stars_repo_path: modules/info.py
max_stars_repo_name: zenzue/viBot
max_stars_count: 2
id: 2170314
content:
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
#

""" Info module for viBot Project """

__author__ = "blackvkng"

import os
import ctypes
import getpass
import platform

def run():
    if os.name == "nt":
        admin_access = "Yes" if ctypes.windll.shell32.IsUserAnAdmin() != 0 else "No"
    else:
        admin_access = "Yes" if os.getuid() == 0 else "No"

    info = [("[>] Platform ", platform.system()),
            ("[>] Admin Access ", admin_access),
            ("[>] Architecture ", platform.architecture()[0]),
            ("[>] Username ", getpass.getuser())]

    return info
size: 559
Columns: max_stars_repo_path (string, length 4-182), max_stars_repo_name (string, length 6-116), max_stars_count (int64, 0-191k), id (string, length 7), content (string, length 100-10k), size (int64, 100-10k)
client/wx/task_base.py
eonuallain/dcomp
0
2171211
from abc import ABC, abstractmethod


class TaskBase(ABC):
    # abstract method
    @abstractmethod
    def run(self):
        pass
119
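A minimal usage sketch for the abstract base class above; the PrintTask subclass is illustrative and not part of the eonuallain/dcomp repo. With run() marked abstract, ABC refuses to instantiate TaskBase until a subclass overrides it.

from abc import ABC, abstractmethod

class TaskBase(ABC):
    @abstractmethod
    def run(self):
        pass

# Hypothetical concrete task for illustration.
class PrintTask(TaskBase):
    def run(self):
        print("running task")

PrintTask().run()   # prints "running task"
# TaskBase()        # would raise TypeError: can't instantiate abstract class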
boleto_bancario/boleto_bancario/doctype/emitir_boletos_por_faturas_de_venda/emitir_boletos_por_faturas_de_venda.py
guidupas/erpnext-boleto-bancario
0
2170253
# -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document


class EmitirBoletosporFaturasdeVenda(Document):
    pass


@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def filtrar_faturas_child_table(doctype, txt, searchfield, start, page_len, filters):
    return frappe.db.sql(
        """
        SELECT tsi.name, tsi.posting_date, tsi.due_date, tsi.customer, tsi.customer_name, tsi.outstanding_amount
        FROM `tabSales Invoice` tsi
        LEFT JOIN `tabFaturas de Boletos por Fatura de Venda` tfdbpfdv ON tsi.name = tfdbpfdv.fatura
        WHERE tfdbpfdv.fatura IS NULL
        AND tsi.status = 'Unpaid'
        AND tsi.docstatus = 1
        AND (tsi.customer LIKE '%%%(txt)s%%' OR tsi.name LIKE '%%%(txt)s%%')
        AND tsi.company = '%(company)s'
        """ % {
            'txt': txt,
            'company': filters.get('company')}
    )


@frappe.whitelist()
def filtrar_faturas_multiselect_dialog(doctype, txt, searchfield, start, page_len, filters):
    if filters.get('posting_date'):
        return frappe.db.sql(
            """
            SELECT tsi.name, tsi.posting_date, tsi.due_date, tsi.customer, tsi.customer_name, tsi.outstanding_amount
            FROM `tabSales Invoice` tsi
            LEFT JOIN `tabFaturas de Boletos por Fatura de Venda` tfdbpfdv ON tsi.name = tfdbpfdv.fatura
            WHERE tfdbpfdv.fatura IS NULL
            AND tsi.status = 'Unpaid'
            AND tsi.docstatus = 1
            AND (tsi.customer LIKE '%%%(customer)s%%'
                AND tsi.name LIKE '%%%(txt)s%%'
                AND tsi.posting_date BETWEEN '%(posting_date_inicio)s' AND '%(posting_date_fim)s')
            AND tsi.company = '%(company)s'
            """ % {
                'customer': filters.get('customer', ''),
                'posting_date_inicio': filters.get('posting_date')[1][0],
                'posting_date_fim': filters.get('posting_date')[1][1],
                'txt': txt,
                'company': filters.get('company')},
            as_dict = True)
    else:
        return frappe.db.sql(
            """
            SELECT tsi.name, tsi.posting_date, tsi.due_date, tsi.customer, tsi.customer_name, tsi.outstanding_amount
            FROM `tabSales Invoice` tsi
            LEFT JOIN `tabFaturas de Boletos por Fatura de Venda` tfdbpfdv ON tsi.name = tfdbpfdv.fatura
            WHERE tfdbpfdv.fatura IS NULL
            AND tsi.status = 'Unpaid'
            AND tsi.docstatus = 1
            AND (tsi.customer LIKE '%%%(customer)s%%'
                AND tsi.name LIKE '%%%(txt)s%%')
            AND tsi.company = '%(company)s'
            """ % {
                'customer': filters.get('customer', ''),
                'txt': txt,
                'company': filters.get('company')},
            as_dict = True)
2,543
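The queries above splice txt and the filter values into the SQL with % string formatting. frappe.db.sql also accepts a values dict that binds %(name)s placeholders server-side; a hedged sketch of the child-table query rewritten with bound parameters (same tables and filters; the parameter-binding form is taken from frappe's documented API, the rest is illustrative):

def filtrar_faturas_child_table(doctype, txt, searchfield, start, page_len, filters):
    return frappe.db.sql(
        """
        SELECT tsi.name, tsi.posting_date, tsi.due_date,
               tsi.customer, tsi.customer_name, tsi.outstanding_amount
        FROM `tabSales Invoice` tsi
        LEFT JOIN `tabFaturas de Boletos por Fatura de Venda` tfdbpfdv
               ON tsi.name = tfdbpfdv.fatura
        WHERE tfdbpfdv.fatura IS NULL
          AND tsi.status = 'Unpaid'
          AND tsi.docstatus = 1
          AND (tsi.customer LIKE %(txt)s OR tsi.name LIKE %(txt)s)
          AND tsi.company = %(company)s
        """,
        # the wildcard pattern is built in Python; the driver escapes the value
        {"txt": "%{}%".format(txt), "company": filters.get("company")},
    )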
pyleecan/Methods/Simulation/LossFEMM/run.py
Eomys/Pyleecan
4
2170136
from ....Methods.Simulation.Input import InputError


def run(self):
    """Run the LossFEMM module"""

    if self.parent is None:
        raise InputError("The Loss object must be in a Simulation object to run")
    if self.parent.parent is None:
        raise InputError("The Loss object must be in an Output object to run")

    self.get_logger().info("Running LossFEMM module")

    # get output
    output = self.parent.parent

    axes_dict = self.comp_axes(output)

    out_dict = self.comp_loss(output, axes_dict)

    output.loss.store(
        out_dict,
        axes_dict,
        self.is_get_meshsolution,
        type_skin_effect=self.type_skin_effect,
        Tsta=self.Tsta,
    )
695
OldCode/eog-Plugin-MenuEnhancer/eog-Plugin-MenuEnhancer/eog-plugin-menuenhancer.py
honzi/Files
1
2169596
#!/usr/bin/env python3

from gi.repository import Eog, GObject, Gtk


class eog_plugin_menuenhancer_window(GObject.Object, Eog.WindowActivatable):
    __gtype_name__ = 'eog_plugin_menuenhancer_window'
    window = GObject.property(type = Eog.Window)

    def __init__(self):
        GObject.Object.__init__(self)

    def action_menuitem_file_randomize(self, action):
        _thumbview = self.window.get_thumb_view()
        _thumbview.select_single(5)  # EOG_THUMB_VIEW_SELECT_RANDOM

    def attach_menu_to_window(self):
        manager = self.window.get_ui_manager()
        self._groups = Gtk.ActionGroup('eog_plugin_menuenhancer-actions')
        self._groups.add_actions([
            ('eog_Plugin_MenuEnhancer_Randomize',
             Gtk.STOCK_ADD,
             _('Random Image'),
             'm',
             _('Go to a random image of the gallery'),
             self.action_menuitem_file_randomize),
        ])
        manager.insert_action_group(self._groups, -1)
        menuxml = """
            <menubar name="MainMenu">
              <menu action="Go">
                <menuitem action="eog_Plugin_MenuEnhancer_Randomize"/>
              </menu>
            </menubar>
        """
        self._ui = manager.add_ui_from_string(menuxml)

    def do_activate(self):
        self.attach_menu_to_window()
        self._handler_load = self.window.connect('show', self.show)

    def do_deactivate(self):
        manager = self.window.get_ui_manager()
        manager.remove_action_group(self._groups)
        manager.remove_ui(self._ui)
        manager.ensure_update()

    def do_update_state(self):
        pass

    def show(self, window, data = None):
        self.attach_menu_to_window()
        self.window.disconnect(self._handler_load)
1,649
client/controller_impl/exactmatches_controller_impl.py
NCATS-Tangerine/biolink-beacon
3
2171203
from controller_impl import utils

from ontobio.golr.golr_query import GolrSearchQuery
from swagger_server.models.exact_match_response import ExactMatchResponse
from cachetools.func import ttl_cache


def get_exact_matches_to_concept_list(c):  # noqa: E501
    """get_exact_matches_to_concept_list

    Given an input list of [CURIE](https://www.w3.org/TR/curie/) identifiers of known exactly
    matched concepts [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch), retrieves the
    list of [CURIE](https://www.w3.org/TR/curie/) identifiers of additional concepts that are
    deemed by the given knowledge source to be exact matches to one or more of the input concepts
    **plus** whichever identifiers from the input list which specifically matched these new
    additional concepts. If an empty set is returned, then it can be assumed that the given
    knowledge source does not know of any new equivalent concepts matching the input set.  # noqa: E501

    :param c: set of [CURIE-encoded](https://www.w3.org/TR/curie/) identifiers of exactly matching
        concepts, to be used in a search for additional exactly matching concepts
        [*sensa*-SKOS](http://www.w3.org/2004/02/skos/core#exactMatch).
    :type c: List[str]

    :rtype: List[str]
    """
    s = []
    for conceptId in c:
        e = _get_exact_matches(conceptId)
        s.append(e)
    return s


# ttl is "time to live" in seconds
@ttl_cache(maxsize=1000, ttl=86400)
def _get_exact_matches(conceptId):
    results = GolrSearchQuery(
        term=conceptId,
        fq={'id': conceptId},
        rows=1,
        hl=False
    ).search()

    docs = results.docs

    exactmatches = []
    for d in docs:
        if utils.get_property(d, 'id') == conceptId:
            matches = utils.get_property(d, 'equivalent_curie', [])
            exactmatches.extend(matches)

    e = ExactMatchResponse(
        id=conceptId,
        within_domain=len(docs) != 0,
        has_exact_matches=exactmatches
    )

    return e
1,973
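The per-concept lookup above is memoized with cachetools' ttl_cache, so a repeated CURIE skips the Golr round trip until the one-day TTL expires. A minimal, self-contained illustration of that decorator's behavior; slow_lookup is a stand-in, not beacon code:

import time
from cachetools.func import ttl_cache

calls = 0

@ttl_cache(maxsize=1000, ttl=2)  # entries expire after 2 seconds
def slow_lookup(concept_id):
    global calls
    calls += 1
    return concept_id.upper()

slow_lookup("hp:0000001")
slow_lookup("hp:0000001")
print(calls)      # 1 -- the second call was served from the cache
time.sleep(2.1)
slow_lookup("hp:0000001")
print(calls)      # 2 -- the entry expired, so the function ran again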
codekitchen/theapp/app.py
bhanuchandrika/pylearn
0
2169784
import streamlit as st

st.sidebar.subheader("About dspy")
st.sidebar.info("A webapp that is running on python and teaching python!")
st.sidebar.markdown("""
<img src="https://media.giphy.com/media/3o7527pa7qs9kCG78A/giphy.gif" width="200">
""", unsafe_allow_html=True)

st.title("`dspy` - Data Science with Python")
st.markdown("""
___
""")

st.subheader("Please select what you would like to do")
features = ["python 101 - Learn the basics of python",
            "pyPrac - Solve problems using python",
            "pandas - Learn data analysis and manipulation",]
selection = st.radio("", features)

if selection == features[0]:
    st.balloons()
else:
    st.write("![](https://media3.giphy.com/media/STZxU3AXEdwW4caLwS/giphy.gif?cid=790b761115e96593923fc6494cb027cacde63a309c048f29&rid=giphy.gif&ct=g)")
809
observations/r/nightingale.py
hajime9652/observations
199
2170382
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def nightingale(path):
  """<NAME>'s data on deaths from various causes in the Crimean War

  In the history of data visualization, <NAME> is best remembered
  for her role as a social activist and her view that statistical data,
  presented in charts and diagrams, could be used as powerful arguments
  for medical reform. After witnessing deplorable sanitary conditions in
  the Crimea, she wrote several influential texts (Nightingale, 1858,
  1859), including polar-area graphs (sometimes called "Coxcombs" or
  rose diagrams), showing the number of deaths in the Crimea from battle
  compared to disease or preventable causes that could be reduced by
  better battlefield nursing care. Her *Diagram of the Causes of
  Mortality in the Army in the East* showed that most of the British
  soldiers who died during the Crimean War died of sickness rather than
  of wounds or other causes. It also showed that the death rate was
  higher in the first year of the war, before the Sanitary Commissioners
  arrived in March 1855 to improve hygiene in the camps and hospitals.

  A data frame with 24 observations on the following 10 variables.

  `Date`
      a Date, composed as `as.Date(paste(Year, Month, 1, sep='-'), "%Y-%b-%d")`

  `Month`
      Month of the Crimean War, an ordered factor

  `Year`
      Year of the Crimean War

  `Army`
      Estimated average monthly strength of the British army

  `Disease`
      Number of deaths from preventable or mitagable zymotic diseases

  `Wounds`
      Number of deaths directly from battle wounds

  `Other`
      Number of deaths from other causes

  `Disease.rate`
      Annual rate of deaths from preventable or mitagable zymotic
      diseases, per 1000

  `Wounds.rate`
      Annual rate of deaths directly from battle wounds, per 1000

  `Other.rate`
      Annual rate of deaths from other causes, per 1000

  The data were obtained from: <NAME>. and <NAME>. (2007).
  Understanding Uncertainty: Mathematics of the Coxcomb.
  http://understandinguncertainty.org/node/214.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `nightingale.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 24 rows and 10 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'nightingale.csv'
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/HistData/Nightingale.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='nightingale.csv',
                               resume=False)

  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
3,166
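Per its docstring, the loader above returns an (x_train, metadata) pair and downloads the CSV on first use. A short usage sketch; the module path follows the row's file path, the ~/data directory is arbitrary, and the shape comes from the docstring:

from observations.r.nightingale import nightingale

x_train, metadata = nightingale("~/data")   # fetches nightingale.csv on the first call
print(x_train.shape)                        # (24, 10) per the docstring
print(list(metadata["columns"])[:3])        # first few of the 10 column names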
RNN/RNN_LSTM_GRU.py
JimCurryWang/Deep-Learning-Jot
0
2170990
import torch
import torchvision
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import optim
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm

"""
Example code of a simple RNN, GRU, LSTM on the MNIST dataset.

https://pytorch.org/docs/stable/generated/torch.nn.RNN.html

input_size – The number of expected features in the input x
hidden_size – The number of features in the hidden state h
num_layers – Number of recurrent layers. E.g., setting num_layers=2 would mean
    stacking two RNNs together to form a stacked RNN, with the second RNN taking
    in outputs of the first RNN and computing the final results. Default: 1
batch_first – If True, then the input and output tensors are provided as
    (batch, seq_len, feature). Default: False
    ** input of shape (seq_len, batch, input_size) -> (batch, seq_len, input_size)
    ** output of shape (seq_len, batch, num_directions * hidden_size) -> (batch, seq_len, num_directions * hidden_size)
bidirectional – If True, becomes a bidirectional RNN. Default: False
"""

# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
sequence_length = 28
input_size = 28
hidden_size = 256
num_layers = 2
num_classes = 10
learning_rate = 0.005
batch_size = 64
num_epochs = 3


class RNN(nn.Module):
    '''Recurrent neural network (many-to-one)
    '''
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        '''
        # Set initial hidden and cell states
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)

        # Forward propagate RNN
        out, h0 = self.rnn(x, h0)
        out = out[:, -1, :]
        '''
        out, _ = self.rnn(x)

        # Decode the hidden state of the last time step
        # only take the last hidden state and send it into fc
        out = out[:, -1, :]  # out = [64, 256]
        out = self.fc(out)
        return out


class GRU(nn.Module):
    '''Recurrent neural network with GRU (many-to-one)
    '''
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        '''
        # Set initial hidden and cell states
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)

        # Forward propagate GRU
        out, h0 = self.gru(x, h0)
        out = out[:, -1, :]
        '''
        out, _ = self.gru(x)

        # Decode the hidden state of the last time step
        # only take the last hidden state and send it into fc
        out = out[:, -1, :]
        out = self.fc(out)
        return out


class LSTM_Multi2Multi(nn.Module):
    '''Recurrent neural network with LSTM (many-to-many)
    '''
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(LSTM_Multi2Multi, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)

        # Using all rnn outputs with fc to obtain the final classification result
        self.fc = nn.Linear(hidden_size * sequence_length, num_classes)

    def forward(self, x):
        '''
        # Set initial hidden and cell states,
        # h0, c0 -> (num_layers * num_direction, batch, hidden_size)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)

        # Forward propagate LSTM
        out, (h, c) = self.lstm(x, (h0, c0))  # out: tensor of shape (batch_size, seq_length, hidden_size)
        out = out.reshape(out.shape[0], -1)
        '''
        out, _ = self.lstm(x)  # x=[64, 28, 28], out=[64, 28, 256]=(batch, seq_len, 1 * hidden_size)

        # If we use this, it means that we take all outputs from seq1 ~ seq28
        out = out.reshape(out.shape[0], -1)  # out=[64, 28*256]

        # Decode the hidden states of all the time steps
        out = self.fc(out)
        return out


class LSTM(nn.Module):
    '''Recurrent neural network with LSTM (many-to-one)
    '''
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)

        # Using the last rnn output with fc to obtain the final classification result
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        '''
        # Set initial hidden and cell states,
        # h0, c0 -> (num_layers * num_direction, batch, hidden_size)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)

        # Forward propagate LSTM
        out, (h, c) = self.lstm(x, (h0, c0))  # out: tensor of shape (batch_size, seq_length, hidden_size)
        out = out[:, -1, :]
        '''
        out, _ = self.lstm(x)  # x=[64, 28, 28], out=[64, 28, 256]=(batch, seq_len, 1 * hidden_size)

        # Decode the hidden state of the last time step
        # only take the last hidden state and send it into fc
        out = out[:, -1, :]  # out = [64, 256]
        out = self.fc(out)
        return out


class BLSTM(nn.Module):
    '''Bidirectional LSTM
    '''
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BLSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(
            input_size, hidden_size, num_layers,
            batch_first=True, bidirectional=True
        )
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        '''
        # Set initial hidden and cell states,
        # h0, c0 -> (num_layers * num_direction, batch, hidden_size)
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)

        # Forward propagate BLSTM
        out, (h, c) = self.lstm(x, (h0, c0))  # out: tensor of shape (batch_size, seq_length, hidden_size)
        out = out.reshape(out.shape[0], -1)
        '''
        # BI-LSTM will have a forward and a backward pass,
        # but they are all going to get concatenated into the same hidden state
        out, _ = self.lstm(x)  # x=[64, 28, 28], out=[64, 28, 2*256]=(batch, seq_len, num_directions * hidden_size)

        # Decode the hidden state of the last time step
        # only take the last hidden state and send it into fc
        out = out[:, -1, :]
        out = self.fc(out)
        return out


def check_accuracy(loader, model):
    '''Check accuracy on training & test to see how good our model is
    '''
    num_correct = 0
    num_samples = 0

    # Set model to eval
    model.eval()

    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device).squeeze(1)
            y = y.to(device=device)

            scores = model(x)
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)

    # Toggle model back to train
    model.train()
    return num_correct / num_samples


# Load Data
train_dataset = datasets.MNIST(root="mnist/MNIST", train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root="mnist/MNIST", train=False, transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# model = BLSTM(input_size, hidden_size, num_layers, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Train Network
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
        # (torch.Size([64, 1, 28, 28]), torch.Size([64]))
        # Get data to cuda if possible
        data = data.to(device=device).squeeze(1)  # [64, 1, 28, 28] -> [64, 28, 28]
        targets = targets.to(device=device)

        # forward
        scores = model(data)
        loss = criterion(scores, targets)

        # backward
        optimizer.zero_grad()
        loss.backward()

        # gradient descent update step/adam step
        optimizer.step()

print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
9,787
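The file's header comment spells out the batch_first=True shape convention that every class above relies on. A small standalone check of those shapes with the same hyperparameters:

import torch
from torch import nn

lstm = nn.LSTM(input_size=28, hidden_size=256, num_layers=2, batch_first=True)
x = torch.randn(64, 28, 28)     # (batch, seq_len, input_size)
out, (h, c) = lstm(x)
print(out.shape)                # torch.Size([64, 28, 256]) -- (batch, seq_len, hidden_size)
print(h.shape)                  # torch.Size([2, 64, 256])  -- (num_layers, batch, hidden_size)
print(out[:, -1, :].shape)      # torch.Size([64, 256])     -- the last time step fed to the fc layer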
tea_reader_consumer/run.py
ojarva/home-info-display
1
2171241
from local_settings import BASE_URL
import datetime
import json
import redis
import requests
import requests.exceptions


class TeaReaderConsumer(object):
    def __init__(self):
        self.redis = redis.StrictRedis()

    def run(self):
        pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
        pubsub.subscribe("tea-reader-pubsub")
        for message in pubsub.listen():
            try:
                data = json.loads(message["data"])
            except (ValueError, TypeError) as err:
                print "Failed to decode redis data: %s" % err
                continue
            resp = requests.get(BASE_URL + "tea/get/" + data["id"])
            print resp.content
            if resp.status_code != 200:
                print "Getting details for %s failed: %s" % (data["id"], resp.status_code)
                continue
            tag_data = resp.json()
            if tag_data["fields"]["boil_water"]:
                self.redis.publish("kettle-commands", json.dumps({"on": tag_data["fields"]["boil_water"]}))
            requests.post(BASE_URL + "tea/get/" + data["id"])


def main():
    runner = TeaReaderConsumer()
    runner.run()


if __name__ == '__main__':
    main()
1,217
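To exercise this consumer by hand (note it is Python 2 code), publish a message on the channel it subscribes to; the tag id below is a made-up example:

    redis-cli PUBLISH tea-reader-pubsub '{"id": "some-tag-id"}'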
electionnight/serializers/election_day_page.py
The-Politico/politico-civic-election-night
0
2171072
from election.models import Election, ElectionDay
from electionnight.models import PageContent
from geography.models import Division, DivisionLevel
from rest_framework import serializers
from rest_framework.reverse import reverse

from government.models import Party

from .division import DivisionSerializer
from .election import ElectionSerializer
from .party import PartySerializer


class ElectionDayPageListSerializer(serializers.ModelSerializer):
    url = serializers.SerializerMethodField()

    def get_url(self, obj):
        return reverse(
            "electionnight_api_election-day-detail",
            request=self.context["request"],
            kwargs={"pk": obj.pk},
        )

    class Meta:
        model = Division
        fields = ("url", "uid", "slug")


class ElectionDayPageSerializer(serializers.ModelSerializer):
    division = serializers.SerializerMethodField()
    parties = serializers.SerializerMethodField()
    elections = serializers.SerializerMethodField()
    content = serializers.SerializerMethodField()

    def get_division(self, obj):
        us_object = Division.objects.get(level__name=DivisionLevel.COUNTRY)
        us = DivisionSerializer(us_object).data
        us["children"] = [
            DivisionSerializer(
                state, context={"children_level": DivisionLevel.DISTRICT}
            ).data
            for state in us_object.children.all()
        ]
        return us

    def get_parties(self, obj):
        return PartySerializer(Party.objects.all(), many=True).data

    def get_elections(self, obj):
        elections = Election.objects.filter(election_day=obj)
        return ElectionSerializer(elections, many=True).data

    def get_content(self, obj):
        return PageContent.objects.site_content(obj)

    class Meta:
        model = ElectionDay
        fields = ("uid", "content", "elections", "parties", "division")
1,896
LargeScaleDeployment/fortimanager/lsd_vpn_add.py
dmitryperets/testbeds
11
2170812
#!/usr/bin/env python3
from fmg_api.device_manager import DeviceManagerApi
from fmg_api.vpn_manager import VpnManagerApi
from lsd_base import *


def main():
    cfg = readConfig()

    dev_session = DeviceManagerApi(
        url = cfg['fmg_api'],
        adom = cfg['adom'],
        user = cfg['fmg_user'],
        password = cfg['<PASSWORD>']
    )

    session = VpnManagerApi(
        url = cfg['fmg_api'],
        adom = cfg['adom'],
        user = cfg['fmg_user'],
        password = cfg['<PASSWORD>']
    )

    overlay_list = session.getOverlays()

    for region in cfg['regions']:
        for i, hub in enumerate(region['hubs']):
            overlay_intfs = []
            edge_group_name = f"Edge-{region['name']}" if len(cfg['regions']) > 1 else "Edge"
            print(" Hub = " + hub + ", Edge Group = " + edge_group_name)
            for t in [1, 2]:
                overlay_name = region['shortname'] + "_H" + str(i+1) + "T" + str(t) + "V1"
                overlay_intfs.append(overlay_name + "_0")
                if overlay_name not in overlay_list:
                    print(" Adding " + overlay_name)
                    session.addOverlay(
                        overlay_name = overlay_name,
                        hub_name = hub,
                        spoke_group = edge_group_name,
                        wan_intf = "port" + str(t+1),
                        network_id = str(i+1) + str(t) + "1"
                    )
                else:
                    print(" Updating " + overlay_name)
                    session.updateOverlay(
                        overlay_name = overlay_name,
                        network_id = str(i+1) + str(t) + "1"
                    )
                    if hub not in overlay_list[overlay_name]:
                        session.addVpnHub(
                            community_name = overlay_name,
                            hub_name = hub,
                            wan_intf = "port" + str(t+1)
                        )
                    if edge_group_name not in overlay_list[overlay_name]:
                        session.addVpnSpokeGroup(
                            community_name = overlay_name,
                            spoke_group = edge_group_name,
                            wan_intf = "port" + str(t+1)
                        )

            # Create interface zones on Hubs (SD-WAN will take care of zones on Edge)
            if not set(overlay_intfs).issubset(set(dev_session.getInterfaces(hub))):
                dev_session.installPolicy("default", "Hubs")
                dev_session.createZone(
                    dev_name = hub,
                    zone_name = "underlay",
                    intf_list = [ "port2", "port3" ]
                )
                dev_session.createZone(
                    dev_name = hub,
                    zone_name = "overlay",
                    intf_list = overlay_intfs
                )


if __name__ == "__main__":
    main()
2,939
src/game.py
mihirkhandekar/efficient-taxi-reward-structures
0
2170899
import numpy as np
from strategy.greedy_strategy import GreedyStrategy
from strategy.opt_strategy import OptStrategy
from agent import Agent
from goal import Goal
from grid import Grid
import copy
from config import DEBUG, SUPER_DEBUG, GRID_WIDTH, GRID_HEIGHT
from strategy.central_planner import CentralPlanner
import time

INITIAL_AGENTS = 8
INITIAL_GOALS = 8
MAX_AGENT_CAPACITY = 10
MAX_GOAL_CAPACITY = 10
SEED_AGENT = None
SEED_GOAL = None
TIMEOUT = 100


class Game:
    def __init__(self, i_grid, strategy=GreedyStrategy()):
        self.init_grid = copy.deepcopy(i_grid)
        self.time_grid = [copy.deepcopy(i_grid)]
        self.strategy = strategy
        self.current_utility = 0

    def generate_strategy_over_time(self):
        time = 0
        grid = copy.deepcopy(self.init_grid)
        grid.visualize()
        print('Time :', time)
        print('Grid :', grid.summary())
        while(len(grid.goals) > 0 and len(grid.agents) > 0 and time < TIMEOUT):
            agent_assignments = self.strategy.get_strategy(grid)
            move_directions = []
            move_agents = []
            move_goals = []
            self.__extract_move_directions_from_strategy(agent_assignments, move_directions, move_agents, move_goals)
            if all(direction == 'STAY' for direction in move_directions):
                break
            grid = grid.move(move_agents, move_directions, move_goals)
            self.time_grid.append(copy.deepcopy(grid))
            time += 1
            print('Time :', time)
            print('Grid :', grid.summary())
            grid.visualize()
        print(
            f'Stopping all agents with {len(grid.goals)} goals and {time}/{TIMEOUT} time. Agent can now think of other career options.'
        )
        return time

    def __extract_move_directions_from_strategy(self, agent_assignments, move_directions, move_agents, move_goals):
        for agent, goal in agent_assignments.items():
            if goal is None:
                move_directions.append('STAY')
            elif agent.pos_x < goal.pos_x and agent.pos_y < goal.pos_y:
                move_directions.append('UP_RIGHT')
            elif agent.pos_x < goal.pos_x and agent.pos_y > goal.pos_y:
                move_directions.append('UP_LEFT')
            elif agent.pos_x > goal.pos_x and agent.pos_y > goal.pos_y and agent.pos_x > 0 and agent.pos_y > 0:
                move_directions.append('DOWN_LEFT')
            elif agent.pos_x > goal.pos_x and agent.pos_y < goal.pos_y and agent.pos_x > 0:
                move_directions.append('DOWN_RIGHT')
            elif agent.pos_x > goal.pos_x and agent.pos_y == goal.pos_y and agent.pos_x > 0:
                move_directions.append('DOWN')
            elif agent.pos_x < goal.pos_x and agent.pos_y == goal.pos_y:
                move_directions.append('UP')
            elif agent.pos_x == goal.pos_x and agent.pos_y > goal.pos_y and agent.pos_y > 0:
                move_directions.append('LEFT')
            elif agent.pos_x == goal.pos_x and agent.pos_y < goal.pos_y:
                move_directions.append('RIGHT')
            else:
                move_directions.append('STAY')
            move_agents.append(agent)
            move_goals.append(goal)
            if SUPER_DEBUG:
                print(f'Agent {move_agents[-1].id} to move {move_directions[-1]}')

    def visualize(self):
        # Visualize all grids over time T
        for tgrid in self.time_grid:
            tgrid.visualize()

    def summary(self):
        # Show total utility and agent-specific utility
        grids = [tgrid.summary() for tgrid in self.time_grid]
        return self.current_utility, grids


def initialize_agents():
    np.random.seed(SEED_AGENT)
    agents = []
    for i in range(INITIAL_AGENTS):
        pos_x = np.random.randint(0, GRID_HEIGHT)
        pos_y = np.random.randint(0, GRID_WIDTH)
        agent = Agent(pos_x, pos_y, id=i, capacity=np.random.randint(1, MAX_AGENT_CAPACITY))
        agents.append(agent)
    return agents


def initialize_goals():
    np.random.seed(SEED_GOAL)
    goals = []
    for i in range(INITIAL_GOALS):
        pos_x = np.random.randint(0, GRID_HEIGHT)
        pos_y = np.random.randint(0, GRID_WIDTH)
        goal = Goal(pos_x, pos_y, id=i, capacity=np.random.randint(1, MAX_GOAL_CAPACITY))
        goals.append(goal)
    return goals


def initialize_grid(init_agents, goals):
    return Grid(grid_height=GRID_HEIGHT, grid_width=GRID_WIDTH, agents=init_agents, goals=goals)


def get_agents_utility(game):
    grid = game.time_grid[-1]
    agents = grid.agents
    print([agent.cur_utility for agent in agents])
    return np.sum(np.array([agent.cur_utility for agent in agents]))


def get_agents_distance(game):
    grid = game.time_grid[-1]
    agents = grid.agents
    print([agent.cur_distance for agent in agents])
    return np.sum(np.array([agent.cur_distance for agent in agents]))


def get_goals_pending_cap(game):
    grid = game.time_grid[-1]
    goals = grid.goals
    return np.sum(np.array([goal.capacity for goal in goals]))


def simulate():
    init_agents = initialize_agents()
    goals = initialize_goals()
    init_grid = initialize_grid(init_agents, goals)

    st = time.time()
    cp_strategy = CentralPlanner().get_strategy(copy.deepcopy(init_grid))
    print('CP time : ', time.time() - st)
    print(init_grid.summary())

    # Greedy game
    st = time.time()
    greedy_game = Game(init_grid, strategy=GreedyStrategy())
    print('---------------------', greedy_game.summary())
    greedy_time = greedy_game.generate_strategy_over_time()
    print('Greedy time : ', time.time() - st)

    st = time.time()
    nash_game = Game(init_grid, strategy=OptStrategy())
    print('---------------------', nash_game.summary())
    nash_time = nash_game.generate_strategy_over_time()
    print('CUMAX time : ', time.time() - st)

    epsilon = 0.000001
    print('Greedy Game social cost:', get_agents_utility(greedy_game), greedy_time,
          get_goals_pending_cap(greedy_game), get_agents_distance(greedy_game))
    print('Nash Game social cost:', get_agents_utility(nash_game), nash_time,
          get_goals_pending_cap(nash_game), get_agents_distance(nash_game))
    print('Central Planner:', cp_strategy)
    poa = get_agents_utility(greedy_game) / (get_agents_utility(nash_game) + epsilon)
    #timediff = greedy_time/nash_time
    #print('Price of Anarchy : ', poa)
    return get_agents_distance(greedy_game), get_agents_distance(nash_game), cp_strategy


if __name__ == '__main__':
    # Initialize agents and goals randomly
    greedy_dists = []
    opt_dists = []
    cps = []
    for _ in range(1):
        greedy_dist, opt_dist, cp = simulate()
        greedy_dists.append(greedy_dist)
        opt_dists.append(opt_dist)
        cps.append(cp)
    print(
        f'Greedy : {np.average(greedy_dists)}, Optimal : {np.average(opt_dists)}, Central Planner : {np.average(cps)}'
    )
6,959
dodo_commands/framework/container/facets/__init__.py
mnieber/dodo-commands
8
2167403
from dodo_commands.framework import ramda as R

from .command_line import CommandLine  # noqa
from .commands import Commands, init_commands  # noqa
from .config import Config  # noqa
from .layers import Layers, init_layers  # noqa


def i_(facet_class, member, prefix=None, alt_name=None):
    arg_name = alt_name if alt_name else prefix + "_" + member if prefix else member
    return ("in", facet_class, member, arg_name)


def o_(facet_class, member):
    return ("out", facet_class, member)


def map_datas(*args, transform):
    def action(ctr):
        kwargs = {}

        def is_input(arg):
            return arg[0] == "in"

        def do_add_to_kwargs(input_arg):
            _, facet_class, member, arg_name = input_arg
            value = getattr(facet_class.get(ctr), member)
            kwargs[arg_name] = value

        x = args                             # [(in | out, facet_class, member)]
        x = R.filter(is_input)(x)            # [(in | out, facet_class, member)]
        x = R.for_each(do_add_to_kwargs)(x)  # [value]

        output_values = transform(**kwargs)

        def is_output(arg):
            return arg[0] == "out"

        def zip_with_output_values(output_args):
            return [
                [output_arg, output_values[output_arg[2]]]
                for output_arg in output_args
            ]

        def do_store(output_arg, output_value):
            _, facet_class, member = output_arg
            setattr(facet_class.get(ctr), member, output_value)

        x = args                        # [(in | out, facet_class, member)]
        x = R.filter(is_output)(x)      # [(in | out, facet_class, member)]
        x = zip_with_output_values(x)   # [((in | out, facet_class, member), output_value)]
        R.for_each(R.ds(do_store))(x)

        return output_values

    return action


def register(*args):
    def decorator(func):
        setattr(func, "action_args", args)
        return func

    return decorator


def run(ctr, action):
    return map_datas(
        *action.action_args,
        transform=action,
    )(ctr)
2,043
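A hypothetical illustration (the facet members below are invented, not from the repo) of how i_/o_/register wire facet members into a transform: map_datas reads the "in" members into keyword arguments, calls the transform, and expects back a dict keyed by the "out" member names.

@register(
    i_(Config, "config"),
    o_(Commands, "command_dirs"),
)
def get_command_dirs(config):
    # the transform must return a dict keyed by the output member name
    return dict(command_dirs=config.get("ROOT", {}).get("command_path", []))

# run(ctr, get_command_dirs) would then read Config.get(ctr).config and
# store the result on Commands.get(ctr).command_dirs.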
qurkexp/join/pair-datadump.py
marcua/qurk_experiments
1
2171128
#!/usr/bin/env python
import sys, os
ROOT = os.path.abspath('%s/../..' % os.path.abspath(os.path.dirname(__file__)))
sys.path.append(ROOT)
os.environ['DJANGO_SETTINGS_MODULE'] = 'qurkexp.settings'

from django.core.management import setup_environ
from django.conf import settings
from qurkexp.join.models import *
from qurkexp.join.gal import getbtjoindata, getjoindata, run_gal
from qurkexp.hitlayer.models import HitLayer
from scipy import stats

#batch = (sys.argv[1] == "batch")
#num_to_compare = int(sys.argv[2])

#run_name = "joinpairs-actual-4" # match 6x6, 5 assignments each, 1 cent
#run_name = "joinpairs-30-2"     # match 30x30, 5 assignments each, 1 cent
#run_name = "joinpairs-30-5"     # match 30x30, 5 assignments each, 1 cent
#run_name = "joinpairs-20-2"     # match 20x20, 5 assignments each, 1 cent
#run_name = "joinpairs-20-4"     # match 20x20, 5 assignments each, 1 cent
#run_name = "joinpairs-15-1"     # match 15x15, 5 assignments each, 1 cent

run_groups = [
#    [False, "joinpairs-30-5",],
#    [False, "joinpairs-30-2",],
#    [False, "joinpairs-30-2", "joinpairs-30-5",],
#    [False, "joinpairs-20-2",],
#    [False, "joinpairs-20-4",],
#    [False, "joinpairs-20-2", "joinpairs-20-4",],
#    [False, "joinpairs-15-1",],
#    [True, "30-10-naive-ordered-1",],  # match 30x30, batch size 10, 5 assignments each, 1 cent
#    [True, "30-10-naive-ordered-20",], # match 30x30, batch size 10, 5 assignments each, 1 cent
#    [True, "30-10-naive-ordered-1", "30-10-naive-ordered-20",],
#    [True, "30-5-naive-ordered-1",],   # match 30x30, batch size 5, 5 assignments each, 1 cent
#    [True, "30-5-naive-ordered-20",],  # match 30x30, batch size 5, 5 assignments each, 1 cent
#    [True, "30-5-naive-ordered-1", "30-5-naive-ordered-20",],
#    [True, "30-3-naive-ordered-1",],   # match 30x30, batch size 3, 5 assignments each, 1 cent
#    [True, "30-3-naive-ordered-20",],  # match 30x30, batch size 3, 5 assignments each, 1 cent
#    [True, "30-3-naive-ordered-1", "30-3-naive-ordered-20",],
#    [True, "20-1-naive-ordered-3",],   # match 20x20, batch size 3, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-4",],   # match 20x20, batch size 3, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-3", "20-1-naive-ordered-4",],
#    [True, "20-1-naive-ordered-1-ACTUALLYSMART",],  # match 20x20, batch size 1, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-2-ACTUALLYSMART",],  # match 20x20, batch size 1, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-1-ACTUALLYSMART", "20-1-naive-ordered-2-ACTUALLYSMART",],
#    [True, "8-2-smart-ordered-1",],    # match 8x8, batch size 2, 5 assignments each, 1 cent (bad join interface taint?)
#    [True, "30-5-smart-ordered-1",],   # match 30x30, batch size 5, 5 assignments each, 1 cent (bad join interface taint?)
#    "30-2-smart-ordered-1",            # match 30x30, batch size 2, 5 assignments each, 1 cent (bad join interface taint?)
#    "20-1-smart-ordered-1",            # match 20x20, batch size 1, 5 assignments each, 1 cent (bad join interface taint?)
#    [True, "30-3-smart-ordered-1",],   # match 30x30, batch size 3, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-3-smart-ordered-2",],   # match 30x30, batch size 3, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-3-smart-ordered-1", "30-3-smart-ordered-2"],
#    [True, "20-1-smart-ordered-3",],   # match 20x20, batch size 1, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-2-smart-ordered-2",],   # match 30x30, batch size 2, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-2-smart-ordered-3",],   # match 30x30, batch size 2, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-2-smart-ordered-2", "30-2-smart-ordered-3"],
]

def main(batch, run_names):
    if batch:
        pairs = BPPair.objects.filter(bpbatch__experiment__run_name__in = run_names)
    else:
        pairs = Pair.objects.filter(run_name__in = run_names)
    #print "num pairs", pairs.count()
    #print "Turker histogram"
    turkers = {}
    for pair in pairs:
        if batch:
            resps = pair.bprespans_set.all()
        else:
            resps = pair.pairresp_set.all()
        for resp in resps:
            if batch:
                wid = resp.bprm.wid
            else:
                wid = resp.wid
            if wid not in turkers:
                turkers[wid] = []
            turkers[wid].append((pair.left, pair.right, resp.same))

    anonid = 0
    for wid, arr in turkers.items():
        for (left, right, same) in arr:
            print "%d\t%d\t%d\t%d\t%d" % (anonid, left, right, left == right, same)
        anonid += 1

if __name__ == "__main__":
    print "wid\tleft\tright\tcorrectans\tworkerans"
    for runs in run_groups:
        main(runs[0], runs[1:])
5,084
expath/__init__.py
csJoax/expath
1
2170979
__author__ = "csJoax" __email__ = "<EMAIL>" __version__ = "0.1.0" from pathlib import Path from .expath import set_attr, unset_attr, PathError set_attr()
157
server/ui/__init__.py
elise-baumgartner/onramp
2
2171014
from django.core.exceptions import AppRegistryNotReady

# IMPORTANT: the following code needs to be here so we can import
# our models without having to call django.setup() in every single
# file that tries to import them
try:
    from django.apps import apps
    apps.check_apps_ready()
except AppRegistryNotReady:
    import django
    django.setup()
354
scripts/figure4/find_lagged_modules.py
jiawu/Roller
0
2166477
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import pandas as pd
import pdb
import Swing.util.lag_identification as lag_id
from Swing.util.Evaluator import Evaluator
import pickle
import numpy as np
import math
from timeit import default_timer as timer


def get_true_edges(gold_filename):
    evaluator = Evaluator(gold_filename, '\t')
    edges = evaluator.gs_flat.tolist()
    return edges, evaluator


def get_true_lags(exp_path, timepoints, perturbs, dset='omranian'):
    exp_inter = lag_id.get_experiment_list(exp_path, timepoints=timepoints, perturbs=perturbs)
    start = timer()
    exp_xcor = lag_id.xcorr_experiments(exp_inter, 1)
    end = timer()
    print(end - start)
    print("Calculated x correlation matrices")
    gene_list = list(exp_inter[0].columns.values)
    print("Calculated edge lag from x correlation matrices")
    if 'omranian' in dset:
        signed_edge_list = pd.read_csv('../data/invitro/omranian_signed_parsed_goldstandard.tsv',
                                       sep='\t', header=None)
        goldstandard = '../data/invitro/omranian_parsed_goldstandard.tsv'
    elif 'marbach' in dset:
        signed_edge_list = pd.read_csv('../data/invitro/marbach_signed_parsed_goldstandard.tsv',
                                       sep='\t', header=None)
        goldstandard = '../data/invitro/marbach_parsed_goldstandard.tsv'
    signed_edge_list.columns = ['regulator', 'target', 'signs']
    signed_edge_list['regulator-target'] = tuple(zip(signed_edge_list['regulator'],
                                                     signed_edge_list['target']))
    lags = lag_id.calc_edge_lag(exp_xcor, gene_list, 0.7, 0.5, timestep=1,
                                signed_edge_list=signed_edge_list, flat=False)
    lags = lags[lags['Parent'] != lags['Child']]
    edge_df = pd.DataFrame(lags['Lag'].values, index=lags['Edge'].values, columns=['Lag'])
    true_edges, evaluator = get_true_edges(goldstandard)

    all_lag_list = edge_df['Lag'].tolist()
    edge_df['lag_stderr'] = [np.std(x)/math.sqrt(len(x)) if type(x) is list else 'nan' for x in all_lag_list]
    edge_df['lag_std'] = [np.std(x) for x in all_lag_list]
    edge_df['lag_mean'] = [np.mean(x) for x in all_lag_list]

    only_true = edge_df[edge_df.index.isin(true_edges)]
    lag_list = only_true['Lag'].tolist()
    only_true['lag_stderr'] = [np.std(x)/math.sqrt(len(x)) if type(x) is list else 'nan' for x in lag_list]
    only_true['lag_std'] = [np.std(x) for x in lag_list]
    only_true['lag_mean'] = [np.mean(x) for x in lag_list]
    return (only_true, edge_df)


def get_mean_list(lag_df):
    return (lag_df[~lag_df['lag_mean'].isnull()]['lag_mean'].values)


def get_kde(mean_list):
    density = gaussian_kde(mean_list)
    density.covariance_factor = lambda: .24
    density._compute_covariance()
    return (density)


def main():
    my_paths = ['../data/invitro/omranian_parsed_timeseries.tsv',
                '../data/invitro/omranian_parsed_heatstress_timeseries.tsv',
                '../data/invitro/omranian_parsed_coldstress_timeseries.tsv',
                '../data/invitro/omranian_parsed_control_timeseries.tsv']
    xs = np.linspace(0, 5, 200)
    colors = ['r', 'g', 'y', 'b']
    for idx, path in enumerate(my_paths):
        # get_true_lags returns (only_true, edge_df); only the true-edge frame is needed here
        if idx == 0:
            lag_df, _ = get_true_lags(path, 6, 9)
        else:
            lag_df, _ = get_true_lags(path, 6, 3)
        ms = get_mean_list(lag_df)
        density = get_kde(ms)
        plt.plot(xs, density(xs), lw=2.0, color=colors[idx])
    plt.savefig('multiple_lag_kernel_density.png')
    plt.hist(ms, bins=5)
    plt.savefig('multiple_lag_hist.png')


if __name__ == '__main__':
    main()

# combine/parse omranian
#   Time Gene Names
# map omranian to strong regulondb gold standard
# use swing on gold standard
# check if edges are lagged in omranian
# convert network into modules
# get lagged modules (percentage of each module that is lagged)
# get functional enrichment analysis of modules
3,879
tests/06_partial_catcher_unit_test.py
andybrice/Pypework
3
2170159
from pypework import PartialCatcher

from functions import *


## Unit Tests ##

class TestPartialCatcher(object):

    def test_exists(self):
        assert PartialCatcher

    def test_instantiates_with_no_arguments(self):
        f = PartialCatcher()
        assert f.__class__ == PartialCatcher

    def test_instantiates_with_named_scope(self):
        f = PartialCatcher( scope = __name__ )
        assert f.__class__ == PartialCatcher

    def test_scope_named(self):
        current_module = __import__(__name__)
        f = PartialCatcher( scope = __name__ )
        assert f._identifier_chain == current_module

    def test_scope_automatic(self):
        current_module = __import__(__name__)
        f = PartialCatcher()
        assert f._identifier_chain == current_module
783
sample_script/qm.py
RadonPy/RadonPy
1
2171187
#!/usr/bin/env python3

#  Copyright (c) 2022. RadonPy developers. All rights reserved.
#  Use of this source code is governed by a BSD-3-style
#  license that can be found in the LICENSE file.

__version__ = '0.2.1'

import matplotlib
matplotlib.use('Agg')
import pandas as pd
import os
import platform

import radonpy
from radonpy.core import const
const.mpi4py_avail = os.environ.get('RadonPy_mpi4py', False) == 'True'
#const.mpi_cmd = 'mpiexec -stdout ./%%n.%%j.out -stderr ./%%n.%%j.err -n %i'
from radonpy.core import utils, calc
from radonpy.ff.gaff2_mod import GAFF2_mod
from radonpy.sim import qm


if __name__ == '__main__':
    data = {
        'DBID': os.environ.get('RadonPy_DBID'),
        'monomer_ID': os.environ.get('RadonPy_Monomer_ID', None),
        'smiles_list': os.environ.get('RadonPy_SMILES'),
        'smiles_ter_1': os.environ.get('RadonPy_SMILES_TER', '*C'),
        'ter_ID_1': os.environ.get('RadonPy_TER_ID', 'CH3'),
        'qm_method': os.environ.get('RadonPy_QM_Method', 'wb97m-d3bj'),
        'charge': os.environ.get('RadonPy_Charge', 'RESP'),
        'remarks': os.environ.get('RadonPy_Remarks', ''),
        'Python_ver': platform.python_version(),
        'RadonPy_ver': radonpy.__version__,
    }

    omp_psi4 = int(os.environ.get('RadonPy_OMP_Psi4', 4))
    mem_psi4 = int(os.environ.get('RadonPy_MEM_Psi4', 1000))
    conf_mm_omp = int(os.environ.get('RadonPy_Conf_MM_OMP', 0))
    conf_mm_mpi = int(os.environ.get('RadonPy_Conf_MM_MPI', utils.cpu_count()))
    conf_mm_gpu = int(os.environ.get('RadonPy_Conf_MM_GPU', 0))
    conf_mm_mp = int(os.environ.get('RadonPy_Conf_MM_MP', 0))
    conf_psi4_omp = int(os.environ.get('RadonPy_Conf_Psi4_OMP', omp_psi4))
    conf_psi4_mp = int(os.environ.get('RadonPy_Conf_Psi4_MP', 0))

    work_dir = './%s' % data['DBID']
    if not os.path.isdir(work_dir):
        os.makedirs(work_dir)

    save_dir = os.path.join(work_dir, 'analyze')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    tmp_dir = os.environ.get('RadonPy_TMP_Dir', work_dir)
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)

    smi_list = data['smiles_list'].split(',')
    if data['monomer_ID']:
        monomer_id = data['monomer_ID'].split(',')

    ff = GAFF2_mod()
    mols = []

    for i, smi in enumerate(smi_list):
        monomer_data = {
            'smiles': smi,
            'qm_method': data['qm_method'],
            'charge': data['charge'],
            'remarks': data['remarks'],
            'Python_ver': data['Python_ver'],
            'RadonPy_ver': data['RadonPy_ver'],
        }
        data['smiles_%i' % (i+1)] = smi

        # Conformation search and RESP charge calculation of a repeating unit
        mol = utils.mol_from_smiles(smi)
        mol, energy = qm.conformation_search(mol, ff=ff, work_dir=work_dir, tmp_dir=tmp_dir,
            opt_method=data['qm_method'], psi4_omp=conf_psi4_omp, psi4_mp=conf_psi4_mp,
            mpi=conf_mm_mpi, omp=conf_mm_omp, gpu=conf_mm_gpu, mm_mp=conf_mm_mp,
            log_name='monomer%i' % (i+1), memory=mem_psi4)
        qm.assign_charges(mol, charge=data['charge'], work_dir=work_dir, tmp_dir=tmp_dir,
            omp=omp_psi4, opt=False, log_name='monomer%i' % (i+1), memory=mem_psi4)
        mols.append(mol)

        if data['monomer_ID']:
            data['monomer_ID_%i' % (i+1)] = monomer_data['monomer_ID'] = monomer_id[i]
            utils.pickle_dump(mol, os.path.join(save_dir, 'monomer_%s.pickle' % monomer_id[i]))
        else:
            utils.pickle_dump(mol, os.path.join(save_dir, 'monomer%i.pickle' % (i+1)))

        # Get monomer properties
        data['mol_weight_monomer%i' % (i+1)] = monomer_data['mol_weight'] = calc.molecular_weight(mol)
        data['vdw_volume_monomer%i' % (i+1)] = monomer_data['vdw_volume'] = calc.vdw_volume(mol)

        qm_data = qm.sp_prop(mol, opt=False, work_dir=work_dir, tmp_dir=tmp_dir,
            sp_method=data['qm_method'], omp=omp_psi4, log_name='monomer%i' % (i+1),
            memory=mem_psi4)
        polar_data = qm.polarizability(mol, opt=False, work_dir=work_dir, tmp_dir=tmp_dir,
            polar_method=data['qm_method'], omp=conf_psi4_omp, mp=conf_psi4_mp,
            log_name='monomer%i' % (i+1), memory=mem_psi4)
        qm_data.update(polar_data)

        for k in qm_data.keys():
            data['%s_monomer%i' % (k, i+1)] = qm_data[k]
        monomer_data.update(qm_data)

        monomer_df = pd.DataFrame(monomer_data, index=[0])
        if data['monomer_ID']:
            monomer_df = monomer_df.set_index('monomer_ID')
            monomer_df.to_csv(os.path.join(save_dir, 'monomer_%s_data.csv' % monomer_id[i]))
        else:
            monomer_df.to_csv(os.path.join(save_dir, 'monomer%i_data.csv' % (i+1)))

    # RESP charge calculation of a termination unit
    ter = utils.mol_from_smiles(data['smiles_ter_1'])
    qm.assign_charges(ter, charge=data['charge'], work_dir=work_dir, tmp_dir=tmp_dir,
        opt_method=data['qm_method'], omp=omp_psi4, log_name='ter1', memory=mem_psi4)
    if data['ter_ID_1']:
        utils.pickle_dump(ter, os.path.join(save_dir, 'ter_%s.pickle' % data['ter_ID_1']))
    else:
        utils.pickle_dump(ter, os.path.join(save_dir, 'ter1.pickle'))

    # Input data and monomer properties are outputted
    data_df = pd.DataFrame(data, index=[0]).set_index('DBID')
    data_df.to_csv(os.path.join(save_dir, 'qm_data.csv'))
5,370
module04-file.operations/exercise3.py
deepcloudlabs/dcl162-2020-sep-02
0
2169708
import json

bank_accounts = {
    'accounts': [
        {'i': 'TR1', 'f': '<NAME>', 'b': 100000},
        {'i': 'TR2', 'f': '<NAME>', 'b': 200000},
        {'i': 'TR3', 'f': '<NAME>', 'b': 300000}
    ]
}

country = {
    "name": "turkey",
    "cities": [{"id": 1000, "name": "ankara"}],
    "capital": 1000
}

with open("accounts.json", "r") as json_file:
    print(json.dumps(json.load(json_file), indent=4))
    # json.dump(bank_accounts, json_file)
452
lintcode/NineChapters/03/remove-node-in-binary-search-tree.py
shootsoft/practice
0
2170873
__author__ = 'yinjun'

"""
Definition of TreeNode:
class TreeNode:
    def __init__(self, val):
        this.val = val
        this.left, this.right = None, None
"""


class Solution:
    """
    @param root: The root of the binary search tree.
    @param value: Remove the node with given value.
    @return: The root of the binary search tree after removal.
    """
    def removeNode(self, root, value):
        # write your code here
        rootParent = TreeNode(0)
        rootParent.right = root

        parent = self.find(rootParent, root, value)

        if parent.left == None and parent.right == None:
            return rootParent.right

        node = None
        if parent.left != None and parent.left.val == value:
            node = parent.left
        if parent.right != None and parent.right.val == value:
            node = parent.right

        self.deleteNode(parent, node)
        return rootParent.right

    def find(self, parent, root, value):
        if root == None:
            return parent
        if root.val == value:
            return parent
        if root.val < value:
            return self.find(root, root.right, value)
        else:
            return self.find(root, root.left, value)

    def deleteNode(self, parent, node):
        if node == None:
            return

        if node.right == None:
            if parent.left == node:
                parent.left = node.left
            else:
                parent.right = node.left
        else:
            father = node
            temp = node.right
            while temp.left != None:
                father = temp
                temp = temp.left
            if father.left == temp:
                father.left = temp.right
            else:
                father.right = temp.right
            if parent.left == node:
                parent.left = temp
            else:
                parent.right = temp
            temp.left = node.left
            temp.right = node.right
2,034
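A small usage sketch for the Solution above (the TreeNode class is normally supplied by the judge, so it is re-declared here just for illustration):

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None

root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
root = Solution().removeNode(root, 2)  # remove the root; its successor 3 takes its place
print(root.val, root.left.val)         # -> 3 1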
src/oscutil/oscrecord.py
neonkingfr/VizBench
7
2171338
from nosuch.midiutil import *
from nosuch.midifile import *
from nosuch.oscutil import *
from nosuch.midiosc import *
from traceback import format_exc
from time import sleep
import sys

time0 = time.time()

def mycallback(ev, outfile):
    global time0
    tm = time.time() - time0
    line = "[%.6f" % tm
    for m in ev.oscmsg:
        line = line + "," + str(m)
    line = line + "]\n"
    outfile.write(line)

if __name__ == '__main__':

    if len(sys.argv) < 3:
        print "Usage: oscrecord {port@addr} {outputfile}"
        sys.exit(1)

    input_name = sys.argv[1]
    output_name = sys.argv[2]

    port = re.compile(".*@").search(input_name).group()[:-1]
    host = re.compile("@.*").search(input_name).group()[1:]

    print "host=", host, " port=", port, " outputfile=", output_name
    outfile = open(output_name, "w")

    oscmon = OscMonitor(host, port)
    oscmon.setcallback(mycallback, outfile)
    sleep(3600)  # an hour
864
Python/01. Introduction - Tests/IntroductionTests.py
jc-johnson/HackerrankPractice
0
2171333
# Problem: https://www.hackerrank.com/challenges/py-if-else/problem
# Score: 10
import unittest


def weird(n):
    if n % 2 == 1 or 6 <= n <= 20:
        print('Weird')
        return 'Weird'
    else:
        print('Not Weird')
        return 'Not Weird'


class TestIfElse(unittest.TestCase):
    def test_Weird(self):
        self.assertEqual(weird(20), 'Weird')


if __name__ == "__main__":
    unittest.main()
470
hallgrim/IliasXMLCreator/packer.py
janmax/Hallgrim
1
2170572
import xml.etree.ElementTree as et

from . import abstract_question
from .. import messages

__all__ = ['compile', 'print_xml']


def create_xml_tree(item_list):
    root = et.Element('questestinterop')
    tree = et.ElementTree(root)
    for item in item_list:
        root.append(item)
    return tree


def compile(data_gen, script_type):
    """ Passes the intermediate representation to the XML creators.

    Generates the final list of descriptions for each test from a generator
    and passes each one to the XML compilers.

    Arguments:
        data_gen {generator} -- generates dictionaries that contain a task description
        script_type {str} -- to specify which compiler to use

    Returns:
        ElementTree -- the final xml tree ready for print
    """
    try:
        question_class = abstract_question.IliasQuestion.available_types()[script_type]
        item_list = [question_class(**data).xml() for data in data_gen]
    except KeyError:
        messages.abort('Question type not found.')

    return create_xml_tree(item_list)


def print_xml(tree, file):
    """ Only a wrapper for the print function.

    Arguments:
        tree {ElementTree} -- the final task file
        file {str} -- output destination (path has to exist)
    """
    tree.write(file, encoding="utf-8", xml_declaration=True)
1,320
tests/test_utils_lazy_import.py
brunonicko/objetto
8
2170668
# -*- coding: utf-8 -*-
from itertools import chain
from math import floor

import pytest

from objetto.utils.lazy_import import decorate_path, get_path, import_path


class MyClass(object):
    class MyNestedClass(object):
        pass


def test_import_path():
    assert import_path("math|floor") is floor
    assert import_path("itertools|chain") is chain
    assert import_path(__name__ + "|MyClass") is MyClass
    assert import_path(__name__ + "|MyClass.MyNestedClass") is MyClass.MyNestedClass

    with pytest.raises(ValueError):
        import_path("module.submodule|<locals>.Test")


def test_get_path():
    assert get_path(floor) == "math|floor"
    assert get_path(chain) == "itertools|chain"
    assert get_path(MyClass) == __name__ + "|MyClass"
    assert get_path(MyClass.MyNestedClass) == __name__ + "|MyClass.MyNestedClass"

    class LocalClass(object):
        pass

    with pytest.raises(ValueError):
        get_path(LocalClass)


def test_decorate_path():
    assert decorate_path("abstractmethod", "abc") == "abc|abstractmethod"
    assert decorate_path(".|abstractmethod", "abc") == "abc|abstractmethod"
    assert decorate_path("abc|abstractmethod", "") == "abc|abstractmethod"
    assert decorate_path(".abc|Mapping", "collections") == "collections.abc|Mapping"

    with pytest.raises(ValueError):
        decorate_path("abstract method|a b c", "xyz")

    with pytest.raises(ValueError):
        decorate_path("abstract method", "abc")

    with pytest.raises(ValueError):
        decorate_path("abstractmethod", "a b c")

    assert decorate_path("..|Counter", "collections.abc") == "collections|Counter"
    assert decorate_path("..._objects.base|BaseObject", "objetto.changes.base") == (
        "objetto._objects.base|BaseObject"
    )


if __name__ == "__main__":
    pytest.main()
1,819
python/Graph.py
gineer01/programming-challenges
0
2170265
class Graph:
    def __init__(self):
        self.edges = {}
        self.vertices = set()

    def add_edge(self, u, v, data):
        self.vertices.add(u)
        self.vertices.add(v)
        self.edges.setdefault(u, {})[v] = data

    def remove_edge(self, u, v):
        return self.edges[u].pop(v, None)

    def bfs(self, start_node, node_func):
        """
        Do BFS and for each node, call node_func
        :param start_node:
        :param node_func: a function that takes parent node, child node, and edge data
        :return: None
        """
        import collections
        visited = set()
        q = collections.deque()
        q.append(start_node)
        node_func(None, start_node, None)
        visited.add(start_node)
        while len(q) > 0:
            node = q.popleft()
            # .get guards against nodes with no outgoing edges
            for c in self.edges.get(node, {}):
                if c in visited:
                    continue
                else:
                    node_func(node, c, self.edges[node][c])
                    visited.add(c)
                    q.append(c)

    def dijkstra(self, start):
        """
        Find shortest paths to all vertices
        :param start: start node
        :return: D, P where D[v] is the distance from start to v and P[v] is
            the predecessor of v along the shortest path from s to v.
        """
        import heapq
        D = {}
        P = {}
        q = [(0, start, None)]  # initial heap
        visited = set()
        while len(q) > 0:
            (cost, v, parent) = heapq.heappop(q)
            if v not in visited:
                visited.add(v)
                D[v] = cost
                P[v] = parent
                for c in self.edges.get(v, []):
                    if c not in visited:
                        item = (cost + self.edges[v][c], c, v)
                        heapq.heappush(q, item)
        return D, P


def solve_minimum_assignment(edge_cost, n):
    SOURCE = ("s", 0)
    SINK = ("t", 0)

    def reduce_cost(x, y, p, edge_cost):
        return p[x] + edge_cost - p[y]

    def get_edge_cost(edge_cost, x, y):
        return edge_cost[x[1]][y[1]]

    def update_matching(v, u, M, g):
        if v[0] == 'x' and u[0] == 'y':
            M[v[1]] = u[1]
            g.remove_edge(v, u)
            g.add_edge(u, v, 0)  # cost to be updated later
        elif v[0] == 'y' and u[0] == 'x':
            del M[u[1]]
            g.remove_edge(v, u)
            g.add_edge(u, v, 0)  # cost to be updated later
        else:
            raise Exception("Not an X-Y edge")

    def update_cost(n, g, p, edge_cost):
        for i in range(n):
            for j in range(n):
                x = x_node(i)
                y = y_node(j)
                if x in g.edges and y in g.edges[x]:
                    g.add_edge(x, y, reduce_cost(x, y, p, get_edge_cost(edge_cost, x, y)))
                else:
                    assert g.edges[y][x] == 0

    def x_node(i):
        return ("x", i)

    def y_node(i):
        return ("y", i)

    def augment_path(n, M, P, g):
        u = SINK
        v = P[u]
        path = []
        while v and v != SOURCE:
            if u != SINK:
                path.append((v, u))
            u = v
            v = P[u]
        path.reverse()
        for p in path:
            update_matching(p[0], p[1], M, g)
        vals = set(M.values())
        for i in range(n):
            x = x_node(i)
            if i in M:
                g.remove_edge(SOURCE, x)
            else:
                g.add_edge(SOURCE, x, 0)
            y = y_node(i)
            if i in vals:
                g.remove_edge(y, SINK)
            else:
                g.add_edge(y, SINK, 0)
        assert len(M.keys()) == len(set(M.values())), M

    M = {}
    p = {}
    for i in range(n):
        p[x_node(i)] = 0
        p[y_node(i)] = min(edge_cost[j][i] for j in range(n))

    g = Graph()
    for i in range(n):
        g.add_edge(SOURCE, x_node(i), 0)
        g.add_edge(y_node(i), SINK, 0)
        for j in range(n):
            x = x_node(i)
            y = y_node(j)
            g.add_edge(x, y, reduce_cost(x, y, p, get_edge_cost(edge_cost, x, y)))

    while len(M) < n:
        D, P = g.dijkstra(SOURCE)
        augment_path(n, M, P, g)
        for k in p:
            p[k] = D[k] + p[k]
        update_cost(n, g, p, edge_cost)

    # print(M)
    return M


if __name__ == "__main__":
    g = Graph()
    g.add_edge('a', 'w', 14)
    g.add_edge('a', 'x', 7)
    g.add_edge('a', 'y', 9)
    g.add_edge('b', 'w', 9)
    g.add_edge('b', 'z', 6)
    g.add_edge('w', 'a', 14)
    g.add_edge('w', 'b', 9)
    g.add_edge('w', 'y', 2)
    g.add_edge('x', 'a', 7)
    g.add_edge('x', 'y', 10)
    g.add_edge('x', 'z', 15)
    g.add_edge('y', 'a', 9)
    g.add_edge('y', 'w', 2)
    g.add_edge('y', 'x', 10)
    g.add_edge('y', 'z', 11)
    g.add_edge('z', 'b', 6)
    g.add_edge('z', 'x', 15)
    g.add_edge('z', 'y', 11)
    print(g.dijkstra('a'))
4,969
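A quick usage sketch for solve_minimum_assignment (the 3x3 cost matrix is invented for illustration; edge_cost[i][j] is the cost of assigning x-node i to y-node j, and the returned dict maps each i to its matched j):

cost = [
    [4, 1, 3],
    [2, 0, 5],
    [3, 2, 2],
]
print(solve_minimum_assignment(cost, 3))  # expected minimum-cost matching, e.g. {0: 1, 1: 0, 2: 2}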
troupon/payment/models.py
andela/troupon
14
2169530
from django.db import models
from django.contrib.auth.models import User

from deals.models import Advertiser, Deal


class TransactionHistory(models.Model):
    """ stores details of every charge on stripe """
    transaction_id = models.CharField(max_length=100, null=False, blank=False, default='')
    transaction_status = models.CharField(max_length=100, null=False, blank=False, default='')
    transaction_amount = models.IntegerField()
    transaction_created = models.IntegerField()
    transaction_currency = models.CharField(max_length=3, null=False, blank=False, default='')
    failure_code = models.IntegerField(null=True, blank=True)
    failure_message = models.CharField(max_length=200, null=True, blank=True)
    user = models.ForeignKey(User)


class Purchases(models.Model):
    PAYMENT_STATUS = [(1, 'Succeeded'), (2, 'Failed')]

    item = models.ForeignKey('deals.Deal')
    price = models.IntegerField()
    quantity = models.IntegerField(default=1)
    advertiser = models.ForeignKey('deals.Advertiser')
    title = models.CharField(max_length=100, null=False, blank=False)
    description = models.TextField(blank=True, default='')
    stripe_transaction_id = models.CharField(max_length=100, null=False, blank=False, default='')
    stripe_transaction_status = models.CharField(max_length=100, null=False, blank=False,
                                                 default=2, choices=PAYMENT_STATUS)
    user = models.ForeignKey(User)

    def __str__(self):
        return self.title
2,243
src/hostphot/local_photometry.py
temuller/host_phot
0
2171295
# Check the following urls for more info about Pan-STARRS:
#
#     https://outerspace.stsci.edu/display/PANSTARRS/PS1+Image+Cutout+Service#PS1ImageCutoutService-ImportantFITSimageformat,WCS,andflux-scalingnotes
#     https://outerspace.stsci.edu/display/PANSTARRS/PS1+Stack+images#PS1Stackimages-Photometriccalibration
#
# For DES:
#
#     https://des.ncsa.illinois.edu/releases/dr1/dr1-docs/processing
#
# For SDSS:
#
#     https://www.sdss.org/dr12/algorithms/fluxcal/#SDSStoAB
#     https://data.sdss.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
#
# Some parts of this notebook are based on
# https://github.com/djones1040/PS1_surface_brightness/blob/master/Surface%20Brightness%20Tutorial.ipynb
# and codes from <NAME>

import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from photutils import CircularAperture
from photutils import aperture_photometry

from astropy.io import fits
from astropy.table import Table
from astropy import coordinates, units as u, wcs
from astropy.cosmology import FlatLambdaCDM
from astropy.stats import sigma_clipped_stats

from .utils import (get_survey_filters, extract_filters,
                    check_survey_validity, check_filters_validity,
                    calc_ext, calc_sky_unc)

H0 = 70
Om0 = 0.3
cosmo = FlatLambdaCDM(H0, Om0)

#-------------------------------
def calc_aperture_size(z, ap_radius):
    """Calculates the size of the aperture in arcsec, for
    aperture photometry, given a physical size.

    Parameters
    ----------
    z: float
        Redshift.
    ap_radius: float
        Physical aperture size in kpc.

    Returns
    -------
    radius_arcsec: float
        Aperture size in arcsec.
    """
    ap_radius = ap_radius*u.kpc

    # transverse separations
    transv_sep_per_arcmin = cosmo.kpc_proper_per_arcmin(z)
    transv_sep_per_arcsec = transv_sep_per_arcmin.to(u.kpc/u.arcsec)
    radius_arcsec = ap_radius/transv_sep_per_arcsec

    return radius_arcsec.value


def extract_aperture(data, error, px, py, radius):
    """Extracts aperture photometry of a single image.

    Parameters
    ----------
    data: array
        Image data in a 2D numpy array.
    error: array
        Errors of `data`.
    px: float
        x-axis pixel coordinate of the aperture center.
    py: float
        y-axis pixel coordinate of the aperture center.
    radius: float
        Aperture radius in pixels.

    Returns
    -------
    raw_flux: float
        Aperture photometry ("raw" flux).
    raw_flux_err: float
        Uncertainty on the aperture photometry.
    """
    aperture = CircularAperture((px, py), r=radius)
    ap_results = aperture_photometry(data, aperture, error=error)
    raw_flux = ap_results['aperture_sum'][0]
    raw_flux_err = ap_results['aperture_sum_err'][0]

    return raw_flux, raw_flux_err


def extract_local_photometry(fits_file, ra, dec, z, ap_radius=4,
                             survey="PS1", plot_output=None):
    """Extracts local photometry of a given fits file.

    Parameters
    ----------
    fits_file: str
        Path to the fits file.
    ra: float
        Right ascension in degrees.
    dec: float
        Declination in degrees.
    z: float
        Redshift of the SN.
    ap_radius: float, default `4`
        Physical size of the aperture in kpc. This is used
        for aperture photometry.
    survey: str, default `PS1`
        Survey to use for the zero-points and pixel scale.
    plot_output: str, default `None`
        If not `None`, saves the output plots with the given name.

    Returns
    -------
    mag: float
        Magnitude.
    mag_err: float
        Error on the magnitude.
    """
    check_survey_validity(survey)
    img = fits.open(fits_file)
    header = img[0].header
    data = img[0].data
    img_wcs = wcs.WCS(header, naxis=2)
    exptime = float(header['EXPTIME'])

    radius_arcsec = calc_aperture_size(z, ap_radius)
    # arcsec to number of pixels (0.XXX arcsec/pix)
    pixel_scale_dict = {'PS1': 0.25, 'DES': 0.263, 'SDSS': 0.396}
    pixel_scale = pixel_scale_dict[survey]
    radius_pix = radius_arcsec/pixel_scale

    px, py = img_wcs.wcs_world2pix(ra, dec, 1)
    error = calc_sky_unc(data, exptime)
    raw_flux, raw_flux_err = extract_aperture(data, error, px, py, radius_pix)

    zp_dict = {'PS1': 25 + 2.5*np.log10(exptime), 'DES': 30, 'SDSS': 22.5}
    zp = zp_dict[survey]

    mag = -2.5*np.log10(raw_flux) + zp
    mag_err = 2.5/np.log(10)*raw_flux_err/raw_flux

    if plot_output is not None:
        fig, ax = plt.subplots()
        m, s = np.nanmean(data), np.nanstd(data)
        im = ax.imshow(data, interpolation='nearest', cmap='gray',
                       vmin=m-s, vmax=m+s, origin='lower')
        circle = plt.Circle((px, py), radius_pix, color='r', fill=False)
        ax.add_patch(circle)
        plt.tight_layout()
        plt.savefig(plot_output)
        plt.close(fig)

    return mag, mag_err


def multi_local_photometry(name_list, ra_list, dec_list, z_list,
                           ap_radius, work_dir='', filters=None,
                           survey="PS1", correct_extinction=True,
                           plot_output=False):
    """Extract local photometry for multiple SNe.

    Parameters
    ----------
    name_list: list-like
        List of SN names.
    ra_list: list-like
        List of right ascensions in degrees.
    dec_list: list-like
        List of declinations in degrees.
    z_list: list-like
        List of redshifts.
    ap_radius: float
        Physical size of the aperture in kpc. This is used
        for aperture photometry.
    work_dir: str, default ''
        Working directory where to find the objects'
        directories with the images. Default, current directory.
    filters: str, default `None`
        Filters used to extract photometry. If `None`, use all
        the available filters for the given survey.
    survey: str, default `PS1`
        Survey to use for the zero-points and pixel scale.
    correct_extinction: bool, default `True`
        If `True`, the magnitudes are corrected for extinction.
    plot_output: bool, default `False`
        If `True`, saves the output plots.

    Returns
    -------
    local_phot_df: DataFrame
        Dataframe with the photometry, errors and SN info.
    """
    check_survey_validity(survey)
    check_filters_validity(filters, survey)
    if filters is None:
        filters = get_survey_filters(survey)

    # dictionary to save results
    mag_dict = {filt: [] for filt in filters}
    mag_err_dict = {filt+'_err': [] for filt in filters}
    mag_dict.update(mag_err_dict)

    results_dict = {'name': [], 'ra': [], 'dec': [], 'zspec': []}
    results_dict.update(mag_dict)

    # filter functions for extinction correction
    filters_dict = extract_filters(filters, survey)

    for name, ra, dec, z in zip(name_list, ra_list, dec_list, z_list):
        sn_dir = os.path.join(work_dir, name)
        image_files = [os.path.join(sn_dir, f'{survey}_{filt}.fits')
                       for filt in filters]

        for image_file, filt in zip(image_files, filters):
            try:
                # use a separate name so the boolean flag passed by the caller
                # is not overwritten on the first iteration
                if plot_output:
                    plot_file = os.path.join(sn_dir, f'local_{filt}.jpg')
                else:
                    plot_file = None
                mag, mag_err = extract_local_photometry(image_file, ra, dec, z,
                                                        ap_radius=ap_radius,
                                                        survey=survey,
                                                        plot_output=plot_file)
                if correct_extinction:
                    wave = filters_dict[filt]['wave']
                    transmission = filters_dict[filt]['transmission']
                    A_ext = calc_ext(wave, transmission, ra, dec)
                    mag -= A_ext
                results_dict[filt].append(mag)
                results_dict[filt+'_err'].append(mag_err)
            except Exception as message:
                results_dict[filt].append(np.nan)
                results_dict[filt+'_err'].append(np.nan)
                print(f'{name} failed with {filt} band: {message}')

        results_dict['name'].append(name)
        results_dict['ra'].append(ra)
        results_dict['dec'].append(dec)
        results_dict['zspec'].append(z)

    local_phot_df = pd.DataFrame(results_dict)

    return local_phot_df
8,719
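calc_aperture_size above is a pure function, so it can be sanity-checked without any images (the redshift and the 4 kpc radius below are arbitrary; the module's FlatLambdaCDM(H0=70, Om0=0.3) cosmology is used implicitly):

r = calc_aperture_size(z=0.05, ap_radius=4)
print(f"{r:.2f} arcsec")  # roughly a few arcsec at this redshift; the exact value depends on the cosmology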
webapp/models.py
rogeriao/myplanningpoker
0
2170399
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

users_games = db.Table(
    'user_game',
    db.Column('user_email', db.String(), db.ForeignKey('user.email')),
    db.Column('game_id', db.Integer(), db.ForeignKey('game.id'))
)


class User(db.Model):
    email = db.Column(db.String(100), primary_key=True)
    username = db.Column(db.String(50))
    password = db.Column(db.String(50))
    owner_games = db.relationship(
        'Game',
        backref='user',
        lazy='dynamic'
    )
    user_games = db.relationship(
        'Game',
        secondary=users_games,
        backref=db.backref('User', lazy='dynamic')
    )
    votes = db.relationship(
        'Vote',
        backref='user',
        lazy='dynamic'
    )
    stories = db.relationship(
        'Story',
        backref='user',
        lazy='dynamic'
    )

    def __init__(self, email, username):
        self.email = email
        self.username = username

    def __repr__(self):
        return '<user {}>'.format(self.username)


class Game(db.Model):
    id = db.Column(db.Integer(), primary_key=True)
    title = db.Column(db.String(100))
    owner = db.Column(db.String(100), db.ForeignKey('user.email'))
    stories = db.relationship(
        'Story',
        backref='game',
        lazy='dynamic'
    )
    users = db.relationship(
        'User',
        secondary=users_games,
        backref=db.backref('game', lazy='dynamic')
    )

    def __init__(self, title, owner):
        self.title = title
        self.owner = owner

    def __repr__(self):
        return '<Game {}>'.format(self.title)


class Story(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))
    created_by = db.Column(db.String(100), db.ForeignKey('user.email'))
    description = db.Column(db.Text)
    game_id = db.Column(db.Integer(), db.ForeignKey('game.id'))
    user_votes = db.relationship(
        'Vote',
        backref='story',
        lazy='dynamic'
    )

    def __init__(self, title):
        self.title = title

    def __repr__(self):
        return '<Story {}>'.format(self.title)


class Vote(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.String(5))
    user_email = db.Column(db.String(100), db.ForeignKey('user.email'))
    story_id = db.Column(db.Integer(), db.ForeignKey('story.id'))

    def __init__(self, user_email, value, story_id):
        self.user_email = user_email
        self.story_id = story_id
        self.value = value

    def __repr__(self):
        return '<Voted by {}>'.format(self.user_email)
2,675
settings.py
eklipse2009/ZX-Pokemaster
8
2170721
ZX_POKEMASTER_VERSION = '1.51'
POKEMASTER_DB_PATH = 'pokemaster.db'
POKEMASTER_MIN_DB_PATH = 'minified_database/pokemaster.db'
DB_DISK_CACHE_FILE = 'hashes.dat'
LOCAL_FTP_ROOT = 'ftp'
WOS_SITE_ROOT = 'http://www.worldofspectrum.org'
WOS_MIRRORS = [
    'https://archive.org/download/World_of_Spectrum_June_2017_Mirror/World%20of%20Spectrum%20June%202017%20Mirror.zip/World%20of%20Spectrum%20June%202017%20Mirror',
    'http://spectrumcomputing.co.uk',
]
WOS_GAME_FILES_DIRECTORY = 'pub/sinclair/games'
WOS_TRDOS_GAME_FILES_DIRECTORY = 'pub/sinclair/trdos/games'
WOS_INGAME_SCREENS_DIRECTORY = 'pub/sinclair/screens/in-game'
WOS_LOADING_SCREENS_DIRECTORY = 'pub/sinclair/screens/load'
WOS_MANUALS_DIRECTORY = 'pub/sinclair/games-info'
TIPSHOP_SITE_ROOT = 'http://www.the-tipshop.co.uk'
GAME_EXTENSIONS = ['tap', 'dsk', 'z80', 'sna', 'dsk', 'trd', 'tzx', 'img',
                   'mgt', 'rom', 'scl', 'slt', 'szx', 'fdi', 'opd', 'mdr',
                   'wdr', 'd80', 'd40', 'sp', 'dck', 'ipf', 'csw', 'udi',
                   'spg', 'wmf']
ARCHIVE_EXTENSIONS = ['zip', 'rar', '7z', 'gz', 'iso', 'tar', 'bz2', 'bzip2',
                      'tbz2', 'tbz', 'gz', 'gzip', 'tgz', 'tar', 'xz', 'txz',
                      'arj', 'dmg', 'iso', 'lzh', 'lha', 'lzma', 'r00', 'z',
                      'taz']
DISALLOWED_SUPPLEMENTARY_FILES = GAME_EXTENSIONS + ARCHIVE_EXTENSIONS + \
    ['pok', '$b', '$c', '$m', '$t', '$u', '$w', '$x', '$z']
DISK_FORMATS = ('dsk', 'trd', 'scl')
TAPE_FORMATS = ('tzx', 'tap')
MAX_GAME_NAME_LENGTH = 100
MIN_GAME_NAME_LENGTH = 30
MAX_DESTINATION_PATH_LENGTH = 240
AVAILABILITY_TYPES = {
    'A': 'Available',
    'D': 'Distribution denied',
    'd': 'Distribution denied (still for sale)',
    '?': 'Missing in action',
    'N': 'Never released',
    'R': 'Recovered'
}
MULTIPLAYER_TYPES = {
    'c': 'Coop',
    'm': 'Vs+Coop',
    'n': 'Vs+Team',
    't': 'Team',
    'v': 'Vs'
}
CHEAT_SOURCE_SCRAPE = 0
CHEAT_SOURCE_OLD_DB = 1
CHEAT_SOURCE_WOS_FILE = 2
CHEAT_SOURCE_NEW_DB = 99
ALPHABETIC_DIRNAMES = ['123', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
                       'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                       't', 'u', 'v', 'w', 'x', 'y', 'z']
GAME_PREFIXES = ['A', 'An', 'The', 'La', 'Le', 'De', "L'", "D'", 'Les', 'Los',
                 'Las', 'El', 'Une', 'Una', 'Uno', 'Het', 'Der', 'Die', 'Das']
SIDE_A = 1
SIDE_B = 2
PREDEFINED_OUTPUT_PATH_STRUCTURES = [
    '/{TOSECName}',
    '{Letter}/{TOSECName}',
    '{Letter}/{Name}/{TOSECName}',
    '{Genre}/{Year}/{TOSECName}',
    '{Publisher}/{Year}/{GameName}/{TOSECName}',
    '{Genre}/{Publisher}/{TOSECName}',
    '{MachineType}/{MaxPlayers}/{Genre}/{TOSECName}',
    '{Genre}/{MaxPlayers}/{Letter}/{TOSECName}'
]
MESSAGE_BOX_TITLE = 'ZX Pokemaster'
INCLUDED_TYPES_LIST = [
    ''
]
INCLUDED_LANGUAGES_LIST = [
    ('en', 'English'),
    ('es', 'Spanish'),
    ('ru', 'Russian'),
    ('hr', 'Croatian'),
    ('cz', 'Czech'),
    ('nl', 'Dutch'),
    ('de', 'German'),
    ('fr', 'French'),
    ('it', 'Italian'),
    ('hu', 'Hungarian'),
    ('no', 'Norwegian'),
    ('pl', 'Polish'),
    ('pt', 'Portuguese'),
    ('sh', 'Serbo-Croatian'),
    ('sr', 'Serbian'),
    ('sk', 'Slovak'),
    ('sl', 'Slovenian'),
    ('sv', 'Swedish')
]
COUNTRY_LANGUAGE_DICT = {
    'GB': 'en',
    'AU': 'en',
    'US': 'en',
    'AR': 'es',
    'BR': 'pt',
    'CZ': 'cs',
    'SI': 'sl',
    '': 'en',
}
TOSEC_COMPLIANT_FILENAME_STRUCTURE = \
    '{GameName} ({Year})({Publisher})({MachineType})({Country})({Language})({Media}){ModFlags}{Notes}'
DEFAULT_MACHINE_TYPE = '48K'
DEFAULT_GAME_LANGUAGE = 'en'
OUTPUT_PATH_STRUCTURE_KEYS = [
    'Type', 'Genre', 'Type', 'Year', 'Letter', 'MachineType', 'Publisher',
    'MaxPlayers', 'GameName', 'Language', 'Format', 'Side', 'Part',
    'ModFlags', 'ZXDB_ID', 'Notes', 'TOSECName'
]
MOD_FLAGS_ORDER = ['cr', 'f', 'h', 'm', 'p', 't', 'tr', 'o', 'u', 'v', 'b']
X_RATED_FLAG = '[adult]'
COVERTAPE_PUBLISHERS = ['Your Sinclair', 'Crash', 'Sinclair User', 'Run Magazine']
4,220
Books/DeepLearningfromScratch/P04_NeuralNetwork_Learning/numerical_diff.py
Tim232/Python-Things
2
2169936
import numpy as np
import matplotlib.pylab as plt


# numerical differentiation (central difference)
def numerical_diff(f, x):
    h = 1e-4  # 0.0001
    return (f(x+h) - f(x-h)) / (2*h)
    # example of a bad implementation:
    # h = 10e-50
    # return (f(x+h) - f(x)) / h


def function_1(x):
    return 0.01*x**2 + 0.1*x


def tangent_line(f, x):
    d = numerical_diff(f, x)
    y = f(x) - d*x
    return lambda t: d*t + y


x = np.arange(0.0, 20.0, 0.1)
y = function_1(x)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.plot(x, y)

tf = tangent_line(function_1, 5)
y2 = tf(x)
plt.plot(x, y2, linestyle='--')

tf = tangent_line(function_1, 10)
y3 = tf(x)
plt.plot(x, y3, linestyle='--')

plt.show()
616
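A quick comparison (not in the original script) of why the centered difference above beats the "bad implementation": its error shrinks like O(h^2) instead of O(h). Check both against the exact derivative f'(x) = 0.02*x + 0.1:

def forward_diff(f, x, h=1e-4):
    return (f(x + h) - f(x)) / h

exact = 0.02 * 5 + 0.1  # f'(5) = 0.2
print(abs(numerical_diff(function_1, 5) - exact))  # centered: error ~ O(h^2)
print(abs(forward_diff(function_1, 5) - exact))    # one-sided: error ~ O(h)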
tests/test_text.py
OpenBigDataPlatform/tpljson
0
2169689
import unittest

from tpljson.text import surround_text, preview_text, highlight_text


class TestTextFunctions(unittest.TestCase):
    TEXT = '''{
  "replaces": "0.9.1",
  "hash": "f082b775fc70cf973d91c31ddbbcf36f21088377",
  "depends": "conda",
  "components": [
    {
      "version": "3.4.1",
      "name": "conda"
    }
  ],
  "conflicts": "",
  "package": "tpljson-1.0.0"
}
'''

    def test_highlight_text_defaults(self):
        t = highlight_text('test')
        self.assertEqual(t, '\x1b[30m\x1b[103mtest\x1b[0m')

    def test_surround_text_defaults(self):
        t = surround_text(' hello world ')
        self.assertEqual(t, ' >> hello world << ')

    def test_surround_text_custom(self):
        t = surround_text(' hello world ', left='[', right=']', pad=0)
        self.assertEqual(t, ' [hello world] ')

    def test_surround_text_no_ws(self):
        t = surround_text(' hello world ', left='[', right=']', pad=1,
                          preserve_whitespace=False)
        self.assertEqual(t, '[ hello world ]')

    # TODO test highlight_text()

    def test_preview_text_defaults(self):
        t = preview_text(self.TEXT)
        self.assertEqual(t, '''
{
  "replaces": "0.9.1",
  "hash": "f082b775fc70cf973d91c31ddbbcf36f21088377",
  "depends": "conda",
  "components": [''')

    def test_preview_text_dedent(self):
        t = preview_text(' a\n b', indent=0, dedent=True)
        self.assertEqual(t, 'a\nb')

    def test_preview_center_line(self):
        t = preview_text('one\ntwo\nthree\nfour\nfive\nsix\nseven\neight\nnine',
                         indent=2, max_lines=3, center_line=4)
        self.assertEqual(t, ' three\n four\n five')

    def test_preview_max_chars(self):
        pass
        t = preview_text('united states of america\ncanada\nrepublic of congo\nsouth africa\n'
                         'democratic republic of tao tom\nunited federation of russia\n'
                         'united kingdom of great britain and northern ireland',
                         indent=2, max_lines=5, center_line=4, max_chars=100)
        self.assertEqual(t, '''
 canada
 republic of congo
 south africa
 democratic republic...
 united federation o...''')

    def test_preview_prefix_suffix(self):
        t = preview_text('one\ntwo\nthree\nfour\nfive\nsix\nseven\neight\nnine',
                         indent=2, max_lines=3, center_line=4,
                         prefix=' PREFIX\n', suffix='\n SUFFIX')
        self.assertEqual(t, ' PREFIX\n three\n four\n five\n SUFFIX')

    def test_preview_highlight(self):
        text = 'one\ntwo\nthree\nfour\nfive\nsix\nseven\neight\nnine'
        t1 = preview_text(text, indent=2, max_lines=3, center_line=4, highlight=4)
        t2 = preview_text(text, indent=2, max_lines=3, center_line=4, highlight=4,
                          colored=True)
        self.assertEqual(t1, ' three\n\x1b[30m\x1b[103m four\x1b[0m\n five')
        self.assertEqual(t1, t2)

    def test_preview_surround(self):
        t = preview_text('one\ntwo\nthree\nfour\nfive\nsix\nseven\neight\nnine',
                         indent=2, max_lines=3, center_line=4, highlight=4,
                         colored=False)
        self.assertEqual(t, ' three\n>> four <<\n five')
3,214
src/powerdns_manager/migrations/0006_auto_20151018_0256.py
gnotaras/django-powerdns-manager
12
2170588
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('powerdns_manager', '0005_auto_20150311_1002'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='modified_at',
            field=models.PositiveIntegerField(help_text=b'Timestamp for the last modification time.', verbose_name='modified at'),
        ),
        migrations.AlterField(
            model_name='domain',
            name='last_check',
            field=models.PositiveIntegerField(help_text=b'Last time this domain was checked for freshness.', null=True, verbose_name='last check'),
        ),
        migrations.AlterField(
            model_name='domain',
            name='notified_serial',
            field=models.PositiveIntegerField(help_text=b'The last notified serial of a master domain. This is updated from the SOA record of the domain.', null=True, verbose_name='notified serial'),
        ),
        migrations.AlterField(
            model_name='dynamiczone',
            name='domain',
            field=models.OneToOneField(related_name='powerdns_manager_dynamiczone_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain, the A and AAAA records of which might be updated dynamically over HTTP.'),
        ),
        migrations.AlterField(
            model_name='record',
            name='change_date',
            field=models.PositiveIntegerField(help_text=b'Timestamp for the last update. This is used by PowerDNS internally.', null=True, verbose_name='change date'),
        ),
        migrations.AlterField(
            model_name='record',
            name='prio',
            field=models.PositiveIntegerField(help_text=b'For MX records, this should be the priority of the mail exchanger specified.', null=True, verbose_name='priority'),
        ),
        migrations.AlterField(
            model_name='record',
            name='ttl',
            field=models.PositiveIntegerField(help_text=b'How long DNS clients are allowed to remember this record. Also known as Time To Live (TTL). This value is in seconds.', null=True, verbose_name='TTL', blank=True),
        ),
    ]
2,289
server/handlers/dummy_handler.py
jojowither/Question-Answer-Project
0
2171087
from question_answer_project.server.handlers.qa_handler import handler


# Because the CWD when building the torchserve archive is /tmp/xxxx,
# we must use this dummy handler to change the work directory so the
# model path can be resolved.
def handle(data, context):
    return handler(data, context)
266
share/lib/python/neuron/rxd/geometry3d/simplevolume_helper.py
tommorse/nrn
1
2171336
from . import graphicsPrimitives as graphics
import math
import random


def get_verts(voxel, g):
    """return list (len=8) of point coordinates (x,y,z) that are vertices of the voxel (i,j,k)"""
    (i, j, k) = voxel
    dx, dy, dz = g['dx'], g['dy'], g['dz']
    v1_0, v1_1, v1_2 = g['xlo'] + i*dx, g['ylo'] + j*dy, g['zlo'] + k*dz
    vertices = [(v1_0, v1_1, v1_2),
                (v1_0+dx, v1_1, v1_2),
                (v1_0+dx, v1_1+dy, v1_2),
                (v1_0, v1_1+dy, v1_2),
                (v1_0, v1_1, v1_2+dz),
                (v1_0+dx, v1_1, v1_2+dz),
                (v1_0+dx, v1_1+dy, v1_2+dz),
                (v1_0, v1_1+dy, v1_2+dz)]
    return vertices


def get_subverts(voxel, g, step):
    """return list (len=8) of point coordinates (x,y,z) that are vertices of the voxel (i,j,k)"""
    (i, j, k) = voxel
    dx, dy, dz = g['dx'], g['dy'], g['dz']
    DX, DY, DZ = step
    v1_0, v1_1, v1_2 = g['xlo'] + i*dx, g['ylo'] + j*dy, g['zlo'] + k*dz
    vertices = [(v1_0, v1_1, v1_2),
                (v1_0+DX, v1_1, v1_2),
                (v1_0+DX, v1_1+DY, v1_2),
                (v1_0, v1_1+DY, v1_2),
                (v1_0, v1_1, v1_2+DZ),
                (v1_0+DX, v1_1, v1_2+DZ),
                (v1_0+DX, v1_1+DY, v1_2+DZ),
                (v1_0, v1_1+DY, v1_2+DZ)]
    return vertices


def add_res(flist, voxel, verts_in, res, g):
    (i, j, k) = voxel
    dx, dy, dz = g['dx'], g['dy'], g['dz']
    bit = (dx * dy * dz) / (res**3)
    step = [dx/2, dy/2, dz/2]
    subverts = get_subverts(voxel, g, step)  # the 'voxel positions' for the new subvoxels
    #verts_in = [0,1,2,3,4,5,6,7]
    count = 0
    # select only the subvoxels of the vertices that are in
    for i in verts_in:
        v0 = subverts[i]
        startpt = (v0[0] + dx/(2*res), v0[1] + dy/(2*res), v0[2] + dz/(2*res))
        for x in range(res//2):
            for y in range(res//2):
                for z in range(res//2):
                    v = (startpt[0] + x*(dx/res), startpt[1] + y*(dy/res), startpt[2] + z*(dz/res))
                    if min([f.distance(v[0], v[1], v[2]) for f in flist]) <= 0:
                        count += 1
    vol = count*bit
    # add in partials if still 0
    if vol == 0:
        vol = bit/2
    return vol


def Put(flist, voxel, v0, verts_in, res, g):
    """ add voxel key with partial volume value to dict of surface voxels"""
    # v0 is the start coordinates of voxel (verts[0] fed in)
    # res is resolution of sampling points (only works for even values of res!)
    dx, dy, dz = g['dx'], g['dy'], g['dz']
    bit = (dx * dy * dz) / (res**3)
    count = 0
    startpt = (v0[0] + dx/(2*res), v0[1] + dy/(2*res), v0[2] + dz/(2*res))
    for x in range(res):
        for y in range(res):
            for z in range(res):
                v = (startpt[0] + x*(dx/res), startpt[1] + y*(dy/res), startpt[2] + z*(dz/res))
                if min([f.distance(v[0], v[1], v[2]) for f in flist]) <= 0:
                    count += 1
    bitvol = count*bit
    if bitvol == 0:
        bitvol = add_res(flist, voxel, verts_in, res*2, g)
    return bitvol


def simplevolume(flist, distances, voxel, g):
    """return the number of vertices of this voxel that are contained within the surface"""
    verts = get_verts(voxel, g)
    verts_in = []
    for i in range(8):
        if distances[i] <= 0:
            verts_in.append(i)
    Vol = Put(flist, voxel, verts[0], verts_in, 2, g)
    return Vol
3,546
email_recorder/admin.py
sgupta-codeprofile/ondemandservices
0
2170496
from django.contrib import admin

from email_recorder.models import email, cat, liststore

# Register your models here.
admin.site.register(email)
admin.site.register(cat)
admin.site.register(liststore)
199
uhb/analytical.py
benranderson/uhb
0
2170489
from collections import namedtuple

import scipy.optimize

from uhb import general
from uhb import psi


def required_download(delta, E, I, EAF, w_o):
    """ Returns the required download for stability. """
    term1 = 1.16 - 4.76 * (E * I * w_o / delta) ** 0.5 / EAF
    term2 = ((delta * w_o) / (E * I)) ** 0.5
    return term1 * EAF * term2


def required_sand_cover_height(required_resistance, D, gamma, f, c):
    """ Returns the sand cover height to provide the required uplift resistance. """

    def solve_sand(H):
        return psi.R_max(H, D, gamma, f) - required_resistance

    def solve_clay(H):
        return psi.P_otc6486(H, D, gamma, c) - required_resistance

    # TODO: exception catch for solve
    if c > 0:
        return scipy.optimize.newton(solve_clay, 1e-3)
    else:
        return scipy.optimize.newton(solve_sand, 1e-3)


def run_analytical_calc(data):
    D, t, t_coat = data.D, data.t, data.t_coat
    delta_P = data.P_i - data.P_e
    delta_T = data.T - data.T_a
    v, alpha, E, rho_p = data.v, data.alpha, data.E, data.rho_p
    rho_coat, rho_cont = data.rho_coat, data.rho_cont
    delta = max(data.deltas)
    soil_type = data.soil_type
    gamma, f, c = data.gamma_s, data.f, data.c
    rho_sw, g = data.rho_sw, data.g

    D_tot = general.total_outside_diameter(D, t_coat)
    A_i = general.internal_area(D, t)
    A_s = general.area_of_steel(D, t)
    EAF = abs(general.effective_axial_force(
        0, delta_P, A_i, v, A_s, E, alpha, delta_T))
    I = general.second_moment_of_area(D, t)
    w_o = general.submerged_weight(
        D, t, t_coat, rho_p, rho_coat, rho_cont, rho_sw, g)

    w = required_download(delta, E, I, EAF, w_o)
    q = max(w - w_o, 0)
    H = required_sand_cover_height(q, D_tot, gamma, f, c)

    Results = namedtuple("Results", "I EAF w_o w q H")
    return Results(I, EAF, w_o, w, q, H)
1,860
Mac/Extras.install.py
sireliah/polish-python
1
2170805
"""Recursively copy a directory but skip undesired files oraz directories (CVS, backup files, pyc files, etc)""" zaimportuj sys zaimportuj os zaimportuj shutil verbose = 1 debug = 0 def isclean(name): jeżeli name == 'CVS': zwróć 0 jeżeli name == '.cvsignore': zwróć 0 jeżeli name == '.DS_store': zwróć 0 jeżeli name == '.svn': zwróć 0 jeżeli name.endswith('~'): zwróć 0 jeżeli name.endswith('.BAK'): zwróć 0 jeżeli name.endswith('.pyc'): zwróć 0 jeżeli name.endswith('.pyo'): zwróć 0 jeżeli name.endswith('.orig'): zwróć 0 zwróć 1 def copycleandir(src, dst): dla cursrc, dirs, files w os.walk(src): assert cursrc.startswith(src) curdst = dst + cursrc[len(src):] jeżeli verbose: print("mkdir", curdst) jeżeli nie debug: jeżeli nie os.path.exists(curdst): os.makedirs(curdst) dla fn w files: jeżeli isclean(fn): jeżeli verbose: print("copy", os.path.join(cursrc, fn), os.path.join(curdst, fn)) jeżeli nie debug: shutil.copy2(os.path.join(cursrc, fn), os.path.join(curdst, fn)) inaczej: jeżeli verbose: print("skipfile", os.path.join(cursrc, fn)) dla i w range(len(dirs)-1, -1, -1): jeżeli nie isclean(dirs[i]): jeżeli verbose: print("skipdir", os.path.join(cursrc, dirs[i])) usuń dirs[i] def main(): jeżeli len(sys.argv) != 3: sys.stderr.write("Usage: %s srcdir dstdir\n" % sys.argv[0]) sys.exit(1) copycleandir(sys.argv[1], sys.argv[2]) jeżeli __name__ == '__main__': main()
1,736
src/data/1235.py
NULLCT/LOMC
0
2168116
from collections import deque
import sys

input = sys.stdin.readline


def main():
    N, Q = map(int, input().split())
    graph = [[] for _ in range(N)]
    for _ in range(N - 1):
        a, b = map(int, input().split())
        a -= 1
        b -= 1
        graph[a].append(b)
        graph[b].append(a)

    fr = 0
    que = deque([fr])
    goneset = set([fr])
    count = [0] * N
    while True:
        fr = que.popleft()
        for to in graph[fr]:
            if to in goneset:
                continue
            count[to] = count[fr] + 1
            que.append(to)
            goneset.add(to)
        if len(que) == 0:
            break

    for _ in range(Q):
        c, d = map(int, input().split())
        c -= 1
        d -= 1
        if count[c] % 2 == count[d] % 2:
            print('Town')
        else:
            print('Road')


if __name__ == "__main__":
    main()
867
pt_helper/tools/nmap.py
six-two/pentest-logger
0
2169513
import os

from pt_helper import ParamManipulator, ParamInfo, Settings, BaseToolWrapper
from pt_helper.utils import ArgumentRater, NMAP

# -V is short for --version
FLAG_BLOCKLIST = ["-V", "--version", "-h", "--help"]
PARAMS = {}

# nmap -h | grep oA
#   -oA <basename>: Output in the three major formats at once
OUTPUT_ALL = ParamInfo("-oA")


class Nmap(BaseToolWrapper):
    def __init__(self, settings: Settings) -> None:
        super().__init__(settings, FLAG_BLOCKLIST, PARAMS, [NMAP])

    def rewrite_arguments_custom(self, params: ParamManipulator, output_dir: str) -> None:
        output_file = os.path.join(output_dir, "results")
        params.add(OUTPUT_ALL, [output_file])
687
src/wdl/working_wdls_toils/toilwdl_compiled.py
pditommaso/scalability-tst
1
2168289
from toil.job import Job
from toil.common import Toil
from toil.lib.docker import apiDockerCall
from toil.wdl.wdl_functions import generate_docker_bashscript_file
from toil.wdl.wdl_functions import select_first
from toil.wdl.wdl_functions import sub
from toil.wdl.wdl_functions import size
from toil.wdl.wdl_functions import glob
from toil.wdl.wdl_functions import process_and_read_file
from toil.wdl.wdl_functions import process_infile
from toil.wdl.wdl_functions import process_outfile
from toil.wdl.wdl_functions import abspath_file
from toil.wdl.wdl_functions import combine_dicts
from toil.wdl.wdl_functions import parse_memory
from toil.wdl.wdl_functions import parse_cores
from toil.wdl.wdl_functions import parse_disk
from toil.wdl.wdl_functions import read_string
from toil.wdl.wdl_functions import read_int
from toil.wdl.wdl_functions import read_float
from toil.wdl.wdl_functions import read_tsv
from toil.wdl.wdl_functions import read_csv
from toil.wdl.wdl_functions import defined
from toil.wdl.wdl_functions import basename
import fnmatch
import textwrap
import subprocess
import os
import errno
import time
import shutil
import shlex
import uuid
import logging

asldijoiu23r8u34q89fho934t8u34fcurrentworkingdir = os.getcwd()

logger = logging.getLogger(__name__)


def initialize_jobs(job):
    job.fileStore.logToMaster("initialize_jobs")


class hostTaskCls(Job):
    def __init__(self, i=None, *args, **kwargs):
        super(hostTaskCls, self).__init__(*args, **kwargs)
        cores = parse_cores(1)
        Job.__init__(self, cores=cores)
        self.id_i = i

    def run(self, fileStore):
        fileStore.logToMaster("hostTask")
        tempDir = fileStore.getLocalTempDir()

        try:
            os.makedirs(os.path.join(tempDir, 'execution'))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        i = self.id_i

        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command0 = r'''
            hostname
            sleep 30
            '''
        except:
            command0 = ''

        cmd = command0
        cmd = textwrap.dedent(cmd.strip("\n"))
        this_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = this_process.communicate()

        taskID = 'i'
        result = (read_string((stdout)))
        rvDict = {"taskID": taskID, "result": result}
        return rvDict


class catHostsTaskCls(Job):
    def __init__(self, result1="", logfile="", *args, **kwargs):
        super(catHostsTaskCls, self).__init__(*args, **kwargs)
        self.id_result1 = result1
        self.id_logfile = logfile

    def run(self, fileStore):
        fileStore.logToMaster("catHostsTask")
        tempDir = fileStore.getLocalTempDir()

        try:
            os.makedirs(os.path.join(tempDir, 'execution'))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        result1 = self.id_result1
        logfile = self.id_logfile

        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command1 = r'''
            echo "'''
        except:
            command1 = ''
        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command2 = str(';'.join(str(x) for x in result1) if not isinstance(';'.join(str(x) for x in result1), tuple) else process_and_read_file(';'.join(str(x) for x in result1), tempDir, fileStore)).strip("\n")
        except:
            command2 = ''
        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command3 = r'''"| tr ";" "\n" | sort -u > '''
        except:
            command3 = ''
        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command4 = str(logfile if not isinstance(logfile, tuple) else process_and_read_file(logfile, tempDir, fileStore)).strip("\n")
        except:
            command4 = ''
        try:
            # Intended to deal with "optional" inputs that may not exist
            # TODO: handle this better
            command5 = r'''
            '''
        except:
            command5 = ''

        cmd = command1 + command2 + command3 + command4 + command5
        cmd = textwrap.dedent(cmd.strip("\n"))
        this_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = this_process.communicate()

        # output-type: File
        output_filename = str(logfile)
        result = process_outfile(output_filename, fileStore, tempDir, '/home/a-m/azzaea/scalability-tst/wdl/working_wdls_toils')
        rvDict = {"result": result}
        return rvDict


class scatter0Cls(Job):
    def __init__(self, logfile=None, ntasks=None, *args, **kwargs):
        Job.__init__(self)
        self.id_logfile = logfile
        self.id_ntasks = ntasks

    def run(self, fileStore):
        fileStore.logToMaster("scatter0")
        tempDir = fileStore.getLocalTempDir()

        try:
            os.makedirs(os.path.join(tempDir, 'execution'))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        logfile = self.id_logfile
        ntasks = self.id_ntasks

        host1_taskID = []
        host1_result = []
        for n in (range(ntasks)):
            job_host1 = self.addFollowOn(hostTaskCls(i=n))
            host1_taskID.append(job_host1.rv("taskID"))
            host1_result.append(job_host1.rv("result"))

        rvDict = {"host1_taskID": host1_taskID, "host1_result": host1_result}
        return rvDict


if __name__ == "__main__":
    parser = Job.Runner.getDefaultArgumentParser()
    options = parser.parse_args()
    options.clean = 'always'
    with Toil(options) as fileStore:
        # WF Declarations
        logfile = "log.txt"
        ntasks = 3

        job0 = Job.wrapJobFn(initialize_jobs)
        job0 = job0.encapsulate()
        scatter0 = job0.addChild(scatter0Cls(logfile=logfile, ntasks=ntasks))
        host1_taskID = scatter0.rv("host1_taskID")
        host1_result = scatter0.rv("host1_result")
        job0 = job0.encapsulate()
        catHostsTask = job0.addChild(catHostsTaskCls(result1=(host1_result), logfile=logfile))
        catHostsTask_result = catHostsTask.rv("result")

        fileStore.start(job0)
6,702
programs/pgm04_07.py
danielsunzhongyuan/python_practice
0
2170731
#
# This file contains the Python code from Program 4.7 of
# "Data Structures and Algorithms
# with Object-Oriented Design Patterns in Python"
# by <NAME>.
#
# Copyright (c) 2003 by <NAME>, P.Eng.  All rights reserved.
#
# http://www.brpreiss.com/books/opus7/programs/pgm04_07.txt
#


class MultiDimensionalArray(object):

    def getOffset(self, indices):
        if len(indices) != len(self._dimensions):
            raise IndexError
        offset = 0
        for i, dim in enumerate(self._dimensions):
            if indices[i] < 0 or indices[i] >= dim:
                raise IndexError
            offset += self._factors[i] * indices[i]
        return offset

    def __getitem__(self, indices):
        return self._data[self.getOffset(indices)]

    def __setitem__(self, indices, value):
        self._data[self.getOffset(indices)] = value

    # ...
858
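The getOffset method above implements the standard row-major mapping: each index is weighted by the product of the dimensions that follow it. A worked example (the 3x4 shape is invented for illustration; the class's __init__, which would set _dimensions and _factors, is elided in this listing):

# Row-major offset as computed by getOffset, worked by hand:
# _dimensions = (3, 4) gives _factors = (4, 1),
# so indices (2, 1) map to offset 2*4 + 1*1 = 9.
dimensions, factors = (3, 4), (4, 1)
indices = (2, 1)
offset = sum(f * i for f, i in zip(factors, indices))
print(offset)  # 9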
scripts/fitnessMutations/infer_fitness.py
felixhorns/BCellSelection
3
2170576
import sys

import pandas as pd
from Bio import SeqIO, Align, AlignIO, Phylo

import matplotlib
matplotlib.use('Agg')
import pylab as plt

sys.path.append("/local10G/rfhorns/resources/FitnessInference/prediction_src/")
import sequence_ranking
import tree_utils


def load_tree(f):
    t = Phylo.read(f, 'newick')
    t.root_with_outgroup("germline")
    t.get_nonterminals()[0].branch_length = 0.0
    # t.ladderize(reverse=True)
    return t


def load_aln(infile):
    aln = Align.MultipleSeqAlignment([])
    aln_dict = {}
    with open(infile, 'r') as f:
        for seq_record in SeqIO.parse(f, 'fasta'):
            aln.append(seq_record)
            aln_dict[seq_record.id] = str(seq_record.seq)
    return aln, aln_dict


def get_parent(tree, child_clade):
    node_path = tree.get_path(child_clade)
    return node_path[-2]


def get_fitness_changes(fit):
    """ Get fitness changes on each branch of the tree """
    header = ["name", "depth", "mean_fitness", "var_fitness",
              "parent_name", "parent_depth", "parent_mean_fitness", "parent_var_fitness",
              "delta_mean_fitness", "length"]
    df = pd.DataFrame(columns=header)
    i = 0
    depths = fit.T.depths()
    for clade in fit.T.find_clades():
        if clade.name in [None, "germline", "2_"]:
            continue
        parent = get_parent(fit.T, clade)
        delta = clade.mean_fitness - parent.mean_fitness
        features = [clade.name, depths[clade], clade.mean_fitness, clade.var_fitness,
                    parent.name, depths[parent], parent.mean_fitness, parent.var_fitness,
                    delta, depths[clade] - depths[parent]]
        df.loc[i] = features
        i += 1
    df.set_index("name", inplace=True)
    return df


if __name__ == "__main__":

    # NB: Python 2 script (print statements).
    infile_tree = sys.argv[1]
    infile_aln = sys.argv[2]
    outfile_fitness_tree = sys.argv[3]
    outfile_df_fitness = sys.argv[4]
    outfile_tree_pdf = sys.argv[5]
    outfile_tree_pdf_labeled = sys.argv[6]

    print infile_tree
    print infile_aln

    # Load alignment
    aln = Align.MultipleSeqAlignment([])
    outgroup = None
    with open(infile_aln) as f:
        for record in SeqIO.parse(f, 'fasta'):
            if record.name != "germline":
                aln.append(record)
            else:
                outgroup = record
    if outgroup is None:
        print "outgroup not in alignment -- FATAL"
        quit()

    # Load tree
    T = load_tree(infile_tree)

    # Create sequence data object
    seq_data = sequence_ranking.alignment(aln, outgroup, build_tree=False)
    seq_data.T = T  # use predetermined tree

    # Infer fitness
    eps_branch_length = 1e-4
    diffusion = 0.5
    distance_scale = 2.0
    samp_frac = 0.1
    prediction = sequence_ranking.sequence_ranking(seq_data,
                                                   eps_branch_length=eps_branch_length,
                                                   pseudo_count=5,
                                                   methods=['mean_fitness'],
                                                   D=diffusion,
                                                   distance_scale=distance_scale,
                                                   samp_frac=samp_frac)
    best_node = prediction.predict()

    # Write fitness tree to file (for mutation annotation)
    Phylo.write(prediction.T, outfile_fitness_tree, "newick")

    # Get fitness changes on each branch
    df_fitness = get_fitness_changes(prediction)
    df_fitness.sort_values("delta_mean_fitness", ascending=False, inplace=True)

    # Write fitness changes to file
    df_fitness.to_csv(outfile_df_fitness)

    # Plot tree colored by fitness with and without node labels
    tree_utils.plot_prediction_tree(prediction)
    plt.savefig(outfile_tree_pdf)
    tree_utils.plot_prediction_tree(prediction, node_label_func=lambda x: x.name)
    plt.savefig(outfile_tree_pdf_labeled)

    print "Done!!"
3,694
orchestra/admin.py
hajime-f/octave_backend
0
2167549
from .models import Orchestra
from django.contrib import admin
from django.utils.translation import gettext_lazy as _


class OrchestraAdmin(admin.ModelAdmin):
    fieldsets = (
        (_('Orchestra info'), {'fields': ('orchestra_name', 'email', 'admin_users',
                                          'orchestra_type1', 'orchestra_type2',
                                          'birthday', 'postal_code', 'prefecture',
                                          'address', 'building', 'tel', 'url', 'photo')}),
    )
    list_display = ('get_full_name', 'email', 'orchestra_type1', 'orchestra_type2',
                    'prefecture', 'address', 'created_at',)
    search_fields = ('orchestra_name', 'orchestra_name_kana', 'orchestra_type1',
                     'orchestra_type2', 'email', 'prefecture', 'address',)
    ordering = ('created_at',)


admin.site.register(Orchestra, OrchestraAdmin)
734
utils/dcp_population_excel_helpers.py
NYCPlanning/db-equitable-development-tool
1
2171322
import pandas as pd

# Create helpful global mappers for dcp
count_suffix_mapper_global = {
    "e": "count",
    "m": "count_moe",
    "c": "count_cv",
    "p": "pct",
    "z": "pct_moe",
}

median_suffix_mapper_global = {
    "e": "median",
    "m": "median_moe",
    "c": "median_cv",
    "p": "median_pct",  # To be removed on further processing
    "z": "median_pct_moe",  # To be removed on further processing
}

race_suffix_mapper_global = {"a": "anh", "b": "bnh", "h": "hsp", "w": "wnh"}

race_suffix_mapper = {
    "_a": "_anh_",
    "_b": "_bnh_",
    "_h": "_hsp_",
    "_w": "_wnh_",
}

stat_suffix_mapper = {
    "_00e": "_count",
    "_00m": "_count_moe",
    "_00c": "_count_cv",
    "_00p": "_pct",
    "_00z": "_pct_moe",
}

stat_suffix_mapper_ty = {
    "_0812e": "_0812_count",
    "_0812m": "_0812_count_moe",
    "_0812c": "_0812_count_cv",
    "_0812p": "_0812_pct",
    "_0812z": "_0812_pct_moe",
    "_1519e": "_1519_count",
    "_1519m": "_1519_count_moe",
    "_1519c": "_1519_count_cv",
    "_1519p": "_1519_pct",
    "_1519z": "_1519_pct_moe",
}

stat_suffix_mapper_md = {
    "_0812e": "_0812_median",
    "_0812m": "_0812_median_moe",
    "_0812c": "_0812_median_cv",
    "_0812p": "_0812_pct",
    "_0812z": "_0812_pct_moe",
    "_1519e": "_1519_median",
    "_1519m": "_1519_median_moe",
    "_1519c": "_1519_median_cv",
    "_1519p": "_1519_pct",
    "_1519z": "_1519_pct_moe",
}


### Create base load function that reads dcp population xlsx for 2000 census pums
def load_2000_census_pums_all_data() -> pd.DataFrame:
    df = pd.read_excel(
        "./resources/ACS_PUMS/EDDT_Census2000PUMS.xlsx",
        skiprows=1,
        dtype={"GeoID": str},
    )
    df = df.replace(
        {
            "GeoID": {
                "Bronx": "BX",
                "Brooklyn": "BK",
                "Manhattan": "MN",
                "Queens": "QN",
                "Staten Island": "SI",
                "NYC": "citywide",
            }
        }
    )
    df.set_index("GeoID", inplace=True)
    return df


def remove_duplicate_cols(df):
    """Excel spreadsheet has some duplicate columns that Erica used for calculations"""
    df = df.drop(df.filter(regex="E.1$|M.1$|C.1$|P.1$|Z.1$").columns, axis=1)
    return df
2,245
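A quick sketch of how the suffix mappers above get used: strip a raw DCP column's 4-character suffix and substitute the readable one (the 'pop' column prefix is invented for illustration):

raw_cols = ["pop_00e", "pop_00m", "pop_00p"]
renamed = [c[:-4] + stat_suffix_mapper[c[-4:]] for c in raw_cols]
print(renamed)  # ['pop_count', 'pop_count_moe', 'pop_pct']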
simio_di/containers.py
RB387/simio-di
1
2170249
from functools import partial, wraps
from inspect import isfunction
from typing import Type, Any, Optional, Callable, TypeVar, Iterable, Tuple

try:
    from typing import Protocol
except ImportError:
    from typing_extensions import Protocol

T = TypeVar("T")


class DependenciesContainerProtocol(Protocol):
    def set(self, obj: Type[T], **obj_kwargs: Any):
        ...

    def get(self, obj: Type[T]) -> Optional[Callable[[], T]]:
        ...

    def iter(self) -> Iterable[Tuple[Type[T], Callable[[], T]]]:
        ...


class DependenciesContainer:
    def __init__(self):
        self._deps = {}

    def set(self, obj: Type[T], **obj_kwargs: Any):
        self._deps[obj] = wraps(obj)(partial(obj, **obj_kwargs))

    def get(self, obj: Type[T]) -> Optional[Callable[[], T]]:
        dependency = self._deps.get(obj)
        if dependency is not None:
            return dependency
        return None

    def iter(self) -> Iterable[Tuple[Type[T], Callable[[], T]]]:
        for obj_type, injected in self._deps.items():
            yield obj_type, injected


class SingletoneDependenciesContainer:
    def __init__(self):
        self._deps = {}

    def set(self, obj: Type[T], **obj_kwargs):
        if isfunction(obj):
            self._deps[obj] = wraps(obj)(partial(obj, **obj_kwargs))
        else:
            injected = obj(**obj_kwargs)
            self._deps[obj] = lambda: injected

    def get(self, obj: Type[T]) -> Optional[Callable[[], T]]:
        return self._deps.get(obj)

    def iter(self) -> Iterable[Tuple[Type[T], Callable[[], T]]]:
        for obj_type, injected in self._deps.items():
            yield obj_type, injected
1,664
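A minimal usage sketch for the two containers above; Config and Service are hypothetical classes invented here for illustration, not part of simio-di:

class Config:
    def __init__(self, url: str):
        self.url = url

class Service:
    def __init__(self, config: Config):
        self.config = config

singletons = SingletoneDependenciesContainer()
singletons.set(Config, url="http://localhost")  # class is instantiated once and cached
config = singletons.get(Config)()               # the stored factory returns the cached instance

deps = DependenciesContainer()
deps.set(Service, config=config)                # stores a partial; instantiates on every call
service = deps.get(Service)()                   # a fresh Service each time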
Chapter10/ch10/start_with_info.py
clperez/Learn-Python-Programming-Second-Edition
55
2170363
import threading
from time import sleep


def sum_and_product(a, b):
    sleep(.2)
    print_current()
    s, p = a + b, a * b
    print(f'{a}+{b}={s}, {a}*{b}={p}')


def status(t):
    if t.is_alive():
        print(f'Thread {t.name} is alive.')
    else:
        print(f'Thread {t.name} has terminated.')


def print_current():
    print('The current thread is {}.'.format(
        threading.current_thread()
    ))
    print('Threads: {}'.format(list(threading.enumerate())))


print_current()
t = threading.Thread(
    target=sum_and_product, name='SumProd', args=(3, 7)
)
t.start()
status(t)
t.join()
status(t)

"""
$ python start_with_info.py
The current thread is <_MainThread(MainThread, started 140735733822336)>.
Threads: [<_MainThread(MainThread, started 140735733822336)>]
Thread SumProd is alive.
The current thread is <Thread(SumProd, started 123145375604736)>.
Threads: [
    <_MainThread(MainThread, started 140735733822336)>,
    <Thread(SumProd, started 123145375604736)>
]
3+7=10, 3*7=21
Thread SumProd has terminated.
"""
1,050
bot.py
Kousthubhbhat/Duconver
0
2168482
from os import environ
import os
import time
from unshortenit import UnshortenIt
from urllib.request import urlopen
from urllib.parse import urlparse
import aiohttp
from pyrogram import Client, filters
from pyshorteners import Shortener
from bs4 import BeautifulSoup
import requests
import re

API_ID = environ.get('API_ID')
API_HASH = environ.get('API_HASH')
BOT_TOKEN = environ.get('BOT_TOKEN')
API_KEY = environ.get('API_KEY')
CHANNEL = environ.get('CHANNEL')
HOWTO = environ.get('HOWTO')

bot = Client('Droplink bot', api_id=API_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)


@bot.on_message(filters.command('start') & filters.private)
async def start(bot, message):
    await message.reply(
        f"**Hi {message.chat.first_name}⚡**\n"
        "**🤖I Am (Not At All 😏) Simple Bot \n🔀 I can Convert Your Links To linkshortify\n"
        "📁 Send ME Image or Text_Message \n🔃 Convert All Links To Your Links \n"
        "👨🏻‍💻 I Was Developed [HALF INTELLIGENT](https://t.me/Half_Intelligent_2) \n"
        "©️ Powered By [linkshortify](https://linkshortify.com)\n"
        "[ InlineKeyboardButton(🔮Join Our Channel🔮, url=https://t.me/serials_funda) ]**")


@bot.on_message(filters.command('help') & filters.private)
async def start(bot, message):
    await message.reply(
        f"**Hello, {message.chat.first_name}!**\n\n"
        "**If you send post which had Links, texts & images... Than I'll convert & replace all links with your links \n"
        "Message me @Half_Intelligent_2 For more help-**")


@bot.on_message(filters.command('support') & filters.private)
async def start(bot, message):
    await message.reply(
        f"**Hey, {message.chat.first_name}!**\n\n"
        "**please contact 🤗\n@Half_Intelligent_2**")


@bot.on_message(filters.command('API') & filters.private)
async def start(bot, message):
    await message.reply(
        f"**Api Status 🔮 \n[Your linkshortify Api Is Linked Successfully ✅](https://linkshortify.com/member/tools/quick)\n now you can short your Links 🤩**")


@bot.on_message(filters.command('plans') & filters.private)
async def start(bot, message):
    await message.reply(
        f"**Hey, {message.chat.first_name}!**\n\n"
        "**Our Plans just like this 💰 \n\nFor 30 Dayes = 2$ \nFor 90 Dayes= 5$ \nFor 180 Dayes = 8$ \nFor 365 Dayes = 15$ \n\n"
        " If you Want to Buy Our Bot Subscription Contact \n@Half_Intelligent_2**")


@bot.on_message(filters.text & filters.private)
async def pdisk_uploader(bot, message):
    new_string = str(message.text)
    conv = await message.reply("Converting your link to linkshortify...")
    dele = conv["message_id"]
    try:
        pdisk_link = await multi_pdisk_up(new_string)
        await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
        await message.reply(f'{pdisk_link}', quote=True)
    except Exception as e:
        await message.reply(f'Error: {e}', quote=True)


@bot.on_message(filters.photo & filters.private)
async def pdisk_uploader(bot, message):
    new_string = str(message.caption)
    conv = await message.reply("Converting your link to linkshortify...")
    dele = conv["message_id"]
    try:
        pdisk_link = await multi_pdisk_up(new_string)
        if (len(pdisk_link) > 1020):
            await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
            await message.reply(f'{pdisk_link}', quote=True)
        else:
            await bot.delete_messages(chat_id=message.chat.id, message_ids=dele)
            await bot.send_photo(message.chat.id, message.photo.file_id, caption=f'{pdisk_link}')
    except Exception as e:
        await message.reply(f'Error: {e}', quote=True)


async def pdisk_up(link):
    if ('mega' in link or 'google' in link or 'mdisk' in link or 'entertainvideo' in link
            or 'dood' in link or 'bit' in link or 'mediafire' in link or 'shadowave' in link
            or 'dashboard' in link or 'tnvalue' in link or 'tnlink' in link or 'pdisklink' in link
            or 'mikilinks' in link or 'clickcafe' in link or 'mdiskshortner' in link or 'Afly' in link
            or 'Sdfly' in link or 'Clickfly' in link or 'Pdiskshortener' in link or 'urlshortx' in link
            or 'adrinolinks' in link or 'linkshortify' in link or 'earn4clicks' in link or 'Streaam' in link
            or 'telegram.me' in link or 'voot' in link or 'zee5' in link or 'hotstar' in link
            or 'studyranks' in link or 'cbse' in link or 'jatinbeniwal' in link or 'discord' in link):
        url = 'https://linkshortify.com/api'
        params = {'api': API_KEY, 'url': link}
        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=params, raise_for_status=True) as response:
                data = await response.json()
                v_url = """__**__ - """ + data["shortenedUrl"] + """**"""
    else:
        v_url = link
    return (v_url)


async def multi_pdisk_up(ml_string):
    list_string = ml_string.splitlines()
    ml_string = ' \n'.join(list_string)
    new_ml_string = list(map(str, ml_string.split(" ")))
    new_ml_string = await remove_username(new_ml_string)
    new_join_str = "".join(new_ml_string)
    urls = re.findall(r'(https?://[^\s]+)', new_join_str)
    nml_len = len(new_ml_string)
    u_len = len(urls)
    url_index = []
    count = 0
    for i in range(nml_len):
        for j in range(u_len):
            if (urls[j] in new_ml_string[i]):
                url_index.append(count)
        count += 1
    new_urls = await new_pdisk_url(urls)
    url_index = list(dict.fromkeys(url_index))
    i = 0
    for j in url_index:
        new_ml_string[j] = new_ml_string[j].replace(urls[i], new_urls[i])
        i += 1
    new_string = " ".join(new_ml_string)
    return await addFooter(new_string)


async def new_pdisk_url(urls):
    new_urls = []
    for i in urls:
        time.sleep(0.2)
        new_urls.append(await pdisk_up(i))
    return new_urls


async def remove_username(new_List):
    for i in new_List:
        if ('@' in i or 't.me' in i or 'https://bit.ly/abcd' in i or 'https://bit.ly/123abcd' in i
                or 'telegra.ph' in i or 'Join' in i or 'channel' in i or '🚀 Here is your Link:' in i):
            new_List.remove(i)
    return new_List


async def addFooter(str):
    footer = """\n__▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬__
📣Its A Oficial Bot OF @""" + CHANNEL + """
____"""
    return str + footer


bot.run()
6,284
pcask1d/src/wavefunction.py
NickWoods1/pcask1d
1
2170956
# Distributed under the terms of the MIT License.

"""
Module that defines a single-particle wavefunction and its various properties
"""

import numpy as np

from .density import Density


class Wavefunction:
    """ This class represents a single-particle wavefunction :math:`\phi_{ik}^\sigma (G)`
    with associated single-particle energy :math:`\varepsilon` and occupancy :math:`f_i`.
    Each wavefunction has a spin :math:`\sigma`, k-point :math:`k`, and band index :math:`i`"""

    def __init__(self, params, **kwargs):
        # Number of plane-waves
        N = len(params.planewave_grid)

        self._pw_coefficients = kwargs.get('pw_coefficients', np.zeros(N, dtype=complex))
        self._energy = kwargs.get('energy', 0)
        self._k_point = kwargs.get('k_point', 0)
        self._spin = kwargs.get('spin', 0)
        self._band_index = kwargs.get('band_index', 0)
        self._occupancy = kwargs.get('occupancy', 1)

    def __str__(self):
        return 'Band {0} of system Hamiltonian with k-point {1}'.format(self._band_index, self._k_point)

    @property
    def pw_coefficients(self):
        """ The plane-wave coefficients of the wavefunction """
        return self._pw_coefficients

    @pw_coefficients.setter
    def pw_coefficients(self, coeffs: np.ndarray):
        self._pw_coefficients = coeffs

    @property
    def energy(self):
        return self._energy

    @energy.setter
    def energy(self, energy: float):
        self._energy = energy

    @property
    def k_point(self):
        return self._k_point

    @k_point.setter
    def k_point(self, k_point: float):
        self._k_point = k_point

    @property
    def band_index(self):
        return self._band_index

    @band_index.setter
    def band_index(self, band_index: int):
        self._band_index = band_index

    @property
    def occupancy(self):
        return self._occupancy

    @occupancy.setter
    def occupancy(self, occ: float):
        self._occupancy = occ

    def get_density(self):
        """ Obtain the (occupancy weighted) single-particle density corresponding to
        a single-particle wavefunction """
        # TODO: put wavefunction on big grid, pad with zeros, then FFT.
        wavefunction = np.fft.ifft(self._pw_coefficients)
        return Density(coeffs=self._occupancy * wavefunction.conj() * wavefunction)
2,366
python/mcpc-bid.py
wnzhang/rtb-unbiased-learning
20
2170871
#!/usr/bin/python
# NB: Python 2 script (print statements).
import sys
import math


def bidding_mcpc(ecpc, pctr):
    return int(ecpc * pctr)


def win_auction(case, bid):
    return bid > case[1]  # bid > winning price


# budgetProportion clk cnv bid imp budget spend para
def simulate_one_bidding_strategy_with_parameter(cases, ctrs, tcost, proportion, writer_win, writer_lose):
    budget = int(tcost / proportion)  # initialise the budget
    wins = []
    cost = 0
    clks = 0
    bids = 0
    imps = 0
    for idx in range(0, len(cases)):
        pctr = ctrs[idx]
        bid = bidding_mcpc(original_ecpc, pctr)
        bids += 1
        case = cases[idx]
        if win_auction(case, bid):
            imps += 1
            clks += case[0]
            cost += case[1]
            writer_win.write(str(case[0]) + '\t' + str(case[1]) + '\t' + str(case[2]) + '\n')
        else:
            writer_lose.write(str(bid) + '\n')
        if cost > budget:
            break
    # return str(proportion) + '\t' + str(clks) + '\t' + str(bids) + '\t' + \
    #        str(imps) + '\t' + str(budget) + '\t' + str(cost) + '\t' + str(para)
    return "finished"


'''
def simulate_one_bidding_strategy(cases, ctrs, tcost, proportion, writer_win, writer_lose):
    para = 1
    res = simulate_one_bidding_strategy_with_parameter(cases, ctrs, tcost, proportion, para)
    # print res
    for win_bid in res:
        writer.write(win_bid + '\n')
'''

'''
if len(sys.argv) < 3:
    print 'Usage: python mcpc-bid.py train.yzp.txt train.win.yzp.txt'
    exit(-1)
'''

clicks_prices = []  # clk and price
y_z_x = []  # y z and x
pctrs = []  # pCTR from logistic regression prediction
total_cost_train = 0
total_cost = 0  # total original cost during the test data
original_ecpc = 0.  # original eCPC from train data
original_ctr = 0.  # original ctr from train data

# read in train.yzp.base for original_ecpc and original_ctr and original pctr
fi = open(sys.argv[1], 'r')  # train.yzp.base.txt
first = True
imp_num = 0
clk_num = 0
for line in fi:
    s = line.strip().split()
    click = int(s[0])  # y
    cost = int(s[1])  # z
    pctrs.append(float(s[2]))  # p
    imp_num += 1
    clk_num += click
    # original_ctr += click
    # original_ecpc += cost
    total_cost_train += cost
fi.close()
original_ecpc = total_cost_train * 1.0 / clk_num

# read in train.yzx.bid for bid information
fi = open(sys.argv[2], 'r')  # train.yzp.txt
for line in fi:
    s = line.strip().split()
    click = int(s[0])
    # pctrs.append(float(s[2]))
    winning_price = int(s[1])
    x = ' '.join(s[2:])
    y_z_x.append((click, winning_price, x))
    clicks_prices.append((click, winning_price))
    total_cost += winning_price
fi.close()

# parameters setting for each bidding strategy
budget_proportions = [1]  # 64, 16] # , 32, 8]
mcpc_paras = [1]

# initialisation finished
# rock!
fo = open(sys.argv[3], 'w')  # train.yzx.imp.txt
fp = open(sys.argv[4], 'w')  # train.yzx.lose.txt
# header = "proportion\tclicks\tbids\timpressions\tbudget\tspend\tparameter"
# header = "prop\tclks\tbids\timps\tbudget\tspend\tpara"
# fo.write(header + "\n")
# print header
for proportion in budget_proportions:
    # simulate_one_bidding_strategy(clicks_prices, pctrs, total_cost, proportion, fo, fp)
    simulate_one_bidding_strategy_with_parameter(y_z_x, pctrs, total_cost, proportion, fo, fp)
fo.close()
fp.close()
print 'finished'
3,350
sokannonser/repository/companynames_mapper.py
JobtechSwe/castaway
0
2170835
import logging
import math

from flashtext.keyword import KeywordProcessor

from sokannonser.repository import elastic
from sokannonser.repository.elastic_connection_with_retries import elastic_search_with_retry
from sokannonser import settings

log = logging.getLogger(__name__)


class CompanyNamesMapper(object):

    def __init__(self, companynames_from_file=False, companynames_filepath=None):
        self.companynames_from_file = companynames_from_file
        self.companynames_filepath = companynames_filepath
        self.concept_to_term = {}
        self.keyword_processor = KeywordProcessor()
        self._init_keyword_processor(self.keyword_processor)
        self._init_companynames(self.keyword_processor)

    def _get_partial_to_companynames_mappings(self):
        """
        # "ikea" --> "ikea ab"
        # "ikea ab" --> "ikea ab"
        # "Volvo Car" --> "volvo car mobility ab", "volvo car retail solutions ab"
        # "Banan-Kompaniet" --> "ab banan-kompaniet"
        :return: Mappings between partial companyname to original variants of the complete name.
        """
        companylist = self.load_all_companynames()
        companies_mappings = dict()
        for name in companylist:
            original_name = name.strip()
            original_name_lower = original_name.lower()
            name_parts = original_name_lower.split(' ')
            len_parts = len(name_parts)
            partial_company_name = ''
            if len(name_parts) == 1 or len(name_parts) == 2 and original_name_lower.endswith(' ab'):
                partial_company_name = name_parts[0].strip()
                self.map_partial_to_company(companies_mappings, original_name_lower, partial_company_name)
            elif len(name_parts) == 2 and original_name_lower.startswith('ab '):
                partial_company_name = name_parts[1].strip()
                self.map_partial_to_company(companies_mappings, original_name_lower, partial_company_name)
            else:
                for i in range(len_parts):
                    clean_part = name_parts[i].strip()
                    if clean_part != '':
                        partial_company_name = partial_company_name + ' ' + clean_part
                        partial_company_name = partial_company_name.strip()
                        if i > 0:
                            # Don't add a single word if the company name consists of more than one word.
                            self.map_partial_to_company(companies_mappings, original_name_lower, partial_company_name)
        return companies_mappings

    @staticmethod
    def map_partial_to_company(companies_mappings, original_name_lower, partial_company_name):
        if partial_company_name not in companies_mappings:
            companies_mappings[partial_company_name] = []
        if original_name_lower not in companies_mappings[partial_company_name]:
            log.debug(f'Mapping: {partial_company_name} to: {original_name_lower}')
            companies_mappings[partial_company_name].append(original_name_lower)

    @staticmethod
    def company_name_iterator(max_size=10 ** 10):
        # companyname_field = "arbetsgivarenamn.keyword"
        # es_index = 'platsannonser_gdpr_behandlade'
        companyname_field = "employer.name.keyword"
        es_index = settings.ES_INDEX

        query_unique_names = {
            "size": 0,
            "aggs": {
                "names_count": {
                    "cardinality": {
                        "field": companyname_field
                    }
                }
            }
        }

        unique_res = elastic_search_with_retry(elastic, query_unique_names, es_index)
        unique_amount = int(unique_res['aggregations']['names_count']['value'])
        batch_size = 1000
        num_partitions = int(math.ceil(unique_amount / batch_size))

        aggs_query = {
            "size": 0,
            "aggs": {
                "names_agg": {
                    "terms": {
                        "field": companyname_field,
                        "include": {
                            "partition": 0,
                            "num_partitions": num_partitions
                        },
                        "size": batch_size
                    }
                }
            }
        }

        i = 0
        for partition_counter in range(num_partitions):
            aggs_query['aggs']['names_agg']['terms']['include']['partition'] = partition_counter
            res = elastic_search_with_retry(elastic, aggs_query, es_index)
            for bucket in res['aggregations']['names_agg']['buckets']:
                if i == max_size:
                    break
                i = i + 1
                yield bucket['key']

    def load_companynames_from_elastic(self):
        elastic_companynames = set()
        for name in self.company_name_iterator():
            elastic_companynames.add(name)
        log.info(f'Loaded: {len(elastic_companynames)} company names from Elastic')
        return sorted(list(elastic_companynames))

    @staticmethod
    def _load_companynames_from_file(filepath):
        with open(filepath, encoding='utf-8') as f:
            company_list = f.read().splitlines()
        return company_list

    def load_companynames_from_file(self):
        companylist = self._load_companynames_from_file(self.companynames_filepath)
        log.info(f'Loaded: {len(companylist)} company names from file')
        return companylist

    def load_all_companynames(self):
        if self.companynames_from_file and self.companynames_filepath:
            companylist = self.load_companynames_from_file()
        else:
            companylist = self.load_companynames_from_elastic()
        return companylist

    def _init_companynames(self, keyword_processor):
        for key, term_obj_list in self._get_partial_to_companynames_mappings().items():
            keyword_processor.add_keyword(key, term_obj_list)

    @staticmethod
    def _init_keyword_processor(keyword_processor):
        [keyword_processor.add_non_word_boundary(token) for token in list('åäöÅÄÖüÜ()-')]

    def _get_keyword_processor(self):
        return self.keyword_processor

    def extract_companynames(self, text, span_info=False):
        concepts = self._get_keyword_processor().extract_keywords(text, span_info=span_info)
        companylist = []
        for sublist in concepts:
            for item in sublist:
                companylist.append(item)
        return companylist
6,507
mlplaygrounds/datasets/parsers/parser.py
rennym19/ml-playgrounds
0
2171286
from json import loads
from io import StringIO

import pandas as pd
from django.core.files.base import File

from .exceptions import InvalidFormat, InvalidFile, InvalidFeature


class DatasetParser:
    FILE_FORMATS = ['csv', 'json']

    def __init__(self, file, file_format, to_format,
                 label=None, index_col=None, problem_type=None):
        self.file = file
        self.file_format = file_format
        self.to_format = to_format
        self.index_col = index_col
        self.label = label
        self.problem_type = problem_type

    def parse(self):
        if self.is_valid():
            df = self.read()
            data = self.write(df)
            return ParsedDataset(data, df, self.label, self.index_col,
                                 self.file_format, self.problem_type)
        return None

    def is_valid(self):
        return all([
            self.file_is_valid(),
            self.file_formats_are_valid(),
            self.problem_type_is_valid()
        ])

    def file_is_valid(self):
        if isinstance(self.file, (File, StringIO, str)):
            return True
        raise InvalidFile('Not a proper file')

    def file_formats_are_valid(self):
        if self.file_format not in self.FILE_FORMATS:
            raise InvalidFormat(
                'The specified file format is not supported/valid')
        if self.to_format not in self.FILE_FORMATS:
            raise InvalidFormat(
                'The desired format is not supported/valid')
        return True

    def problem_type_is_valid(self):
        if self.problem_type is None:
            raise ValueError('You must specify the problem type')
        return True

    def read(self):
        try:
            if self.file_format == self.FILE_FORMATS[0]:
                return pd.read_csv(self.file, index_col=self.index_col)
            elif self.file_format == self.FILE_FORMATS[1]:
                return pd.read_json(self.file)
            raise InvalidFormat(
                f'Can not read file format {self.file_format.upper()}')
        except (ValueError, pd.errors.ParserError, pd.errors.EmptyDataError):
            raise InvalidFile(
                f'Could not read data as a {self.file_format.upper()} file')

    def write(self, df, path_or_buffer=None):
        if self.to_format == self.FILE_FORMATS[0]:
            return df.to_csv(path_or_buffer)
        elif self.to_format == self.FILE_FORMATS[1]:
            return loads(df.to_json(orient='records'))
        raise InvalidFormat(
            f'Can not write data to {self.to_format.upper()} format')


class ParsedDataset:
    def __init__(self, parsed_data, data=None, label=None, index_col=None,
                 original_format=None, problem_type=None):
        self.parsed_data = parsed_data
        self.data = data
        self.label = label
        self.label_data = None
        self.index_col = index_col
        self.original_format = original_format
        self.problem_type = problem_type

        if self.label is not None:
            if self._label_is_valid():
                self.label_data = self.data.pop(self.label)
            else:
                raise InvalidFeature(
                    f'Could not set label: {self.label} is not a valid column')

    def _label_is_valid(self):
        if self.label in [col for col in self.data.columns]:
            return True
        return False

    def get_data(self):
        return self.parsed_data

    def get_num_records(self):
        return len(self.data)

    def get_features(self):
        return [col for col in self.data.columns if col != self.label]

    def get_label(self):
        return self.label

    def get_label_data(self):
        return self.label_data.to_list() if self.label is not None else None

    def get_index_col(self):
        return self.index_col

    def get_not_assigned_pct(self):
        na_pct = self.data.isnull().mean() * 100
        return round(na_pct.mean(), 2)

    def get_original_format(self):
        return self.original_format

    def get_y_value_counts(self):
        if self.problem_type == 'regression':
            return self._calculate_interval_distribution()
        elif self.problem_type == 'classification':
            return self._calculate_class_distribution()
        raise ValueError(f'{self.problem_type} is not a valid problem type')

    def _calculate_interval_distribution(self):
        bin_counts = pd.cut(self.label_data, bins=6).value_counts()
        return self._y_counts(bin_counts)

    def _calculate_class_distribution(self):
        class_counts = self.label_data.value_counts()
        return self._y_counts(class_counts)

    def _y_counts(self, value_counts, by_interval=False):
        """
        Transforms value_counts into a three-element dict, not only for
        simplicity purposes but also to ease plotting. Two of the elements are
        the most common y values, the other is a sum of all other y values' counts.
        """
        counts = []
        others_count = 0
        for i, (name, count) in enumerate(value_counts.to_dict().items()):
            if i > 1:
                others_count += count
            else:
                counts.append({
                    'y': str(name),
                    'count': count
                })
        if others_count > 0:
            counts.append({
                'y': 'others',
                'count': others_count
            })
        return counts
5,558
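A minimal usage sketch for DatasetParser above against an in-memory CSV (the sqft/price columns are invented for illustration; assumes the module's Django dependency is importable):

from io import StringIO

raw = StringIO("sqft,price\n50,100\n80,160\n120,250\n")
parser = DatasetParser(raw, 'csv', 'json', label='price', problem_type='regression')
parsed = parser.parse()
print(parsed.get_features())     # ['sqft']
print(parsed.get_num_records())  # 3
print(parsed.get_data())         # records-style list of dicts, via df.to_json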
nets/encoders/decoder.py
inboxedshoe/attention-learn-to-route
0
2169077
from entmax.activations import Entmax15
import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
import math
from typing import NamedTuple
from utils.tensor_functions import compute_in_batches
from nets.graph_encoder import GraphAttentionEncoder
from torch.nn import DataParallel
from utils.beam_search import CachedLookup
from utils.functions import sample_many
from entmax import entmax15, Entmax15Loss
from nets.encoders.kool_encoder import AttentionEncoder
from typing import Optional


class kool_decoder(nn.Module):

    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 hidden_dim: int = 128,
                 ff_hidden_dim: int = 512,
                 n_heads: int = 8,
                 add_bias: bool = False,
                 dropout: float = 0,
                 attention_activation: str = "softmax",
                 skip: bool = False,
                 norm_type: Optional[str] = None,
                 attention_neighborhood: int = 0,
                 num_layers: int = 2,
                 **kwargs):
        """
        Args:
            input_dim: dimension of node features
            output_dim: embedding dimension to output, 0 if no reembed is required
            hidden_dim: dimension of hidden layers in the MHA
            ff_hidden_dim: dimension of hidden layers in the FF network (0 if a single FF layer is desired instead)
            attention_activation: activation function to use: softmax (default), entmax, or sparsemax
            skip: flag to use skip (residual) connections
            add_bias: add bias to layers in the MHA, care if used with normalization
            num_layers: number of attention blocks required, where one block is an MHA layer and FF network/layer norm
            norm_type: type of norm to use
            dropout: dropout to be used in the MHA network
            n_heads: number of heads in the MHA network
            attention_neighborhood: size of node neighborhood to consider for node attention
        """
        # NB: as written in the source, this targets AttentionEncoder's base initialiser
        # and only resolves if AttentionEncoder is in this class's MRO.
        super(AttentionEncoder, self).__init__(input_dim, output_dim)

        self.attention_activation = {
            'softmax': 0,
            'entmax': 1,
            'sparse': 2
        }.get(attention_activation, "softmax")

        # TODO: implement attention neighborhood
        self.attention_neighborhood = attention_neighborhood

        self.n_heads = n_heads
        self.skip = skip
        self.norm_type = norm_type
        self.dropout = dropout
        self.add_bias = add_bias
        self.ff_hidden_dim = ff_hidden_dim
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim

        self.input_proj = None
        self.output_proj = None
        self.num_layers = num_layers
        self.attention_blocks = None

        self.create_layers(**kwargs)
        self.reset_parameters()
3,044
scripts/trawl_for_raw_image_data.py
JIC-Image-Analysis/fishtools
0
2171215
import os
import shutil
import logging

import click
import dtoolcore

logger = logging.getLogger(__file__)


def is_image(filename, image_exts=['.czi']):
    _, ext = os.path.splitext(filename)
    return ext in image_exts


@click.command()
@click.argument('root_dirpath')
@click.argument('output_base_uri')
@click.argument('output_name')
def main(root_dirpath, output_base_uri, output_name):
    logging.basicConfig(level=logging.INFO)

    dirpaths_fns = []
    for dirpath, dirnames, filenames in os.walk(root_dirpath):
        for fn in filenames:
            if is_image(fn):
                dirpaths_fns.append((dirpath, fn))

    with dtoolcore.DataSetCreator(output_name, output_base_uri) as output_ds:
        for dirpath, filename in dirpaths_fns:
            basedir = os.path.basename(dirpath)
            relpath = f"{basedir}/{filename}"
            src_abspath = os.path.join(dirpath, filename)
            dst_abspath = output_ds.prepare_staging_abspath_promise(relpath)
            shutil.copy(src_abspath, dst_abspath)


if __name__ == "__main__":
    main()
1,106
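The three click arguments above are positional, so a run looks like this (the paths and dataset name are illustrative):

python trawl_for_raw_image_data.py /data/raw_czi file:///datasets fish_raw_images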
src/OOXMLParser/xlsxsupport.py
rjsmith/robot-ooxml
3
2171218
# Copyright (c) 2014 <NAME>, https://github.com/rjsmith
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from robot.errors import DataError

try:
    from openpyxl import (load_workbook, Workbook)
except ImportError:
    raise DataError("Using xlsx test data requires having "
                    "'openpyxl' module installed.")


class XlsxWorkbookReader:
    '''
    Wrapper class around openpyxl API calls to inspect structure of xlsx file
    '''

    def __init__(self, xlsxfile):
        self.workbook = load_workbook(xlsxfile, use_iterators=True)

    def getTestSheet(self):
        ''' Returns reference to first sheet in workbook '''
        return self.workbook.get_sheet_by_name(self.workbook.get_sheet_names()[0])

    def getTextOfRowCells(self, row):
        cellTexts = []
        for cell in row:
            value = cell.internal_value
            if value is None:
                value = ''
            cellTexts.append(unicode(value))
        return cellTexts
1,541
Exercícios/Ex.13.py
mattheuslima/Projetos-Curso_Python
0
2171309
# Challenge 13: Write an algorithm that reads an employee's salary and shows
# the new salary with a 15% raise (this version reads the percentage as input).
print('{:=^20}'.format('Desafio 13'))
sal = float(input('Qual o salário do funcionário: ' + 'R$'))
percen = float(input('Percentual de aumento: '))
novo_sal = float(sal * (1 + percen / 100))
print('O salário R${:.2F} com {}% de reajuste, vai para R${:.2F}'.format(sal, percen, novo_sal))
388
detection/scrfd/search_tools/search_train.py
qaz734913414/insightface
12,377
2170506
import os, sys


def train(group, prefix, idx, gpuid):
    assert idx >= 0
    cmd = "CUDA_VISIBLE_DEVICES='%d' PORT=%d bash ./tools/dist_train.sh ./configs/%s/%s_%d.py 1 --no-validate" % (gpuid, 29100 + idx, group, prefix, idx)
    print(cmd)
    os.system(cmd)


gpuid = int(sys.argv[1])
idx_from = int(sys.argv[2])
idx_to = int(sys.argv[3])
group = 'scrfdgen'
if len(sys.argv) > 4:
    group = sys.argv[4]

for idx in range(idx_from, idx_to):
    train(group, group, idx, gpuid)
475
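Given the argv parsing above (gpuid, idx_from, idx_to, optional group), a run such as the following trains configs scrfdgen_0.py through scrfdgen_7.py on GPU 0, one dist_train.sh launch per index on ports 29100-29107:

python search_tools/search_train.py 0 0 8 scrfdgen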
13.py
ednl/aoc2017
0
2171362
layer = {}
with open("13.txt") as f:
    for line in f:
        a = list(map(int, line.strip().split(": ")))
        layer[a[0]] = a[1]

# Part 1
severity = 0
for d, r in layer.items():
    if d % (2 * (r - 1)) == 0:
        severity += d * r
print(severity)

# Part 2
delay = 0
caught = True
while caught:
    delay += 1
    for d, r in layer.items():
        if caught := (((d + delay) % (2 * (r - 1))) == 0):
            break
print(delay)
443
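Both parts lean on the scanner's round trip: a scanner of range r sweeps positions 0..r-1 and back, so it sits at position 0 exactly every 2*(r-1) picoseconds, and a packet entering with delay t reaches depth d at time d + t. A sanity check against the AoC 2017 day 13 sample (expected answers 24 and 10):

sample = {0: 3, 1: 2, 4: 4, 6: 4}
assert sum(d * r for d, r in sample.items() if d % (2 * (r - 1)) == 0) == 24   # part 1
assert min(t for t in range(100) if all((d + t) % (2 * (r - 1)) for d, r in sample.items())) == 10  # part 2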
render_blender.py
shamitlal/stanford-shapenet-renderer
0
2171099
# A simple script that uses blender to render views of a single object by rotating the camera around it.
# Also produces depth map at the same time.
#
# Example:
# blender --background --python mytest.py -- --views 10 /path/to/my.obj
#
'''
blender --background --python render_blender.py -- --output_folder /home/mprabhud/dataset/stanford_shapenet /home/mprabhud/dataset/preprocessed_shapenet_4/02958343_ffbf897d9867fadff9a62a8acc9e8cfe.obj

1. Single obj rendering
blender --background --python render_blender.py -- --output_folder /home/mprabhud/dataset/stanford_shapenet /home/mprabhud/dataset/preprocessed_shapenet_4/02958343_ffbf897d9867fadff9a62a8acc9e8cfe.obj --format OPEN_EXR --color_depth 16
'''

import argparse, sys, os
import ipdb
st = ipdb.set_trace

parser = argparse.ArgumentParser(description='Renders given obj file by rotating a camera around it.')
parser.add_argument('--views', type=int, default=30,
                    help='number of views to be rendered')
parser.add_argument('obj', type=str,
                    help='Path to the obj file to be rendered.')
parser.add_argument('--output_folder', type=str, default='/tmp',
                    help='The path the output will be dumped to.')
parser.add_argument('--scale', type=float, default=1,
                    help='Scaling factor applied to model. Depends on size of mesh.')
parser.add_argument('--remove_doubles', type=bool, default=True,
                    help='Remove double vertices to improve mesh quality.')
parser.add_argument('--edge_split', type=bool, default=True,
                    help='Adds edge split filter.')
parser.add_argument('--depth_scale', type=float, default=1.4,
                    help='Scaling that is applied to depth. Depends on size of mesh. Try out various values until you get a good result. Ignored if format is OPEN_EXR.')
parser.add_argument('--color_depth', type=str, default='16',
                    help='Number of bit per channel used for output. Either 8 or 16.')
parser.add_argument('--format', type=str, default='OPEN_EXR',
                    help='Format of files generated. Either PNG or OPEN_EXR')
parser.add_argument('--radius', type=float, default=8.0,
                    help='Radius at which camera is placed')

argv = sys.argv[sys.argv.index("--") + 1:]
args = parser.parse_args(argv)

import bpy

# Set up rendering of depth map.
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links

# Add passes for additionally dumping albedo and normals.
bpy.context.scene.render.layers["RenderLayer"].use_pass_normal = True
bpy.context.scene.render.layers["RenderLayer"].use_pass_color = True
bpy.context.scene.render.image_settings.file_format = args.format
bpy.context.scene.render.image_settings.color_depth = args.color_depth

# Clear default nodes
for n in tree.nodes:
    tree.nodes.remove(n)

# Create input render layer node.
render_layers = tree.nodes.new('CompositorNodeRLayers')

depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
depth_file_output.label = 'Depth Output'
if args.format == 'OPEN_EXR':
    links.new(render_layers.outputs['Depth'], depth_file_output.inputs[0])
else:
    # Remap as other types can not represent the full range of depth.
    map = tree.nodes.new(type="CompositorNodeMapValue")
    # Size is chosen kind of arbitrarily, try out until you're satisfied with resulting depth map.
    map.offset = [-0.7]
    map.size = [args.depth_scale]
    map.use_min = True
    map.min = [0]
    links.new(render_layers.outputs['Depth'], map.inputs[0])
    links.new(map.outputs[0], depth_file_output.inputs[0])

scale_normal = tree.nodes.new(type="CompositorNodeMixRGB")
scale_normal.blend_type = 'MULTIPLY'
scale_normal.use_alpha = True
scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1)
links.new(render_layers.outputs['Normal'], scale_normal.inputs[1])

bias_normal = tree.nodes.new(type="CompositorNodeMixRGB")
bias_normal.blend_type = 'ADD'
bias_normal.use_alpha = True
bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0)
links.new(scale_normal.outputs[0], bias_normal.inputs[1])

normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
normal_file_output.label = 'Normal Output'
links.new(bias_normal.outputs[0], normal_file_output.inputs[0])

albedo_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
albedo_file_output.label = 'Albedo Output'
links.new(render_layers.outputs['Color'], albedo_file_output.inputs[0])

# Delete default cube
bpy.data.objects['Cube'].select = True
bpy.ops.object.delete()

bpy.ops.import_scene.obj(filepath=args.obj)

for object in bpy.context.scene.objects:
    if object.name in ['Camera', 'Lamp']:
        continue
    bpy.context.scene.objects.active = object
    if args.scale != 1:
        bpy.ops.transform.resize(value=(args.scale, args.scale, args.scale))
        bpy.ops.object.transform_apply(scale=True)
    if args.remove_doubles:
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.remove_doubles()
        bpy.ops.object.mode_set(mode='OBJECT')
    if args.edge_split:
        bpy.ops.object.modifier_add(type='EDGE_SPLIT')
        bpy.context.object.modifiers["EdgeSplit"].split_angle = 1.32645
        bpy.ops.object.modifier_apply(apply_as='DATA', modifier="EdgeSplit")

# Make light just directional, disable shadows.
lamp = bpy.data.lamps['Lamp']
lamp.type = 'SUN'
lamp.shadow_method = 'NOSHADOW'
# Possibly disable specular shading:
lamp.use_specular = False

# Add another light source so stuff facing away from light is not completely dark
bpy.ops.object.lamp_add(type='SUN')
lamp2 = bpy.data.lamps['Sun']
lamp2.shadow_method = 'NOSHADOW'
lamp2.use_specular = False
lamp2.energy = 0.015
bpy.data.objects['Sun'].rotation_euler = bpy.data.objects['Lamp'].rotation_euler
bpy.data.objects['Sun'].rotation_euler[0] += 180


def obj_centered_camera_pos(dist, azimuth_deg, elevation_deg):
    import math
    phi = float(elevation_deg) / 180 * math.pi
    theta = float(azimuth_deg) / 180 * math.pi
    x = (dist * math.cos(theta) * math.cos(phi))
    y = (dist * math.sin(theta) * math.cos(phi))
    z = (dist * math.sin(phi))
    return x, y, z


def parent_obj_to_camera(b_camera):
    origin = (0, 0, 0)
    b_empty = bpy.data.objects.new("Empty", None)
    b_empty.location = origin
    b_camera.parent = b_empty  # setup parenting

    scn = bpy.context.scene
    scn.objects.link(b_empty)
    scn.objects.active = b_empty
    return b_empty


scene = bpy.context.scene
scene.render.resolution_x = 256
scene.render.resolution_y = 256
scene.render.resolution_percentage = 100
scene.render.alpha_mode = 'TRANSPARENT'
cam = scene.objects['Camera']
cam.location = (0 + 5, 1 + 5, 0.6 + 5)
cam_constraint = cam.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
b_empty = parent_obj_to_camera(cam)
cam_constraint.target = b_empty

# model_identifier = os.path.split(os.path.split(args.obj)[0])[1]
model_identifier = args.obj.split('/')[-1][:-4]
# st()
fp = os.path.join(args.output_folder, model_identifier, model_identifier)
scene.render.image_settings.file_format = 'PNG'  # set output format to .png

from math import radians

stepsize = 360.0 / args.views
rotation_mode = 'XYZ'

for output_node in [depth_file_output]:  # , normal_file_output, albedo_file_output]:
    output_node.base_path = ''

radius = args.radius
THETAS = list(range(0, 360, 45))
PHIS = list(range(20, 80, 20))
i = 0
for theta in THETAS:
    for phi in PHIS:
        i += 1
        # for i in range(0, args.views):
        print("Rotation {}, {}".format((stepsize * i), radians(stepsize * i)))
        cam.location = obj_centered_camera_pos(radius, theta, phi)
        # scene.render.filepath = fp + '_r_{0:03d}'.format(int(i * stepsize))
        scene.render.filepath = fp + '_{}_{}_'.format(theta, phi)
        # st()
        depth_file_output.file_slots[0].path = scene.render.filepath + "_depth.png"
        normal_file_output.file_slots[0].path = scene.render.filepath + "_normal.png"
        albedo_file_output.file_slots[0].path = scene.render.filepath + "_albedo.png"
        # bpy.context.scene.render.film_transparent = True
        bpy.context.scene.render.image_settings.color_mode = 'RGBA'
        bpy.ops.render.render(write_still=True)  # render still
        # st()
        # b_empty.rotation_euler[2] += radians(stepsize)
8,347
app/temperature_functions.py
seraph776/TemperatureConverter
1
2171384
#!/usr/bin/env python3
"""
created: 2022-03-10 19:02:45
@author: seraph★1001100
contact: <EMAIL>
project: Temperature Converter
details: Converts Fahrenheit to Celsius and Celsius to Fahrenheit.
"""

import sys


def fahrenheit_to_celsius(fahrenheit: float) -> float:
    """Converts fahrenheit to celsius.
    :param fahrenheit: temperature in fahrenheit.
    :return: temperature in celsius
    """
    celsius = (fahrenheit - 32) * 5 / 9
    return celsius


def celsius_to_fahrenheit(celsius: float) -> float:
    """Converts celsius to fahrenheit.
    :param celsius: temperature in celsius.
    :return: temperature in fahrenheit
    """
    fahrenheit = celsius * 9 / 5 + 32
    return fahrenheit


def get_metric() -> [str, None]:
    """Allows users to select either fahrenheit, celsius or quit the program.
    :return: either fahrenheit, celsius or None
    """
    metric = ''
    while True:
        choice = input('Do you want to convert (f)ahrenheit, (c)elsius, or (q)uit?:\n> ')
        if choice not in 'fcq':
            print(error_message('Invalid Input! Enter either (f), (c) or (q)!'))
            continue
        else:
            break
    if choice == 'f':
        metric = 'fahrenheit'
    elif choice == 'c':
        metric = 'celsius'
    else:
        metric = None
    return metric


def error_message(s: str) -> str:
    """Changes text to red color. Used for errors."""
    return '\033[1;91m' + s + '\033[0m'


def output(s: str) -> str:
    """Returns the string output cyan"""
    return '\033[96m' + s + '\033[0m'


def is_valid(n: str) -> bool:
    """Tests if n is float or int"""
    try:
        float(n) or int(n)
        return True
    except ValueError:
        return False


def get_temperature(metric) -> float:
    """Asks for and returns temperature input"""
    while True:
        temperature = input(f'Enter the temperature in {metric}:\n> ')
        if not is_valid(temperature):
            print(error_message('Invalid input! Integer or float input only.'))
            continue
        else:
            break
    return float(temperature)


def calculate_temperature() -> None:
    """Main function of program. Calculates temperature conversion."""
    while True:
        metric = get_metric()
        if metric is None:
            print('Goodbye!')
            sys.exit()
        temperature = get_temperature(metric)
        if metric == 'fahrenheit':
            print(output(f'{temperature}° {metric} == {fahrenheit_to_celsius(temperature)}° celsius'))
        elif metric == 'celsius':
            print(output(f'{temperature}° {metric} == {celsius_to_fahrenheit(temperature)}° fahrenheit'))
        print()
2,677
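A quick numeric check of the two conversion formulas above, using water's freezing and boiling points:

assert fahrenheit_to_celsius(32) == 0.0     # (32 - 32) * 5/9
assert fahrenheit_to_celsius(212) == 100.0  # (212 - 32) * 5/9
assert celsius_to_fahrenheit(100) == 212.0  # 100 * 9/5 + 32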
tests/test_science_utils.py
caffery-chen/oopt-gnpy
0
2170698
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Checks that RamanFiber propagates properly the spectral information.
In this way, also the RamanSolver and the NliSolver are tested.
"""

from pathlib import Path

from pandas import read_csv
from numpy.testing import assert_allclose
from numpy import array
import pytest

from gnpy.core.info import create_input_spectral_information, create_arbitrary_spectral_information
from gnpy.core.elements import Fiber, RamanFiber
from gnpy.core.parameters import SimParams
from gnpy.tools.json_io import load_json

TEST_DIR = Path(__file__).parent


def test_fiber():
    """ Test the accuracy of propagating the Fiber."""
    fiber = Fiber(**load_json(TEST_DIR / 'data' / 'test_science_utils_fiber_config.json'))

    # fix grid spectral information generation
    spectral_info_input = create_input_spectral_information(
        f_min=191.3e12, f_max=196.1e12, roll_off=0.15, baud_rate=32e9, power=1e-3, spacing=50e9)

    # propagation
    spectral_info_out = fiber(spectral_info_input)

    p_signal = spectral_info_out.signal
    p_nli = spectral_info_out.nli

    expected_results = read_csv(TEST_DIR / 'data' / 'test_fiber_fix_expected_results.csv')
    assert_allclose(p_signal, expected_results['signal'], rtol=1e-3)
    assert_allclose(p_nli, expected_results['nli'], rtol=1e-3)

    # flex grid spectral information generation
    frequency = 191e12 + array([0, 50e9, 150e9, 225e9, 275e9])
    slot_width = array([37.5e9, 50e9, 75e9, 50e9, 37.5e9])
    baud_rate = array([32e9, 42e9, 64e9, 42e9, 32e9])
    signal = 1e-3 + array([0, -1e-4, 3e-4, -2e-4, +2e-4])
    spectral_info_input = create_arbitrary_spectral_information(
        frequency=frequency, slot_width=slot_width, signal=signal, baud_rate=baud_rate, roll_off=0.15)

    # propagation
    spectral_info_out = fiber(spectral_info_input)

    p_signal = spectral_info_out.signal
    p_nli = spectral_info_out.nli

    expected_results = read_csv(TEST_DIR / 'data' / 'test_fiber_flex_expected_results.csv')
    assert_allclose(p_signal, expected_results['signal'], rtol=1e-3)
    assert_allclose(p_nli, expected_results['nli'], rtol=1e-3)


@pytest.mark.usefixtures('set_sim_params')
def test_raman_fiber():
    """ Test the accuracy of propagating the RamanFiber."""
    # spectral information generation
    spectral_info_input = create_input_spectral_information(
        f_min=191.3e12, f_max=196.1e12, roll_off=0.15, baud_rate=32e9, power=1e-3, spacing=50e9)

    SimParams.set_params(load_json(TEST_DIR / 'data' / 'sim_params.json'))
    fiber = RamanFiber(**load_json(TEST_DIR / 'data' / 'test_science_utils_fiber_config.json'))

    # propagation
    spectral_info_out = fiber(spectral_info_input)

    p_signal = spectral_info_out.signal
    p_ase = spectral_info_out.ase
    p_nli = spectral_info_out.nli

    expected_results = read_csv(TEST_DIR / 'data' / 'test_raman_fiber_expected_results.csv')
    assert_allclose(p_signal, expected_results['signal'], rtol=1e-3)
    assert_allclose(p_ase, expected_results['ase'], rtol=1e-3)
    assert_allclose(p_nli, expected_results['nli'], rtol=1e-3)
3,273
App/Api/admin/endpoints/users.py
pyworksasia/pyworks
0
2171305
from fastapi import APIRouter, Depends, HTTPException, Response, status

from App.Http.responses.user_response import (
    UserItemResponse,
    UserPaginationResponse
)
from App.Http.requests.user_request import (
    UserCreateRequest,
    UserUpdateRequest
)
from App.Repositories.user_repository import UserRepository

router = APIRouter()
userRepository = UserRepository()


@router.get("/", response_model=UserPaginationResponse)
async def read_users(per_page: int = 10, page: int = 1):
    users = userRepository.paginate(per_page, page)
    return users


@router.get("/{id}", response_model=UserItemResponse)
async def read_user(id: int):
    user = userRepository.find(id)
    return user


@router.post("/", response_model=UserItemResponse, status_code=status.HTTP_201_CREATED)
async def create_user(request: UserCreateRequest):
    user = userRepository.create(request)
    # return request
    return user


@router.put("/{id}", response_model=UserItemResponse)
async def update_user(id: int, request: UserUpdateRequest):
    user = userRepository.update(id, request)
    return user


@router.delete("/{id}")
async def delete_item(id: int, response: Response):
    is_deleted = userRepository.delete(id)
    if is_deleted is None:
        response.status_code = status.HTTP_404_NOT_FOUND
        message = 'User {id} is not found!'.format(id=id)
    elif is_deleted:
        message = 'Delete user successfully'
    else:
        # The original only handled None and True, which left `message`
        # unbound (NameError) when the repository returned False.
        message = 'Delete user failed'
    return {
        'message': message
    }
1,476
ope/parallel.py
camillescott/fuckitall
4
2169858
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) <NAME>, 2019
# File   : parallel.py
# License: MIT
# Author : <NAME> <<EMAIL>>
# Date   : 11.12.2019

import os
import subprocess

from .utils import which

MIN_VERSION = 20171222


def parallel_fasta(input_filename, n_jobs, check_dep=True):
    '''Given an input FASTA source, target, shell command, and number of jobs,
    construct a gnu-parallel command to act on the sequences.

    Args:
        input_filename (str): The source FASTA.
        n_jobs (int): Number of cores or nodes to split to.
    Returns:
        list: The constructed command as an argv list.
    '''
    exc = which('parallel') if not check_dep else check_parallel()
    cmd = [exc, '-a', input_filename, '--block', '-1', '--pipepart',
           '--recstart', '\'>\'', '--gnu', '-j', str(n_jobs)]
    return cmd


def parallel_fasta_pipe(n_jobs, file_size_kb=10, check_dep=True):
    exc = which('parallel') if not check_dep else check_parallel()
    block_size = file_size_kb // n_jobs
    cmd = [exc, '--block', f'{block_size}K', '--pipe',
           '--recstart', '\'>\'', '--gnu', '-j', str(n_jobs)]
    return cmd


def check_parallel():
    parallel = which('parallel')
    if parallel is None:
        raise RuntimeError('parallel not found.')
    else:
        try:
            version_string = subprocess.check_output(['parallel', '--version'])
        except subprocess.CalledProcessError as e:
            # The continuation strings need the f prefix as well, otherwise
            # the placeholders are emitted literally.
            raise RuntimeError(f'Error checking parallel '
                               f'version: [{e.returncode}] {e.output}')
        except OSError as e:
            raise RuntimeError(f'Error checking parallel version: '
                               f'[{e.errno}] {str(e)}')
        else:
            version = version_string.strip().split()[2]
            if int(version) < MIN_VERSION:
                raise RuntimeError(f'parallel version {version} < {MIN_VERSION}, '
                                   'please update')
    return parallel
2,193
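For reference, the argv list that parallel_fasta() builds (a sketch; it assumes which('parallel') resolves to the bare name 'parallel' rather than an absolute path):

from ope.parallel import parallel_fasta

cmd = parallel_fasta('reads.fa', 8, check_dep=False)
assert cmd == ['parallel', '-a', 'reads.fa', '--block', '-1', '--pipepart',
               '--recstart', "'>'", '--gnu', '-j', '8']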
tests/test_valid_properties.py
git-afsantos/hpl-specs
3
2171274
# -*- coding: utf-8 -*-

# SPDX-License-Identifier: MIT
# Copyright © 2021 <NAME>

###############################################################################
# Imports
###############################################################################

import logging
from sys import exit

from hpl.parser import property_parser
from hpl.exceptions import HplSanityError, HplSyntaxError, HplTypeError

###############################################################################
# Property Examples
###############################################################################

GOOD_PROPERTIES = [
    'globally: some topic {int < 1 and float < 2 and string = "hello"}',
    "globally: no topic within 1s",
    "globally: input causes output",
    "globally: input causes output within 1s",
    "globally: output requires input",
    "globally: output requires input within 100 ms",
    """after ~events/bumper {state = PRESSED}:
        some ~cmd_vel {linear.x < 0.0 and angular.z = 0.0}""",
    "after input: no output",
    "globally: some topic {m.int in [0 to 10]!}",
    "globally: some topic {not int in [0 to 10]}",
    "globally: some topic {float_array[0] < float_array[1]}",
    "globally: some topic {forall i in [0 to len(int_array)]!: int_array[@i] > 0}",
    r"globally: some topic {exists x in int_array: @x > 0}",
    "globally: some topic {len(twist_array) > 0}",
    "until input: some output",
    "after input as M: some output {x = @M.x}",
    "globally: no /joy_teleop/joy {not buttons[0] in {0, 1}}",
    "globally: no /agrob/agrob_mode {not mode in {0,1,2,3}}",
    """after (p1 or p2 as P2 or p3 {phi} or p4 as P4 {phi})
        until (q1 or q2 as Q2 or q3 {phi} or q4 as Q4 {phi}):
        (a1 or a2 as A2 or a3 {phi} or a4 as A4 {phi})
        causes (b1 or b2 as B2 or b3 {phi} or b4 as B4 {phi})
        within 100 ms"""
]

###############################################################################
# Test Code
###############################################################################


def test_valid_properties():
    parser = property_parser()
    for test_str in GOOD_PROPERTIES:
        # The original used Python 2 print statements, which are a syntax
        # error in Python 3; converted to print() calls.
        print("\n #", repr(test_str))
        try:
            ast = parser.parse(test_str)
            print("[Parsing] OK (expected)")
            print("")
            print(repr(ast))
        except (HplSanityError, HplSyntaxError, HplTypeError) as e:
            print("[Parsing] FAIL (unexpected)")
            print("  >>", str(e))
            return 1
    print("\nAll", str(len(GOOD_PROPERTIES)), "tests passed.")
    return 0


def main():
    logging.basicConfig(level=logging.DEBUG)
    if test_valid_properties():
        assert False
    return 0


if __name__ == "__main__":
    exit(main())
2,752
flask_app.py
conderodrigo98/spynotes
0
2171368
from flask import Flask, render_template, request, g

import sqlite3
import hashlib
from datetime import datetime

# Flask('_name_') in the original passed the literal string '_name_';
# the dunder __name__ is what Flask expects.
app = Flask(__name__)


def get_db():
    if 'db' not in g:
        g.db = sqlite3.connect('data.db')
        cur = g.db.cursor()
    return g.db


@app.teardown_appcontext
def close_db(e=None):
    db = g.pop('db', None)
    if db is not None:
        db.close()


@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')


@app.route('/write', methods=['GET', 'POST'])
def write():
    if request.method == 'GET':
        return render_template('/write.html')
    else:
        db = get_db()
        cur = db.cursor()
        cur.execute('''SELECT COUNT(*) FROM messages''')
        n = cur.fetchone()[0]
        now = datetime.now()
        my_str = str(n) + str(now)
        hash_ = hashlib.sha1(my_str.encode())
        key = hash_.hexdigest()
        print(key)
        cur.execute('''INSERT INTO messages (msg, key) VALUES (?, ?)''',
                    [request.form['msg'], key])
        db.commit()
        return render_template('/success.html', key=key)


@app.route('/read', methods=['GET', 'POST'])
def read():
    if request.method == 'GET':
        return render_template('/read.html')
    else:
        db = get_db()
        cur = db.cursor()
        cur.execute('''SELECT msg FROM messages WHERE key=?''',
                    (str(request.form['key']),))
        fetched = cur.fetchone()
        if fetched is not None:
            msg = fetched[0]
            cur.execute('''DELETE FROM messages WHERE key=?''',
                        (str(request.form['key']),))
            db.commit()
            return render_template('msg.html', msg=msg)
        else:
            return "ERROR!"


@app.route('/test')
def test():
    return 'HOLA!'
1,735
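The app assumes data.db already contains a messages table; a plausible one-off bootstrap (the exact schema is not in the source, so the column types here are an assumption):

import sqlite3

# Hypothetical initialisation for the spynotes app above.
conn = sqlite3.connect('data.db')
conn.execute('CREATE TABLE IF NOT EXISTS messages (msg TEXT, key TEXT)')
conn.commit()
conn.close()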
api/sentiment_predict.py
bhattbhuwan13/fuseai-training
1
2171096
from __future__ import absolute_import

import os
import sys
import argparse
import time
import json

print(os.path.realpath(__file__))
full_absolute_location = os.path.realpath(__file__)
full_absolute_location = full_absolute_location.split("/")
full_absolute_location = "/".join(full_absolute_location[:-2])
sys.path.append(os.path.join(full_absolute_location, 'src'))

from models.predict_model import make_predictions

from flask import Flask, render_template, request, redirect, url_for
from flask_wtf import Form
from wtforms import TextField
from flask import jsonify
from bson.json_util import dumps
from bson.json_util import loads
from pymongo import MongoClient

app = Flask(__name__, template_folder="./templates")

# MONGO_URL = "mongodb://127.0.0.1:27017"
# MONGO_URL = 'mongodb://' + str(host.docker.internal) + ':27017'
IP_ADDR = '192.168.1.67'
MONGO_URL = 'mongodb://' + IP_ADDR + ':27017'
print("Ip address for mongo is {}".format(MONGO_URL))


@app.route('/')
def hello_world():
    return 'Hello, World!'


@app.route('/get_sentiment', methods=['GET', 'POST'])
def find_sentiment():
    if request.method == 'POST':
        # Form being submitted; grab data from form.
        text = request.form['text']
        a = time.monotonic()
        sentiment, probability = make_predictions(text)
        sentiment = sentiment[0]
        b = time.monotonic()
        time_elapsed = b - a

        database_item = {
            'query': text,
            'sentiment': sentiment,
            'eta': time_elapsed,
            'confidence': probability
        }

        # inserting to database
        # client = MongoClient(MONGO_URL)
        client = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'], 27017)
        db = client['sentiments']
        sentiments = db.sentiments
        sentiments.insert_one(database_item)

        return render_template('submit_text.html', message=sentiment)

    return render_template('submit_text.html')


@app.route('/predict_sentiment', methods=['POST'])
def predict_sentiment():
    if request.method == 'POST':
        # JSON request being submitted; grab the text payload.
        text = request.args.get("text", '')
        a = time.monotonic()
        print("printing the text {}".format(request.get_json(force=True)['text']))
        text = request.get_json(force=True)['text']
        sentiment, probability = make_predictions(text)
        sentiment = sentiment[0]
        b = time.monotonic()
        time_elapsed = b - a

        response = {
            'sentiment': sentiment,
            'eta': time_elapsed,
            'confidence': probability
        }
        response = jsonify(response)

        database_item = {
            'query': text,
            'sentiment': sentiment,
            'eta': time_elapsed,
            'confidence': probability
        }

        # inserting to database
        # client = MongoClient(MONGO_URL)
        client = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'], 27017)
        db = client['sentiments']
        sentiments = db.sentiments
        sentiments.insert_one(database_item)

        return response


@app.route('/get_all_items', methods=['GET'])
def get_all_items():
    if request.method == 'GET':
        # getting all requests from the database
        # client = MongoClient(MONGO_URL)
        client = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'], 27017)
        db = client['sentiments']
        sentiments = db.sentiments

        items = []
        for item in sentiments.find():
            item.pop('_id')
            items.append(item)
        resp = jsonify(items)
        return resp


if __name__ == '__main__':
    app.run(host='0.0.0.0')
    # app.run()
3,855
evap/results/tests/test_exporters.py
JenniferStamm/EvaP
0
2168637
from django.test import TestCase

from evap.evaluation.models import Semester
from evap.results.exporters import ExcelExporter


class TestExporters(TestCase):
    def test_grade_color_calculation(self):
        exporter = ExcelExporter(Semester())
        self.assertEqual(exporter.STEP, 0.2)
        self.assertEqual(exporter.normalize_number(1.94999999999), 1.8)
        # self.assertEqual(exporter.normalize_number(1.95), 2.0)  # floats ftw
        self.assertEqual(exporter.normalize_number(1.95000000001), 2.0)
        self.assertEqual(exporter.normalize_number(1.99999999999), 2.0)
        self.assertEqual(exporter.normalize_number(2.0), 2.0)
        self.assertEqual(exporter.normalize_number(2.00000000001), 2.0)
        self.assertEqual(exporter.normalize_number(2.1), 2.0)
        self.assertEqual(exporter.normalize_number(2.149999999999), 2.0)
        # self.assertEqual(exporter.normalize_number(2.15), 2.2)  # floats again
        self.assertEqual(exporter.normalize_number(2.150000000001), 2.2)
        self.assertEqual(exporter.normalize_number(2.8), 2.8)
1,074
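The two commented-out assertions fail because 1.95 and 2.15 have no exact binary representation and land just below the rounding boundary; a quick illustration:

from decimal import Decimal

# Both literals sit a hair below their decimal value in IEEE-754 doubles,
# so normalize_number() rounds them down rather than up.
print(Decimal(1.95))  # slightly below 1.95
print(Decimal(2.15))  # slightly below 2.15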
tests/api/test_ts_operation_rate_fix.py
NOWUM/EnSysMod
1
2171016
from typing import Dict

from fastapi import status
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session

from ensysmod.schemas import OperationRateFixCreate
from tests.utils import data_generator as data_gen
from tests.utils.utils import random_float_numbers


def get_random_fix_operation_rate_create(db: Session) -> OperationRateFixCreate:
    source = data_gen.fixed_existing_energy_sink(db)
    region = data_gen.fixed_existing_region(db)
    return OperationRateFixCreate(
        ref_dataset=region.ref_dataset,
        component=source.component.name,
        region=region.name,
        fix_operation_rates=random_float_numbers(8760)
    )


def test_create_fix_operation_rate(client: TestClient, normal_user_headers: Dict[str, str], db: Session):
    """
    Test creating a fix operation rate time series.
    """
    create_request = get_random_fix_operation_rate_create(db)
    response = client.post("/fix-operation-rates/", headers=normal_user_headers, data=create_request.json())
    assert response.status_code == status.HTTP_200_OK

    created_ts = response.json()
    assert created_ts["component"]["name"] == create_request.component
    assert created_ts["region"]["name"] == create_request.region
    assert created_ts["fix_operation_rates"] == create_request.fix_operation_rates
1,327
cfgov/v1/migrations/0229_browsepage_share_and_print.py
adebisi-aden/consumerfinance.gov
37
2169732
# Generated by Django 2.2.13 on 2020-08-21 14:35

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('v1', '0228_add_audio_player'),
    ]

    operations = [
        migrations.AddField(
            model_name='browsepage',
            name='share_and_print',
            field=models.BooleanField(default=False, help_text='Include share and print buttons above page content.'),
        ),
    ]
459
Face Reconstruction/Facial Reconstruction and Dense Alignment/python/facerda.py
swapnilgarg7/Face-X
187
2170454
import onnxruntime as ort
import numpy as np
import cv2
import datetime


class FaceRDA(object):
    def __init__(self, model_path, bfm=False):
        self.ort_session = ort.InferenceSession(model_path)
        self.input_name = self.ort_session.get_inputs()[0].name
        self.bfm = bfm

    def __call__(self, img, roi_box):
        h, w = img.shape[:2]
        if self.bfm:
            image = cv2.resize(img, (120, 120))
        else:
            image = cv2.resize(img, (112, 112))
        input_data = ((image - 127.5) / 128).transpose((2, 0, 1))
        tensor = input_data[np.newaxis, :, :, :].astype("float32")
        begin = datetime.datetime.now()
        output = self.ort_session.run(None, {self.input_name: tensor})[0][0]
        end = datetime.datetime.now()
        print("facerda cpu times = ", end - begin)
        if self.bfm:
            vertices = self.decode_bfm(output, w, h, roi_box)
        else:
            vertices = self.decode(output, w, h, roi_box)
        return vertices

    def decode(self, output, w, h, roi_box):
        x1, x2, y1, y2 = w / 2, w / 2, -h / 2, h / 2
        v = np.array([[x1, 0, 0, x2],
                      [0, y1, 0, y2],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1]])
        vertices = v @ output
        sx, sy, ex, ey = roi_box
        vertices[0, :] = vertices[0, :] + sx
        vertices[1, :] = vertices[1, :] + sy
        return vertices

    def decode_bfm(self, output, w, h, roi_box):
        print(output.shape)
        # move to center of image
        output[0, :] = output[0, :] + 120 / 2
        output[1, :] = output[1, :] + 120 / 2
        # flip vertices along y-axis.
        output[1, :] = 120 - output[1, :] - 1
        vertices = output
        sx, sy, ex, ey = roi_box
        scale_x = (ex - sx) / 120
        scale_y = (ey - sy) / 120
        vertices[0, :] = vertices[0, :] * scale_x + sx
        vertices[1, :] = vertices[1, :] * scale_y + sy
        return vertices
1,983
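A minimal driver for the class above (a sketch; the model filename, image, and ROI box are placeholders, not part of the original):

import cv2
from facerda import FaceRDA

model = FaceRDA("face_rda.onnx", bfm=False)  # hypothetical ONNX file
img = cv2.imread("face.jpg")                 # any BGR image with a face
roi_box = (80, 60, 240, 220)                 # (sx, sy, ex, ey) of the detected face
vertices = model(img, roi_box)
print(vertices.shape)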
harvest/models/schemas.py
mouradmourafiq/harvest
0
2171239
class SliceSchema(object):
    """
    The `SliceSchema` class represents the schema of the data slice to query.
    """

    def __init__(self, table, metrics, dimensions, filters, group_by, operations):
        self.table = table
        self.metrics = metrics
        self.dimensions = dimensions
        self.filters = filters
        self.group_by = group_by
        self.operations = operations
        self.metric_val_schemas, self.metric_agg_schemas, metric_tables = self._get_metric_schemas()
        self.dimension_schemas, dimension_tables = self._get_dimension_schemas()
        self.filter_schemas, filter_tables = self._get_filter_schemas()
        self.group_by_schema = self._get_group_by_schema()
        self.operation_schemas = self._get_operation_schemas()
        self.used_tables = metric_tables | dimension_tables | filter_tables
        # NOTE: `self.joins` is read here but never assigned in this class;
        # it is presumably provided by a subclass or mixin.
        self.joins = [j for j in self.joins or [] if j.table in self.used_tables]

        # assert that the schema representation is correct
        # (the original wrapped each assert in a parenthesised tuple,
        # which is always truthy and therefore never fired)
        assert not (dimensions and group_by), \
            'no dimension is allowed if group by is passed.'
        assert not (self.metric_val_schemas and self.metric_agg_schemas), \
            'no metric value is allowed if metric aggregations are passed.'
        assert (not any([self.group_by, self.metric_agg_schemas])
                or all([self.group_by, self.metric_agg_schemas])), \
            'no metric aggregation is allowed if no group by is passed.'

        self.query = self._query()

    def _get_metric_schemas(self):
        metric_agg_schemas = [m.schema for m in self.metrics if m.is_aggregation]
        metric_val_schemas = [m.schema for m in self.metrics if not m.is_aggregation]
        metric_tables = {t for m in self.metrics for t in m.tables}
        return metric_val_schemas, metric_agg_schemas, metric_tables

    def _get_dimension_schemas(self):
        dimension_schemas = [d.schema for d in self.dimensions or []]
        dimension_tables = {t for d in self.dimensions or [] for t in d.tables}
        return dimension_schemas, dimension_tables

    def _get_filter_schemas(self):
        filter_schemas = [f.schema for f in self.filters or []]
        filter_tables = {t for f in self.filters or [] for t in f.element.tables}
        return filter_schemas, filter_tables

    def _get_group_by_schema(self):
        return [g.schema for g in self.group_by or []]

    def _get_operation_schemas(self):
        return [o.schema for o in self.operations or []]

    def _add_joins(self, query):
        for join in self.joins:
            query = getattr(query, join.join_type)(join.table.schema, join.criteria)
        return query

    def _add_projections(self, query):
        return query[self.dimension_schemas + self.metric_val_schemas]

    def _add_filters(self, query):
        return query[self.filter_schemas]

    def _add_groupby(self, query):
        return query.group_by(self.group_by)

    def _add_metrics(self, query):
        return query.aggregate(self.metric_agg_schemas)

    def _query(self):
        query = self.table.schema
        query = self._add_joins(query)
        query = self._add_projections(query)
        return self._add_filters(query)
3,229
apps/accounts/admin.py
spiperac/bctf
6
2170974
from django.contrib import admin

from apps.accounts.models import Account


class AccountAdmin(admin.ModelAdmin):
    pass


admin.site.register(Account, AccountAdmin)
168
sesion14/controllers/authors_controller.py
joelibaceta/backend-codigo-10
1
2171003
from flask_restful import Resource
from flask import request

from sesion14.models.author import Author
from sesion14.schemas.author import AuthorSchema
from sesion14.app import db


class AuthorsController(Resource):
    def get(self):
        authors = Author.query.all()
        schema = AuthorSchema()
        data = schema.dump(authors, many=True)
        return data

    def post(self):
        data = request.json
        new_author = Author(**data)
        db.session.add(new_author)
        db.session.commit()
        return {"status": "ok"}, 201
567
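Exercised from the client side, the controller behaves like any Flask-RESTful resource (a sketch; the host, port, route, and payload fields are assumptions):

import requests

# Hypothetical calls, assuming the resource is registered at /authors
# on a local dev server.
r = requests.post("http://localhost:5000/authors", json={"name": "Ada"})
print(r.status_code)  # 201 on success
print(requests.get("http://localhost:5000/authors").json())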
tpDcc/tools/datalibrary/widgets/menus/libraries.py
tpDcc/tpDcc-tools-datalibrary
0
2169204
#! /usr/bin/env python
# -*- coding: utf-8 -*-

"""
Module that contains data library libraries widget implementation
"""

from __future__ import print_function, division, absolute_import

from functools import partial

from Qt.QtWidgets import QMenu, QAction

from tpDcc.managers import resources
# from tpDcc.libs.datalibrary.core import utils


class LibrariesMenu(QMenu, object):
    def __init__(self, settings_path=None, library_window=None):
        super(LibrariesMenu, self).__init__(library_window)

        self._library_window = library_window
        self._settings_path = settings_path

        self.setTitle('Libraries')
        self.setIcon(resources.icon('books'))

        self.refresh()

    def settings_path(self):
        return self._settings_path

    def set_settings_path(self, settings_path):
        self._settings_path = settings_path
        self.refresh()

    def refresh(self):
        self.clear()

        # libraries = utils.read_settings(self._settings_path)
        # default = utils.default_library(self._settings_path)
        #
        # for name in libraries:
        #     library = libraries[name]
        #     path = library.get('path', '')
        #     kwargs = library.get('kwargs', dict())
        #     enabled = True
        #     if self._library_window:
        #         enabled = name != self._library_window.name()
        #     text = name
        #     if name == default and name.lower() != 'default':
        #         text = name + ' (default)'
        #
        #     action = QAction(text, self)
        #     action.setEnabled(enabled)
        #     action_callback = partial(self._on_show_library, name, path, **kwargs)
        #     action.triggered.connect(action_callback)
        #     self.addAction(action)

    def _on_show_library(self, name, path, **kwargs):
        """
        Internal callback function that shows the library window which has given name and path
        :param name: str
        :param path: str
        :param kwargs: dict
        """

        raise NotImplementedError()
2,067
libs/gpio.py
tomasz-solik/raspberry-pi-visualizations
1
2171082
from libs import config

try:
    if config.GPIO:
        import RPi.GPIO as GPIO
except RuntimeError:
    print("Error importing RPi.GPIO! This is probably because you need superuser privileges. "
          "You can achieve this by using 'sudo' to run your script")

vfx = None


##
# Init gpio module
#
# @param: vfx object
# @return: void
##
def init(vfx_obj):
    if not config.GPIO:
        # Nothing to initialise when GPIO support is disabled. (The original
        # fell through with `pass`, which still ran the GPIO setup below.)
        return

    global vfx
    vfx = vfx_obj

    try:
        # set gpio
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(config.GPIO_IN_BTN_PREV, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(config.GPIO_IN_BTN_NEXT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(config.GPIO_IN_BTN_AUDIO_TRIG, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(config.GPIO_IN_BTN_MIDI_TRIG, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(config.GPIO_IN_BTN_OSD_MENU, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(config.GPIO_OUT_LED_AUDIO_TRIG, GPIO.OUT)
        GPIO.setup(config.GPIO_OUT_LED_MIDI_TRIG, GPIO.OUT)
        vfx.gpio_connected = True
    except Exception:
        vfx.gpio_connected = False


##
# Receive gpio
#
# @return: void
##
def recv():
    global vfx
    if vfx.gpio_connected:
        # ---------------------------------------------------------------
        # BTN PREV
        if GPIO.input(config.GPIO_IN_BTN_PREV) == GPIO.HIGH:
            vfx.gpio_btn_prev = True
            if vfx.vfx_mod_number > 0:
                vfx.vfx_mod_number -= 1
        else:
            vfx.gpio_btn_prev = False
        # ---------------------------------------------------------------
        # BTN NEXT
        if GPIO.input(config.GPIO_IN_BTN_NEXT) == GPIO.HIGH:
            vfx.gpio_btn_next = True
            if vfx.vfx_mod_number < len(vfx.vfx_names) - 1:
                vfx.vfx_mod_number += 1
        else:
            vfx.gpio_btn_next = False
        # ---------------------------------------------------------------
        # AUDIO TRIG
        if vfx.audio_connected:
            if GPIO.input(config.GPIO_IN_BTN_AUDIO_TRIG) == GPIO.HIGH:
                vfx.gpio_btn_audio_trig = True
                if vfx.audio_trig == False:
                    vfx.audio_trig = True
                    GPIO.output(config.GPIO_OUT_LED_AUDIO_TRIG, GPIO.HIGH)
                else:
                    vfx.audio_trig = False
                    GPIO.output(config.GPIO_OUT_LED_AUDIO_TRIG, GPIO.LOW)
            else:
                vfx.gpio_btn_audio_trig = False
            if vfx.audio_trig:
                if vfx.audio_peak > 0:
                    GPIO.output(config.GPIO_OUT_LED_AUDIO_TRIG, GPIO.HIGH)
                else:
                    GPIO.output(config.GPIO_OUT_LED_AUDIO_TRIG, GPIO.LOW)
        # ---------------------------------------------------------------
        # MIDI TRIG
        if vfx.usb_midi_connected:
            if GPIO.input(config.GPIO_IN_BTN_MIDI_TRIG) == GPIO.HIGH:
                vfx.gpio_btn_midi_trig = True
                if vfx.usb_midi_trig == False:
                    vfx.usb_midi_trig = True
                    GPIO.output(config.GPIO_OUT_LED_MIDI_TRIG, GPIO.HIGH)
                else:
                    vfx.usb_midi_trig = False
                    GPIO.output(config.GPIO_OUT_LED_MIDI_TRIG, GPIO.LOW)
            else:
                vfx.gpio_btn_midi_trig = False
            if vfx.usb_midi_trig:
                if vfx.midi_new:
                    GPIO.output(config.GPIO_OUT_LED_MIDI_TRIG, GPIO.HIGH)
                else:
                    GPIO.output(config.GPIO_OUT_LED_MIDI_TRIG, GPIO.LOW)
        # ---------------------------------------------------------------
        # BTN MENU
        if GPIO.input(config.GPIO_IN_BTN_OSD_MENU) == GPIO.HIGH:
            vfx.gpio_btn_osd_menu = True
            if vfx.osd_menu == False:
                vfx.osd_menu = True
            else:
                vfx.osd_menu = False
        else:
            vfx.gpio_btn_osd_menu = False
4,012
app/web/control_api/eventReceiver.py
BobWatson/workspace
1
2171073
import json

from flask_restful import Resource
from pymitter import EventEmitter


class eventReceiver(Resource):
    def __init__(self) -> None:
        pass

    @classmethod
    def withEmitter(cls, eventEmitter: EventEmitter):
        cls.app_state = {}
        cls.ee = eventEmitter
        cls.state_watchers(cls)
        return cls

    def get(self, event_id):
        try:
            res = json.dumps(self.app_state[event_id])
        except KeyError:
            res = json.dumps({"status": "error"})
        return res

    def post(self, event_id):
        self.ee.emit(event_id)
        return "OK " + event_id

    def state_watchers(self):
        def prodigy_start():
            self.app_state["prodigy"] = {"status": "running"}
        self.ee.on("prodigy.start", prodigy_start)

        def prodigy_stop():
            self.app_state["prodigy"] = {"status": "stopped"}
        self.ee.on("prodigy.stop", prodigy_stop)

        def dw_start():
            self.app_state["directory_watcher"] = {"status": "running"}
        self.ee.on("directory_watcher.start", dw_start)

        def dw_stop():
            self.app_state["directory_watcher"] = {"status": "stopped"}
        # The original subscribed this handler to "directory_watcher.start",
        # so the stop event was never observed.
        self.ee.on("directory_watcher.stop", dw_stop)
1,237
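Wiring the resource into an app would look roughly like this (a sketch; the route and app names are assumptions):

from flask import Flask
from flask_restful import Api
from pymitter import EventEmitter

# Hypothetical registration of the eventReceiver class above.
app = Flask(__name__)
api = Api(app)
ee = EventEmitter()
api.add_resource(eventReceiver.withEmitter(ee), "/events/<string:event_id>")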
cisco43calc.py
wifiwizardofoz/py43calc
5
2169697
# Script by <NAME> | @wifiwizardofoz
# Version: v1.0 | 30-09-2020

import sys
import ipaddress
from ipaddress import AddressValueError
import socket


# User executes script with IPs specified as arguments at CLI
def arg_mode():
    try:
        ip_args = sys.argv[1::]
        iplist = []
        for wlc_ip in ip_args:
            # Validate IP addresses and convert to hex
            wlc_ip = ipaddress.IPv4Address(wlc_ip)
            wlc_ip = str(wlc_ip)
            wlc_ip = socket.inet_aton(wlc_ip).hex()
            # Ignore duplicate entries, convert to list and join on a single line with no spacing
            if wlc_ip not in iplist:
                iplist.append(wlc_ip)
            else:
                continue
        hexlist = ''.join(iplist)
        # Convert IP count to hex/base 16 and strip the leading '0x'. Print <= 2 digits or add leading '0' as required by Cisco
        hexcount = int(hex(len(iplist) * 4), 16)
        hexcount = '{:02X}'.format(hexcount)
        # Combine and print output to screen for user
        print('f1' + hexcount + hexlist)
    except AddressValueError:
        sys.exit('Error: One or more specified arguments are not Valid IPv4 Addresses')


# User runs script without any arguments specified at CLI
def interactive_mode():
    try:
        count = int(0)
        user_input = int(input('\n' + 'Number of WLCs in network: '))
        assert 0 < user_input < 17
    except AssertionError:
        print('\n' + 'Please enter a number between 1 and 16, or press CTRL+C to exit.')
        main()
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception:
        print('\n' + 'Not a valid number. Try again or press CTRL+C to exit.')
        main()
    else:
        iplist = []
        while count < user_input:
            count += 1
            try:
                # Present user with the required number of input prompts. Validate IP addresses and convert to hex
                wlc_ip = ipaddress.IPv4Address(input('WLC #' + str(count) + ' IP Address: '))
                wlc_ip = str(wlc_ip)
                wlc_ip = socket.inet_aton(wlc_ip).hex()
                # Don't allow duplicate entries and append to list
                if wlc_ip not in iplist:
                    iplist.append(wlc_ip)
                else:
                    count -= 1
                    print('\n' + 'IP Address already entered. Try again or press CTRL+C to exit.')
            except KeyboardInterrupt:
                sys.exit(0)
            except Exception:
                count -= 1
                print('\n' + 'Not a valid IP Address. Try again or press CTRL+C to exit.')
            else:
                pass
        # Join hex formatted IP addresses together in single line, with no spacing
        hexlist = ''.join(iplist)
        # Convert IP count to hex/base 16 and strip the leading '0x'. Print <= 2 digits or add leading '0' as required by Cisco
        hexcount = int(hex(len(iplist) * 4), 16)
        hexcount = '{:02X}'.format(hexcount)
        # Combine and print output to screen for user
        print('\n' + 'Your DHCP Option 43 value is: ' + 'f1' + hexcount + hexlist)


def main():
    if len(sys.argv) > 1:
        arg_mode()
    else:
        interactive_mode()


# Start Here
# Script is executed as main program
if __name__ == "__main__":
    main()
# Script has been imported from within another module
else:
    arg_mode()
3,486
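The Option 43 layout the script emits is: sub-option type f1, a one-byte length equal to 4 bytes per controller, then each IPv4 address in hex. A worked example with made-up addresses:

import socket

# Hypothetical WLCs at 192.168.10.5 and 10.0.0.9:
ips = ['192.168.10.5', '10.0.0.9']
payload = ''.join(socket.inet_aton(ip).hex() for ip in ips)
length = '{:02X}'.format(len(ips) * 4)  # 2 controllers -> 8 bytes -> '08'
print('f1' + length + payload)          # f108c0a80a050a000009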
code/basic_testing_mp.py
Go-Trojans/trojan-go
0
2171144
""" This program is creating 4 child processes and each child process is loading a saved model and try to predict a given input. """ from algos.utils import set_gpu_memory_target, load_model_from_disk import numpy as np import os import multiprocessing multiprocessing.set_start_method('spawn', force=True) def do_predict_in_parallel(num_workers): gpu_frac = 0.95/num_workers set_gpu_memory_target(gpu_frac) agent_model = ("./checkpoints/iteration_Savedmodel/initial.json", "./checkpoints/iteration_Savedmodel/initial.h5") model = load_model_from_disk(agent_model) model.summary() model_input = [] for _ in range(100): board_tensor = np.random.randint(0, 3, size=(7, 5, 5)) model_input.append(board_tensor) model_input = np.array(model_input) action_target = [] for _ in range (100): search_prob = np.random.randn(26) #search_prob_flat = search_prob.reshape(25,) action_target.append(search_prob) action_target = np.array(action_target) value_target = np.random.rand(100) value_target = np.array(value_target) X = model_input[0] X = np.expand_dims(X, axis=0) prediction = model.predict(X) print("[PID {}] {}".format(os.getpid(),prediction)) def predict_in_parallel(): num_workers = 4 board_size = 5 workers = [] for i in range(num_workers): worker = multiprocessing.Process( target=do_predict_in_parallel, args=( num_workers, ) ) worker.start() workers.append(worker) # Wait for all workers to finish. print('Waiting for workers...') for worker in workers: worker.join() def main(): predict_in_parallel() if __name__ == '__main__': main()
1,830
data.py
vanquisher2122/my-projects
0
2169576
import mysql.connector
from mysql.connector import Error
import pandas as pd
import getpass
import os

# If you are copying this, remember to put in your own SQL host, user and password
dbms = mysql.connector.connect(host='localhost',
                               user='root',
                               passwd='<PASSWORD>',
                               database='credentials')

curs = dbms.cursor()  # cursor for running statements in mysql


# startup UI
def menu():
    os.system('cls')
    print('''
    1) log in
    2) sign up
    3) help
    4) exit
    ''')
    opt = int(input("enter your choice in digits: "))
    if opt == 1:
        login()
    elif opt == 2:
        signup()
    elif opt == 3:
        help()
    elif opt == 4:
        exit()
    else:
        print("Why are you joking?")


def userid():
    os.system('cls')
    ii = input("enter your User ID: ")
    curs.execute("SELECT ID FROM users")  # select the ID column of the users table
    rows = curs.fetchall()  # fetch all data of the ID column
    if (ii,) in rows:  # if the value of ii is in the column, continue
        password()
    else:
        exit()


def password():
    ps = getpass.getpass("enter your pin: ")
    curs.execute("SELECT pin FROM users")  # select the pin column of the users table
    row = curs.fetchall()  # fetch all data of the pin column
    # NOTE: this accepts any pin stored in the table, not only the pin
    # belonging to the entered user ID.
    if (ps,) in row:
        main()
    else:
        exit()


def login():
    userid()


def exit():
    os.system('cls')
    print("I think, you need time. Bye!")
    input("press enter...")


def signup():
    os.system('cls')
    nme = input("enter your name: ")
    usid = input("enter your userID: ")
    curs.execute("SELECT ID FROM users")
    rows = curs.fetchall()
    if (usid,) in rows:
        print("This userID is already taken. please select a different one.")
        signup()
    else:
        pasd = getpass.getpass("please enter a pin in digits, characters are not supported: ")
        entry = """insert into users (name, ID, pin) values(%s, %s, %s)"""
        data = (nme, usid, pasd)
        curs.execute(entry, data)
        dbms.commit()
        menu()


def main():
    os.system('cls')
    print("programme successful!!")


def help():
    os.system('cls')
    print('''If you are already a user, enter 1 and in the next window, enter your credentials.
If you are a new user, enter 2 and hit enter and in the next window, do as directed.''')
    input('press enter to go back...')
    menu()


menu()
2,353
whosaidwhat/quotes/migrations/0001_initial.py
shun-liang/whosaidwhat
0
2170788
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-04 15:39
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('candidates', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='QuoteAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('accuracy', models.CharField(choices=[('0', 'No'), ('1', 'Yes'), ('2', 'Inaccurate/Out-of-context')], max_length=1)),
                ('created_at', models.DateTimeField()),
                ('content', models.TextField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='QuoteQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('quote', models.TextField()),
                ('quote_date', models.DateField(blank=True, null=True)),
                ('quote_location', models.CharField(blank=True, max_length=255)),
                ('asked_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='candidates.ElectionCandidate')),
            ],
        ),
        migrations.CreateModel(
            name='VoteOnAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('voted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('voted_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quotes.QuoteAnswer')),
            ],
        ),
        migrations.CreateModel(
            name='VoteOnQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('voted_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('voted_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quotes.QuoteQuestion')),
            ],
        ),
        migrations.AddField(
            model_name='quoteanswer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quotes.QuoteQuestion'),
        ),
        migrations.AlterUniqueTogether(
            name='voteonquestion',
            unique_together=set([('voted_to', 'voted_by')]),
        ),
        migrations.AlterUniqueTogether(
            name='voteonanswer',
            unique_together=set([('voted_to', 'voted_by')]),
        ),
    ]
3,367
enderecos/api/serializers.py
RodrigoMachado9/django-api
1
2168777
from rest_framework.serializers import ModelSerializer

from enderecos.models import Endereco


class EnderecoSerializer(ModelSerializer):
    class Meta:
        model = Endereco
        # TODO warning: careful with the spaces when writing the attributes - Hehe
        fields = ('id', 'linha1', 'linha2', 'cidade', 'estado', 'pais',
                  'latitude', 'logitude')
365
509.fibonacci-number.py
W-KE/Leetcode-Solutions
0
2171276
class Solution:
    def fib(self, N):
        """
        :type N: int
        :rtype: int
        """
        # if N < 2:
        #     return N
        # return self.fib(N - 1) + self.fib(N - 2)

        # Binet's formula: F(N) = (phi**N - psi**N) / sqrt(5), where the
        # second root is psi = (1 - sqrt(5)) / 2 = -1/phi. The original used
        # (-phi) ** N instead of psi ** N, which gives e.g. fib(2) == 0.
        phi = (1 + 5 ** 0.5) / 2
        psi = (1 - 5 ** 0.5) / 2
        return round((phi ** N - psi ** N) / 5 ** 0.5)
290
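A quick check of the closed form against the recursive definition (a sketch, assuming the Solution class above is in scope):

s = Solution()
assert [s.fib(n) for n in range(10)] == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]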
triple_agent/classes/action_tests.py
andrewzwicky/TripleAgent
3
2171095
from enum import auto

from triple_agent.classes.ordered_enum import ReverseOrderedEnum
from triple_agent.constants.colors import PlotColorsBase


class ActionTest(ReverseOrderedEnum):
    NoAT = 0
    Green = auto()
    White = auto()
    Ignored = auto()
    Red = auto()
    Canceled = auto()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}.{self.name}"


def create_action_test_color_dict(plot_colors: PlotColorsBase):
    return {
        ActionTest.Green: plot_colors.color_3,
        ActionTest.White: plot_colors.white,
        ActionTest.Ignored: plot_colors.light_grey,
        ActionTest.Red: plot_colors.color_2,
        ActionTest.Canceled: plot_colors.dark_grey,
    }
709
training.py
abhinav-2912/Rain-Haze-Removal
0
2170816
import os
import random
import shutil
import sys
import warnings
from time import time

import cv2
import imageio
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback, ReduceLROnPlateau, TensorBoard
from tensorflow.keras.layers import Input  # Input was used below but never imported
from tensorflow.keras.models import Model, load_model, model_from_json
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.python.client import device_lib

import albumentations as A
import segmentation_models as sm

from models import ResidualModel1, ResidualModel2
from utils import guided_filter, visualize

# NOTE: Dataset and Dataloder are referenced below but not defined or
# imported in this file; they presumably live elsewhere in the project.

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

with tf.device("/device:GPU:0"):
    image_size = 64
    num_channels = 3

    x_train_path = "/home/two/final_code/data_generation/h5data/train/x/"
    y_train_path = "/home/two/final_code/data_generation/h5data/train/y/"
    x_valid_path = "/home/two/final_code/data_generation/h5data/validation/x/"
    y_valid_path = "/home/two/final_code/data_generation/h5data/validation/y/"
    x_test_path = "/home/two/final_code/data_generation/h5data/test/x/"
    y_test_path = "/home/two/final_code/data_generation/h5data/test/y/"

    image = Input((image_size, image_size, num_channels))
    detail = Input((image_size, image_size, num_channels))
    label = Input((image_size, image_size, num_channels))

    model = ResidualModel2(image, detail, True)
    # model.summary()

    def round_clip_0_1(x, **kwargs):
        return x.round().clip(0, 1)

    # define heavy augmentations
    def get_training_augmentation():
        train_transform = [
            A.RandomCrop(height=image_size, width=image_size, always_apply=True),
        ]
        return A.Compose(train_transform)

    def get_validation_augmentation():
        """Add paddings to make image shape divisible by 32"""
        test_transform = [
            A.RandomCrop(height=image_size, width=image_size, always_apply=True)
        ]
        return A.Compose(test_transform)

    BATCH_SIZE = 64
    LR = 0.001
    EPOCHS = 15
    IMAGE_ORDERING = 'channels_last'

    optim = Adam(LR)
    loss = tf.keras.losses.MeanSquaredError()
    model.compile(optim, loss)

    random.seed(7)

    train_dataset = Dataset(
        x_train_path,
        y_train_path,
        augmentation=get_training_augmentation())

    # Dataset for validation images
    valid_dataset = Dataset(
        x_valid_path,
        y_valid_path,
        augmentation=get_validation_augmentation())

    train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    valid_dataloader = Dataloder(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)

    tensorboard_callback = TensorBoard(log_dir='/home/two/final_code/logs/{}'.format(time()),
                                       update_freq='batch')
    reduce_lr = ReduceLROnPlateau()
    callbacks = [tensorboard_callback, reduce_lr]

    train_samples = 300000
    valid_samples = train_samples * 0.2
    train_steps = int(train_samples / BATCH_SIZE)
    valid_steps = int(valid_samples / BATCH_SIZE)

    history = model.fit_generator(
        train_dataloader,
        steps_per_epoch=train_steps,
        epochs=EPOCHS,
        callbacks=callbacks,
        validation_data=valid_dataloader,
        validation_steps=valid_steps)
3,410
setup.py
NitiBaghel/Enron-Dataset-Field-Extraction-Library
0
2171089
from setuptools import find_packages, setup

setup(
    name='extractenronlib',
    packages=find_packages(include=['extractenronlib']),
    version='0.1.0',
    description='Enron dataset field extraction library',
    author='<NAME>',
    license='MIT',
    install_requires=['pandas'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest==4.4.1'],
    test_suite='test',
)
386
src/master_dictionary.py
rotomer/nlp-project
0
2166254
import os
from itertools import islice


class MasterDictionary(object):
    def __init__(self, master_dict):
        self._master_dict = master_dict
        self._sentiment_field_names = ['Negative', 'Positive', 'Uncertainty', 'Litigious',
                                       'Constraining', 'Superfluous', 'Interesting', 'Modal']
        self._default = [(field_name, 0) for field_name in self._sentiment_field_names]

    def sentiment_for_word(self, word):
        sentiment = self._master_dict.get(word)
        if sentiment is not None:
            return sentiment
        else:
            return self._default

    @staticmethod
    def from_file(master_dictionary_file_path):
        master_dict = {}

        with open(master_dictionary_file_path, 'r') as master_dictionary_file:
            line_number = 0
            for line in master_dictionary_file:
                line_number = line_number + 1
                if line_number == 1:
                    continue

                splits = line.strip().split(',')
                sentiment = [('Negative', 1 if int(splits[7]) != 0 else 0),
                             ('Positive', 1 if int(splits[8]) != 0 else 0),
                             ('Uncertainty', 1 if int(splits[9]) != 0 else 0),
                             ('Litigious', 1 if int(splits[10]) != 0 else 0),
                             ('Constraining', 1 if int(splits[11]) != 0 else 0),
                             ('Superfluous', 1 if int(splits[12]) != 0 else 0),
                             ('Interesting', 1 if int(splits[13]) != 0 else 0),
                             ('Modal', 1 if int(splits[14]) != 0 else 0)]

                master_dict[splits[0]] = sentiment

        return MasterDictionary(master_dict)

    @property
    def sentiment_field_names(self):
        return self._sentiment_field_names

    @property
    def inner_dict(self):
        return self._master_dict


def _take(n, iterable):
    return list(islice(iterable, n))


if __name__ == '__main__':
    current_file_dir_path = os.path.dirname(os.path.realpath(__file__))
    master_dict_file_path = os.path.join(current_file_dir_path, '..', 'input_data', 'Sentiment_Dictionary',
                                         'LoughranMcDonald_MasterDictionary_2018.csv')
    master_dict = MasterDictionary.from_file(master_dict_file_path)
    for key, value in _take(100, master_dict.inner_dict.items()):
        print(key + ':' + str(value))
2,688
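In-memory usage does not require the CSV at all (a sketch with made-up sentiment entries, assuming the file above is importable as master_dictionary):

from master_dictionary import MasterDictionary

# Hypothetical dictionary entry in the same shape that from_file() builds.
entries = {'LOSS': [('Negative', 1), ('Positive', 0), ('Uncertainty', 0),
                    ('Litigious', 0), ('Constraining', 0), ('Superfluous', 0),
                    ('Interesting', 0), ('Modal', 0)]}
md = MasterDictionary(entries)
print(md.sentiment_for_word('LOSS'))     # flags from the table
print(md.sentiment_for_word('BANANA'))   # all-zero default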
homehub/homehub/settings.py
stricoff92/homehub
0
2170376
""" Django settings for homehub project. Generated by 'django-admin startproject' using Django 3.2. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os.path from pathlib import Path from homehub import applocals # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ SECRET_KEY = applocals.SECRET_KEY DEBUG = applocals.DEBUG ENV = applocals.ENV ENV_DEV = 'DEV' ENV_TESTING = 'TESTING' ENV_PROD = 'PROD' if ENV not in [ENV_DEV, ENV_TESTING, ENV_PROD]: raise Exception("Invalid ENV") ALLOWED_HOSTS = applocals.ALLOWED_HOSTS APP_PORT = applocals.APP_PORT # This is not suitable for a remote host. SESSION_COOKIE_SECURE = False CSRF_COOKIE_SECURE = False # Application definition INSTALLED_APPS = [ 'api.apps.ApiConfig', 'website.apps.WebsiteConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'homehub.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'homehub.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ # { # 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # }, # { # 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # }, # { # 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # }, # { # 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "website/templates/angular_static"), ] # For collect static STATIC_ROOT = os.path.join(BASE_DIR, 'website/templates/static') LOGIN_URL = "/admin/login/" # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Selenium Driver CHROME_DRIVER_PATH = applocals.CHROME_DRIVER_PATH # API Tokens WEATHER_API_KEY = 
applocals.WEATHER_API_KEY WORDNIK_API_KEY = applocals.WORDNIK_API_KEY CALENDARIFIC_API_KEY = applocals.CALENDARIFIC_API_KEY PUSHOVER_APP_TOKEN = applocals.PUSHOVER_APP_TOKEN PUSHOVER_USER_TOKEN = applocals.PUSHOVER_USER_TOKEN
4,125
flex2mouse.py
AndrewQuinn2020/flex2mouse
0
2171125
from time import sleep
from playsound import playsound
from itertools import cycle

import websocket
import autopy

# Okay, great. Next step is to start making this into a looping client that
# does what I want.

MIN_SWITCH = 2500        # EMG readings above this switch mouse mode.
MAX_MOVE = 2000          # EMG readings above this either deadzone or switch.
MIN_MOVE = 50            # EMG readings below this won't trigger movement.
MOVE_SENSITIVITY = 0.2   # 1 means +1 reading = +1 pixel moved.
MIN_CLICK = 50           # Minimum signal in to trigger a click.

CHIME = True
TICKS_TO_QUIT = 3        # How long to stay in mouse_quit mode before we exit.

INSTANT_MOVE = False     # Change whether the mouse moves smoothly or not.
                         # Usually not a good idea to turn on, but if your
                         # sensitivity is cranked up to the point smooth_move
                         # is throwing off your timing, it might be worth a shot.

ip = "ws://192.168.0.210"

mouse_mvmts_possible = ['mouse_right', 'mouse_down', 'mouse_left', 'mouse_up',
                        'mouse_left_click', 'mouse_right_click', 'exit']
mouse_mvmts = cycle(mouse_mvmts_possible)


def pixel_move(mag):
    if mag > MAX_MOVE:
        return 0
    elif mag < MIN_MOVE:
        return 0
    else:
        return (mag - MIN_MOVE) * MOVE_SENSITIVITY


def next_mouse_mvmt():
    return next(mouse_mvmts)


def next_location(mvmt, mag):
    if mvmt == "mouse_right":
        return [autopy.mouse.location()[0] + mag, autopy.mouse.location()[1]]
    elif mvmt == "mouse_down":
        # Weird! Down is +, and up is -. Usually think of it the other way.
        return [autopy.mouse.location()[0], autopy.mouse.location()[1] + mag]
    elif mvmt == "mouse_left":
        return [autopy.mouse.location()[0] - mag, autopy.mouse.location()[1]]
    elif mvmt == "mouse_up":
        return [autopy.mouse.location()[0], autopy.mouse.location()[1] - mag]
    else:
        return autopy.mouse.location()


def bounded_next_location(mvmt, mag):
    nl = next_location(mvmt, mag)
    if nl[0] < 0.0:
        nl[0] = 0.0
    elif nl[0] > autopy.screen.size()[0] - 10.0:
        nl[0] = autopy.screen.size()[0] - 10.0
    if nl[1] < 0.0:
        nl[1] = 0.0
    elif nl[1] > autopy.screen.size()[1] - 10.0:
        nl[1] = autopy.screen.size()[1] - 10.0
    return nl


if __name__ == "__main__":
    print("Connecting to the ESP-32.")
    ws = websocket.WebSocket()
    ws.connect(ip)
    print("Complete.")

    print("Entering movement loop.")
    quit_ticks_left = TICKS_TO_QUIT
    mouse_mvmt = next_mouse_mvmt()
    print("Mouse movement this cycle: {}".format(mouse_mvmt))

    c = 500
    while c > 0:
        c = c - 1
        print("--------------------------")
        raw_in = int(ws.recv())
        print("Raw muscle sensor reading: {}".format(raw_in))

        if raw_in > MIN_SWITCH:
            if CHIME:
                playsound('./sounds/tick_switch-cursor.wav')
            print("\n===> NEXT_MOVEMENT <===\n")
            mouse_mvmt = next_mouse_mvmt()
            print("Mouse movement changed: {}".format(mouse_mvmt))
        elif MIN_SWITCH > raw_in > MAX_MOVE:
            if CHIME:
                playsound('./sounds/tick_dead-zone.wav')
            print("\n===> DEAD_ZONE <===\n")
            print("Values between {} and {} produce nothing.".format(MAX_MOVE, MIN_SWITCH))
        else:
            if CHIME:
                playsound('./sounds/tick_move.wav')
            print("\n===> MOUSE_MOVE <===\n")

            if mouse_mvmt == 'exit':
                print("Mouse `movement`: {}".format(mouse_mvmt))
                print("Stay here for {} more ticks to quit the program.\n\n\n".format(quit_ticks_left))
                if quit_ticks_left == 0:
                    break
                else:
                    quit_ticks_left -= 1
            else:
                quit_ticks_left = TICKS_TO_QUIT
                if mouse_mvmt == 'mouse_left_click':
                    if raw_in > MIN_CLICK:
                        print("Performing click: {}.".format(mouse_mvmt))
                        print("Pressing down ...")
                        autopy.mouse.toggle(autopy.mouse.Button.LEFT, True)
                        print("... Releasing ... ")
                        autopy.mouse.toggle(autopy.mouse.Button.LEFT, False)
                        # Simulate a button click, arbitrarily fast.
                        print("... Done.")
                    else:
                        print("Muscle didn't flex enough to perform click.")
                elif mouse_mvmt == 'mouse_right_click':
                    if raw_in > MIN_CLICK:
                        print("Performing click: {}.".format(mouse_mvmt))
                        print("Pressing down ...")
                        autopy.mouse.toggle(autopy.mouse.Button.RIGHT, True)
                        print("... Releasing ... ")
                        autopy.mouse.toggle(autopy.mouse.Button.RIGHT, False)
                        # Simulate a button click, arbitrarily fast.
                        print("... Done.")
                    else:
                        print("Muscle didn't flex enough to perform click.")
                else:
                    pixels = pixel_move(raw_in)
                    if pixels == 0:
                        print("Muscles not tensed enough; we will not move.\n")
                    else:
                        nl = bounded_next_location(mouse_mvmt, pixels)
                        print("Moving {} pixels in direction {}.".format(pixels, mouse_mvmt))
                        print("Next location will be: {}.".format(nl))
                        if INSTANT_MOVE:
                            autopy.mouse.move(nl[0], nl[1])
                        else:
                            autopy.mouse.smooth_move(nl[0], nl[1])
                        pass  # put in code here.

        print("Now at {}.".format(autopy.mouse.location()))

    # Gracefully close WebSocket connection
    print("Closing out the connection. Thank you!")
    ws.close()
5,973
TestCaseCodes/Saadat/OLDVersion/RecomendWeb.py
Dieuzu/SDGP-Nullpoint-G22
0
2171359
# write-html.py

import webbrowser


def main():
    f = open('4_RefferenceResultGen\TestFolder\TaskResults.html', 'a')
    r = open('4_RefferenceResultGen\TestFolder\Refined_Links.txt', 'r')

    P1 = """<!DOCTYPE html>
    <html style="font-size: 16px;">
    <head>
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <meta charset="utf-8">
        <meta name="keywords" content="Learn marketing strategy, Our Courses, Drive Your Career Forward">
        <meta name="description" content="">
        <meta name="page_type" content="np-template-header-footer-from-plugin">
        <title>NullPoint</title>
        <link rel="stylesheet" href="nicepage.css" media="screen">
        <link rel="stylesheet" href="Page-2.css" media="screen">
        <script class="u-script" type="text/javascript" src="jquery.js" defer=""></script>
        <script class="u-script" type="text/javascript" src="nicepage.js" defer=""></script>
        <meta name="generator" content="Nicepage 4.4.3, nicepage.com">
        <link id="u-theme-google-font" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i,900,900i|Open+Sans:300,300i,400,400i,600,600i,700,700i,800,800i">
        <link id="u-page-google-font" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Montserrat:100,100i,200,200i,300,300i,400,400i,500,500i,600,600i,700,700i,800,800i,900,900i">
        <script type="application/ld+json">{
            "@context": "http://schema.org",
            "@type": "Organization",
            "name": "",
            "logo": "images/default-logo.png"
        }</script>
        <meta name="theme-color" content="#478ac9">
        <meta property="og:title" content="Page 2">
        <meta property="og:type" content="website">
    </head>
    <body class="u-body u-xl-mode">
    <header class="u-clearfix u-header u-header" id="sec-00c3"><div class="u-clearfix u-sheet u-sheet-1">
        <a href="https://nicepage.com" class="u-image u-logo u-image-1">
            <img src="images/default-logo.png" class="u-logo-image u-logo-image-1">
        </a>
    </div></header>
    <section class="u-align-center u-clearfix u-image u-shading u-section-1" src="" id="carousel_0791" data-image-width="150" data-image-height="97">
        <div class="u-clearfix u-sheet u-sheet-1">
            <h2 class="u-text u-text-1">NULLPOINT<br>Assignment Manager </h2>
        </div>
    </section>
    <section class="u-align-left u-clearfix u-grey-10 u-section-2" id="carousel_77f5">
        <div class="u-clearfix u-sheet u-valign-middle-md u-valign-middle-sm u-valign-middle-xs u-sheet-1">
            <h2 class="u-align-center u-custom-font u-font-montserrat u-text u-text-default u-text-1"> Maybe the Following Links Might help you.....</h2>
            <div class="u-border-16 u-border-palette-3-base u-line u-line-horizontal u-line-1"></div>
            <div class="u-expanded-width u-list u-list-1">
                <div class="u-repeater u-repeater-1">
    """

    P2_1 = """
                    <div class="u-align-left u-container-style u-list-item u-repeater-item u-white u-list-item-1">
                        <div class="u-container-layout u-similar-container u-valign-middle u-container-layout-1">
                            <h5 class="u-text u-text-default u-text-2">
                                <a href=\""""

    P2_3 = """</a></h5>
                            <div class="u-expanded-height-lg u-expanded-height-sm u-expanded-height-xl u-expanded-height-xs u-palette-3-base u-shape u-shape-rectangle u-shape-1"></div>
                        </div>
                    </div>
    """

    P3 = """
                </div>
            </div>
        </div>
    </section>
    <footer class="u-align-center u-clearfix u-footer u-grey-80 u-footer" id="sec-67f3"><div class="u-clearfix u-sheet u-sheet-1">
        <p class="u-small-text u-text u-text-variant u-text-1">Nullpoint Assignment manager</p>
    </div></footer>
    </body>
    </html>"""

    print("[SYSTEM] Creating the TaskResults.html")
    f.write(P1)

    Linkindex = 1
    for x in r:
        spliter = x.split(",")
        P2_2 = "\" target=\"_blank\">Check out Link Number: " + str(Linkindex) + " (" + spliter[1] + "% Relevancy)"
        FullHtml = P2_1 + spliter[0] + P2_2 + P2_3
        Linkindex += 1
        f.write(FullHtml)

    f.write(P3)
    f.close()

    print("[SYSTEM] Deploying the TaskResults.html")
    url = '4_RefferenceResultGen\TestFolder\TaskResults.html'
    webbrowser.open(url, new=2)  # open in new tab


if __name__ == "__main__":
    main()
5,141
src/example_fuzzy_1.py
RichardOkubo/PythonScripts
0
2170457
# import numpy as np
# import skfuzzy as fuzz
#
# from skfuzzy import control as ctrl
#
# design = ctrl.Antecedent(np.arange(1, 6), "design")
# potencia = ctrl.Antecedent(np.arange(1, 6), "potencia")
# economia = ctrl.Antecedent(np.arange(1, 6), "economia")
# preco = ctrl.Antecedent(np.arange(1, 6), "preco")
# espaco = ctrl.Antecedent(np.arange(1, 6), "espaco")
#
# percepcao = ctrl.Consequent(np.arange(1, 6), "percepcao")
#
# # Design
# design["feio"] = fuzz.trapmf(design.universe, [1, 1, 2, 3])
# design["razoável"] = fuzz.trapmf(design.universe, [2, 3, 3, 4])
# design["belo"] = fuzz.trapmf(design.universe, [3, 4, 5, 5])
#
# # Power
# potencia["baixa"] = fuzz.trapmf(potencia.universe, [1, 1, 2, 3])
# potencia["média"] = fuzz.trapmf(potencia.universe, [2, 3, 3, 4])
# potencia["alta"] = fuzz.trapmf(potencia.universe, [3, 4, 5, 5])
#
# # Economy
# economia["baixa"] = fuzz.trapmf(economia.universe, [1, 1, 2, 3])
# economia["média"] = fuzz.trapmf(economia.universe, [2, 3, 3, 4])
# economia["alta"] = fuzz.trapmf(economia.universe, [3, 4, 5, 5])
#
# # Price
# preco["elevado"] = fuzz.trapmf(preco.universe, [1, 1, 2, 3])
# preco["coerente"] = fuzz.trapmf(preco.universe, [2, 3, 3, 4])
# preco["barato"] = fuzz.trapmf(preco.universe, [3, 4, 5, 5])
#
# # Interior space
# espaco["apertado"] = fuzz.trapmf(espaco.universe, [1, 1, 2, 3])
# espaco["médio"] = fuzz.trapmf(espaco.universe, [2, 3, 3, 4])
# espaco["espaçoso"] = fuzz.trapmf(espaco.universe, [3, 4, 5, 5])
#
# # Perception
# percepcao["dispensável"] = fuzz.trapmf(percepcao.universe, [1, 1, 2, 3])
# percepcao["importante"] = fuzz.trapmf(percepcao.universe, [2, 3, 3, 4])
# percepcao["crucial"] = fuzz.trapmf(percepcao.universe, [3, 4, 5, 5])

from pprint import pprint


def multiplica(mat_A: "matrix", mat_B: "matrix") -> "matrix":
    """Multiplies matrices A and B element-wise."""
    assert len(mat_A) == len(mat_B) and len(mat_A[0]) == len(mat_B[0])
    matriz = [None] * len(mat_A)
    for linha in range(len(mat_A)):
        matriz[linha] = [None] * len(mat_A[0])
        for coluna in range(len(mat_A[0])):
            matriz[linha][coluna] = mat_A[linha][coluna] * mat_B[linha][coluna]
    return matriz


def reduz(matriz: "matrix") -> int:
    """Reduces the matrix to a single number: the sum of all its elements."""
    soma = 0
    for linha in matriz:
        soma += sum(linha)
    return soma


def transposta(matriz: "matrix") -> "matrix":
    """Creates the transpose of the matrix given by the user."""
    matriz_transposta = []
    for coluna in range(len(matriz[0])):
        nova_coluna = []
        for linha in range(len(matriz)):
            nova_coluna.append(matriz[linha][coluna])
        matriz_transposta.append(nova_coluna)
    return matriz_transposta


# ------------------------------------------------------------------------------


def oferta_demanda(oferta: list, demanda: list) -> list:
    """Cross-matching function between supply and demand."""
    resultado = []
    for oferta_ in oferta:
        resultado.append(reduz(multiplica(oferta_, demanda)))
    return resultado


def normalizador(matriz: list) -> list:
    """Normalizes the values of the given matrix."""
    matriz_normalizada = []
    maximizante_da_matriz = matriz[-1]
    for linha in matriz:
        nova_coluna = []
        for i, coluna in enumerate(linha):
            nova_coluna.append(coluna / maximizante_da_matriz[i])
        matriz_normalizada.append(nova_coluna)
    return matriz_normalizada


def resolve(oferta: list, demanda: (list, "matrix"), geral=False) -> "matrix":
    """Solves for either a single client or several clients."""
    resultado_parcial = []
    if not geral:
        resultado_parcial.append(oferta_demanda(oferta, demanda))
    else:
        for i in range(len(demanda)):
            resultado_parcial.append(oferta_demanda(oferta, demanda[i]))
    resultado_final = normalizador(transposta(resultado_parcial))
    return resultado_final


# ------------------------------------------------------------------------------

# CARS
produto_A = [
    [1, 1, 2, 3],  # design - ugly
    [1, 1, 2, 3],  # power - low
    [3, 4, 5, 5],  # economy - high
    [3, 4, 5, 5],  # price - cheap
    [1, 1, 2, 3],  # space - cramped
]
produto_B = [[3, 4, 5, 5], [3, 4, 5, 5], [1, 1, 2, 3], [1, 1, 2, 3], [3, 4, 5, 5]]
produto_C = [[2, 3, 3, 4], [2, 3, 3, 4], [2, 3, 3, 4], [2, 3, 3, 4], [3, 4, 5, 5]]
produto_D = [[3, 4, 5, 5], [3, 4, 5, 5], [1, 1, 2, 3], [1, 1, 2, 3], [2, 3, 3, 4]]

# MAXIMIZER
maximizante = [
    # Maximum possible values for each product attribute
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
    [3, 4, 5, 5],
]

# CLIENTS
cliente_A = [
    [2, 3, 3, 4],  # design - important
    [3, 4, 5, 5],  # power - crucial
    [2, 3, 3, 4],  # economy - important
    [2, 3, 3, 4],  # price - important
    [3, 4, 5, 5],  # space - crucial
]
cliente_B = [
    [2, 3, 3, 4],  # design - important
    [3, 4, 5, 5],  # power - crucial
    [1, 1, 2, 3],  # economy - dispensable
    [1, 1, 2, 3],  # price - dispensable
    [1, 1, 2, 3],  # space - dispensable
]
cliente_C = [
    [1, 1, 2, 3],  # design - dispensable
    [2, 3, 3, 4],  # power - important
    [2, 3, 3, 4],  # economy - important
    [2, 3, 3, 4],  # price - important
    [3, 4, 5, 5],  # space - crucial
]
cliente_D = [
    [1, 1, 2, 3],  # design - dispensable
    [1, 1, 2, 3],  # power - dispensable
    [3, 4, 5, 5],  # economy - crucial
    [3, 4, 5, 5],  # price - crucial
    [1, 1, 2, 3],  # space - dispensable
]

produtos = [produto_A, produto_B, produto_C, produto_D, maximizante]
clientes = [cliente_A, cliente_B, cliente_C, cliente_D]

if __name__ == "__main__":
    pprint(resolve(oferta=produtos, demanda=cliente_A, geral=False))
5,791
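As a quick illustration of the scoring pipeline above (a hypothetical snippet, assuming it runs at the bottom of the same module): each product/client pair is scored by summing the element-wise product of their membership rows, and normalizador then divides each score column by the maximizer row.

# Hypothetical sanity check, runnable inside the module above.
oferta_teste = [[1, 1, 2, 3]]    # one attribute row: "low"
demanda_teste = [[3, 4, 5, 5]]   # matching preference row: "crucial"
# multiplica: [[1*3, 1*4, 2*5, 3*5]] == [[3, 4, 10, 15]]
# reduz:      3 + 4 + 10 + 15 == 32
assert reduz(multiplica(oferta_teste, demanda_teste)) == 32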
dev/scripts/extHelperMOD.py
bforest-ariadne/touchdesigner-tox-prep-for-release
15
2169913
import os
import sys
import subprocess
import platform


def Check_dep():
    '''Adds the project's dep/python directory to sys.path.

    Notes
    ---------------
    'self' does not need to be included in the Args section.
    Relies on the TouchDesigner `project` built-in.

    Args
    ---------------
    None

    Returns
    ---------------
    None
    '''
    Dep_path = '{}/dep/python/'.format(project.folder)
    if Dep_path not in sys.path:
        sys.path.insert(0, Dep_path)

    for each in sys.path:
        print(each)

    return


def Check_dep_path():
    '''This method checks for and creates a dep path.

    More here shortly.

    Notes
    ---------------
    'self' does not need to be included in the Args section.

    Args
    ---------------
    None

    Returns
    ---------------
    None
    '''
    dep_path = '{}/dep'.format(project.folder)
    python_path = '{}/dep/python'.format(project.folder)
    scripts_reqs_path = '{proj}/dep/{name}'.format(proj=project.folder, name=parent().par.Name)
    requirements = '{}/requirements.txt'.format(scripts_reqs_path)
    reqs_dat = op('reqs')
    phue_path = '{}/dep/python/phue.py'.format(project.folder)
    win_py_dep = '{}/update-dep-python-windows.cmd'.format(scripts_reqs_path)
    mac_py_dep = '{}/update-dep-python-mac.sh'.format(scripts_reqs_path)

    # create the /dep directory in the project folder if it's not there
    if not os.path.isdir(dep_path):
        os.mkdir(dep_path)

    # create the /dep/python directory if it's not there
    if not os.path.isdir(python_path):
        os.mkdir(python_path)

    # create the scripts and requirements folder if it's not there
    if not os.path.isdir(scripts_reqs_path):
        os.mkdir(scripts_reqs_path)

    # write the requirements.txt if it's not in place
    if not os.path.isfile(requirements):
        with open(requirements, 'w') as reqs_file:
            reqs_file.write(reqs_dat.text)

    # check to see if our auto-generated scripts are in place
    has_win_py = os.path.isfile(win_py_dep)
    has_mac_py = os.path.isfile(mac_py_dep)

    win_py_txt = me.mod.extHelperMOD.win_dep(scripts_reqs_path, python_path)
    mac_py_txt = me.mod.extHelperMOD.mac_dep(scripts_reqs_path, python_path)

    # identify platform
    osPlatform = platform.system()

    # on windows
    if osPlatform == "Windows":
        # create the script to handle grabbing our dependencies
        with open(win_py_dep, 'w') as req_file:
            req_file.write(win_py_txt)

        # check to see if there is anything in the python dep dir;
        # for now we'll assume that if there are files here we
        # successfully installed our python dependencies
        if len(os.listdir(python_path)) == 0:
            subprocess.Popen([win_py_dep])

    # on mac
    elif osPlatform == "Darwin":
        # create the script to handle grabbing our dependencies
        with open(mac_py_dep, 'w') as mac_file:
            mac_file.write(mac_py_txt)

        # check to see if there is anything in the python dep dir;
        # for now we'll assume that if there are files here we
        # successfully installed our python dependencies
        if len(os.listdir(python_path)) == 0:
            subprocess.Popen([mac_py_dep])

    return


def win_dep(requirementsPath, targetPath):
    win_txt = ''':: Update dependencies
:: make sure pip is up to date
python -m pip install --upgrade pip

:: install requirements
pip install -r {reqs}/requirements.txt --target="{target}"'''

    formatted_win_txt = win_txt.format(reqs=requirementsPath, target=targetPath)
    return formatted_win_txt


def mac_dep(requirementsPath, targetPath):
    mac_txt = '''#!/bin/bash
dep=$(dirname "$0")

# change current directory to where the script is run from
cd "$dep"

# permission to run the file
sudo chmod 755 update-dep-python-mac.sh

# fix up pip with python3
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3 get-pip.py

# Update dependencies
# make sure pip is up to date
python3 -m pip install --upgrade pip

# install requirements
python3 -m pip install -r {reqs}/requirements.txt --target={target}'''

    formatted_mac_txt = mac_txt.format(reqs=requirementsPath, target=targetPath)
    return formatted_mac_txt
4,324
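For reference, a sketch of what win_dep above emits; the paths are made up, and in TouchDesigner the call would go through me.mod.extHelperMOD.

# Hypothetical call with illustrative paths.
script = win_dep('C:/proj/dep/toxTool', 'C:/proj/dep/python')
print(script)
# :: Update dependencies
# :: make sure pip is up to date
# python -m pip install --upgrade pip
#
# :: install requirements
# pip install -r C:/proj/dep/toxTool/requirements.txt --target="C:/proj/dep/python"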
api/alembic/versions/28ba6976c1bc_set_groupid_as_foreign_key.py
mwath/Integration
0
2170986
"""Set groupId as foreign key Revision ID: <KEY> Revises: 7db537b8750a Create Date: 2021-11-10 11:52:26.341746 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '7db537b8750a' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_foreign_key(None, 'devices', 'groups', ['groupId'], ['id']) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'devices', type_='foreignkey') # ### end Alembic commands ###
668
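A caveat with the autogenerated downgrade above: op.drop_constraint(None, ...) typically fails because Alembic needs a real constraint name to drop. A hedged variant that names the key explicitly (the name fk_devices_groupId_groups is hypothetical):

def upgrade():
    op.create_foreign_key('fk_devices_groupId_groups', 'devices', 'groups', ['groupId'], ['id'])


def downgrade():
    op.drop_constraint('fk_devices_groupId_groups', 'devices', type_='foreignkey')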
sweetie_bot_flexbe_states/src/sweetie_bot_flexbe_states/sweetie_bot_follow_head_pose_smart.py
sweetie-bot-project/sweetie_bot_flexbe_behaviors
9
2171015
#!/usr/bin/env python
import math
import numpy
import rospy
import tf

from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached, ProxyTransformListener, ProxyServiceCaller

from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from std_msgs.msg import Header
from geometry_msgs.msg import Point, PointStamped, PoseStamped
from sensor_msgs.msg import JointState

from proto2.head_ik import HeadIK


class SweetieBotFollowHeadPoseSmart(EventState):
    '''
    SweetieBot follows an object with head and eyes. The object is specified by a PoseStamped on pose_topic.
    The robot tries to keep a comfortable distance between the object and its head. If that is not possible,
    the corresponding outcome may be triggered.

    If the distance between head and object is smaller than `distance_uncomfortable`, joint51 is set to
    `neck_angle_uncomfortable`. If it is greater than `distance_comfortable`, joint51 is set to
    `neck_angle_comfortable`.

    -- pose_topic                       string    geometry_msgs.msg.PoseStamped topic where the object pose is published.
    -- follow_joint_state_controller    string    FollowJointState controller name without prefix.
    -- discomfort_time                  float     If the distance between head and object stays below `distance_uncomfortable` for `discomfort_time` seconds, the `too_close` outcome is triggered.
    -- neck_control_parameteres         float[]   [ neck_angle_comfortable, distance_comfortable, neck_angle_uncomfortable, distance_uncomfortable ]
    -- deactivate                       boolean   Deactivate controller on exit state.
    -- controlled_chains                string[]  List of controlled kinematic chains; may contain 'head', 'eyes'.

    <= failed       Unable to activate state (controller is unavailable, etc.).
    <= too_close    Object is too close to head.
    '''

    def __init__(self, pose_topic, follow_joint_state_controller = 'joint_state_head', discomfort_time = 1000.0,
                 neck_control_parameteres = [ -0.13, 0.3, 0.20, 0.2 ], deactivate = True, controlled_chains = ['head', 'eyes']):
        super(SweetieBotFollowHeadPoseSmart, self).__init__(outcomes = ['failed', 'too_close'])

        # store state parameters for later use
        self._pose_topic = pose_topic
        if len(neck_control_parameteres) != 4:
            raise TypeError('SweetieBotFollowHeadPoseSmart: neck_control_parameteres must be float[4]')
        self._neck_params = neck_control_parameteres
        self._discomfort_time = discomfort_time
        self._controller = 'motion/controller/' + follow_joint_state_controller
        self._deactivate = deactivate
        self._control_head = 'head' in controlled_chains
        self._control_eyes = 'eyes' in controlled_chains

        # setup proxies
        self._set_operational_caller = ProxyServiceCaller({ self._controller + '/set_operational': SetBool })
        self._pose_subscriber = ProxySubscriberCached({ self._pose_topic: PoseStamped })
        self._joints_publisher = ProxyPublisher({ self._controller + '/in_joints_ref': JointState })

        # head inverse kinematics
        self._ik = HeadIK()

        # state
        self._neck_angle = None
        self._comfortable_stamp = None

        # error in enter hook
        self._error = False

    def on_enter(self, userdata):
        self._error = False

        # activate head controller
        try:
            res = self._set_operational_caller.call(self._controller + '/set_operational', True)
        except Exception as e:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to activate `' + self._controller + '` controller:\n%s' % str(e))
            self._error = True
            return

        if not res.success:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to activate `' + self._controller + '` controller (SetBoolResponse: success = false).')
            self._error = True
            return

        # set default value
        self._neck_angle = self._neck_params[0]
        self._comfortable_stamp = rospy.Time.now()

        Logger.loginfo('SweetieBotFollowHeadPoseSmart: controller `{0}` is active.'.format(self._controller))

    def execute(self, userdata):
        if self._error:
            return 'failed'

        # check if a new message is available
        if self._pose_subscriber.has_msg(self._pose_topic):
            # get object position
            pose = self._pose_subscriber.get_last_msg(self._pose_topic)
            # convert to PointStamped
            focus_point = PointStamped()
            focus_point.header = Header(frame_id = pose.header.frame_id)
            focus_point.point = pose.pose.position

            head_joints_msg = None
            eyes_joints_msg = None

            # CALCULATE HEAD POSITION
            if self._control_head:
                # calculate comfortable neck angle
                try:
                    # convert point coordinates to bone54 frame
                    fp = self._ik._tf.transformPoint('bone54', focus_point).point
                    # distance and direction angle
                    dist = math.sqrt(fp.x**2 + fp.y**2 + fp.z**2)
                    angle = math.acos(fp.x / dist)
                    # Logger.loginfo('SweetieBotFollowHeadPoseSmart: dist: %s, angle: %s' % (str(dist), str(angle)))
                    # check comfort distance
                    if angle < math.pi/4:
                        if dist > self._neck_params[1]:
                            self._neck_angle = self._neck_params[0]
                            self._comfortable_stamp = rospy.Time.now()
                        elif dist < self._neck_params[3]:
                            self._neck_angle = self._neck_params[2]
                            # check if the discomfort timer has elapsed
                            if (rospy.Time.now() - self._comfortable_stamp).to_sec() > self._discomfort_time:
                                return 'too_close'
                        else:
                            self._comfortable_stamp = rospy.Time.now()
                    else:
                        self._comfortable_stamp = rospy.Time.now()
                except tf.Exception as e:
                    Logger.logwarn('SweetieBotFollowHeadPoseSmart: Cannot transform to bone54:\n%s' % str(e))
                    self._neck_angle = self._neck_params[0]

                # calculate head pose for given angle
                head_joints_msg = self._ik.pointDirectionToHeadPose(focus_point, self._neck_angle, 0.0)

            # CALCULATE EYES POSE
            if self._control_eyes:
                eyes_joints_msg = self._ik.pointDirectionToEyesPose(focus_point)

            # PUBLISH POSE
            if head_joints_msg:
                if eyes_joints_msg:
                    # join head and eyes pose
                    head_joints_msg.name += eyes_joints_msg.name
                    head_joints_msg.position += eyes_joints_msg.position
                # publish pose
                self._joints_publisher.publish(self._controller + '/in_joints_ref', head_joints_msg)
            elif eyes_joints_msg:
                # publish pose
                self._joints_publisher.publish(self._controller + '/in_joints_ref', eyes_joints_msg)

    def on_exit(self, userdata):
        if self._deactivate:
            self.on_stop()

    def on_stop(self):
        try:
            res = self._set_operational_caller.call(self._controller + '/set_operational', False)
        except Exception as e:
            Logger.logwarn('SweetieBotFollowHeadPoseSmart: Failed to deactivate `' + self._controller + '` controller:\n%s' % str(e))
        Logger.loginfo('SweetieBotFollowHeadPoseSmart: controller `{0}` is deactivated.'.format(self._controller))
7,921
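The comfort-zone logic in execute() above reduces to a small pure function. This standalone sketch (not part of the package) mirrors it with the default neck_control_parameteres:

# Standalone sketch of the neck-angle decision; defaults taken from the state above.
NECK_COMFORT, DIST_COMFORT, NECK_UNCOMFORT, DIST_UNCOMFORT = -0.13, 0.3, 0.20, 0.2

def neck_angle_for(dist, current_angle):
    """Pick the neck angle for an object at distance `dist` (meters)."""
    if dist > DIST_COMFORT:       # far away: relax to the comfortable angle
        return NECK_COMFORT
    if dist < DIST_UNCOMFORT:     # too close: pull the head back
        return NECK_UNCOMFORT
    return current_angle          # in between: keep whatever we had

assert neck_angle_for(0.5, 0.0) == NECK_COMFORT
assert neck_angle_for(0.1, 0.0) == NECK_UNCOMFORT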
ichnaea/constants.py
crankycoder/ichnaea
348
2170999
# We return position and accuracy values rounded to 7
# :term:`decimal degrees`, mostly to make the resulting JSON look
# prettier. 1E-7 degrees =~ 1.1cm at the equator, so clients of our
# external APIs will see that as our spatial resolution, though in
# practice we are always in the multiple of tens of meters range.
DEGREE_DECIMAL_PLACES = 7

MAX_LAT = 85.051  # Maximum latitude in :term:`Web Mercator` projection.
MIN_LAT = -85.051  # Minimum latitude in :term:`Web Mercator` projection.

MAX_LON = 180.0  # Maximum unrestricted longitude in :term:`WGS84`.
MIN_LON = -180.0  # Minimum unrestricted longitude in :term:`WGS84`.
633
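A quick illustration of the effect of DEGREE_DECIMAL_PLACES (the coordinates are made up):

DEGREE_DECIMAL_PLACES = 7
lat, lon = 51.50735091, -0.12775829
print(round(lat, DEGREE_DECIMAL_PLACES), round(lon, DEGREE_DECIMAL_PLACES))
# 51.5073509 -0.1277583  (roughly 1.1 cm resolution at the equator)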
api/app.py
ca2315/PlasmoCount
0
2170580
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_cors import CORS, cross_origin
from pathlib import Path
import matplotlib
import warnings
import json
import time

matplotlib.use('Agg')
warnings.filterwarnings('ignore')

from programs.model import Model
from programs.result import Result
from programs.summarize import summarize

app = Flask(__name__, static_folder='../build', static_url_path='/')
app.config.from_object('config')
UPLOAD_FOLDER = app.config['UPLOAD_FOLDER']
EXAMPLE_FOLDER = app.config['EXAMPLE_FOLDER']
CORS(app, support_credentials=True)


@app.errorhandler(404)
def not_found(e):
    return app.send_static_file('index.html')


@app.route('/')
def index():
    return app.send_static_file('index.html')


@app.route('/api/uploads/<path:filename>')
def download_file(filename):
    return send_from_directory(UPLOAD_FOLDER, filename)


@app.route('/api/example/<path:filename>')
def download_example(filename):
    return send_from_directory(EXAMPLE_FOLDER, filename)


@app.route('/api/model', methods=['POST'])
def run(upload_folder=UPLOAD_FOLDER):
    job = {
        'id': request.form.get('id'),
        'date': request.form.get('date'),
        'email-address': request.form.get('email-address'),
        'has-gams': request.form.get('has-gams') == 'true',
        'data-contrib': request.form.get('data-contrib') == 'true',
        'cut-offs': [1.5, 2.5]
    }
    upload_folder = Path(upload_folder)
    job_folder = upload_folder / job['id']
    job_folder.mkdir(exist_ok=True)

    # get files
    files = request.files
    # load model
    model = Model(has_gams=job['has-gams'])
    results = []
    for i in files:
        # load result
        img = model.load_image(files[i])
        pred = model.predict()
        result = Result(i, files[i].filename, img, pred)
        result.run(upload_folder=job_folder)
        results.append(result.to_output())

    output = {
        'data': {
            'summary': summarize(results),
            'results': results
        },
        'statusOK': True
    }
    with open(job_folder / 'output.json', 'w') as f:
        json.dump(output, f)
    return output


@app.route('/api/result', methods=['POST'])
def return_result(upload_folder=UPLOAD_FOLDER, example_folder=EXAMPLE_FOLDER):
    job_id = request.get_json()['id']
    if job_id == 'example':
        result_dir = Path(example_folder)
    else:
        result_dir = Path(upload_folder) / job_id
    result_path = result_dir / 'output.json'
    if result_path.exists():
        with open(result_path) as f:
            return json.load(f)
    else:
        return {'statusOK': False}


if __name__ == '__main__':
    app.run(debug=True)
2,718
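A hedged client-side sketch of calling the /api/model endpoint above with requests; the URL, file name, and form values are all made up:

import requests

# Hypothetical local server and image; adjust to your deployment.
with open('example.png', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/api/model',
        data={'id': 'job-001', 'date': '2021-01-01',
              'email-address': 'user@example.com',
              'has-gams': 'true', 'data-contrib': 'false'},
        files={'0': f},
    )
print(resp.json()['data']['summary'])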
corehq/apps/hqpillow_retry/urls.py
kkrampa/commcare-hq
1
2170713
from __future__ import absolute_import
from __future__ import unicode_literals

from django.conf.urls import url

from corehq.apps.hqpillow_retry.views import EditPillowError

urlpatterns = [
    url(r'^edit_errors/$', EditPillowError.as_view(), name=EditPillowError.urlname),
]
277
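For reference, resolving the route above by its name (a hypothetical snippet, assuming a configured Django project and that this urlconf is included):

from django.urls import reverse

from corehq.apps.hqpillow_retry.views import EditPillowError

# e.g. '/edit_errors/' relative to wherever this urlconf is mounted
url_path = reverse(EditPillowError.urlname)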
library/source2/resource_types/vwrld/world.py
anderlli0053/SourceIO
199
2169882
from ..resource import ValveCompiledResource


class ValveCompiledWorld(ValveCompiledResource):
    pass
104
util/stringutil.py
sharkbound/adventofcode2020
0
2170972
from typing import Iterable
import re

RE_ALL_INTS = re.compile(r'([+-]?\d+)')


def striplines(lines: Iterable[str]):
    return map(str.strip, lines)


def striplines_aslist(lines: Iterable[str]):
    return list(striplines(lines))


def find_all_ints(string: str):
    return [int(x) for x in RE_ALL_INTS.findall(string)]
325
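Quick examples of the helpers above (assuming the module is importable as util.stringutil):

from util.stringutil import striplines_aslist, find_all_ints

assert striplines_aslist(['  a \n', ' b ']) == ['a', 'b']
assert find_all_ints('move x by -3, y by +12') == [-3, 12]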
main.py
Viniciuuz/redditIF-bot-publico
0
2170987
import praw
from praw.reddit import Subreddit
from PIL import Image
import imagehash
import requests
import shutil
import os
from time import sleep

reddit = praw.Reddit(client_id = "",
                     client_secret = "",
                     username = "",
                     password = "",
                     user_agent = "IF-moderator")

subreddit = reddit.subreddit("IFFans")


def timer():
    while True:
        main()
        sleep(10)


def remove_post(post_id):
    ac_post = reddit.submission(post_id)
    ac_post.mod.remove()
    reddit.redditor(str(ac_post.author)).message('Seu post foi removido',
        f"""o seu [post](https://reddit.com/{post_id}) em r/IFFans foi removido por ir contra as nossas regras.

(Esta ação foi feita por um bot, se você acha que foi um engano, fale com o criador do bot: u/_3DWaffle_)""",
        from_subreddit="IFFans")


def compare(image):
    # hash the downloaded post image once, then compare it against every saved meme;
    # the average-hash difference is a Hamming distance (small values = similar images)
    post = imagehash.average_hash(Image.open(image))
    for i in os.listdir('memes/'):
        meme = imagehash.average_hash(Image.open("memes/" + i))
        if (meme - post) >= 50:
            return True


def download_image(url, name):
    r = requests.get(url, stream = True)
    if r.status_code == 200:
        r.raw.decode_content = True
        with open(name, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
        print('image downloaded: ', name)


def main():
    for submission in subreddit.new(limit=1):
    # for submission in subreddit.stream.submissions():
        if not submission.stickied:
            if submission.url.endswith((".jpg", ".png")):
                print(f"starting analysis of post: {submission.title} - {submission.author}")
                download_image(submission.url, f"{submission.title.lower()}.{submission.url.lower()[-3:]}")
                print("comparing images...")
                if compare(f"{submission.title.lower()}.{submission.url.lower()[-3:]}"):
                    print("image similarity confirmed, removing post...")
                    remove_post(submission.id)
                    print("post removed")
                else:
                    print('image similarity is low')
                sleep(5)
                os.system("cls")
            else:
                print('post cannot be analyzed because it is not an image')
                print(f"Post: {submission.title}, By: {submission.author}")


# single pass; call timer() instead for continuous moderation
main()
2,671
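On the hash comparison in compare() above: imagehash differences are Hamming distances, so 0 means identical hashes and small values mean visually similar images. A minimal standalone check (the file names are hypothetical):

from PIL import Image
import imagehash

# Hypothetical files; average_hash difference ranges 0..64 for the default 8x8 hash.
a = imagehash.average_hash(Image.open('meme_original.png'))
b = imagehash.average_hash(Image.open('candidate_post.png'))
print(a - b)           # 0 = identical hashes, small = visually similar
print((a - b) <= 10)   # one common style of "looks like a duplicate" threshold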
modules/info.py
zenzue/viBot
2
2170314
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
""" Info module for viBot Project """

__author__ = "blackvkng"

import os
import ctypes
import getpass
import platform


def run():
    if os.name == "nt":
        admin_access = "Yes" if ctypes.windll.shell32.IsUserAnAdmin() != 0 else "No"
    else:
        admin_access = "Yes" if os.getuid() == 0 else "No"

    info = [("[>] Platform ", platform.system()),
            ("[>] Admin Access ", admin_access),
            ("[>] Architecture ", platform.architecture()[0]),
            ("[>] Username ", getpass.getuser())]

    return info
559
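Usage sketch for the module above (assuming modules/info.py is importable as modules.info, e.g. from a bot command handler):

from modules import info

for label, value in info.run():
    print(label, value)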