' % (self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1]))\n return\n params = {}\n params[\"cmd\"] = cmdstr.group(1)\n params[\"tgt\"] = tgtstr.group(2)\n else:\n cmdstr=re.search(\"cmd=(?:domain|alexa|created)\",urlparams)\n tgtstr = re.search(\"tgt=\",urlparams)\n if not cmdstr or not tgtstr:\n self.wfile.write('API Documentation
http://%s:%s/?cmd=measure&tgt=&ltstring&gt
http://%s:%s/?cmd=normal&tgt=&ltstring&gt
http://%s:%s/?cmd=normal&tgt=&ltstring&gt&weight=&ltweight&gt ' % (self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1]))\n return\n params={}\n try:\n for prm in urlparams.split(\"&\"):\n key,value = prm.split(\"=\")\n params[key]=value\n except:\n self.wfile.write('Unable to parse the url. ')\n return\n if params[\"cmd\"] == \"alexa\":\n if self.server.verbose: self.server.safe_print (\"Alexa Query:\", params[\"tgt\"])\n if not self.server.alexa:\n if self.server.verbose: self.server.safe_print (\"No Alexa data loaded. Restart program.\")\n self.wfile.write(\"Alexa not loaded on server. Restart server with -a or --alexa and file path.\")\n else:\n if self.server.verbose: self.server.safe_print (\"Alexa queried for:%s\" % (params['tgt'])) \n self.wfile.write(str(self.server.alexa.get(params[\"tgt\"],\"0\")))\n elif params[\"cmd\"] == \"domain\" or params[\"cmd\"] == \"created\":\n if params['tgt'] in self.server.cache:\n print(\"Found in cache!!\")\n domain_info = self.server.cache.get(params['tgt'])\n else:\n try:\n print (\"Querying the web\", params['tgt'])\n domain_info = whois.whois(params['tgt'])\n if not domain_info.get('creation_date'):\n self.wfile.write(str(\"No whois record for %s\" % (params['tgt'])))\n return\n except Exception as e:\n if self.server.verbose: self.server.safe_print (\"Error querying whois server: %s\" % (str(e)))\n \n return\n self.server.safe_print(\"Caching whois record %s\" % (str(domain_info)))\n domain_info[\"time\"] = time.time()\n if self.server.alexa:\n domain_info['alexa'] = self.server.alexa.get(params[\"tgt\"],\"0\")\n try:\n self.server.cache_lock.acquire()\n self.server.cache[params['tgt']] = domain_info\n finally:\n self.server.cache_lock.release()\n if params[\"cmd\"] == \"created\":\n self.wfile.write(domain_info.get('creation_date','not found').__str__())\n elif params[\"cmd\"] ==\"domain\":\n self.wfile.write(str(domain_info))\n return\n\n def log_message(self, format, *args):\n return\n\nclass ThreadedDomainStats(SocketServer.ThreadingMixIn, SocketServer.TCPServer, BaseHTTPServer.HTTPServer):\n def __init__(self, *args,**kwargs):\n self.cache = {}\n self.cache_lock = threading.Lock()\n self.cache_time = 1\n self.screen_lock = threading.Lock()\n self.alexa = \"\"\n self.verbose = False\n self.exitthread = threading.Event()\n self.exitthread.clear()\n BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)\n\n def safe_print(self,*args,**kwargs):\n try:\n self.screen_lock.acquire()\n print(*args,**kwargs)\n finally:\n self.screen_lock.release()\n\n def clear_old_cache(self):\n if self.verbose: self.safe_print ( \"Clearing old cache\")\n try:\n self.cache_lock.acquire()\n for item in self.cache:\n if (self.cache[item].get('time', time.time()) - time.time()) > self.cache_time*60*60:\n del self.cache[item]\n finally:\n self.cache_lock.release()\n #Reschedule yourself to run again in 1 hour\n if not self.exitthread.isSet():\n self.timer = threading.Timer(60*60, self.clear_old_cache, args = ())\n self.timer.start()\n\ndef main():\n parser=argparse.ArgumentParser()\n parser.add_argument('-ip','--address',required=False,help='IP Address for the server to listen on. Default is 127.0.0.1',default='127.0.0.1')\n parser.add_argument('-c','--cache_time',type=float,required=False,help='Number of hours to hold a whois record in the cache. Default is 1 hour. 
Set to 0 to save forever.',default=1)\n parser.add_argument('port',type=int,help='You must provide a TCP Port to bind to')\n parser.add_argument('-v','--verbose',action='count',required=False,help='Print verbose output to the server screen. -vv is more verbose.')\n parser.add_argument('-a','--alexa',required=False,help='Provide a local file path to an Alexa top-1m.csv')\n\n #args = parser.parse_args(\"-s 1 -vv 8081 english_lowercase.freq\".split())\n args = parser.parse_args()\n\n #Setup the server.\n server = ThreadedDomainStats((args.address, args.port), domain_api)\n if args.alexa:\n if not os.path.exists(args.alexa):\n print(\"Alexa file not found %s\" % (args.alexa))\n else:\n try:\n server.alexa = dict([(a,b) for b,a in re.findall(r\"^(\\d+),(.*)\", open(args.alexa).read(), re.MULTILINE)])\n except Exception as e:\n print(\"Unable to parse alexa file:%s\" % (str(e)))\n server.verbose = args.verbose\n server.cache_time = args.cache_time\n #Schedule the first save interval unless save_interval was set to 0.\n if args.cache_time:\n server.timer = threading.Timer(60 *args.cache_time, server.clear_old_cache, args = ())\n server.timer.start()\n \n #start the server\n print('Server is Ready. http://%s:%s/?cmd=measure&tgt=astring' % (args.address, args.port))\n print('[?] - Remember: If you are going to call the api with wget, curl or something else from the bash prompt you need to escape the & with \\& \\n\\n')\n while True:\n try:\n server.handle_request()\n except KeyboardInterrupt:\n break\n\n server.timer.cancel() \n server.safe_print(\"Web API Disabled...\")\n server.safe_print(\"Control-C hit: Exiting server. Please wait..\")\n\nif __name__==\"__main__\":\n main()"},"repo_name":{"kind":"string","value":"HASecuritySolutions/Logstash"},"sub_path":{"kind":"string","value":"configfiles-setup_required/freq/domain_stats.py"},"file_name":{"kind":"string","value":"domain_stats.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":8009,"string":"8,009"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":248,"string":"248"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114941,"cells":{"seq_id":{"kind":"string","value":"3641922355"},"text":{"kind":"string","value":"##\n##pedir = True\n##while pedir:\n## numero = int(input(\"Dame un numero del 1 al 100: \"))\n## if numero < 100 and numero > 0:\n## pedir = False\n\nfrom random import *\n\nprint(\"Piensa un número del 1 al 100,¡voy a intentar advinarlo!\")\nprint(\"Pulsa intro cuando estés listo...\")\ninput()\naleatorio = randint(1,100)\nacierto = True\nwhile acierto:\n respuesta = input(\"¿Es el {0} el número secreto? (s/n) \" .format(aleatorio))\n if respuesta == \"n\":\n mayor_menor = input(\"¿Es el número secreto mayor o menor que {0}?. 
\".format(aleatorio))\n if mayor_menor == \"mayor\":\n aleatorio = randint(aleatorio+1, 100)\n elif mayor_menor == \"menor\":\n aleatorio = randint(0,aleatorio-1)\n elif respuesta == \"s\":\n print(\"Estoy de suerte, ¡He acertado!\")\n acierto = False\n else:\n print(\"Lo siento no te he entendido\")\n \n \n\n"},"repo_name":{"kind":"string","value":"emiliobort/python"},"sub_path":{"kind":"string","value":"Practica2_Past/Programas/Ejercicio10.py"},"file_name":{"kind":"string","value":"Ejercicio10.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":896,"string":"896"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"es"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114942,"cells":{"seq_id":{"kind":"string","value":"22007133395"},"text":{"kind":"string","value":"import sqlite3\r\n\r\n# criar instancia de conexão com o banco\r\nconnection = sqlite3.connect('records.db')\r\n# inicializar cursor\r\ncursor = connection.cursor()\r\n\r\n# IF PARA CRIAR SE NAO TIVER CRIADO\r\ncreate_table = \"CREATE TABLE IF NOT EXISTS records (id INTEGER PRIMARY KEY, pontos int)\"\r\n\r\ncursor.execute(create_table)\r\n\r\nconnection.commit()\r\nconnection.close()\r\n"},"repo_name":{"kind":"string","value":"Murimaral/projeto_batalha_naval"},"sub_path":{"kind":"string","value":"criar_tabela.py"},"file_name":{"kind":"string","value":"criar_tabela.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":361,"string":"361"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114943,"cells":{"seq_id":{"kind":"string","value":"27539293247"},"text":{"kind":"string","value":"import json\nimport PySimpleGUI as sg\nfrom src.handlers import login\n\n\ndef config(dificultad,ayuda,tarjeta,tiempo,color,alerta):\n \"\"\" Guarda la configuracion del usuario en un archivo json\"\"\"\n datos_config = [dificultad,ayuda,tarjeta,tiempo,color,alerta]\n \n tiempo = str(tiempo)\n if (tiempo.isdigit()):\n configuraciones = leer_config() # Carga todas las configuraciones\n jugador_logueado = login.leer_sesion()\n configuraciones[jugador_logueado] = datos_config # Actualiza las configuraciones del usuario\n datos_json = json.dumps(configuraciones) \n # Guarda la configuracion en un json\n with open(\"configuracion.json\", \"w\", encoding=\"utf8\") as archivoJSON:\n archivoJSON.write(datos_json)\n sg.SystemTray.notify('Éxito!', 'Cambios guardados')\n\n\ndef leer_config():\n \"\"\"Devuelve todas las configuraciones guardadas\"\"\"\n configuraciones = {}\n with open(\"configuracion.json\", \"r\", encoding=\"utf8\") as archivoJSON:\n configuraciones = json.load(archivoJSON)\n #print(configuraciones)\n return configuraciones\n\n\ndef crear_configuracion_default(usuario):\n \"\"\"Crea la configuracion default cuando el jugador se registra\"\"\"\n try:\n configuraciones = leer_config()\n except Exception:\n # Si el archivo no existe\n configuraciones = {}\n configuraciones[usuario] = [\"Facil\", \"Con\", \"Texto\", \"60\", \"Topanga\", \"Ganaste, Perdiste\"]\n datos_json = json.dumps(configuraciones) \n # Guarda la configuracion en un json\n with 
open(\"configuracion.json\", \"w\", encoding=\"utf8\") as archivoJSON:\n archivoJSON.write(datos_json)\n"},"repo_name":{"kind":"string","value":"LauraCuenca/MempybyGrupo29"},"sub_path":{"kind":"string","value":"src/handlers/configuracion_h.py"},"file_name":{"kind":"string","value":"configuracion_h.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1640,"string":"1,640"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"es"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114944,"cells":{"seq_id":{"kind":"string","value":"23363173038"},"text":{"kind":"string","value":"import random\nimport math\n\ndepth = 5\nfunctions = [\"xd\", \"*\", \"+\", \"-\"]\nnoFunctions = 3\nterminals = [\"mizerie\", \"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\", \"c8\", \"c9\"]\nnoTerminals = 9\n\nclass Individ(object):\n\t\"\"\"docstring for Individ\"\"\"\n\tdef __init__(self):\n\t\tself.values = [0 for i in range(2 ** depth)]\n\t\tfor i in range(len(self.values)):\n\t\t\tif i < 3 or (i < 2 ** (depth - 1) - 1 and random.random() < 0.5):\n\t\t\t\tself.values[i] = -random.randint(1, len(functions) - 1)\n\t\t\telse:\n\t\t\t\tself.values[i] = random.randint(1, len(terminals) - 1)\n\n\n\tdef fitness(self, problem):\n\t\ts = 0\n\t\tfor i in range(len(problem.inData)):\n\t\t\trez = self.dfs(0, problem.inData[i])\n\t\t\ts += (rez - problem.outData[i]) * (rez - problem.outData[i])\n\t\t\t#print(\"Expected:\", problem.outData[i], \"Got: \", rez)\n\t\treturn 100000.0 / s\n\n\tdef eval(self, problem):\n\t\ts = 0\n\t\tfor i in range(len(problem.inData)):\n\t\t\trez = self.dfs(0, problem.inData[i])\n\t\t\ts += (rez - problem.outData[i]) * (rez - problem.outData[i])\n\t\t\tprint(\"Expected:\", problem.outData[i], \"Got: \", rez)\n\t\treturn 100000.0 / s\n\n\tdef dfs(self, pos, inputs):\n\t\tif (self.values[pos] > 0):\n\t\t\treturn inputs[self.values[pos] - 1]\n\n\t\tif (functions[-self.values[pos]] == \"*\"):\n\t\t\treturn self.dfs(2 * pos + 1, inputs) * self.dfs(2 * pos + 2, inputs)\n\n\t\tif (functions[-self.values[pos]] == \"-\"):\n\t\t\treturn self.dfs(2 * pos + 1, inputs) - self.dfs(2 * pos + 2, inputs)\n\n\t\tif (functions[-self.values[pos]] == \"+\"):\n\t\t\treturn self.dfs(2 * pos + 1, inputs) + self.dfs(2 * pos + 2, inputs)\n\n\n\tdef mutate(self, probability):\n\t\tindex1 = random.randint(0, (2 ** (depth - 1)) - 2)\n\t\tindex2 = random.randint(0, (2 ** (depth - 1)) - 2)\n\n\t\ti = min(index1, index2)\n\t\tj = max(index1, index2)\n\n\t\twhile i < j:\n\t\t\tself.values[i], self.values[j] = self.values[j], self.values[i]\n\t\t\ti += 1\n\t\t\tj -= 1\n\n\n\tdef crossover(individ1, individ2, probability):\n\t\toffspring1 = Individ()\n\t\toffspring2 = Individ()\n\n\t\tindex1 = random.randint(0, (2 ** (depth)))\n\t\tindex2 = random.randint(0, (2 ** (depth)))\n\n\t\ti = min(index1, index2)\n\t\tj = max(index1, index2)\n\n\t\tfor k in range(0, (2 ** (depth))):\n\t\t\tif k in range(i, j):\n\t\t\t\toffspring1.values[k] = individ1.values[k]\n\t\t\t\toffspring2.values[k] = individ2.values[k]\n\t\t\telse:\n\t\t\t\toffspring1.values[k] = individ2.values[k]\n\t\t\t\toffspring2.values[k] = individ1.values[k]\n\n\t\treturn offspring1, offspring2\n\n\tdef __str__(self):\n\t\treturn str(self.values)\n\n\n\nclass Algorithm(object):\n\t\"\"\"docstring for Algorithm\"\"\"\n\tdef __init__(self, problem, populationSize = 
40):\n\t\tself.__problem = problem\n\t\tself.readParameters()\n\t\tself.__populationSize = populationSize\n\t\tself.__population = Population(self.__populationSize)\n\n\tdef getPopulation(self):\n\t\treturn self.__population\n\n\tdef iteration(self):\n\t\tself.__population.selection(self.__problem)\n\t\tself.__population.evaluate(self.__problem)\n\n\tdef run(self):\n\t\tfor i in range(100):\n\t\t\tself.iteration()\n\t\t\tprint(\n\t\t\t\tself.__population.getBest(), \n\t\t\t\tself.__population.getBest().fitness(self.__problem),\n\t\t\t\tself.__population.getPopSize()\n\t\t\t)\n\t\tself.__population.getBest().eval(self.__problem)\n\n\tdef readParameters(self):\n\t\tself.__problem.loadData(\"slump_test.data\")\n\t\t\nclass Population(object):\n\t\"\"\"docstring for Population\"\"\"\n\tdef __init__(self, noIndivids):\n\t\tself.__noIndivids = noIndivids\n\t\tself.__individs = []\n\t\tfor i in range(noIndivids):\n\t\t\tself.__individs.append(Individ())\n\n\tdef evaluate(self, problem):\n\t\tfor i in range(self.__noIndivids // 2 - 1):\n\t\t\toffspring1, offspring2 = Individ.crossover(self.__individs[i], self.__individs[i + 1], 0.5)\n\t\t\tself.__individs.append(offspring1)\n\t\t\tself.__individs.append(offspring2)\n\n\t\toffspring1, offspring2 = Individ.crossover(self.__individs[self.__noIndivids // 2 - 1], self.__individs[0], 0.5)\n\t\tself.__individs.append(offspring1)\n\t\tself.__individs.append(offspring2)\n\n\t\tfor i in range(self.__noIndivids):\n\t\t\tself.__individs[i].mutate(0.08)\n\t\tself.__individs.sort(key = lambda x : x.fitness(problem), reverse = True)\n\n\tdef getBest(self):\n\t\treturn self.__individs[0]\n\n\tdef getPopSize(self):\n\t\treturn len(self.__individs)\n\n\tdef selection(self, problem):\n\t\ts = 0\n\t\tnewPopulation = []\n\n\t\tfor i in range(self.__noIndivids):\n\t\t\ts += self.__individs[i].fitness(problem)\n\n\t\tfor i in range(self.__noIndivids // 2):\n\t\t\tr = random.random()\n\t\t\tj = 0\n\t\t\tpercents = 0\n\t\t\twhile j < self.__noIndivids and percents < r:\n\t\t\t\tpercents += self.__individs[j].fitness(problem) / s\n\t\t\t\tj += 1\n\t\t\tj -= 1\n\t\t\tnewPopulation.append(self.__individs[j])\n\t\t\ts -= self.__individs[j].fitness(problem)\n\t\t\tself.__individs.pop(j)\n\t\tself.__individs = newPopulation\n\n\n\tdef __str__(self):\n\t\ts = \"\"\n\t\tfor i in self.__individs:\n\t\t\ts += str(i) + \"\\n\"\n\t\treturn s\n\n\nclass Problem(object):\n\t\"\"\"docstring for Problem\"\"\"\n\tdef __init__(self):\n\t\tself.inData = []\n\t\tself.outData = []\n\n\n\tdef loadData(self, fileName):\n\t\twith open(fileName, \"r\") as f:\n\t\t\twhile True:\n\t\t\t\tline = f.readline()\n\t\t\t\tif (line == \"\"):\n\t\t\t\t\tbreak\n\t\t\t\tline = line.split(\",\")\n\t\t\t\tcrtIn = []\n\t\t\t\tfor i in range(1, 10):\n\t\t\t\t\tcrtIn.append(float(line[i].strip()))\n\t\t\t\tself.outData.append(float(line[10].strip()))\n\t\t\t\tself.inData.append(crtIn) \n\n\t\nif __name__ == \"__main__\":\n\tp = Problem()\n\ta = Algorithm(p, 
40)\n\ta.run()\n\n\n\n\n\n"},"repo_name":{"kind":"string","value":"ggaaggaabbii/University-work"},"sub_path":{"kind":"string","value":"ai/lab6_2.py"},"file_name":{"kind":"string","value":"lab6_2.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5158,"string":"5,158"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114945,"cells":{"seq_id":{"kind":"string","value":"38534453006"},"text":{"kind":"string","value":"import torch\n\nfrom torch_geometric.nn import knn_graph, knn, CGConv\n\nclass GNNAttention(torch.nn.Module):\n '''Uses 2 graph layers. One for self attention and one for cross attention. Self-attention based on k-NN of coordinates. Cross-attention based on k-NN in feature space'''\n\n def __init__(self, dim, k):\n '''dim is the feature dimensions, k is the number of neighbours to consider'''\n super().__init__()\n\n self.k = k\n self.conv1 = CGConv(dim, aggr='max', batch_norm=True).cuda()\n self.conv2 = CGConv(dim, aggr='max', batch_norm=True).cuda()\n\n def forward(self, xyz0, xyz1, f0, f1):\n b, npts, d = f0.shape\n batch_idx = torch.arange(b).repeat_interleave(npts).to(xyz0.device)\n f0 = f0.reshape(-1, d)\n f1 = f1.reshape(-1, d)\n\n #creates edge graph for coordinates\n edge_idx_c0 = knn_graph(xyz0.reshape(-1,3), k=self.k, batch=batch_idx)\n edge_idx_c1 = knn_graph(xyz1.reshape(-1,3), k=self.k, batch=batch_idx)\n\n #self-attention (layer 1)\n f0 = self.conv1(f0, edge_idx_c0)\n f1 = self.conv1(f1, edge_idx_c1)\n\n #cross-attention (layer 2)\n edge_idx_f = knn(f1, f0, k=self.k, batch_x=batch_idx, batch_y=batch_idx, cosine=True)\n edge_idx_f[1] += b * npts\n f = self.conv2(torch.cat([f0,f1], dim=0), edge_idx_f)\n f0, f1 = f[:(b*npts)], f[(b*npts):]\n\n #convert f0, f1 to dense representation again\n f0 = f0.reshape(b, npts, d)\n f1 = f1.reshape(b, npts, d)\n\n return f0, f1"},"repo_name":{"kind":"string","value":"eduardohenriquearnold/fastreg"},"sub_path":{"kind":"string","value":"lib/models/attention.py"},"file_name":{"kind":"string","value":"attention.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1537,"string":"1,537"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":52,"string":"52"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114946,"cells":{"seq_id":{"kind":"string","value":"14370519259"},"text":{"kind":"string","value":"import pygame\nimport math\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\nf = open(\"dialog.txt\", \"r\")\nrawText = f.read().split(\"\\n\")\nf.close()\nmsg = rawText[0]\noptions = rawText[1:]\n\ndef finish(s):\n\tpygame.quit()\n\tf = open(\"dialog.txt\", \"w\")\n\tf.write(s)\n\tf.close()\n\texit()\n\npygame.font.init()\nFONT = pygame.font.Font(pygame.font.get_default_font(), 30)\nmsgRendered = FONT.render(msg, True, BLACK)\nmsgWidth = msgRendered.get_width()\nmsgHeight = msgRendered.get_height()\nSCREENSIZE = [msgWidth + 100, msgHeight + 50 + msgHeight]\nscreen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE)\n# Loop\nrunning = True\nc = pygame.time.Clock()\noption_width = 
(SCREENSIZE[0] - ((len(options) - 1) * 5)) / len(options)\nwhile running:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\t\telif event.type == pygame.VIDEORESIZE:\n\t\t\tSCREENSIZE = [*event.dict[\"size\"]]\n\t\t\tscreen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE)\n\t\telif event.type == pygame.MOUSEBUTTONUP:\n\t\t\tif pygame.mouse.get_pos()[1] < (SCREENSIZE[1] - msgHeight): continue\n\t\t\tpos = pygame.mouse.get_pos()[0]\n\t\t\tpos /= option_width\n\t\t\tpos = math.floor(pos)\n\t\t\tfinish(options[pos])\n\t# Message\n\tscreen.fill(WHITE)\n\tscreen.blit(msgRendered, ((SCREENSIZE[0] - msgWidth) / 2, ((SCREENSIZE[1] - msgHeight) - msgHeight) / 2))\n\t# Options\n\toption_width = SCREENSIZE[0] / len(options)\n\tcum_x = 0\n\tpygame.draw.rect(screen, BLACK, pygame.Rect(0, SCREENSIZE[1] - msgHeight, SCREENSIZE[0], msgHeight))\n\tfor o in options:\n\t\toRendered = FONT.render(o, True, WHITE)\n\t\tscreen.blit(oRendered, (cum_x + ((option_width - oRendered.get_width()) / 2), SCREENSIZE[1] - msgHeight))\n\t\tcum_x += option_width\n\t\tpygame.draw.line(screen, WHITE, (cum_x, SCREENSIZE[1] - msgHeight), (cum_x, SCREENSIZE[1]), 5)\n\t# Flip\n\tpygame.display.flip()\n\tc.tick(60)\n# End\nfinish(\"\")"},"repo_name":{"kind":"string","value":"sillypantscoder/pygame_zip"},"sub_path":{"kind":"string","value":"dialog/dialog.py"},"file_name":{"kind":"string","value":"dialog.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1907,"string":"1,907"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114947,"cells":{"seq_id":{"kind":"string","value":"16558290798"},"text":{"kind":"string","value":"import io\nimport re\nimport time\nfrom collections import defaultdict\n\nimport requests\nimport requests_cache\n\nfrom imicrobe.util import grouper\n\n\nrequests_cache.install_cache('kegg_api_cache')\n\n\ndef get_kegg_annotations(kegg_ids):\n all_kegg_annotations = {}\n all_bad_kegg_ids = set()\n # the missing_accessions_groups_of_10 generator returns groups of 10 KEGG ids\n # that are not already in the database and that are not 'bad' KEGG ids\n # the last group will be padded with 'None' if there are fewer than 10 KEGG ids\n for group_of_10 in grouper(sorted(kegg_ids), n=10):\n t0 = time.time()\n kegg_id_list = [k for k in group_of_10 if k is not None]\n #print(kegg_id_list)\n print('requesting {} KEGG annotation(s)'.format(len(kegg_id_list)))\n kegg_annotations, bad_kegg_ids = get_10_kegg_annotations(kegg_id_list)\n print(' received {} in {:5.2f}s'.format(len(kegg_annotations), time.time()-t0))\n all_kegg_annotations.update(kegg_annotations)\n all_bad_kegg_ids.update(bad_kegg_ids)\n\n return all_kegg_annotations, all_bad_kegg_ids\n\n\nkegg_orthology_field_re = re.compile(r'^(?P[A-Z]+)?(\\s+)(?P.+)$')\n\n\ndef get_10_kegg_annotations(kegg_ids):\n \"\"\" Request annotations for up to 10 KEGG ids. 
If a bad id is given there will be no response for it.\n\n The response from the KEGG API looks like this:\n ENTRY K01467 KO\n NAME ampC\n DEFINITION beta-lactamase class C [EC:3.5.2.6]\n PATHWAY ko01501 beta-Lactam resistance\n ko02020 Two-component system\n MODULE M00628 beta-Lactam resistance, AmpC system\n ...\n ENTRY K00154 KO\n NAME E1.2.1.68\n DEFINITION coniferyl-aldehyde dehydrogenase [EC:1.2.1.68]\n BRITE Enzymes [BR:ko01000]\n 1. Oxidoreductases\n 1.2 Acting on the aldehyde or oxo group of donors\n 1.2.1 With NAD+ or NADP+ as acceptor\n 1.2.1.68 coniferyl-aldehyde dehydrogenase\n K00154 E1.2.1.68; coniferyl-aldehyde dehydrogenase\n DBLINKS COG: COG1012\n GO: 0050269\n GENES GQU: AWC35_21175\n CED: LH89_09310 LH89_19560\n SMW: SMWW4_v1c32370\n SMAR: SM39_2711\n SMAC: SMDB11_2482\n ...\n\n return: a dictionary of dictionaries that looks like this\n {\n 'K01467': {\n 'ENTRY': 'K01467 KO',\n 'NAME': 'ampC',\n 'DEFINITION': '',\n 'PATHWAY': '',\n 'MODULE': '',\n ...\n },\n 'K00154': {\n 'ENTRY': 'K00154 KO',\n 'NAME': 'E1.2.1.68',\n 'DEFINITION': '',\n 'PATHWAY': '',\n 'MODULE': '',\n ...\n }\n\n }\n and a (possibly empty) set of KEGG ids for which no annotation was returned\n\n \"\"\"\n\n debug = False\n\n ko_id_list = '+'.join(['ko:{}'.format(k) for k in kegg_ids])\n response = requests.get('http://rest.kegg.jp/get/{}'.format(ko_id_list))\n if response.status_code == 404:\n print('no annotations returned')\n all_entries = {}\n bad_kegg_ids = set(kegg_ids)\n return all_entries, bad_kegg_ids\n if response.status_code != 200:\n error_msg = 'ERROR: response to \"{}\" is {}'.format(response.url, response.status_code)\n print(error_msg)\n raise Exception(error_msg)\n else:\n all_entries = defaultdict(lambda: defaultdict(list))\n kegg_id = None\n field_name = None\n for line in io.StringIO(response.text).readlines():\n field_match = kegg_orthology_field_re.search(line.rstrip())\n if field_match is None:\n # this line separates entries\n kegg_id = None\n field_name = None\n else:\n field_value = field_match.group('field_value')\n if 'field_name' in field_match.groupdict():\n field_name = field_match.group('field_name')\n if field_name == 'ENTRY':\n kegg_id, *_ = field_value.split(' ')\n # print('KEGG id: \"{}\"'.format(kegg_id))\n else:\n # just a field value is present\n pass\n\n all_entries[kegg_id][field_name].append(field_value)\n\n # were any of the KEGG ids bad?\n bad_kegg_ids = {k for k in kegg_ids} - {k for k in all_entries.keys()}\n\n return all_entries, bad_kegg_ids"},"repo_name":{"kind":"string","value":"hurwitzlab/imicrobe-data-loaders"},"sub_path":{"kind":"string","value":"imicrobe/util/kegg.py"},"file_name":{"kind":"string","value":"kegg.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4889,"string":"4,889"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114948,"cells":{"seq_id":{"kind":"string","value":"70409103517"},"text":{"kind":"string","value":"import os, random\n\nimport requests\nfrom bs4 import BeautifulSoup \nimport NBA as nba\n\n\nclass ziz() :\n\n def hello(self):\n print(\"---- Hello my name Ziz ----\")\n\n def NBA(self, args):\n\n if args[0] == 'games':\n return self.stringfy(nba.getGames())\n \n\n\n\n def getGames(self):\n url = 'https://reddit.nbabite.com/'\n page = requests.get(url)\n 
soup = BeautifulSoup(page.content,'html.parser')\n # table_MN = pd.read_html(page)\n competitions = soup.find(id='competitions')\n\n heure_matchs = soup.find_all(\"div\", {\"class\": \"status\"})\n team_names = soup.find_all(\"div\", {\"class\": \"team-name\"}) \n date = soup.find_all('div', {\"class\":\"date d-sm-block d-none\"})[0].text\n\n # print(matches)\n match = {}\n\n text = date + \"\\n\" \n\n for i, heure in enumerate(heure_matchs):\n s = heure.text + \" : \" +team_names[i+1].text + \" @ \" + team_names[i].text\n text += '\\n' + s\n\n \n return self.stringfy(text)\n\n\n def stringfy(self, text):\n s = \"```text\\n\"\n s += text +'\\n'\n s += \"```\"\n return s\n\n \n"},"repo_name":{"kind":"string","value":"ahandan/discord_bot"},"sub_path":{"kind":"string","value":"bot/zizBot.py"},"file_name":{"kind":"string","value":"zizBot.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1163,"string":"1,163"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114949,"cells":{"seq_id":{"kind":"string","value":"7764637836"},"text":{"kind":"string","value":"import socket\nfrom OpenSSL import SSL\nimport certifi\nimport datetime\n\n\nhostname = 'services.bq.com'\nport = 443\nnow = datetime.datetime.now()\n\n\n\ncontext = SSL.Context(method=SSL.TLSv1_METHOD)\ncontext.load_verify_locations(cafile=certifi.where())\n\nconn = SSL.Connection(context, socket=socket.socket(socket.AF_INET, socket.SOCK_STREAM))\nconn.settimeout(5)\nconn.connect((hostname, port))\nconn.setblocking(1)\nconn.do_handshake()\nconn.set_tlsext_host_name(hostname.encode())\ncerts = conn.get_peer_cert_chain()\nfor (idx, cert) in enumerate(certs):\n formated_date_after = datetime.datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')\n formated_date_before = datetime.datetime.strptime(cert.get_notBefore().decode('ascii'), '%Y%m%d%H%M%SZ')\n print(f'{idx} subject: {cert.get_subject()}')\n print(f' issuer: {cert.get_issuer()})')\n print(f' Valido-Desde :' , formated_date_before)\n print(f' Valido-Hasta :' , formated_date_after)\n print( ' --> Expira en : ' , formated_date_after - now)\n #if (now - formated_date_after) > now:\n # print(\"ERRROR Expired\")\n #print(f' fingerprint: {cert.digest(\"sha1\")}')\n #print('----',formated_date_after)\n print()\n\nconn.close()\n"},"repo_name":{"kind":"string","value":"dgardella/pys"},"sub_path":{"kind":"string","value":"check_cert.py"},"file_name":{"kind":"string","value":"check_cert.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1215,"string":"1,215"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114950,"cells":{"seq_id":{"kind":"string","value":"4635654089"},"text":{"kind":"string","value":"__author__ = \"Younes Bouhadjar, Vincent Marois, Tomasz Kornuta\"\n\nimport torch\nimport numpy as np\nfrom miprometheus.problems.seq_to_seq.algorithmic.algorithmic_seq_to_seq_problem import AlgorithmicSeqToSeqProblem\n\n\nclass ScratchPadCommandLines(AlgorithmicSeqToSeqProblem):\n \"\"\"\n Class generating sequences of random bit-patterns and targets 
forcing the\n system to learn the scratch pad problem (overwriting the memory).\n\n Minor modification I: the target contains may contain random command lines.\n\n \"\"\"\n\n def __init__(self, params):\n \"\"\"\n Constructor - stores parameters. Calls parent class ``AlgorithmicSeqToSeqProblem``\\\n initialization.\n\n :param params: Dictionary of parameters (read from configuration ``.yaml`` file).\n \"\"\"\n # Set default number of bits for a given problem.\n # This has to be done before calling base class constructor!\n params.add_default_params({\n 'control_bits': 2,\n 'data_bits': 8 }) # Call parent constructor - sets e.g. the loss function, dtype.\n # Additionally it extracts \"standard\" list of parameters for\n # algorithmic tasks, like batch_size, numbers of bits, sequences etc.\n super(ScratchPadCommandLines, self).__init__(params)\n self.name = 'ScratchPadCommandLines'\n\n assert self.control_bits >= 2, \"Problem requires at least 2 control bits (currently %r)\" % self.control_bits\n assert self.data_bits >= 1, \"Problem requires at least 1 data bit (currently %r)\" % self.data_bits\n\n # Number of subsequences.\n self.num_subseq_min = params[\"num_subseq_min\"]\n self.num_subseq_max = params[\"num_subseq_max\"]\n\n def generate_batch(self, batch_size):\n \"\"\"\n Generates a batch of samples of size ''batch_size'' on-the-fly.\n\n .. note::\n\n The sequence length is drawn randomly between ``self.min_sequence_length`` and \\\n ``self.max_sequence_length``.\n\n .. warning::\n All the samples within the batch will have the same sequence lengt.\n\n :param batch_size: Size of the batch to be returned. \n\n :return: DataDict({'sequences', 'sequences_length', 'targets', 'masks', 'num_subsequences'}), with:\n\n - sequences: [BATCH_SIZE, SEQ_LENGTH, CONTROL_BITS+DATA_BITS],\n - sequences_length: [BATCH_SIZE] (random value between self.min_sequence_length and self.max_sequence_length)\n - targets: [BATCH_SIZE, SEQ_LENGTH, DATA_BITS],\n - masks: [BATCH_SIZE, SEQ_LENGTH, 1]\n - num_subsequences: [BATCH_SIZE, 1] (number of subsequences)\n\n \"\"\"\n # Store marker.\n ctrl_store = np.zeros(self.control_bits)\n ctrl_store[self.store_bit] = 1 # [1, 0, 0]\n\n # Recall marker.\n ctrl_recall = np.zeros(self.control_bits)\n ctrl_recall[self.recall_bit] = 1 # [0, 1, 0]\n\n # Empty data marker.\n ctrl_data = np.zeros(self.control_bits) # [0, 0]\n\n # Define control lines.\n ctrl_aux = np.zeros(self.control_bits)\n if self.use_control_lines:\n if self.control_bits >= 3:\n if self.randomize_control_lines:\n # Randomly pick one of the bits to be set.\n ctrl_bit = np.random.randint(2, self.control_bits)\n ctrl_aux[ctrl_bit] = 1\n else:\n # Set last.\n ctrl_aux[self.control_bits - 1] = 1\n # Else: no control lines!\n\n # assign markers\n markers = ctrl_data, ctrl_store, ctrl_data\n\n # number sub sequences\n num_sub_seq = np.random.randint(self.num_subseq_min, self.num_subseq_max + 1)\n\n # set the sequence length of each marker\n seq_lengths = np.random.randint(low=self.min_sequence_length, high=self.max_sequence_length + 1,\n size=num_sub_seq)\n\n # generate subsequences for x and y\n x = [np.random.binomial(1, self.bias, (batch_size, n, self.data_bits)) for n in seq_lengths]\n\n # create the target\n seq_length_tdummies = sum(seq_lengths) + seq_lengths.shape[0] + 1\n dummies_target = np.zeros([batch_size, seq_length_tdummies, self.data_bits], dtype=np.float32)\n targets = np.concatenate((dummies_target, x[-1]), axis=1)\n\n # data of x and dummies\n xx = [self.augment(seq, markers, 
ctrl_start=ctrl_store,\n add_marker_data=True,\n add_marker_dummy=False) for seq in x]\n\n # data of x\n data_1 = [arr for a in xx for arr in a[:-1]]\n\n # this is a marker between sub sequence x and dummies\n inter_seq = self.add_ctrl(np.zeros((batch_size, 1, self.data_bits)), ctrl_recall, ctrl_data)\n\n # dummies of x\n data_2 = [xx[-1][-1]]\n\n # concatenate all parts of the inputs\n inputs = np.concatenate(data_1 + [inter_seq] + data_2, axis=1)\n\n # Set control lines for recall items. \n inputs[:, inputs.shape[1]-seq_lengths[-1]:,0:self.control_bits] = np.tile(\n ctrl_aux,(batch_size,seq_lengths[-1],1)) \n\n # Generate 3D ByteTensor for mask.\n ptmasks = torch.zeros([batch_size, inputs.shape[1], 1]).type(torch.ByteTensor)\n ptmasks[:, inputs.shape[1]-seq_lengths[-1]:, 0] = 1\n\n # Return data_dict.\n data_dict = self.create_data_dict()\n data_dict['sequences'] = torch.from_numpy(inputs).type(self.app_state.dtype)\n data_dict['targets'] = torch.from_numpy(targets).type(self.app_state.dtype)\n data_dict['masks'] = ptmasks\n data_dict['sequences_length'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * max(seq_lengths).item()\n data_dict['num_subsequences'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * num_sub_seq\n return data_dict\n\n\nif __name__ == \"__main__\":\n \"\"\" Tests sequence generator - generates and displays a random sample\"\"\"\n\n # \"Loaded parameters\".\n from miprometheus.utils.param_interface import ParamInterface\n\n params = ParamInterface()\n params.add_config_params({#'control_bits': 4,\n #'data_bits': 8,\n 'min_sequence_length': 1,\n 'max_sequence_length': 10,\n 'num_subseq_min': 2,\n 'num_subseq_max': 4})\n batch_size = 10\n\n # Create problem object.\n scratchpad = ScratchPad(params)\n\n # get a sample\n sample = scratchpad[0]\n print(repr(sample))\n print('__getitem__ works.')\n\n # wrap DataLoader on top\n from torch.utils.data import DataLoader\n\n problem = DataLoader(dataset=scratchpad, batch_size=batch_size, collate_fn=scratchpad.collate_fn,\n shuffle=False, num_workers=0)\n\n # generate a batch\n import time\n\n s = time.time()\n for i, batch in enumerate(problem):\n #print('Batch # {} - {}'.format(i, type(batch)))\n pass\n\n print('Number of workers: {}'.format(problem.num_workers))\n print('time taken to exhaust a dataset of size {}, with a batch size of {}: {}s'\n .format(scratchpad.__len__(), batch_size, time.time() - s))\n\n # Display single sample (0) from batch.\n batch = next(iter(problem))\n scratchpad.show_sample(batch, 0)\n print('Unit test completed.')\n\n"},"repo_name":{"kind":"string","value":"vincentalbouy/mi-prometheus"},"sub_path":{"kind":"string","value":"miprometheus/problems/seq_to_seq/algorithmic/recall/scratch_pad_cl.py"},"file_name":{"kind":"string","value":"scratch_pad_cl.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":7377,"string":"7,377"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114951,"cells":{"seq_id":{"kind":"string","value":"224890675"},"text":{"kind":"string","value":"'''\nThe purpose of the python code is as follows:\n 1) To load the trained classifier model to classify different hand signs\n 2) To capture the frames taken from users camera\n 3) Take the landmarks from the users hand\n 4) Load the landmark data into the 
model\n 5) Get the prediction from the model and print it in the frame\n'''\n\nimport pickle\nimport cv2\nimport mediapipe as mp\nimport numpy as np\n\n# Load the savel classifier model\nmodel_dict = pickle.load(open('./model2.p', 'rb'))\nmodel = model_dict['model']\n\ncap = cv2.VideoCapture(0)\n\n# Get the hands part from the picture using mediapipe\nmp_hands = mp.solutions.hands\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\nhands = mp_hands.Hands(static_image_mode=True, min_detection_confidence=0.3)\n\n# Kae the alphabet labels\nlabels_dict = {}\nfor i in range(26):\n labels_dict[i] = chr(65+i)\n \nwhile True:\n\n data_aux = []\n x_ = []\n y_ = []\n\n ret, frame = cap.read()\n\n H, W, _ = frame.shape\n\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n #Capture the landmarks from the hand region\n results = hands.process(frame_rgb)\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n mp_drawing.draw_landmarks(\n frame, # image to draw\n hand_landmarks, # model output\n mp_hands.HAND_CONNECTIONS, # hand connections\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style())\n\n for hand_landmarks in results.multi_hand_landmarks:\n for i in range(len(hand_landmarks.landmark)):\n x = hand_landmarks.landmark[i].x\n y = hand_landmarks.landmark[i].y\n\n x_.append(x)\n y_.append(y)\n\n for i in range(len(hand_landmarks.landmark)):\n x = hand_landmarks.landmark[i].x\n y = hand_landmarks.landmark[i].y\n data_aux.append(x - min(x_))\n data_aux.append(y - min(y_))\n\n x1 = int(min(x_) * W) - 10\n y1 = int(min(y_) * H) - 10\n\n x2 = int(max(x_) * W) - 10\n y2 = int(max(y_) * H) - 10\n \n \n # Get the prediction of model after loading the hand landmark data\n prediction = model.predict([np.asarray(data_aux)])\n\n # Get the character predicted\n predicted_character = labels_dict[int(prediction[0])]\n\n # Plot a rectangle on the hand region and inscribe text above this hand region\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 0), 4)\n cv2.putText(frame, predicted_character, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 0, 0), 3,\n cv2.LINE_AA)\n\n # Show the frame\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n\n\ncap.release()\ncv2.destroyAllWindows()\n"},"repo_name":{"kind":"string","value":"RexSan0x/Sign-Language-and-Emotion-Detection"},"sub_path":{"kind":"string","value":"Sign_Language_Training/inferece_sign_lang.py"},"file_name":{"kind":"string","value":"inferece_sign_lang.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2902,"string":"2,902"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114952,"cells":{"seq_id":{"kind":"string","value":"191147204"},"text":{"kind":"string","value":"import sqlite3\n\n# connecting to db\ncon = sqlite3.connect('technical_test.db')\ncur = con.cursor()\n\n# printing each row in the db\nfor row in cur.execute('select * from famous_people;'):\n print(row)\n print('')\n\n# closing connection to 
db\ncon.close()"},"repo_name":{"kind":"string","value":"MickyCompanie/technical_test_sneldev"},"sub_path":{"kind":"string","value":"query_db.py"},"file_name":{"kind":"string","value":"query_db.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":252,"string":"252"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114953,"cells":{"seq_id":{"kind":"string","value":"22593983762"},"text":{"kind":"string","value":"#-*- coding: utf-8 -*-\nfrom cadproj.models import OrientadorOuMediador, Projeto, Curso, TipoDeProjeto, ModoDeApresentacao, Cidade, Recurso, Calouro, Turma\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nclass EstudanteOptions(admin.ModelAdmin):\n list_display = ('nome','matricula')\n #date_hierarchy = 'data_e_hora'\n \nclass OrientadorOptions(admin.ModelAdmin):\n list_display = ('nome',)\n\n#class CursoOptions(admin.ModelAdmin):\n #list_display = ('nome',)\n\nclass ProjetoOptions(admin.ModelAdmin):\n #inlines = [Pendencia_Inline, Contatamento_Inline]\n list_display = ('estudante', 'titulo', 'orientador_ou_mediador')\n fieldsets = (\n (None, {\n 'fields': ('estudante','matricula','titulo','descricao')\n }),\n ('Outros Componentes da Equipe', {\n 'fields': ('estudante2','estudante3','outros_componentes')\n }),\n ('Curso', {\n 'fields': ('curso','turma')\n }),\n ('Contato', {\n 'fields': ('cidade_onde_mora','fone','email')\n }),\n ('Projeto', {\n 'fields': ('orientador_ou_mediador','colaborador','tipo_de_projeto','outro_tipo_de_projeto','palavra_chave1','palavra_chave2','palavra_chave3','cidade_de_abrangencia','local_e_ou_instituicao_de_abrangencia')\n }),\n ('Apresentação', {'fields':('modo_de_apresentacao','outro_modo','recursos_para_a_apresentacao')\n }),\n )\n list_per_page = 25\n search_fields = ['estudante', 'titulo', 'descricao', 'matricula', 'fone']\n list_filter = ('orientador_ou_mediador','curso','tipo_de_projeto','cidade_de_abrangencia')\n\nadmin.site.register(Curso)\nadmin.site.register(Projeto,ProjetoOptions)\nadmin.site.register(TipoDeProjeto)\nadmin.site.register(ModoDeApresentacao)\n\nadmin.site.register(OrientadorOuMediador)\n\nadmin.site.register(Cidade)\nadmin.site.register(Recurso)\nadmin.site.register(Calouro)\nadmin.site.register(Turma)\n\n"},"repo_name":{"kind":"string","value":"jamur/Mostra-de-Projetos"},"sub_path":{"kind":"string","value":"cadproj/admin.py"},"file_name":{"kind":"string","value":"admin.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1960,"string":"1,960"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"pt"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114954,"cells":{"seq_id":{"kind":"string","value":"25257312328"},"text":{"kind":"string","value":"from __future__ import annotations\n\nimport typing\n\nfrom flupy import flu\nfrom nebulo.config import Config\nfrom nebulo.gql.alias import FunctionPayloadType, MutationPayloadType, ObjectType, ResolveInfo, ScalarType\nfrom nebulo.gql.parse_info import parse_resolve_info\nfrom nebulo.gql.relay.node_interface import NodeIdStructure, to_node_id_sql\nfrom 
nebulo.gql.resolve.resolvers.claims import build_claims\nfrom nebulo.gql.resolve.transpile.mutation_builder import build_mutation\nfrom nebulo.gql.resolve.transpile.query_builder import sql_builder, sql_finalize\nfrom nebulo.sql.table_base import TableProtocol\nfrom sqlalchemy import literal_column, select\n\n\nasync def async_resolver(_, info: ResolveInfo, **kwargs) -> typing.Any:\n \"\"\"Awaitable GraphQL Entrypoint resolver\n\n Expects:\n info.context['engine'] to contain an sqlalchemy.ext.asyncio.AsyncEngine\n \"\"\"\n context = info.context\n engine = context[\"engine\"]\n default_role = context[\"default_role\"]\n jwt_claims = context[\"jwt_claims\"]\n\n tree = parse_resolve_info(info)\n\n async with engine.begin() as trans:\n # Set claims for transaction\n if jwt_claims or default_role:\n claims_stmt = build_claims(jwt_claims, default_role)\n await trans.execute(claims_stmt)\n\n result: typing.Dict[str, typing.Any]\n\n if isinstance(tree.return_type, FunctionPayloadType):\n sql_function = tree.return_type.sql_function\n function_args = [val for key, val in tree.args[\"input\"].items() if key != \"clientMutationId\"]\n func_call = sql_function.to_executable(function_args)\n\n # Function returning table row\n if isinstance(sql_function.return_sqla_type, TableProtocol):\n # Unpack the table row to columns\n return_sqla_model = sql_function.return_sqla_type\n core_table = return_sqla_model.__table__\n func_alias = func_call.alias(\"named_alias\")\n stmt = select([literal_column(c.name).label(c.name) for c in core_table.c]).select_from(func_alias) # type: ignore\n stmt_alias = stmt.alias()\n node_id_stmt = select([to_node_id_sql(return_sqla_model, stmt_alias).label(\"nodeId\")]).select_from(stmt_alias) # type: ignore\n ((row,),) = await trans.execute(node_id_stmt)\n node_id = NodeIdStructure.from_dict(row)\n\n # Add nodeId to AST and query\n query_tree = next(iter([x for x in tree.fields if x.name == \"result\"]), None)\n if query_tree is not None:\n query_tree.args[\"nodeId\"] = node_id\n base_query = sql_builder(query_tree)\n query = sql_finalize(query_tree.alias, base_query)\n ((stmt_result,),) = await trans.execute(query)\n else:\n stmt_result = {}\n else:\n stmt = select([func_call.label(\"result\")])\n (stmt_result,) = await trans.execute(stmt)\n\n maybe_mutation_id = tree.args[\"input\"].get(\"clientMutationId\")\n mutation_id_alias = next(\n iter([x.alias for x in tree.fields if x.name == \"clientMutationId\"]),\n \"clientMutationId\",\n )\n result = {tree.alias: {**stmt_result, **{mutation_id_alias: maybe_mutation_id}}}\n\n elif isinstance(tree.return_type, MutationPayloadType):\n stmt = build_mutation(tree)\n ((row,),) = await trans.execute(stmt)\n node_id = NodeIdStructure.from_dict(row)\n\n maybe_mutation_id = tree.args[\"input\"].get(\"clientMutationId\")\n mutation_id_alias = next(\n iter([x.alias for x in tree.fields if x.name == \"clientMutationId\"]),\n \"clientMutationId\",\n )\n node_id_alias = next(iter([x.alias for x in tree.fields if x.name == \"nodeId\"]), \"nodeId\")\n output_row_name: str = Config.table_name_mapper(tree.return_type.sqla_model)\n query_tree = next(iter([x for x in tree.fields if x.name == output_row_name]), None)\n sql_result = {}\n if query_tree:\n # Set the nodeid of the newly created record as an arg\n query_tree.args[\"nodeId\"] = node_id\n base_query = sql_builder(query_tree)\n query = sql_finalize(query_tree.alias, base_query)\n ((sql_result,),) = await trans.execute(query)\n result = {\n tree.alias: {**sql_result, mutation_id_alias: 
maybe_mutation_id},\n mutation_id_alias: maybe_mutation_id,\n node_id_alias: node_id,\n }\n\n elif isinstance(tree.return_type, (ObjectType, ScalarType)):\n base_query = sql_builder(tree)\n query = sql_finalize(tree.name, base_query)\n ((query_json_result,),) = await trans.execute(query)\n\n if isinstance(tree.return_type, ScalarType):\n # If its a scalar, unwrap the top level name\n result = flu(query_json_result.values()).first(None)\n else:\n result = query_json_result\n\n else:\n raise Exception(\"sql builder could not handle return type\")\n\n # Stash result on context to enable dumb resolvers to not fail\n context[\"result\"] = result\n return result\n"},"repo_name":{"kind":"string","value":"olirice/nebulo"},"sub_path":{"kind":"string","value":"src/nebulo/gql/resolve/resolvers/asynchronous.py"},"file_name":{"kind":"string","value":"asynchronous.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5380,"string":"5,380"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":90,"string":"90"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114955,"cells":{"seq_id":{"kind":"string","value":"26212162918"},"text":{"kind":"string","value":"from langchain.agents import Tool\nfrom htmlTemplates import css, bot_template, user_template, disclaimer_text, box_template, user_img, bot_img\nfrom typing import List\nfrom langchain.agents import Tool\nfrom streamlit.components.v1 import html\nfrom agentFunctions import simple_report_search, report_summarizer, one_person_search, tearm_search\n\ndef create_tools():\n # define usable Tools for the Agent\n tools = [\n Tool(\n name = \"TermSearch\",\n func=tearm_search,\n description=\"use this tool if you are not sure about a term. Input the term\"\n ),\n Tool(\n name = \"SimpleReportSearch\",\n func=simple_report_search,\n description=\"useful if you think that you need just a little information from the report to answer the User Question. Input a question what information you need and keywords, Suitable for a keywords-based search in a vector space\"\n ),\n Tool(\n name = \"ReportSummarizer\",\n func = report_summarizer,\n description=\"useful if you think that you need a lot information from the report to answer the User Question. Input a question what information you need and keywords, Suitable for a keywords-based search in a vector space\"\n ),\n Tool(\n name = \"OnePersonSearch\",\n func= one_person_search,\n description=\"useful if you think that you need personal information about a persons in the MPI to answer the User Question. 
Input a question with the name of the person you search for, Suitable for a keyword-based search in a vector space\"\n )\n ]\n return tools\n"},"repo_name":{"kind":"string","value":"kpister/prompt-linter"},"sub_path":{"kind":"string","value":"data/scraping/repos/HannesDiemerling~MinervasArchive/agentTools.py"},"file_name":{"kind":"string","value":"agentTools.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1654,"string":"1,654"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114956,"cells":{"seq_id":{"kind":"string","value":"43709154819"},"text":{"kind":"string","value":"#recommended way\nadmin_dict = {'1':'scie/065p','2':'scii/890p'}\n#getting value for a key using [] brackets\nprint(admin_dict['1'])\n\n#not recommended if key is an integer\ndict_func = dict(one='1',two='2')\n\n#change value\nadmin_dict['1'] = 'steve/07'\nprint(admin_dict['1'])\n\n#adding key value dictionary from one dict to another\nadmin_name = {'name':'steve','phone':'0756949393'}\n\nadmin_name.update(admin_dict)\nprint(admin_name)"},"repo_name":{"kind":"string","value":"steve-ryan/python-tutorial-for-beginners"},"sub_path":{"kind":"string","value":"dictionary.py"},"file_name":{"kind":"string","value":"dictionary.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":424,"string":"424"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114957,"cells":{"seq_id":{"kind":"string","value":"17060406313"},"text":{"kind":"string","value":"import json\n\nimport openpyxl\nfrom case_study.models import Question\nfrom core.decorators import staff_required\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\nfrom .common import populate_data, delete_model, patch_model\nfrom ..forms import QuestionImportForm\n\nschema_question = {\n \"endpoint\": \"/caseadmin/questions/\",\n \"fields\": [\n {\n \"title\": \"Question\",\n \"key\": \"body\",\n \"widget\": {\n \"template\": \"w-text.html\",\n },\n \"write\": True,\n },\n ]\n}\n\n\ndef render_question_view(request, message=None, message_type=None):\n data = populate_data(schema_question, Question.objects.all())\n c = {\n \"title\": \"Question Admin\",\n \"model_name\": \"Question\",\n \"toolbar_new\": True,\n \"toolbar_import\": True,\n \"data\": data,\n \"import_form\": QuestionImportForm(),\n \"import_endpoint\": \"/caseadmin/questions/import\",\n \"schema\": schema_question,\n \"admin_message\": message,\n \"admin_message_type\": message_type,\n \"hard_delete_only\": True,\n\n }\n return render(request, \"case-admin.html\", c)\n\n\n@staff_required\ndef api_admin_question(request, question_id):\n if request.method == \"PATCH\":\n return patch_model(request, Question, schema_question, question_id)\n elif request.method == \"DELETE\":\n return delete_model(request, Question, question_id)\n else:\n return JsonResponse({\n \"success\": False,\n \"message\": \"Unsupported HTTP method: \" + request.method,\n })\n\n\ndef question_import_txt(request, file, file_format):\n if file.content_type != 
\"text/plain\":\n return render_question_view(request,\n \"Failed to import questions as text/plain. \"\n \"Please ensure your text file contains one question per line. \",\n \"alert-danger\")\n questions = []\n for question in file.file.readlines():\n q = question.decode(\"utf-8\").strip()\n questions.append(Question(body=q))\n try:\n Question.objects.bulk_create(questions, ignore_conflicts=True)\n return render_question_view(request, \"Successfully imported {} questions.\".format(len(questions)), \"alert-success\")\n except IntegrityError as e:\n return render_question_view(request,\n \"Failed to import questions as text/plain. \"\n \"Please ensure your text file contains one question per line. \"\n \"Error: \" + str(e.args[0]),\n \"alert-danger\")\n\n\ndef question_import_csv(request, file, file_format):\n if file.content_type != \"text/csv\":\n return render_question_view(request,\n \"Failed to import questions as text/csv. \"\n \"Please ensure your csv file contains one question per line. \",\n \"alert-danger\")\n questions = []\n lines = file.read().decode(\"utf-8\").split(\"\\n\")\n for line in lines:\n q = line.strip()\n questions.append(Question(body=q))\n try:\n Question.objects.bulk_create(questions, ignore_conflicts=True)\n return render_question_view(request, \"Successfully imported {} questions.\".format(len(questions)), \"alert-success\")\n except IntegrityError as e:\n return render_question_view(request,\n \"Failed to import questions as text/csv. \"\n \"Please ensure your csv file contains one question per line. \"\n \"Error: \" + str(e.args[0]),\n \"alert-danger\")\n\n\ndef question_import_json(request, file, file_format):\n if file.content_type != \"application/json\":\n return render_question_view(request,\n \"Failed to import questions as application/json. \"\n \"Please ensure your json file contains a list of strings. \",\n \"alert-danger\")\n questions = []\n file_text = file.read().decode(\"utf-8\")\n file_json = json.loads(file_text)\n for question in file_json:\n q = question.strip()\n questions.append(Question(body=q))\n try:\n Question.objects.bulk_create(questions, ignore_conflicts=True)\n return render_question_view(request, \"Successfully imported {} questions.\".format(len(questions)), \"alert-success\")\n except IntegrityError as e:\n return render_question_view(request,\n \"Failed to import questions as application/json. \"\n \"Please ensure your json file contains a list of strings. \"\n \"Error: \" + str(e.args[0]),\n \"alert-danger\")\n\n\ndef question_import_xlsx(request, file, file_format):\n if not (str(file.content_type) == \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" or file.name.endswith('.xlsx')):\n return render_question_view(request,\n \"Failed to import questions as xlsx. \"\n \"Please ensure column A has a single question per cell. \",\n \"alert-danger\")\n questions = []\n wb = openpyxl.load_workbook(file)\n sheet = wb.worksheets[0]\n for col in sheet.iter_cols():\n for cel in col:\n q = str(cel.value).strip()\n questions.append(Question(body=q))\n\n try:\n Question.objects.bulk_create(questions, ignore_conflicts=True)\n return render_question_view(request, \"Successfully imported {} questions.\".format(len(questions)), \"alert-success\")\n except IntegrityError as e:\n return render_question_view(request,\n \"Failed to import questions as xlsx. \"\n \"Please ensure column A has a single question per cell. 
\"\n \"Error: \" + str(e.args[0]),\n \"alert-danger\")\n\n\n@staff_required\ndef api_admin_question_import(request):\n if request.method == \"POST\":\n form = QuestionImportForm(request.POST)\n file = request.FILES[\"file\"]\n file_format = str(form[\"file_format\"].value())\n if file_format == \"auto\":\n if file.content_type == \"text/csv\":\n file_format = \"csv\"\n elif file.content_type == \"application/json\":\n file_format = \"json\"\n elif file.content_type == \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" or file.name.endswith('.xlsx'):\n file_format = \"xlsx\"\n elif file.content_type == \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" or file.name.endswith('.xls'):\n file_format = \"xls\"\n elif file.content_type == \"text/plain\":\n file_format = \"txt\"\n\n if file_format == \"csv\":\n return question_import_csv(request, file, file_format)\n elif file_format == \"json\":\n return question_import_json(request, file, file_format)\n elif file_format == \"xlsx\":\n return question_import_xlsx(request, file, file_format)\n elif file_format == \"xls\":\n return question_import_xlsx(request, file, file_format)\n elif file_format == \"txt\":\n return question_import_txt(request, file, file_format)\n else:\n return render_question_view(request,\n \"Unknown file format: {}\".format(str(file_format)),\n \"alert-danger\")\n else:\n return JsonResponse({\n \"success\": False,\n \"message\": \"Unsupported method: \" + request.method,\n })\n\n\n@staff_required\ndef view_admin_question(request):\n if request.method == \"GET\":\n return render_question_view(request)\n elif request.method == \"POST\":\n try:\n body = json.loads(request.body)\n Question.objects.create(body=body[\"body\"])\n return JsonResponse({\n \"success\": True,\n \"message\": \"Question created\",\n })\n except Exception as e:\n return JsonResponse({\n \"success\": False,\n \"message\": \"Failed to create a question:\\n\" + str(e.args[0]),\n })\n"},"repo_name":{"kind":"string","value":"320011/case"},"sub_path":{"kind":"string","value":"core/case_admin/views/question.py"},"file_name":{"kind":"string","value":"question.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":8610,"string":"8,610"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114958,"cells":{"seq_id":{"kind":"string","value":"41390824167"},"text":{"kind":"string","value":"import os\nimport json\nimport sqlite3\nimport requests\n\n\ndb_stored = os.path.join(os.path.dirname(__file__), 'qaset.db') # r'D:\\Archive\\Voibot\\qabot\\data\\qabot\\data\\qaset.db'\nurl = 'http://10.1.163.22:5000/encode'\nheaders = {'Content-Type': 'application/json'}\n\n\ndef generate_all_features(db_stored, begin_id, end_id):\n conn = sqlite3.connect(db_stored)\n cursor = conn.cursor()\n ques_cursor = cursor.execute('select question from qaset where id between ? and ?', (begin_id, end_id))\n questions = []\n for ques in ques_cursor:\n questions.append(ques[0])\n data = {\n 'id': 123,\n 'texts': questions\n }\n r = requests.post(url=url, headers=headers, data=json.dumps(data))\n result = json.loads(r.text)\n qvectors = result['result']\n current_id = begin_id\n while current_id <= end_id:\n cursor.execute('update qaset set feature = ? 
where id = ?', (json.dumps(qvectors[current_id - begin_id]), current_id))\n current_id += 1\n conn.commit()\n conn.close()\n\n\nif __name__ == \"__main__\":\n # begin_id = 1\n # while(begin_id <= 36800):\n # end_id = begin_id + 99\n # generate_all_features(db_stored, begin_id, end_id)\n # print('%d to %d is done.' % (begin_id, end_id))\n # begin_id = end_id + 1\n begin_id = 36811\n end_id = 36843\n generate_all_features(db_stored, begin_id, end_id)\n"},"repo_name":{"kind":"string","value":"yaohsinyu/voibot"},"sub_path":{"kind":"string","value":"qabot/data/generate_all_feature.py"},"file_name":{"kind":"string","value":"generate_all_feature.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1382,"string":"1,382"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114959,"cells":{"seq_id":{"kind":"string","value":"4149350510"},"text":{"kind":"string","value":"\"\"\"empty message\n\nRevision ID: 96089780dc64\nRevises: 45811f048651\nCreate Date: 2022-07-14 08:54:14.167701\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '96089780dc64'\ndown_revision = '45811f048651'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('rounds', sa.Column('funny_count', sa.SmallInteger(), nullable=True))\n op.add_column('rounds', sa.Column('deeep_count', sa.SmallInteger(), nullable=True))\n op.execute(\"UPDATE rounds SET funny_count = 0 WHERE true\")\n op.execute(\"UPDATE rounds SET deeep_count = 0 WHERE true\")\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('rounds', 'deeep_count')\n op.drop_column('rounds', 'funny_count')\n # ### end Alembic commands ###\n"},"repo_name":{"kind":"string","value":"pamelafox/translation-telephone"},"sub_path":{"kind":"string","value":"migrations/versions/96089780dc64_.py"},"file_name":{"kind":"string","value":"96089780dc64_.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":924,"string":"924"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":16,"string":"16"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114960,"cells":{"seq_id":{"kind":"string","value":"23275279979"},"text":{"kind":"string","value":"from rest_framework import mixins, status\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.response import Response\nfrom api.models import UploadImage, UploadRequest\nfrom api.serializers import UploadSerializer\nfrom api.serializers.image_serializer import ImageSerializer\n\n\nclass UploadViewSet(mixins.CreateModelMixin, GenericViewSet):\n queryset = UploadRequest.objects.all()\n serializer_class = UploadSerializer\n\n def create(self, request, *args, **kwargs):\n serializer_data = {}\n processed_images = []\n\n request_data = request.data\n images_to_process = 'images[]' in request_data and \\\n request.FILES.getlist('images[]') or request_data.get('images',[])\n for image_to_process in images_to_process:\n transformed_image = UploadViewSet.process_image(image_to_process)\n processed_images.append(transformed_image['id'])\n\n serializer_data['images_id'] = bool(processed_images) and processed_images or None\n serializer = self.get_serializer(data=serializer_data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n @classmethod\n def process_image(cls, image):\n serializer_data = {}\n image_to_process = image\n serializer_data['source'] = isinstance(image_to_process, str) and \\\n UploadImage.ImageSource.Remote or UploadImage.ImageSource.Upload\n\n serializer_data['source_path'] = ''\n if serializer_data['source'] == UploadImage.ImageSource.Remote:\n serializer_data['source_path'] = image_to_process\n\n serializer_data['original_file'] = image_to_process\n serializer_data['transformed_file'] = None\n image_serializer = ImageSerializer(data=serializer_data)\n image_serializer.is_valid(raise_exception=True)\n image_serializer.save()\n return image_serializer.data\n"},"repo_name":{"kind":"string","value":"ongtzewei/django-image-manipulation-webapp"},"sub_path":{"kind":"string","value":"api/views/upload.py"},"file_name":{"kind":"string","value":"upload.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2050,"string":"2,050"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114961,"cells":{"seq_id":{"kind":"string","value":"17604501818"},"text":{"kind":"string","value":"\nimport numpy as np\nfrom scipy import misc\nimport matplotlib.pyplot as plt\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by 
parameters W on a single slice (a_slice_prev) of the output activation\n of the previous layer.\n\n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n\n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice_prev and W. Do not add the bias yet.\n s = a_slice_prev * W\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z = np.float(np.add(b, Z))\n ### END CODE HERE ###\n\n return Z\n\n\n\ndef convolve2d(image, kernel, padding=False, striding=1):\n # This function which takes an image and a kernel\n # and returns the convolution of them\n # Args:\n # image: a numpy array of size [image_height, image_width].\n # kernel: a numpy array of size [kernel_height, kernel_width].\n # Returns:\n # a numpy array of size [image_height, image_width] (convolution output).\n\n assert kernel.shape[0] == kernel.shape[1], \"kernel must be square\"\n assert striding != 0, \"striding cannot be zero\"\n\n # The kernel is flipped so that we are not performing a \"correlation\" operation\n kernel = np.flipud(np.fliplr(kernel))\n\n kernel_h = kernel.shape[0]\n kernel_w = kernel.shape[1]\n\n h = kernel_h // 2\n w = kernel_w // 2\n\n image_h = image.shape[0]\n image_w = image.shape[1]\n\n # if padding turned on (to fix border effect) then set for \"same\" padding\n if padding:\n pad = (kernel_h - 1) // 2\n else:\n pad = 0\n\n new_height = int(((image_h + 2*pad - kernel_h) / striding) + 1)\n new_width = int(((image_w + 2*pad - kernel_w) / striding) + 1)\n image_out = np.zeros(new_height, new_width)\n\n # Add padding to the input image\n image_padded = np.pad(image, ((0,0), (pad, pad), (pad, pad), (0,0)), 'constant', constant_values = (0,0))\n\n for x in range(h, image_h - h): # Loop over every pixel of the image\n for y in range(w, image_w - w):\n sum = 0\n\n for m in range(kernel_h):\n for n in range(kernel_w):\n sum += kernel[m][n] * image_padded[x-h+m][y-w+n]\n\n image_out[x,y] = sum\n\n return image_out\n\nimg = misc.ascent()\nplt.grid(False)\nplt.gray()\nplt.axis('off')\nplt.imshow(img)\nplt.show()\n\n\n# This filter detects edges nicely\n# It creates a convolution that only passes through sharp edges and straight\n# lines.\n\n#Experiment with different values for fun effects.\n\nfilter_edge = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]\nimage_sharpen = convolve2d(img, filter_edge)\nplt.imshow(image_sharpen, cmap=plt.cm.gray)\nplt.axis('off')\nplt.show()\n\n\n\n# A couple more filters to try for fun!\nfilter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]]\n\n\nfilter = [ [0, 1, 1, 0], [1, 3, 3, 1], [-1, -3, -3, -1], [0, -1, -1, 0]]\nweight = 1\n\n#filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]\n\n# If all the digits in the filter don't add up to 0 or 1, you\n# should probably do a weight to get it to do so\n# so, for example, if your weights are 1,1,1 1,2,1 1,1,1\n# They add up to 10, so you would set a weight of .1 if you want to normalize them\n\n\ni_transformed = np.copy(i)\nsize_x = i_transformed.shape[0]\nsize_y = i_transformed.shape[1]\nprint(size_x, size_y)\n\nweight = 1\n\nfor x in range(2,size_x-2):\n for y in range(2,size_y-2):\n convolution = 0.0\n convolution = convolution + (i[x - 2, y-2] * 
filter[0][0])\n convolution = convolution + (i[x - 1, y-2] * filter[0][1])\n convolution = convolution + (i[x, y-2] * filter[0][2])\n convolution = convolution + (i[x + 1, y-2] * filter[0][3])\n\n convolution = convolution + (i[x-1, y] * filter[1][0])\n convolution = convolution + (i[x, y] * filter[1][1])\n convolution = convolution + (i[x+1, y] * filter[1][2])\n convolution = convolution + (i[x + 1, y] * filter[1][3])\n\n convolution = convolution + (i[x-1, y+1] * filter[2][0])\n convolution = convolution + (i[x, y+1] * filter[2][1])\n convolution = convolution + (i[x+1, y+1] * filter[2][2])\n convolution = convolution + (i[x + 1, y + 1] * filter[2][3])\n\n convolution = convolution + (i[x-1, y+1] * filter[3][0])\n convolution = convolution + (i[x, y+1] * filter[3][1])\n convolution = convolution + (i[x+1, y+1] * filter[3][2])\n convolution = convolution + (i[x + 1, y + 1] * filter[3][3])\n\n\n\n convolution = convolution * weight\n if(convolution<0):\n convolution=0\n if(convolution>255):\n convolution=255\n i_transformed[x, y] = convolution\n\n# Plot the image. Note the size of the axes -- they are 512 by 512\nplt.gray()\nplt.grid(False)\nplt.imshow(i_transformed)\n#plt.axis('off')\nplt.show()\n\nplt.imshow(image_sharpen, cmap=plt.cm.gray)\nplt.axis('off')\nplt.show()"},"repo_name":{"kind":"string","value":"sheldon-wall/DLSpecCourse4"},"sub_path":{"kind":"string","value":"Week1.py"},"file_name":{"kind":"string","value":"Week1.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5168,"string":"5,168"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114962,"cells":{"seq_id":{"kind":"string","value":"33948199381"},"text":{"kind":"string","value":"import http.server\nimport socketserver\n\nfrom .tools import HTTPTools\n\n\nclass Handler(http.server.SimpleHTTPRequestHandler):\n \"\"\" Subclass of pex.proto.http module.\n\n This subclass of pex.proto.http module represents\n HTTP handler for web server.\n \"\"\"\n\n def log_request(self, fmt, *args) -> None:\n pass\n\n def send_status(self, code: int = 200) -> None:\n self.send_response(int(code))\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n\n\nclass HTTPListener(object):\n \"\"\" Subclass of pex.proto.http module.\n\n This subclass of pex.proto.http module represents Python\n implementation of HTTP listener.\n \"\"\"\n\n def __init__(self, host: str, port: int, methods: dict = {}) -> None:\n \"\"\" Start HTTP listener on socket pair.\n\n :param str host: host to listen\n :param int port: port to listen\n :param dict methods: methods, method names as keys and\n method handlers as items\n :return None: None\n \"\"\"\n\n super().__init__()\n\n self.http_tools = HTTPTools()\n self.handler = Handler\n\n self.host = host\n self.port = int(port)\n\n self.sock = None\n self.methods = methods\n\n def listen(self) -> None:\n \"\"\" Start HTTP listener.\n\n :return None: None\n :raises RuntimeError: with trailing error message\n \"\"\"\n\n try:\n for method in self.methods:\n setattr(self.handler, f\"do_{method.upper()}\", self.methods[method])\n\n self.sock = socketserver.TCPServer((self.host, self.port), self.handler)\n except Exception:\n raise RuntimeError(f\"Failed to start HTTP listener on port {str(self.port)}!\")\n\n def stop(self) -> None:\n \"\"\" 
Stop HTTP listener.\n\n :return None: None\n :raises RuntimeError: with trailing error message\n \"\"\"\n\n try:\n self.sock.server_close()\n except Exception:\n raise RuntimeError(f\"HTTP listener is not started!\")\n\n def accept(self) -> None:\n \"\"\" Accept connection.\n\n :return None: None\n :raises RuntimeError: with trailing error message\n \"\"\"\n\n try:\n self.sock.handle_request()\n except Exception:\n raise RuntimeError(f\"HTTP listener is not started!\")\n"},"repo_name":{"kind":"string","value":"EntySec/Pex"},"sub_path":{"kind":"string","value":"pex/proto/http/listener.py"},"file_name":{"kind":"string","value":"listener.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2334,"string":"2,334"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":25,"string":"25"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114963,"cells":{"seq_id":{"kind":"string","value":"73961830874"},"text":{"kind":"string","value":"from urllib.parse import urlencode\n\nimport requests\nfrom dj_rest_auth.app_settings import api_settings\nfrom dj_rest_auth.jwt_auth import set_jwt_cookies\nfrom dj_rest_auth.models import get_token_model\nfrom dj_rest_auth.utils import jwt_encode\nfrom dj_rest_auth.views import LoginView\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import redirect\nfrom rest_framework import serializers\n\nfrom db.repository.user import UserRepository\n\n\ndef google_get_access_token(*, code: str, redirect_uri: str) -> str:\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#obtainingaccesstokens\n data = {\n \"code\": code,\n \"client_id\": settings.GOOGLE_OAUTH2_CLIENT_ID,\n \"client_secret\": settings.GOOGLE_OAUTH2_CLIENT_SECRET,\n \"redirect_uri\": redirect_uri,\n \"grant_type\": \"authorization_code\",\n }\n\n response = requests.post(settings.GOOGLE_ACCESS_TOKEN_OBTAIN_URL, data=data)\n\n if not response.ok:\n raise ValidationError(\"Failed to obtain access token from Google.\")\n\n return response.json()[\"access_token\"]\n\n\ndef google_get_user_info(*, access_token: str):\n # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#callinganapi\n response = requests.get(\n settings.GOOGLE_USER_INFO_URL, params={\"access_token\": access_token}\n )\n\n if not response.ok:\n raise ValidationError(\"Failed to obtain user info from Google.\")\n\n return response.json()\n\n\nclass GoogleLoginApi(LoginView):\n permission_classes = ()\n authentication_classes = ()\n\n class InputSerializer(serializers.Serializer):\n code = serializers.CharField(required=False)\n error = serializers.CharField(required=False)\n\n def get(self, request, *args, **kwargs):\n user_repository = UserRepository()\n\n input_serializer = self.InputSerializer(data=request.GET)\n input_serializer.is_valid(raise_exception=True)\n\n validated_data = input_serializer.validated_data\n\n code = validated_data.get(\"code\")\n error = validated_data.get(\"error\")\n\n if error or not code:\n params = urlencode({\"error\": error})\n return redirect(f\"{settings.PLATFORM_URL}?{params}\")\n\n # api_uri = reverse('api:v1:auth:login-with-google')\n api_uri = f\"{settings.PLATFORM_URL}/api/v1/auth/login/google\"\n\n access_token = google_get_access_token(code=code, redirect_uri=api_uri)\n user_data = 
google_get_user_info(access_token=access_token)\n\n profile_data = {\n \"username\": user_data[\"email\"],\n \"first_name\": user_data.get(\"givenName\", \"\"),\n \"last_name\": user_data.get(\"familyName\", \"\"),\n }\n # We use get-or-create logic here for the sake of the example.\n # We don't have a sign-up flow.\n self.user = user_repository.get_or_create(\n email=user_data[\"email\"], **profile_data\n )\n\n token_model = get_token_model()\n\n if api_settings.USE_JWT:\n self.access_token, self.refresh_token = jwt_encode(self.user)\n elif token_model:\n self.token = api_settings.TOKEN_CREATOR(\n token_model, self.user, self.serializer\n )\n\n if api_settings.SESSION_LOGIN:\n self.process_login()\n\n response = redirect(f\"{settings.PLATFORM_URL}/courses\")\n set_jwt_cookies(response, self.access_token, self.refresh_token)\n return response\n"},"repo_name":{"kind":"string","value":"edu4ml/WSB-ML-PLATFORM-FORKED"},"sub_path":{"kind":"string","value":"api/apis/v1/auth/auth.py"},"file_name":{"kind":"string","value":"auth.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3513,"string":"3,513"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114964,"cells":{"seq_id":{"kind":"string","value":"29905578649"},"text":{"kind":"string","value":"# coding:utf-8\nfrom unityagents import UnityEnvironment\nimport numpy as np\nfrom network.DQN import DQNAgent\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport time\n\nenv = UnityEnvironment(file_name=\"../environment/Banana_Windows_x86_64/Banana.exe\")\npath = \"../result/banana/\"\n# get the default brain\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\nenv_info = env.reset(train_mode=False)[brain_name]\naction_size = brain.vector_action_space_size\nstate = env_info.vector_observations[0]\nstate_size = len(state)\n\ntotal_scores = []\nscores = []\nbatch_size = 64\nmean = 0\ncount = 0\neps = 1.0\neps_end = 0.01\ndecay = 0.999\nmax_t = 1000\ngamma = 0.99\nalpha = 1e-4\ntua = 1e-3\nmax_memory_size = 50000\ntrain = False\n\nwith tf.Session() as session:\n brain_agent = DQNAgent(session, state_size, action_size, max_memory_size, gamma, alpha, tua)\n session.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n while mean < 13 and train:\n env_info = env.reset(train_mode=True)[brain_name]\n score = 0\n time_b = time.time()\n loss = 0\n for i in range(max_t):\n if np.random.random() > eps:\n action = np.argmax(brain_agent.choose_action(state), axis=1)\n else:\n action = np.random.choice(action_size)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n score += reward\n brain_agent.store(state, action, reward, next_state, [done])\n state = next_state\n if brain_agent.step % 4 == 0:\n loss += brain_agent.learn(batch_size)\n if done:\n break\n scores.append(score)\n total_scores.append(score)\n eps = max(eps * decay, eps_end)\n print(\"\\rEpisode: {},\\tCurr Score: {},\\tAverage Score: {:.2f},\\tLoss:{:.4},\\tEPS:{:.4},\\tTime: {:.4}\".format(count, score, np.mean(scores), loss/250.0, eps, time.time()-time_b), end=\"\")\n if count % 100 == 0 and count > 0:\n mean = np.mean(scores)\n print(\"\\rEpisode: {}, \\tAverage Score: {:.2f}\".format(count, 
mean))\n scores.clear()\n count += 1\n\n if train:\n saver.save(session, path)\n fig = plt.figure()\n plt.plot(range(len(total_scores)), total_scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.show()\n else:\n saver.restore(session, path)\n\n saver.restore(session, path)\n for _ in range(10):\n done = False\n env_info = env.reset(train_mode=False)[brain_name]\n score = 0\n state = env_info.vector_observations[0]\n while not done:\n\n action = brain_agent.action = np.argmax(brain_agent.choose_action(state), axis=1)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n score += reward\n state = next_state\n print(\"Score is \", score)\n"},"repo_name":{"kind":"string","value":"lebesgue125/reinforce_learning"},"sub_path":{"kind":"string","value":"banana/dqn_agent.py"},"file_name":{"kind":"string","value":"dqn_agent.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3144,"string":"3,144"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114965,"cells":{"seq_id":{"kind":"string","value":"74910611675"},"text":{"kind":"string","value":"import numpy as np\nimport cv2\nimport pyrealsense2 as rs\nimport math\n\n\"\"\"INTIALIZING REALSENSE DATA\"\"\"\n# Initialize RealSense pipeline\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)\npipeline.start(config)\n\n# Initialize ORB detector\norb = cv2.ORB_create()\n\n# Brute-force Matcher\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n# Previous frame data\nprev_gray = None\nprev_kps = None\nprev_descs = None\nprev_matched_coords = None\ncurrent_matched_coords = None\nMAX_MATCH_DISTANCE = 40 # You can change this threshold based on your needs\nTOP_PERCENTAGE = 0.1 # Top 10% best matches\n\n# LIST OF DISTANCE VECTORS\nreal_points = None\ndistance_vectors = None\neuler_prediction = None\n\n\ndef rotation_matrix(theta_x, theta_y, theta_z):\n Rx = np.array([[1, 0, 0],\n [0, np.cos(theta_x), -np.sin(theta_x)],\n [0, np.sin(theta_x), np.cos(theta_x)]])\n\n Ry = np.array([[np.cos(theta_y), 0, -np.sin(theta_y)], # Note the negative sign for sin(theta_y)\n [0, 1, 0],\n [np.sin(theta_y), 0, np.cos(theta_y)]])\n\n Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0],\n [np.sin(theta_z), np.cos(theta_z), 0],\n [0, 0, 1]])\n\n R = np.dot(Rz, np.dot(Ry, Rx))\n return R\n\n\ndef euler_displacement(theta_x, theta_y, theta_z, point):\n return np.dot(rotation_matrix(theta_x, theta_y, theta_z), point)\n\n\ndef distance_point(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2 + point[2] ** 2)\n return distance\n\n\ndef average_vectors(vectors):\n if not vectors:\n return None # return None if the list is empty\n\n total_x = sum(vec[0] for vec in vectors)\n total_y = sum(vec[1] for vec in vectors)\n total_z = sum(vec[2] for vec in vectors)\n\n num_vectors = len(vectors)\n\n return [total_x / num_vectors, total_y / num_vectors, total_z / num_vectors]\n\n\ndef average_list(list):\n return sum(list) / len(list)\n\n\ndef vector_between_points(p1, p2):\n return [p2[i] - p1[i] for i in range(3)]\n\n\n\"\"\"INTIATING BNO055 ROTATIONAL DATA\"\"\"\nimport 
os\nimport hid\n\nos.environ[\"BLINKA_MCP2221\"] = \"1\"\ndevice = hid.device()\ndevice.open(0x04D8, 0x00DD)\n\nimport board\nimport adafruit_bno055\n\ni2c = board.I2C() # uses board.SCL and board.SDA\nsensor = adafruit_bno055.BNO055_I2C(i2c)\nlast_val = 0xFFFF\n\n\"\"\"MAIN LOOP\"\"\"\ntry:\n while True:\n \"\"\"RGB AND DEPTH DATA PROCESSING\"\"\"\n # Create alignment\n align_to = rs.stream.color\n align = rs.align(align_to)\n # Get frameset of depth and color\n frames = pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n aligned_depth_frame = aligned_frames.get_depth_frame()\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n depth_intrinsics = frames.profile.as_video_stream_profile().intrinsics\n color_frame = frames.get_color_frame()\n\n # Convert color frame to numpy array\n color_image = np.asanyarray(color_frame.get_data())\n\n # Convert to grayscale for ORB\n gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)\n\n # Detect ORB keypoints and descriptors\n kps, descs = orb.detectAndCompute(gray, None)\n\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n depth_with_kps = cv2.drawKeypoints(depth_colormap, kps, None, color=(0, 255, 0), flags=0)\n cv2.imshow('Depth with Keypoints', depth_with_kps)\n\n # Match with previous frame's keypoints and descriptors, if available\n if prev_gray is not None:\n matches = bf.match(prev_descs, descs)\n\n if len(matches) > 0:\n # Sort the matches based on distance (lowest distance is better)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # Filter matches based on a distance threshold\n good_matches = [m for m in matches if m.distance < MAX_MATCH_DISTANCE]\n\n \"\"\"PERCENTAGE BASED FILTERING\"\"\"\n # 1. Percentage-based Filtering\n num_good_matches = int(len(matches) * TOP_PERCENTAGE)\n good_matches_percentage = matches[:num_good_matches]\n\n # Extract (x, y) coordinates of matched keypoints\n prev_matched_coords = [prev_kps[match.queryIdx].pt for match in good_matches_percentage]\n current_matched_coords = [kps[match.trainIdx].pt for match in good_matches_percentage]\n\n # Print matched coordinates (You can store or process them further based on your needs)\n print(\"Previous Frame Matched Coordinates:\", prev_matched_coords)\n print(\"Current Frame Matched Coordinates:\", current_matched_coords)\n print(\"Depth of current:\",\n depth_image[int(current_matched_coords[0][1])][int(current_matched_coords[0][0])])\n\n if len(good_matches) > 0:\n matched_image = cv2.drawMatches(prev_gray, prev_kps, gray, kps, good_matches_percentage,\n None) # or replace 'good_matches_percentage' with 'good_matches_ratio'\n cv2.imshow('Filtered Matched keypoints', matched_image)\n\n # Update the previous frame data\n prev_gray = gray\n prev_kps = kps\n prev_descs = descs\n\n # Exit on 'q'\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nexcept KeyboardInterrupt:\n pass\n\nfinally:\n pipeline.stop()\n 
cv2.destroyAllWindows()"},"repo_name":{"kind":"string","value":"vpark915/The-GingerLens"},"sub_path":{"kind":"string","value":"LocalPythonIdeas/FundamentalScripts/ORBDepthPrimitive.py"},"file_name":{"kind":"string","value":"ORBDepthPrimitive.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5665,"string":"5,665"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114966,"cells":{"seq_id":{"kind":"string","value":"2609538019"},"text":{"kind":"string","value":"import matplotlib.pyplot as plt\nimport scipy.optimize as optimize\nimport scipy.sparse as sparse\nimport scipy.sparse.linalg\nfrom math import ceil\nimport numpy as np\nimport sys\n\n\ndef solve_one_time_step(u_0, mu_vec, temp_a=0, temp_b=0):\n print(\"h\")\n def create_main_matrix(n_x_points, mu_vec):\n \"\"\"\n Matrix for theta method\n \"\"\"\n tri_diag = np.ones((3, n_x_points))\n tri_diag[1] = -2 * tri_diag[1]\n\n for row in range(n_x_points):\n tri_diag[:, row] *= float(mu_vec[row])\n\n a_matrix = sparse.spdiags(tri_diag, [-1, 0, 1], n_x_points, n_x_points)\n\n i_matrix = sparse.identity(n_x_points)\n return a_matrix, i_matrix\n\n u = u_0\n\n bv = np.zeros_like(u_0)\n bv[0] = mu_vec[0] * temp_a\n bv[-1] = mu_vec[0] * temp_b\n\n D2, I = create_main_matrix(n_x_points=u_0.shape[0], mu_vec=mu_vec)\n lhs = (I - D2 / 2)\n rhs = (I + D2 / 2) * u + bv\n u = np.transpose(np.mat(sparse.linalg.spsolve(lhs, rhs)))\n\n return u\n\n\ndef solve_heat_equation(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c, plot=False):\n \"\"\"\n This function approximates a solution to the generic heat equation\n\n u_0_func: function of x that returns the initial value.\n t_final: Latest time to simulate to [s]\n x_a: The lowest x-value of the domain [m]\n x_b: The highest x-value of the domain [m]\n temp_a: The temperature at x=a (Dirichlet BV) [deg C]\n temp_b: The temperature at x=b (Dirichlet BV) [deg C]\n n_x_points: The number of points required in the x-direction.\n c: The constant in the heat equation.\n \"\"\"\n mu = 1 # Arbitrarily chosen, pick a higher number to increase the time step.\n # This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme.\n dx = (x_b - x_a) / n_x_points\n dt = dx ** 2 * mu / c\n n_t_points = ceil(t_final / dt)\n\n x = np.linspace(x_a, x_b, n_x_points)\n t = np.arange(0, t_final, dt)\n u_0 = np.reshape(u_0_func(x), (100, 1))\n data = [u_0]\n\n u = u_0\n for t_i in range(n_t_points):\n u = solve_one_time_step(u_0=u, mu=mu, temp_a=temp_a - 1 + np.cos(t_i * dt),\n temp_b=temp_b - 1 + np.cos(t_i * dt))\n data.append(u)\n\n if (t_i % 1000) == 0:\n print(\".\", end=\"\")\n\n result = np.hstack(data)\n\n if plot:\n X, Y = np.meshgrid(x, t)\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n\n # Creating plot\n ax.plot_surface(X, Y, result[:, :-1].T)\n ax.set_xlabel(\"X [m]\")\n ax.set_ylabel(\"T [s]\")\n plt.show()\n\n return result\n\n\ndef initial_value(x):\n return -6 * np.sin(np.pi * (x - 0.5)) + 2 * (x - 0.5)\n\n\ndef find_zeros(y_arr, a, b):\n \"\"\"\n Returns the x-values (assuming y_arr is on a linear interpolation mesh between a and b) where the y_arr mesh\n function changes sign.\n \"\"\"\n zeros_i = []\n\n for i in range(len(y_arr) - 1):\n if y_arr[i] * y_arr[i + 1] < 0: # This means that there is a sign 
change.\n zeros_i.append(i) # We want to store the index\n\n # Let's now translate these indices into x values.\n dx = (b - a) / len(y_arr)\n zeros = []\n for index in zeros_i:\n zeros.append((index + 0.5) * dx) # Adding half a step because the zero is between i and i+1.\n\n return zeros\n\n\n# def find_zeros_func(f: callable, a, b):\n# k = 1\n# xs = np.linspace(a, b, 1000*k)\n# t_zero = f(xs)\n# sgn = np.sign(t_zero)\n# zbd = []\n#\n# for i in range(0,len(sgn)-1):\n# if sgn[i] != sgn[i+1]:\n# zbd.append((xs[i]+xs[i+1])/2)\n#\n# while len(zbd) != 2 and k < 11:\n# k += 1\n# xs = np.linspace(a, b, 1000 * k)\n# t_zero = f(xs)\n# sgn = np.sign(t_zero)\n# zbd = []\n# for i in range(0, len(sgn) - 1):\n# if sgn[i] != sgn[i + 1]:\n# zbd.append((xs[i] + xs[i + 1]) / 2)\n#\n# if len(zbd) != 2:\n# sys.exit(\"The function u_0 might not be a suitable choice. The function u_0 must be continuous and have exactly two zeros in [x_a,x_b]\")\n# h1 = zbd[0]\n# h2 = zbd[1]\n# h = [h1, h2]\n#\n# return h\n\n\n# def find_zeros_array(u, a, b, tol):\n# k = len(u)\n# xs = np.linspace(a, b, k)\n# sgn = np.sign(u)\n# zbd = []\n# zbd_id = []\n# h = []\n#\n# for i in range(0,len(sgn)-1):\n# if sgn[i] != sgn[i+1]:\n# zbd.append(xs[i])\n# zbd_id.append(i)\n#\n# if len(zbd) == 1:\n# if abs(u[zbd_id[0]]) < tol:\n# h.append(xs[zbd_id[0]])\n# h.append(xs[zbd_id[0]])\n# else:\n# h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2)\n# h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2)\n# elif len(zbd) == 2:\n# if abs(u[zbd_id[0]]) < tol:\n# h.append(xs[zbd_id[0]])\n# else:\n# h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2)\n# if abs(u[zbd_id[1]]) < tol:\n# h.append(xs[zbd_id[1]])\n# else:\n# h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2)\n# else:\n# h = []\n#\n# return h\n\n\ndef solve_model(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c1, c2, c3, tol, n_t_points, plot=False):\n \"\"\"\n u_0_func: function of x that returns the initial value.\n t_final: Latest time to simulate to [s]\n x_a: The lowest x-value of the domain [m], x_a = 0\n x_b: The highest x-value of the domain [m]\n temp_a: The temperature at x=a (Dirichlet BV) [deg C]\n temp_b: The temperature at x=b (Dirichlet BV) [deg C]\n n_x_points: The number of points required in the x-direction.\n c1: The constant in the heat equation in the first part.\n tol: Tolerance for zero finding.\n \"\"\"\n # This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme.\n dx = (x_b - x_a) / n_x_points\n dt = t_final / n_t_points\n mu1 = c1 * dt / dx ** 2\n mu2 = c2 * dt / dx ** 2\n mu3 = c3 * dt / dx ** 2\n\n x = np.linspace(x_a, x_b, n_x_points)\n t = np.arange(0, t_final, dt)\n u_0 = np.reshape(u_0_func(x), (100, 1))\n data = [u_0]\n\n # bd1 = []\n # bd2 = []\n # u_0 = u_0_func()\n # h = find_zeros(u_0_func, x_a, x_b)\n # bd1.append(h[0])\n # bd2.append(h[1])\n h_1_arr = []\n h_2_arr = []\n\n h_data = find_zeros(u_0, a=x_a, b=x_b)\n print(\"Starting boundary points: \", h_data)\n h_1_arr.append(h_data[0])\n h_2_arr.append(h_data[1])\n\n u = u_0\n for t_i in range(n_t_points):\n mu_vector = np.ones_like(u)\n\n mu_vector[[x < h_1_arr[-1]]] *= mu1\n mu_vector[np.logical_and(h_1_arr[-1] <= x, x < h_2_arr[-1])] *= mu2\n mu_vector[h_2_arr[-1] <= x] *= mu3\n\n u = solve_one_time_step(u_0=u, mu_vec=mu_vector, temp_a=temp_a, temp_b=temp_b)\n\n h_data = find_zeros(u, a=x_a, b=x_b)\n if len(h_data) == 0:\n h_1_arr.append(h_data[0])\n h_2_arr.append(h_data[1])\n\n data.append(u)\n\n if (t_i % 1000) == 0:\n print(\".\", end=\"\")\n\n result = 
np.hstack(data)\n\n if plot:\n X, Y = np.meshgrid(x, t)\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n\n # Creating plot\n ax.plot_surface(X, Y, result[:, :-1].T)\n ax.set_xlabel(\"X [m]\")\n ax.set_ylabel(\"T [s]\")\n plt.show()\n\n\nsolve_model(u_0_func=initial_value,\n t_final=50,\n x_a=0,\n x_b=2,\n temp_a=5,\n temp_b=9,\n n_x_points=100,\n c1=0.01,\n c2=0.04,\n c3=0.01,\n tol=10 ** (-10),\n n_t_points=500,\n plot=True)\n\n# solve_heat_equation(u_0_func=initial_value,\n# t_final=50,\n# x_a=-1,\n# x_b=2,\n# temp_a=-2,\n# temp_b=4,\n# n_x_points=100,\n# c=0.01,\n# plot=True)\n"},"repo_name":{"kind":"string","value":"liorarueff/MathematicalIce"},"sub_path":{"kind":"string","value":"main.py"},"file_name":{"kind":"string","value":"main.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":7928,"string":"7,928"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114967,"cells":{"seq_id":{"kind":"string","value":"42680391223"},"text":{"kind":"string","value":"import unittest\nfrom unittest.mock import patch\nfrom lotto.cities import Cities\n\nclass TestCities(unittest.TestCase):\n\n\n def test_get_city_wrong_input(self):\n\n self.assertNotIn('vxvx', Cities.total_cities)\n self.assertNotIn(1, Cities.total_cities) \n with patch('builtins.input', return_value='Tom'):\n with self.assertRaises(ValueError):\n Cities.get_city_input()\n\n def test_get_city_correct_input(self):\n\n self.assertIn('bari', Cities.total_cities) \n with patch('builtins.input', return_value='BAri'):\n self.assertEqual(Cities.get_city_input(), 'bari')\n\nif __name__ == '__main__':\n unittest.main()"},"repo_name":{"kind":"string","value":"erydegio/lotto-game"},"sub_path":{"kind":"string","value":"test/test_cities.py"},"file_name":{"kind":"string","value":"test_cities.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":693,"string":"693"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114968,"cells":{"seq_id":{"kind":"string","value":"35062260193"},"text":{"kind":"string","value":"# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details.\n\nimport bpy\nimport string\nimport pdb\nimport time\nimport json\n# import urllib.request\nimport math \nimport operator\nimport ast\ntry:\n import _pickle as pickle\nexcept:\n import pickle\nimport os\nimport base64\nimport zlib\n\n# from materials import *\n\n\n\nfrom bpy_extras.io_utils import ImportHelper\nfrom bpy.props import StringProperty\n\ndbg = False\n\nbl_info = {\n \"name\": \"Minecraft motions import (*.mcmo)\",\n \"description\": \"This addon allows you to import minecraft worlds and mob motions\",\n \"author\": \"Aat Karelse\",\n \"version\": (0, 4, 0),\n \"blender\": (2, 6, 3),\n #\"api\": ???,\n \"location\": \"File > Import > minecraft stuff\",\n \"warning\": \"Alpha\",\n \"wiki_url\": \"https://github.com/aaps/MCmotions\",\n # \"tracker_url\": \"http://projects.blender.org/tracker/index.php?func=detail&aid=29552\",\n \"category\": \"Import-Export\"}\n\n \n\n# This class initiates and starts the state machine and uses the gathered data\n# to construct the model in Blender.\nclass DataImporter:\n \n\n def createMeshFromData(self, material, origin, verts, faces):\n # Create mesh and object\n \n mat = bpy.data.materials.new('TexMat')\n\n\n\n if material in self.materials:\n themat = self.materials\n else:\n themat = {material:{'name': 'Unknown - ' + str(material), 'color': (0, 0, 0), 'alpha':0, 'emittance':0 ,'textures':[]}}\n\n # print(themat[material])\n if 'textures' in themat[material] and len(themat[material]['textures']) > 0:\n\n for texpath in themat[material]['textures']:\n mtex = mat.texture_slots.add()\n mtex.texture = self.textures[texpath]\n\n # print('ok' + )\n me = bpy.data.meshes.new(themat[material]['name']+' Mesh')\n ob = bpy.data.objects.new(themat[material]['name'], me)\n ob.location = origin\n if len(themat[material]) >= 2:\n mat.diffuse_color = themat[material]['color']\n if len(themat[material]) >= 3 and themat[material]['alpha'] != 0:\n mat.alpha = themat[material]['alpha']\n mat.use_transparency = True\n mat.transparency_method = 'RAYTRACE'\n if len(themat[material]) >= 4 and themat[material]['emittance'] != 0:\n mat.emit = themat[material]['emittance']\n\n\n ob.show_name = True\n ob.active_material = mat\n # Link object to scene and make active\n scn = bpy.context.scene\n scn.objects.link(ob)\n \n \n # Create mesh from given verts, faces.\n me.from_pydata(verts, [], faces)\n\n # Update mesh with new data\n me.update() \n return ob\n\n def run(self, filepath, context):\n start_time = time.time()\n \n handle = open(filepath, 'rb')\n\n total = pickle.loads(zlib.decompress(handle.read()))\n \n indexi = 0\n \n vertices = total['vertices']\n faces = total['faces']\n entitys = total['allhistory']\n origins = total['origins']\n self.materials = total['materials']\n self.textures = total['textures']\n\n \n\n total = None\n\n extralist = {}\n\n self.tempdir = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'textures' \n try:\n os.makedirs(self.tempdir)\n except Exception:\n print('some dir error should be ok !')\n\n \n filelist = [ f for f in os.listdir(self.tempdir) ]\n for f in filelist:\n try:\n os.remove(f)\n except Exception:\n print('file removal trouble no biggy')\n\n \n if self.textures:\n for texture in self.textures:\n fileh = open(self.tempdir + os.sep + texture + \".png\", \"wb\")\n fileh.write(base64.b64decode(self.textures[texture]))\n\n\n temp = {}\n for material in self.materials:\n \n\n if 'textures' in self.materials[material] and len(self.materials[material]['textures']) > 0:\n 
\n for texpath in self.materials[material]['textures']:\n \n img = bpy.data.images.load(self.tempdir + os.sep + texpath + '.png')\n \n cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')\n cTex.image = img\n temp[texpath] = cTex\n self.textures = temp\n print(self.textures)\n \n \n\n\n for mat in vertices:\n\n\n\n if mat in vertices and mat in faces and mat in origins:\n self.createMeshFromData(mat, origins[mat], vertices[mat], faces[mat] )\n faces[mat] = None\n vertices[mat] = None\n else:\n print(str(mat) + 'not in faces, vertices or origins !')\n\n\n for value in entitys:\n \n aentity = entitys[value]\n if len( aentity['positions']) > 0:\n firstloc = aentity['positions'][0]['pos']\n firstloc = firstloc[0], firstloc[1]+2,firstloc[2]\n headloc = firstloc[0],firstloc[1]+1, firstloc[2]\n\n bpy.ops.mesh.primitive_cube_add(location=headloc)\n \n head = bpy.context.object\n head.rotation_mode = 'XYZ'\n head.scale = (0.25, 0.25, 0.25)\n\n bpy.ops.mesh.primitive_cube_add(location=firstloc)\n\n ob = bpy.context.object\n ob.rotation_mode = 'XYZ'\n ob.scale = (0.25, 0.75, 0.25)\n \n mat = bpy.data.materials.new(\"PKHG\")\n\n mobtype = aentity['type']\n if mobtype == '50':\n ob.name = \"creeper\"\n mat.diffuse_color = (0.0, 1.0, 0.0)\n elif mobtype == '51':\n ob.name = \"skeleton\"\n mat.diffuse_color = (1.0, 1.0, 1.0)\n elif mobtype == '52':\n ob.name = \"spider\"\n mat.diffuse_color = (0.2, 0.1, 0.1)\n elif mobtype == '54':\n ob.name = \"zombol\"\n mat.diffuse_color = (0.0, 0.3, 0.0)\n elif mobtype == '55':\n ob.name = \"slime\"\n mat.diffuse_color = (0.5, 1, 0.5)\n elif mobtype == '58':\n ob.name = \"enderman\"\n mat.diffuse_color = (0.5, 0.0, 0.5)\n elif mobtype == '90':\n ob.name = \"pig\"\n mat.diffuse_color = (0.5, 0.4, 0.4)\n elif mobtype == '65':\n ob.name = \"bat\"\n mat.diffuse_color = (1, 0.5, 0.2)\n elif mobtype == '91':\n ob.name = \"sheep\"\n mat.diffuse_color = (1, 1, 1)\n elif mobtype == '92':\n ob.name = \"cow\"\n mat.diffuse_color = (1, 0.2, 0.1)\n elif mobtype == '94':\n ob.name = \"squid\"\n mat.diffuse_color = (0.2, 0.2, 1)\n \n elif mobtype == '101':\n ob.name = \"rabbit\"\n mat.diffuse_color = (0.5, 0.1, 0.05)\n elif len(mobtype) > 10 or mobtype == 'player':\n if mobtype == 'player':\n ob.name = \"player: RECORDER\"\n \n mat.diffuse_color = (1, 0, 0)\n else:\n if 'type' in aentity:\n ob.name = \"player: \" + aentity['type']\n else:\n ob.name = \"player: unknown\"\n mat.diffuse_color = (1, 0.6, 0.4)\n\n else:\n mat.diffuse_color = (0.0, 0.0, 0.0)\n ob.name = str(mobtype)\n\n ob.active_material = mat\n\n bpy.ops.object.select_all(action='DESELECT')\n ob.select = True\n head.select = True\n \n put_on_layers = lambda x: tuple((i in x) for i in range(20))\n\n bpy.context.scene.objects.active = ob\n bpy.ops.object.parent_set()\n\n maincam = bpy.data.cameras.new(\"Camera\")\n maincam.clip_start = 1\n maincam.clip_end = 5000\n cam_ob = bpy.data.objects.new(\"Camera\", maincam)\n cam_ob.rotation_euler = (0, math.radians(180), 0)\n\n selfycam = bpy.data.cameras.new(\"Camera\")\n selfycam.clip_start = 1\n selfycam.clip_end = 5000\n selfy_cam_ob = bpy.data.objects.new(\"Camera\", selfycam)\n selfy_cam_ob.rotation_euler = (0, 0, 0)\n selfy_cam_ob.location = (0, 0, 25)\n \n selfy_cam_ob.layers[:] = put_on_layers({2})\n cam_ob.layers[:] = put_on_layers({2})\n ob.layers[:] = put_on_layers({2})\n head.layers[:] = put_on_layers({2})\n \n selfy_cam_ob.parent = head\n cam_ob.parent = head\n bpy.context.scene.objects.link(cam_ob)\n bpy.context.scene.objects.link(selfy_cam_ob)\n\n for posses 
in aentity['positions'][1:]:\n frame_num = int((posses['time'] / 20) * 25)\n bpy.context.scene.frame_set(frame_num)\n ob.location = (posses['pos'][0], posses['pos'][2], posses['pos'][1]+0.75)\n yaw = posses['yawpichhead'][1]\n\n head.rotation_euler = (math.radians(posses['yawpichhead'][1]), 0, 0)\n ob.rotation_euler = (math.radians(90), 0, math.radians(posses['yawpichhead'][0]) )\n ob.hide = not bool(posses['alive'])\n ob.hide_render = not bool(posses['alive'])\n\n ob.keyframe_insert(\"hide\")\n ob.keyframe_insert(\"hide_render\")\n ob.keyframe_insert(data_path=\"location\")\n ob.keyframe_insert(data_path=\"rotation_euler\")\n\n if ob.animation_data:\n for fc in ob.animation_data.action.fcurves:\n fc.extrapolation = 'LINEAR'\n for kp in fc.keyframe_points:\n kp.interpolation = 'LINEAR'\n\n print(\"Script finished after {} seconds\".format(time.time() - start_time))\n return {'FINISHED'}\n\n# This is the import operator.\nclass MineCraftImport(bpy.types.Operator, ImportHelper):\n '''Import form minecraft netrecorder some format (.mcmo)'''\n bl_idname = \"minecraft.importminecraftdump\"\n bl_label = \"MineCraft EntityPaths\"\n # mc ep\n \n filename_ext = \".mcmo\"\n \n filter_glob = StringProperty(\n default=\"*.mcmo\",\n options={'HIDDEN'}\n )\n \n @classmethod\n def poll(cls, context):\n return True\n \n def execute(self, context):\n di = DataImporter()\n return di.run(self.filepath, context)\n\ndef menu_func_import(self, context):\n self.layout.operator(MineCraftImport.bl_idname, text=\"Mcmo import (.mcmo)\")\n\ndef register():\n bpy.utils.register_class(MineCraftImport)\n bpy.types.INFO_MT_file_import.append(menu_func_import)\n\ndef unregister():\n bpy.utils.unregister_class(MineCraftImport)\n bpy.types.INFO_MT_file_import.remove(menu_func_import)\n\nif __name__ == \"__main__\":\n register()\n\n bpy.ops.something.minecraft('INVOKE_DEFAULT')"},"repo_name":{"kind":"string","value":"aaps/MCmotions"},"sub_path":{"kind":"string","value":"minecraftimport.py"},"file_name":{"kind":"string","value":"minecraftimport.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":12057,"string":"12,057"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":8,"string":"8"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114969,"cells":{"seq_id":{"kind":"string","value":"1391672757"},"text":{"kind":"string","value":"# N = input('enter N: ')\n# M = input('enter M: ')\nimport timeit\n\ndef draw_board():\n global board\n for line in transpose(board):\n print(*line)\n\ndef transpose(matr):\n res=[]\n n=len(matr)\n m=len(matr[0])\n for j in range(m):\n tmp=[]\n for i in range(n):\n tmp=tmp+[matr[i][j]]\n res=res+[tmp]\n return res \n\nN, M = 10, 10\n\nboard = [['.' 
for _ in range(M)] for _ in range(N)]\nturn = True\n\ndef fill(y1, y2, x):\n for y in range(y1 + 1, y2):\n\n global board, turn\n if turn:\n rock_color = 'W'\n else:\n rock_color = 'B'\n\n if board[x][y] != rock_color:\n board[x][y] = rock_color\n\n\ndef check_column(rock_color, x_placed, y_placed):\n y1, y2 = None, None\n for y, rock in enumerate(board[x_placed]):\n if rock == rock_color:\n if y1 == None:\n y1 = y\n elif y2 == None:\n y2 = y\n fill(y1, y2, x_placed)\n break\n\n x1, x2 = None, None\n for y, rock in enumerate(board[y_placed]):\n if rock == rock_color:\n if x1 == None:\n x1 = y\n elif x2 == None:\n x2 = y\n fill(x1, x2, y_placed)\n break \n\ndef chech_rock(x, y):\n global board, turn\n if turn:\n rock_color = 'W'\n else:\n rock_color = 'B'\n \n if board[x][y] == '.':\n board[x][y] = rock_color\n\n \n board = transpose(board)\n check_column(rock_color, x, y)\n board = transpose(board)\n check_column(rock_color, x, y)\n\n\n draw_board()\n turn = not turn\n else:\n print('error: field is taken')\n\ndef input_xy():\n return [int(number) - 1 for number in input('Enter: ').split(' ')]\n\n\n\n\ndef count():\n global board\n white_count = 0\n black_count = 0\n\n for column in board:\n for n in column:\n if n == 'W':\n white_count += 1\n elif n == 'B':\n black_count += 1\n print(f'white has {white_count - black_count} more rocks')\n\ndef main():\n x, y = input_xy()\n\n while x != -1 and y != -1:\n chech_rock(x, y)\n count()\n \n x, y = input_xy()\n\nmain()"},"repo_name":{"kind":"string","value":"matbitilya/rocks"},"sub_path":{"kind":"string","value":"2.py"},"file_name":{"kind":"string","value":"2.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2328,"string":"2,328"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114970,"cells":{"seq_id":{"kind":"string","value":"27653685459"},"text":{"kind":"string","value":"import collections\nimport os\nimport sys\n\nimport openpyxl\n\nimport database\nfrom truckmate_email import TruckmateEmail\n\nREPORT_EMAILS = [\n 'jwaltzjr@krclogistics.com'\n]\n\nclass Rate(object):\n\n def __init__(self, tariff, customers, origin, destination, break_value, is_min, rate):\n self.tariff = tariff\n self.customers = customers\n self.origin = origin\n self.destination = destination\n self.break_value = break_value\n self.is_min = (is_min.strip() == 'True')\n self.rate = rate\n\n def __repr__(self):\n return 'Rate(tariff=%s, origin=%s, dest=%s, break=%s, rate=%s)' % (\n self.tariff,\n self.origin,\n self.destination,\n self.rate_break,\n self.rate\n )\n\n @property\n def three_digit_zip(self):\n if self.destination.isdigit():\n if 600 <= int(self.destination[:3]) <= 606:\n return 'CHICOMM'\n else:\n return self.destination[:3]\n elif self.destination == 'CHICOMM':\n return 'CHICOMM'\n elif self.destination in ['497LP', '497UP']:\n return '497'\n else:\n return 'OTHER'\n\n @property\n def rate_break(self):\n if self.is_min:\n return 'MIN'\n else:\n rounded_break = round(self.break_value / 100.0) * 100.0\n return rounded_break\n\nclass RateReport(object):\n\n def __init__(self, file_name, datab):\n sql_file_path = os.path.join(sys.path[0], file_name)\n self.sql_query = self.load_query_from_file(sql_file_path)\n self.dataset = self.fetch_data_from_db(self.sql_query, datab)\n self.split_data = 
self.split_dataset(self.dataset)\n\n def load_query_from_file(self, file_path):\n with open(file_path, 'r') as sql_file:\n return sql_file.read()\n\n def fetch_data_from_db(self, query, db):\n with db as datab:\n with datab.connection.cursor() as cursor:\n cursor.execute(query)\n return cursor.fetchall()\n\n def split_dataset(self, dataset):\n split_data = collections.defaultdict(\n lambda: {\n 'breaks': set(),\n 'rates': collections.defaultdict(list)\n }\n )\n\n for rate in dataset:\n for origin in self.get_origins(rate):\n rate_obj = Rate(rate.TARIFF, rate.CUSTOMERS, origin, rate.DESTINATION, rate.BREAK, rate.IS_MIN, rate.RATE)\n\n if rate_obj.rate_break not in split_data[rate_obj.three_digit_zip]['breaks']:\n if not rate_obj.is_min:\n split_data[rate_obj.three_digit_zip]['breaks'].add(rate_obj.rate_break)\n\n rate_tup = (rate_obj.tariff, rate_obj.customers, rate_obj.origin, rate_obj.destination)\n split_data[rate_obj.three_digit_zip]['rates'][rate_tup].append(rate_obj)\n\n return split_data\n\n def get_origins(self, rate):\n origins = []\n\n if rate.ORIGIN_MS:\n for origin in rate.ORIGIN_MS.split(', '):\n origins.append(origin)\n\n if rate.ORIGIN:\n origins.append(rate.ORIGIN)\n\n return origins\n\n def export_as_xlsx(self):\n wb = openpyxl.Workbook()\n wb.remove_sheet(wb.active)\n\n for zone in sorted(self.split_data.keys()):\n ws = wb.create_sheet(zone)\n self._excel_insert_titles(ws, zone)\n self._excel_insert_data(ws, zone)\n\n virtual_wb = openpyxl.writer.excel.save_virtual_workbook(wb)\n return virtual_wb\n\n def _excel_insert_titles(self, worksheet, zone):\n titles = {\n 'A1': 'TARIFF',\n 'B1': 'CUSTOMER',\n 'C1': 'ORIGIN',\n 'D1': 'DESTINATION',\n 'E1': 'MIN'\n }\n\n row = 'F'\n for b in sorted(self.split_data[zone]['breaks']):\n cellname = row + str(1)\n titles[cellname] = b\n row = chr(ord(row) + 1)\n\n for cell, title in titles.items():\n worksheet[cell] = title\n\n def _excel_insert_data(self, worksheet, zone):\n current_row = 2\n for tariff_tup, rates in sorted(self.split_data[zone]['rates'].iteritems()):\n tariff, customers, origin, destination = tariff_tup\n worksheet.cell(row=current_row, column=1).value = tariff\n worksheet.cell(row=current_row, column=2).value = customers\n worksheet.cell(row=current_row, column=3).value = origin\n worksheet.cell(row=current_row, column=4).value = destination\n for rate in rates:\n current_column = self.find_column(worksheet, rate.rate_break)\n current_cell = worksheet.cell(row=current_row, column=current_column)\n current_cell.value = rate.rate\n current_cell.number_format = '#,##0.00'\n\n current_row += 1\n\n def find_column(self, worksheet, header):\n for cell in worksheet[1]:\n if cell.value == header:\n return cell.col_idx\n else:\n raise ValueError('No header found for %s' % header)\n\ndef main():\n rate_report = RateReport('ratereport.sql', database.truckmate)\n email_message = TruckmateEmail(\n REPORT_EMAILS,\n subject='Rate Report',\n attachments=[\n ('rate_report.xlsx', rate_report.export_as_xlsx())\n ]\n )\n email_message.send()\n\nif __name__ == '__main__':\n 
main()\n"},"repo_name":{"kind":"string","value":"jwaltzjr/truckmate"},"sub_path":{"kind":"string","value":"truckmate/ratereport.py"},"file_name":{"kind":"string","value":"ratereport.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5480,"string":"5,480"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":2,"string":"2"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114971,"cells":{"seq_id":{"kind":"string","value":"86734481945"},"text":{"kind":"string","value":"import pygame\n\nclass Ui:\n\n def __init__(self, screen, player) -> None:\n self.screen = screen\n self.player = player\n\n self.font = pygame.font.SysFont('Arial', 18)\n self.big_font = pygame.font.SysFont('Arial', 32)\n\n def render(self, score):\n score_text = self.big_font.render(str(score), 1, (255, 255, 255))\n hp_text = self.font.render(str(self.player.hp), 1, (255, 255, 255))\n\n self.screen.blit(score_text, (self.screen.get_width() / 2 - score_text.get_width() / 2, score_text.get_height()))\n self.screen.blit(hp_text, (self.player.rect.x + self.player.width / 2 - hp_text.get_width() / 2, self.player.rect.y + self.player.height))"},"repo_name":{"kind":"string","value":"JustThomi/SpaceShooter"},"sub_path":{"kind":"string","value":"ui.py"},"file_name":{"kind":"string","value":"ui.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":692,"string":"692"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114972,"cells":{"seq_id":{"kind":"string","value":"71994975835"},"text":{"kind":"string","value":"c50=0\r\nc20 = 0\r\nc10 = 0\r\nc1 = 0\r\nprint('Banco dos Crias')\r\nsaque = int(input('Valor a ser sacado:'))\r\nwhile saque !=0:\r\n if saque - 50 >= 0:\r\n c50 += 1\r\n saque = saque -50\r\n else:\r\n break\r\nwhile saque !=0:\r\n if saque - 20 >= 0:\r\n c20 += 1\r\n saque = saque -20\r\n else:\r\n break\r\nwhile saque !=0:\r\n if saque - 10 >= 0:\r\n c10 += 1\r\n saque = saque -10\r\n else:\r\n break \r\nwhile saque !=0:\r\n if saque - 1 >= 0:\r\n c1 += 1\r\n saque = saque -1\r\n else:\r\n break\r\nprint(f'{c50} Cedulas(a) de 50R$')\r\nprint(f'{c20} Cedulas(a) de 20R$')\r\nprint(f'{c10} Cedulas(a) de 10R$')\r\nprint(f'{c1} Cedulas(a) de 1R$')\r\n###if saque % 20 >= 0:\r\n ##print(f'{} Cedulas de 20R$')\r\n\r\n \r\n"},"repo_name":{"kind":"string","value":"ArthPx/learning-code"},"sub_path":{"kind":"string","value":"d 71.py"},"file_name":{"kind":"string","value":"d 71.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":769,"string":"769"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114973,"cells":{"seq_id":{"kind":"string","value":"70896170076"},"text":{"kind":"string","value":"import os\nimport subprocess\nimport tempfile\nfrom typing import Dict\n\nimport requests\n\nfrom . 
import errors\nfrom snapcraft.file_utils import calculate_hash, get_tool_path\nfrom snapcraft.internal.cache import FileCache\nfrom snapcraft.internal.indicators import download_requests_stream\n\n\nclass _Image:\n def __init__(\n self, *, base: str, snap_arch: str, url: str, checksum: str, algorithm: str\n ) -> None:\n self.base = base\n self.snap_arch = snap_arch\n self.url = url\n self.checksum = checksum\n self.algorithm = algorithm\n\n self._image_cache = FileCache(namespace=\"build-images-{}\".format(self.base))\n\n def _download_and_cache(self) -> str:\n request = requests.get(self.url, stream=True, allow_redirects=True)\n if not request.ok:\n raise errors.BuildImageRequestError(\n base=self.base, status_code=request.status_code\n )\n # First create the prefix as tempfile.TemporaryDirectory does not do that for you\n os.makedirs(self._image_cache.file_cache, exist_ok=True)\n with tempfile.TemporaryDirectory(\n prefix=self._image_cache.file_cache\n ) as tmp_dir:\n download_file = os.path.join(tmp_dir, \"{}-vm\".format(self.base))\n download_requests_stream(request, download_file)\n calculated_digest = calculate_hash(download_file, algorithm=self.algorithm)\n if self.checksum != calculated_digest:\n raise errors.BuildImageChecksumError(\n expected=self.checksum,\n calculated=calculated_digest,\n algorithm=self.algorithm,\n )\n return self._image_cache.cache(\n filename=download_file, algorithm=self.algorithm, hash=self.checksum\n )\n\n def get(self):\n cached_file = self._image_cache.get(\n hash=self.checksum, algorithm=self.algorithm\n )\n if not cached_file:\n cached_file = self._download_and_cache()\n # TODO verify nothing is using this as a backing store before implementing.\n # image_cache.prune(keep_hash=image.checksum)\n return cached_file\n\n\ndef _get_build_images(base: str) -> Dict[str, _Image]:\n if base == \"core16\":\n return dict(\n amd64=_Image(\n base=\"core16\",\n snap_arch=\"amd64\",\n url=\"https://cloud-images.ubuntu.com/releases/16.04/release-20180703/ubuntu-16.04-server-cloudimg-amd64-disk1.img\", # noqa: E501\n checksum=\"79549e87ddfc61b1cc8626a67ccc025cd7111d1af93ec28ea46ba6de70819f8c\", # noqa: E501\n algorithm=\"sha256\",\n )\n )\n elif base == \"core18\":\n return dict(\n amd64=_Image(\n base=\"core18\",\n snap_arch=\"amd64\",\n url=\"https://cloud-images.ubuntu.com/releases/18.04/release-20180724/ubuntu-18.04-server-cloudimg-amd64.img\", # noqa: E501\n checksum=\"6d663a8fd5eddd916f4aef4fd06d0f7f4cf0bb191f170b8c84cd2adf297bc5c3\", # noqa: E501\n algorithm=\"sha256\",\n )\n )\n else:\n raise KeyError(base)\n\n\ndef setup(*, base: str, snap_arch: str, size: str, image_path: str) -> None:\n \"\"\"Setup a build image for base and snap_arch of size at image_path.\n\n Example usage:\n >>> from snapcraft.internal.build_providers import _images\n >>> _images.setup(base=\"core18\", snap_arch=\"amd64\", size=\"10G\",\n image_path=\"images/core18.qcow2\")\n\n :param str base: the base of the build image to setup.\n :param str snap_arch: the architecture of the base for the build image.\n :param str size: the size of the disk for the build image.\n :param str image_path: the path to create the build image.\n :raises errors.BuildImageForBaseMissing:\n if there is no build image defined for the requested base or snap\n architecture.\n :raises errors.BuildImageRequestError:\n upon a network related issue that prevents download of the build image.\n :raises errors.BuildImageChecksumError:\n if the resulting downloaded build image does not match the expected\n checksum.\n 
:raises errors.BuildImageSetupError:\n if a build image cannot be created due to tooling or other system\n issues (e.g.; space issues).\n \"\"\"\n try:\n image = _get_build_images(base)[snap_arch]\n except KeyError as key_error:\n raise errors.BuildImageForBaseMissing(\n base=base, snap_arch=snap_arch\n ) from key_error\n\n cached_file = image.get()\n\n if os.path.dirname(image_path):\n os.makedirs(os.path.dirname(image_path), exist_ok=True)\n qemu_img_cmd = get_tool_path(\"qemu-img\")\n # qemu-img parameters:\n # -q: quiet.\n # -f: first image format.\n # -b: backing file.\n try:\n subprocess.check_call(\n [\n qemu_img_cmd,\n \"create\",\n \"-q\",\n \"-f\",\n \"qcow2\",\n \"-b\",\n cached_file,\n image_path,\n size,\n ]\n )\n except subprocess.CalledProcessError as process_error:\n raise errors.BuildImageSetupError(\n exit_code=process_error.returncode\n ) from process_error\n"},"repo_name":{"kind":"string","value":"Tymbur/Archive_Encrypted.zip"},"sub_path":{"kind":"string","value":"snapcraft/data/usr/lib/python3/dist-packages/snapcraft/internal/build_providers/_images.py"},"file_name":{"kind":"string","value":"_images.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5347,"string":"5,347"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114974,"cells":{"seq_id":{"kind":"string","value":"31526327859"},"text":{"kind":"string","value":"#\"D:\\UCLA+USC\\OPT\\fetch\\fetch_run.py\"\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import ScalarFormatter\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport streamlit as st\r\nimport os\r\nfrom collections import defaultdict\r\nimport torchvision.models as models\r\n\r\nstart_date_2021 = pd.to_datetime(\"2021-01-01\") # Start date for 2022\r\nend_date_2022 = pd.to_datetime(\"2022-12-31\") # End date for 2022\r\ndate_range_2021_2022 = pd.date_range(start_date_2021, end_date_2022, freq='D')\r\nx_new = pd.DataFrame({'# Date': date_range_2021_2022})\r\n\r\nscript_directory = os.path.dirname(os.path.abspath(__file__))\r\nmodel_path = os.path.join(script_directory, 'fetch_LSTM_model.pth')\r\n\r\nseq_length=90\r\ninput_size = seq_length\r\nhidden_size = 64\r\nnum_layers = 2\r\noutput_size = seq_length\r\nfetch_data_path= os.path.join(script_directory, 'data_daily.csv')\r\n\r\n\r\nmonthly_sums = defaultdict(float)\r\ndays_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\nmonthly_sum_2022 = {month: 0 for month in range(1, 13)}\r\n\r\nclass LSTM(nn.Module):\r\n def __init__(self, input_size, hidden_size, output_size):\r\n super().__init__()\r\n self.lstm = nn.LSTM(input_size, hidden_size)\r\n self.linear = nn.Linear(hidden_size, output_size)\r\n\r\n def forward(self, input):\r\n x, _ = self.lstm(input)\r\n x = self.linear(x)\r\n return x\r\n\r\n\r\n\r\ndef normalize_column(column):\r\n normalized = (column - column.min()) / (column.max() - column.min())\r\n return torch.tensor(normalized.values, dtype=torch.float32)\r\n\r\ndef revert(x,Y_min,Y_max):\r\n return Y_min+x*(Y_max-Y_min)\r\n\r\n\r\ndef main():\r\n\r\n #Load the trained LSTM model\r\n model = LSTM(input_size, hidden_size, output_size)\r\n checkpoint = torch.load(model_path)\r\n 
model.load_state_dict(checkpoint['model_state_dict'])\r\n\r\n #Load the original data\r\n raw = pd.read_csv(fetch_data_path)\r\n raw['# Date'] = pd.to_datetime(raw['# Date'])\r\n Y_min = raw['Receipt_Count'].values.astype(float).min()\r\n Y_max = raw['Receipt_Count'].values.astype(float).max()\r\n Y = raw['Receipt_Count'].values.astype(float)\r\n Y = normalize_column(raw['Receipt_Count'])\r\n Y = Y.reshape(-1, 1)\r\n Y_new = Y.detach().reshape(-1)\r\n\r\n #Use the loaded model to make predictions for 2022\r\n for j in range(365):\r\n with torch.no_grad():\r\n prediction = model(Y_new[-seq_length:].view(-1,seq_length))\r\n prediction = torch.tensor(prediction[0,-1].item()).view(1)\r\n Y_new = torch.cat((Y_new, prediction))\r\n output = revert(Y_new,Y_min,Y_max)\r\n\r\n output2 = output.detach().tolist()\r\n daily_number_of_receipts_2022 = output2[365:]\r\n start_date= 0\r\n for i in monthly_sum_2022.keys():\r\n monthly_sum = sum(daily_number_of_receipts_2022[start_date:(start_date +(days_in_month[i-1]))])\r\n monthly_sum_2022[i] +=monthly_sum\r\n start_date += days_in_month[i-1]\r\n\r\n x_to_be_plotted = monthly_sum_2022.keys()\r\n Y_to_be_plotted = [monthly_sum_2022[key] for key in monthly_sum_2022.keys() ]\r\n\r\n #Visualization\r\n plt.figure(figsize=(10, 6))\r\n #plt.plot(x_new['# Date'].tolist(), tensor_list, label='Predicted Number of Receipts per month', color='green', marker='o', linestyle='-')\r\n plt.plot(x_to_be_plotted, Y_to_be_plotted, label='Predicted Number of Receipts per month in 2022', color='green', marker='o', linestyle='-')\r\n plt.xlabel('Month for 2022')\r\n plt.ylabel('Number of Receipts')\r\n plt.title('Line Plot of Monthly Number of Receipts Over Time in 2022')\r\n plt.legend()\r\n plt.grid(True)\r\n\r\n y_formatter = ScalarFormatter(useMathText=True)\r\n y_formatter.set_scientific(False) # Disable scientific notation\r\n y_formatter.set_powerlimits((0, 0)) # Set the exponent range to (0, 0)\r\n plt.gca().yaxis.set_major_formatter(y_formatter)\r\n #plt.show()\r\n\r\n\r\n #Show the result using streamlit:\r\n\r\n st.title(\"LSTM model App for fetch analysis By Xiaoshu Luo\")\r\n selected_month = st.number_input(\"Please select a month (1-12) in 2022\", min_value=1, max_value=12, step=1, value=1)\r\n plt.scatter(selected_month, Y_to_be_plotted[selected_month - 1], color='red', marker='o', s=100, label='Selected Month')\r\n st.text(f\"The month you selected is: {selected_month}\")\r\n st.text(f\"The predicted monthly number of receipts in 2022 is: {int(monthly_sum_2022[selected_month])}\")\r\n st.pyplot(plt)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"},"repo_name":{"kind":"string","value":"tree2601/Fetch_LSTM_model"},"sub_path":{"kind":"string","value":"fetch_run.py"},"file_name":{"kind":"string","value":"fetch_run.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4500,"string":"4,500"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114975,"cells":{"seq_id":{"kind":"string","value":"26211678718"},"text":{"kind":"string","value":"import os\nimport json\nimport subprocess\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nfrom openai import OpenAI\nimport requests\nimport torch\nimport tiktoken\nimport argparse\n\n\ncommit_schema = {\n \"name\": \"git_commit\",\n \"description\": 
'Performs a git commit by calling `git commit -m \"commit_message\"`',\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"commit_message\": {\n \"description\": \"A short but descriptive commit message\",\n \"type\": \"string\"\n }\n },\n \"required\": [\"commit_message\"]\n }\n}\n\ndef generate_commit_message_mistral(diff):\n \"\"\"Generate commit message using Mistral AI.\"\"\"\n tokenizer = AutoTokenizer.from_pretrained(\"mistralai/Mistral-7B-v0.1\")\n tokens = tokenizer.encode(diff)\n tokens = tokens[:7999]\n diff = tokenizer.decode(tokens)\n prompt = \"You are given the output of a git diff. Your task is to create a descriptive commit message based on this diff, max 15 words\\n\\n\" + diff\n data = {\n \"system\": \"You generate commit messages from a git diff that is provided to you. It is your job to create a descriptive commit message based on this diff. Do not include the diff in your commit message. Only include the commit message. The most important thing is to ensure you are only describing the changes that are marked with + or - in the diff. Do not include any other changes in your commit message.\",\n \"model\": \"mistral\",\n \"prompt\": \"{prompt}\".format(prompt=prompt),\n \"stream\": False,\n }\n response = requests.post(\"http://localhost:11434/api/generate\", json=data)\n json_strings = response.text.strip().split('\\n')\n responses = [json.loads(js)[\"response\"] for js in json_strings]\n result = \"\".join(responses)\n\n return result\n \ndef generate_commit_message_globe_server(diff):\n data = {\"diff\": diff}\n response = requests.post(\"http://globe.engineer/api/scommit-server\", json=data)\n commit_message = response.text.strip()\n return commit_message\n\ndef format_diff(diff):\n added = []\n removed = []\n lines = diff.split('\\n')\n for line in lines:\n if line.startswith('+'):\n added.append(line)\n elif line.startswith('-'):\n removed.append(line)\n formatted_diff = 'ADDED:\\n' + '\\n'.join(added) + '\\nREMOVED:\\n' + '\\n'.join(removed)\n return formatted_diff\n\ndef generate_commit_message_gpt(diff):\n \"\"\"Generate commit message using OpenAI's ChatGPT.\"\"\"\n\n client = OpenAI(api_key=os.environ[\"OPENAI_API_KEY\"])\n tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo')\n\n if len(diff) == 0:\n return 'default commit message'\n\n tokens = tokenizer.encode(diff)\n tokens = tokens[:15900]\n diff = tokenizer.decode(tokens)\n prompt = \"Can you commit this diff for me:\\n\\n\" + diff\n\n response = client.chat.completions.create(messages=[\n {'role': 'system', 'content': \"You call the git commit function with short and informative commit messages\"},\n {'role': 'user', 'content': prompt},\n ],\n functions=[commit_schema],\n function_call={'name': 'git_commit'},\n model='gpt-3.5-turbo-16k',\n temperature=0.5)\n args = json.loads(response.choices[0].message.function_call.arguments)\n commit_message = args['commit_message']\n return commit_message\n\n\ndef scommit():\n \"\"\"Perform a git commit with a generated or provided message.\"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', type=str, help='Commit message')\n parser.add_argument('-mi', action='store_true', help='Using mistral')\n parser.add_argument('-globe-server', action='store_true', help='Using globe server')\n args, unknown = parser.parse_known_args()\n\n try:\n # Check if there are any commits\n subprocess.check_output(['git', 'rev-parse', '--verify', 'HEAD'], text=True).strip()\n commits_exist = True \n except subprocess.CalledProcessError:\n 
commits_exist = False\n\n if commits_exist and args.mi:\n diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()\n formatted_diff = format_diff(diff)\n message = generate_commit_message_mistral(formatted_diff)\n message = message.replace('\"', '\\\\\"')\n \n elif commits_exist and args.globe_server:\n diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()\n formatted_diff = format_diff(diff)\n message = generate_commit_message_globe_server(formatted_diff)\n message = message.replace('\"', '\\\\\"')\n \n elif args.m is None and commits_exist:\n diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()\n formatted_diff = format_diff(diff)\n message = generate_commit_message_gpt(formatted_diff)\n\n else:\n message = args.m if args.m is not None else 'Initial commit'\n\n cmd = f'git commit {\" \".join(unknown)} -m \"{message}\"'\n os.system(cmd)\n \n\nif __name__ == '__main__':\n scommit()"},"repo_name":{"kind":"string","value":"kpister/prompt-linter"},"sub_path":{"kind":"string","value":"data/scraping/repos/Globe-Engineer~semantic-commit/scommit~scommit.py"},"file_name":{"kind":"string","value":"scommit~scommit.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5025,"string":"5,025"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114976,"cells":{"seq_id":{"kind":"string","value":"25249831556"},"text":{"kind":"string","value":"import cv2\nimport pandas as pd\nimport time\n\n# Can take a video file as input or video stream from the webcam\ncap = cv2.VideoCapture(\"C:/Users/harsh/Downloads/video (1080p).mp4\")\n#cap = cv2.VideoCapture(0)\n\nindex = [\"color\", \"color_name\", \"hex\", \"R\", \"G\", \"B\"]\ncsv = pd.read_csv(\"C:/Users/harsh/Downloads/colors.csv\", names=index, header=None)\nr = g = b = x_pos = y_pos = 0\n\n\n# Function to get the Color name from the dataset for which the RGB value is the closest.\ndef get_color_name(R, G, B):\n minimum = 10000\n for i in range(len(csv)):\n d = abs(R - int(csv.loc[i, \"R\"])) + abs(G - int(csv.loc[i, \"G\"])) + abs(B - int(csv.loc[i, \"B\"]))\n if d <= minimum:\n minimum = d\n cname = csv.loc[i, \"color_name\"]\n return cname\n\n\n# Function to get x,y coordinates of mouse double click which will also give the RGB values \ndef draw_function(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDBLCLK:\n global b, g, r, x_pos, y_pos, clicked\n clicked = True\n x_pos = x\n y_pos = y\n b, g, r = frame[y, x]\n b = int(b)\n g = int(g)\n r = int(r)\n\n\n# Outer Loop To keep the Video Stream on\nwhile True:\n\n ret, frame = cap.read()\n clicked = False\n cv2.namedWindow('Video')\n # draw_function will be called when the mouse event occured\n cv2.setMouseCallback('Video', draw_function)\n cv2.imshow('Video', frame)\n key = cv2.waitKey(1)\n\n\n # Inner Loop will be executed when key(p) is clicked which will pause the video stream to a single frame \n # This loop is used for the main task which is Color detection \n if cv2.waitKey(1) == ord(\"p\"):\n while True:\n \n cv2.imshow('Video', frame)\n \n # Display the color name once draw function is called and clicked is true\n if clicked:\n\n # cv2.rectangle(image, start point, endpoint, color, thickness)-1 fills entire rectangle\n 
cv2.rectangle(frame, (20, 20), (750, 60), (b, g, r), -1)\n\n # Creating text string to display( Color name and RGB values )\n text = get_color_name(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)\n\n # cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )\n cv2.putText(frame, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA)\n\n # For very light colours we will display text in black colour\n if r + g + b >= 600:\n cv2.putText(frame, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)\n\n clicked = False\n\n\n # Key to get out of the loops\n # Key(p) to resume the video stream and Key(esc) to get out of both the loops and end the execution \n key = cv2.waitKey(1)\n\n if key == ord(\"p\"):\n break\n if key == 27:\n break\n \n if key == 27:\n break\n\n\ncap.release()\ncv2.destroyAllWindows()\n"},"repo_name":{"kind":"string","value":"Harshil-Agrawal/RealTime_Color_Detection"},"sub_path":{"kind":"string","value":"Color_detection.py"},"file_name":{"kind":"string","value":"Color_detection.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2995,"string":"2,995"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114977,"cells":{"seq_id":{"kind":"string","value":"40559915413"},"text":{"kind":"string","value":"import random\r\n\r\n# def rotto():\r\n# num = [0, 0, 0, 0, 0, 0]\r\n# for i in range(0, 6):\r\n# num[i] = random.randint(1, 46)\r\n# for j in num:\r\n# if j == num[i]:\r\n# i -= 1\r\n# return num\r\n\r\n\r\n# print(rotto())\r\n\r\nlotto_number = []\r\n\r\ndef getRandomNumber():\r\n number = random.randint(1, 45)\r\n return number\r\n\r\nwhile True:\r\n if len(lotto_number) == 6:\r\n break\r\n random_number = getRandomNumber()\r\n if random_number not in lotto_number:\r\n lotto_number.append(random_number)\r\n\r\nwhile True:\r\n bonus_number = getRandomNumber()\r\n if bonus_number not in lotto_number:\r\n break\r\nprint(lotto_number, '+', bonus_number)\r\n\r\n"},"repo_name":{"kind":"string","value":"Getver/StartCoding"},"sub_path":{"kind":"string","value":"00_BasicLecture/09_로또번호.py"},"file_name":{"kind":"string","value":"09_로또번호.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":700,"string":"700"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114978,"cells":{"seq_id":{"kind":"string","value":"21652314287"},"text":{"kind":"string","value":"import sklearn\r\nfrom sklearn.linear_model import Perceptron\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\niris=load_iris()\r\ndf=pd.DataFrame(iris.data,columns=iris.feature_names)\r\ndf['label']=iris.target\r\ndf.columns = [\r\n 'sepal length', 'sepal width', 'petal length', 'petal width', 'label'\r\n]\r\n\r\n\r\ndata = np.array(df.iloc[:100, [0, 1, -1]])\r\n#因为iloc[num_of_row_start : num_of_row_end, num_of_column_start : num_of_column_end]不包含num_of_end,所以需要 +1才能包含c行\r\nX, y = data[:,:-1], data[:,-1]\r\ny = np.array([1 if i == 1 else -1 for i in y])\r\n#感知机\r\nclf = 
Perceptron(fit_intercept=True,\r\n max_iter=1000,\r\n shuffle=True)\r\nclf.fit(X, y)\r\nprint(clf.coef_)\r\nprint(clf.intercept_)\r\n\r\nplt.figure(figsize=(10,10))\r\n\r\n# 中文标题\r\nplt.rcParams['font.sans-serif']=['SimHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\nplt.title('鸢尾花线性数据示例')\r\n\r\nplt.scatter(data[:50, 0], data[:50, 1], c='b', label='Iris-setosa',)\r\nplt.scatter(data[50:100, 0], data[50:100, 1], c='orange', label='Iris-versicolor')\r\n\r\n# 画感知机的线\r\nx_ponits = np.arange(4, 8)\r\ny_ = -(clf.coef_[0][0]*x_ponits + clf.intercept_)/clf.coef_[0][1]\r\nplt.plot(x_ponits, y_)\r\n\r\n# 其他部分\r\nplt.legend() # 显示图例\r\nplt.grid(False) # 不显示网格\r\nplt.xlabel('sepal length')\r\nplt.ylabel('sepal width')\r\nplt.legend()\r\nplt.show()"},"repo_name":{"kind":"string","value":"yishishizi/machinelearning"},"sub_path":{"kind":"string","value":"sk.py"},"file_name":{"kind":"string","value":"sk.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1453,"string":"1,453"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114979,"cells":{"seq_id":{"kind":"string","value":"3132557366"},"text":{"kind":"string","value":"import torch\nfrom torch import nn, reshape\nfrom torch import device as torch_device\n\nclass Simple(nn.Module):\n\t\"\"\"\n\tSimple model\n\tuse mlp to do denoise\t\n\t\"\"\"\n\tdef __init__(self, samples, chunk_size, channels, device):\n\t\tsuper().__init__()\n\t\tself.chunk_size = chunk_size\n\t\tself.channels = channels\n\t\tself.linear = nn.Linear(self.chunk_size*self.channels, self.chunk_size*self.channels, bias=False, device = device)\n\t\n\tdef forward(self, state, _input):\n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\tx : Tensor\n\t\t Input tensor of shape (batch_size, samples, channels)\n\t\tstate : Tensor\n\t\t Input tensor of shape (batch_size, hidden_dim, channels)\n\t\tReturns\n\t\t-------\n\t\tTensor\n\t\t State tensor of shape (batch_size, hidden_dim, channels)\n\t\tTensor\n\t\t Output tensor of shape (batch_size, samples, channels)\n\t\t\"\"\"\n\t\tif len(_input.shape)==3:\n\t\t\tbatch_size = _input.shape[0]\n\t\telse:\n\t\t\tbatch_size = 1\n\t\tshape_saved = _input.shape\n\t\tstd = torch.std(_input)\n\t\t_input = _input/std\n\t\t_res = reshape(_input, (batch_size,-1))\n\t\t_res = self.linear(_res)\n\t\t_res = reshape(_res, shape_saved)\t\n\t\treturn None, (_input+_res)*std\n\n\t@property\n\tdef is_recursive(self):\n\t\treturn True\n"},"repo_name":{"kind":"string","value":"zhouxinyu0723/audio-denoise-addon-v2"},"sub_path":{"kind":"string","value":"ZENNet/model/simple.py"},"file_name":{"kind":"string","value":"simple.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1156,"string":"1,156"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114980,"cells":{"seq_id":{"kind":"string","value":"7408358101"},"text":{"kind":"string","value":"#import tool\nimport sys\ninputfile_1=sys.argv[1]\ninputfile_2=sys.argv[2]\n#create dictionary\ndef list2dict(s):\n d={}\n for i in s:\n if i in d.keys():\n d[i]=d[i]+1\n else:\n d[i]=1\n return 
d\n#define a function to match key and value between 2 files\ndef cmplist (s1,s2):\n d1=list2dict(s1)\n d2=list2dict(s2)\n d1_keys=set(d1.keys())\n d2_keys=set(d2.keys())\n intersect_keys=d1_keys.intersection(d2_keys)\n added={}\n for i in (d2_keys-d1_keys):\n added[i]=d2[i]\n removed={}\n for i in (d1_keys-d2_keys):\n removed[i]=d1[i]\n modified={}\n for i in intersect_keys:\n if d1[i]!=d2[i]:\n modified[i]=d1[i],d2[i]\n same={}\n for i in intersect_keys:\n if d1[i]==d2[i]:\n same[i]=d1[i]\n return added, removed, modified,same\nwith open(inputfile_1,\"r\") as f1:\n result=f1.read().split('\\n')\n print(result[0]) \n f1list=[]\n for line in result:\n f1list.append(line)\n\nwith open(inputfile_2,\"r\") as f2:\n result2=f2.read().split('\\n')\n f2list=[]\n for line in result2:\n f2list.append(line)\nadded_out, removed_out, modified_out, same_out=cmplist(f2list,f1list)\n#print out the results on screen \nprint(\"Number of observations in spec but not in output ----------\")\nprint(len(added_out))\nprint(\"Number of observations in both docs--------\")\nprint(len(same_out))\nprint(\"Number of observations in the output but not in spec---------\")\nprint(len(removed_out))\nprint(\"Number of observations were modified in the output---------\")\nprint(len(modified_out))\nprint(\"Below is the records in spec but not in the output-----\")\nprint(added_out)\nprint(\"Below is the records in the output but not in spec------\")\nprint(removed_out)\n\n"},"repo_name":{"kind":"string","value":"Becky2012/Large-file-discrepancy-checks"},"sub_path":{"kind":"string","value":"check.py"},"file_name":{"kind":"string","value":"check.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1755,"string":"1,755"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114981,"cells":{"seq_id":{"kind":"string","value":"16325613872"},"text":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\n# ECON 280A\n# \n# PS 1\n# \n# By Yi-Fan, Lin\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.optimize import fsolve\nfrom sympy import symbols, Eq, solve, nsolve\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ndf = pd.read_excel(\"/Users/ricky/Documents/椰林大學/Berkeley/International Econ/Data for PS 1.xls\"\n , sheet_name=\"trade flows mfg\", header=0, nrows=20, usecols=\"B:T\")\n\ndf = df[1:]\n#exports from i to j\n\n\n# In[3]:\n\n\ndf.head(5)\n\n\n# In[4]:\n\n\n#name of countries\nprint(df.columns)\n\n\n# In[149]:\n\n\ntheta = 5 #the median theta as in slides\nnc = 19 #number of countries\nt_cost = np.ones([nc, nc]) #change in trade costs\nprod = np.ones(nc) #change in productivities\nlabor = np.ones(nc) #change in labors\n\ntotal_prod = df.sum(axis=1) #total production, Y_n\ntotal_cons = df.sum(axis=0) #total consumption\ndeficit = [total_cons.iloc[i] - total_prod.iloc[i] for i in range(nc)]\ndf_share = df.divide(total_cons.iloc[0], axis=0) #pi_n\ndef_share = deficit/np.array(total_prod)\n\n\n# In[190]:\n\n\ndef gen_denom(ncoun, w, theta, t_cost):\n #ncoun for the index of country\n \n denom = 0\n for k in range(nc):\n temp = df_share.iloc[ncoun, k]*(w[k]*t_cost[ncoun, k])**(-theta)\n denom = denom + temp\n \n return denom\n\ndef obj(w_vec, theta, t_cost):\n eq_vec = [0 for i in range(nc)]\n w_vec = w_vec\n \n for i in range(nc):\n 
rhs = 0\n for n in range(nc):\n denom = gen_denom(n, w_vec, theta, t_cost)\n rhs = rhs + df_share.iloc[n, i]*(w_vec[i]*t_cost[n, i])**(-theta)*(w_vec[n]*total_cons.iloc[n])/denom\n eq_vec[i] = w_vec[i]*total_cons.iloc[i] - rhs\n \n return eq_vec\n\n\n# In[151]:\n\n\ndef welfare(ncoun, w, theta, t_cost):\n \n c_share = (w[ncoun]*t_cost[ncoun, ncoun])**(-theta)/gen_denom(ncoun, w, theta, t_cost)\n \n return c_share**(-1/theta)\n\ndef output(theta, t_cost):\n \n wage = fsolve(obj, np.ones(nc), (theta, t_cost))\n real_wage = np.array([welfare(n, wage, theta, t_cost) for n in range(nc)])\n price = wage/real_wage\n \n return [wage, real_wage, price]\n\n\n# In[191]:\n\n\nbase = output(theta, t_cost)\n\n\n# In[192]:\n\n\nfor i in range(nc):\n print(def_share[i]*100, (base[0])[i], (base[1])[i], (base[2])[i])\n\n\n# In[154]:\n\n\nt_cost_dec = t_cost*(1/1.3)\nfor i in range(nc):\n t_cost_dec[i, i] = 1 #except for own\n\n\n# In[155]:\n\n\n#tariff cut\n\ntarcut = output(theta, t_cost_dec)\n\n\n# In[173]:\n\n\nfor i in range(nc):\n print((df.columns)[i], def_share[i]*100, tarcut[0][i], tarcut[1][i], tarcut[2][i])\n\n\n# In[159]:\n\n\n#us-canada FTA\n#canada: 3\n#us: 18\n\nt_cost_FTA = t_cost\nt_cost_FTA[3, 18] = t_cost_FTA[3, 18]*(1/1.3)\nt_cost_FTA[18, 3] = t_cost_FTA[18, 3]*(1/1.3)\n\n\n# In[160]:\n\n\ntarFTA = output(theta, t_cost_FTA)\n\n\n# In[174]:\n\n\nfor i in range(nc):\n print((df.columns)[i], def_share[i]*100, tarFTA[0][i], tarFTA[1][i], tarFTA[2][i])\n\n\n# In[162]:\n\n\nplt.rcParams['figure.figsize'] = [10, 6]\nplt.rcParams['figure.dpi'] = 100\n\n\n# In[179]:\n\n\nfig = plt.figure()\nfig, ax = plt.subplots()\n\nax.scatter(def_share, base[0], c='blue', label='Relative')\nax.scatter(def_share, base[1], c='green', label='Real')\n\nax.legend()\nplt.ylabel('Change in Wage')\nplt.xlabel('Trade deficit')\nplt.title('Baseline (graph 1.)')\nplt.show()\n\n\n# In[180]:\n\n\nfig = plt.figure()\nfig, ax = plt.subplots()\n\nax.scatter(def_share, tarcut[0], c='blue', label='Relative')\nax.scatter(def_share, tarcut[1], c='green', label='Real')\n\nax.legend()\nplt.ylabel('Change in Wage')\nplt.xlabel('Trade deficit')\nplt.title('Overall Tariff Cut (graph 2.)')\nplt.show()\n\n\n# In[181]:\n\n\nfig = plt.figure()\nfig, ax = plt.subplots()\n\nax.scatter(def_share, tarFTA[0], c='blue', label='Relative')\nax.scatter(def_share, tarFTA[1], c='green', label='Real')\n\nplt.text(def_share[3], tarFTA[0][3]+0.01, 'CAN')\nplt.text(def_share[18], tarFTA[0][18]+0.01, 'USA')\n\nax.legend()\nplt.ylabel('Change in Wage')\nplt.xlabel('Trade deficit')\nplt.title('US-Canada FTA (graph 3.)')\nplt.show()\n\n\n# In[182]:\n\n\nfig = plt.figure()\nfig, ax = plt.subplots()\n\nax.scatter(def_share, base[1], c='blue', label='Base')\nax.scatter(def_share, tarcut[1], c='green', label='Tariff cut')\nax.scatter(def_share, tarFTA[1], c='brown', label='FTA')\n\nplt.text(def_share[3], tarFTA[1][3]+0.01, 'CAN')\nplt.text(def_share[18], tarFTA[1][18]+0.01, 'USA')\n\nax.legend()\nplt.ylabel('Change in Real Wage')\nplt.xlabel('Trade deficit')\nplt.title('Comparison (graph 4.)')\nplt.show()\n\n\n# In[183]:\n\n\ntable1 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[1], 'Tariff Cut': tarcut[1], 'FTA': tarFTA[1]})\ntable1.index = df.columns\nprint(\"Change in Real wage (Table 1)\")\nprint(table1)\n\n\n# In[184]:\n\n\ntable2 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[0], 'Tariff Cut': tarcut[0], 'FTA': tarFTA[0]})\ntable2.index = df.columns\nprint(\"Change in Relative wage (Table 2)\")\nprint(table2)\n\n\n# 
In[185]:\n\n\ntable3 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[2], 'Tariff Cut': tarcut[2], 'FTA': tarFTA[2]})\ntable3.index = df.columns\nprint(\"Change in Price index (Table 3)\")\nprint(table3)\n\n\n# In[193]:\n\n\nfig = plt.figure()\nfig, ax = plt.subplots()\n\nax.scatter(def_share, base[2], c='blue', label='Base')\nax.scatter(def_share, tarcut[2], c='green', label='Tariff cut')\nax.scatter(def_share, tarFTA[2], c='brown', label='FTA')\n\nplt.text(def_share[3], tarFTA[2][3]+0.01, 'CAN')\nplt.text(def_share[18], tarFTA[2][18]+0.01, 'USA')\n\nax.legend()\nplt.ylabel('Change in Price index')\nplt.xlabel('Trade deficit')\nplt.title('Comparison (graph 5.)')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n"},"repo_name":{"kind":"string","value":"Yifan3018/Armington-model-in-international-trade"},"sub_path":{"kind":"string","value":"PS1.py"},"file_name":{"kind":"string","value":"PS1.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":5541,"string":"5,541"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114982,"cells":{"seq_id":{"kind":"string","value":"15892916608"},"text":{"kind":"string","value":"from collections import defaultdict\n\n\ndef solution(dirs):\n\n d = defaultdict(list)\n\n cur_x = 0\n cur_y = 0\n\n x = [0, 0, 1, -1]\n y = [1, -1, 0, 0]\n\n cnt = 0\n for e in dirs:\n to_x = cur_x\n to_y = cur_y\n if e == 'U':\n to_x += x[0]\n to_y += y[0]\n elif e == 'D':\n to_x += x[1]\n to_y += y[1]\n elif e == 'R':\n to_x += x[2]\n to_y += y[2]\n elif e == 'L':\n to_x += x[3]\n to_y += y[3]\n\n if -5 <= to_x <= 5 and -5 <= to_y <= 5:\n flag = False\n if d[(cur_x, cur_y)]:\n for pos in d[(cur_x, cur_y)]:\n dx, dy = pos\n if to_x == dx and to_y == dy:\n flag = True\n break\n if d[(to_x, to_y)]:\n for to_pos in d[(to_x, to_y)]:\n to_dx, to_dy = to_pos\n if cur_x == to_dx and cur_y == to_dy:\n flag = True\n break\n\n d[(cur_x, cur_y)].append((to_x, to_y))\n cur_x = to_x\n cur_y = to_y\n\n if flag is False:\n cnt += 1\n\n return cnt\n\n\nprint(solution(\"ULURRDLLU\"))\nprint(solution(\"LULLLLLLU\"))\nprint(solution(\"LLLLRRRRRRRRRRLLLLUUUUUUUUULLLLLLL\"))\nprint(solution(\"ULURRDLLUL\"))\nprint(solution(\"LLLLLLL\"))\nprint(solution(\"LLLLLLLDRU\"))\nprint(solution(\"LLLLLLLDRUD\"))\nprint(solution(\"URULDD\"))\n\n\n# 설명\n# 딕셔너리를 사용해서 두 가지 경우의 수를 확인해서 풀었습니다.\n# 현재 위치에서 다음 좌표를 방문했을 경우와, 방문할 위치에서 현재 위치를 이미 방문했는지 확인했습니다.\n# 이 두 가지의 경우는 해당 경로를 이미 지나쳤기 때문에 처음 걸어본 경로가 아닙니다."},"repo_name":{"kind":"string","value":"hyunsoolee991/cs"},"sub_path":{"kind":"string","value":"algorithm/programmers/방문 길이.py"},"file_name":{"kind":"string","value":"방문 길이.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1866,"string":"1,866"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"ko"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114983,"cells":{"seq_id":{"kind":"string","value":"71346745115"},"text":{"kind":"string","value":"from os import path\n\nfrom mediakit.utils.files import increment_filename_if_exists\nfrom mediakit.utils.commands import run_command_in_background\nfrom mediakit.constants import 
FFMPEG_BINARY\n\n\nVIDEO_FORMATS = {\"mp4\"}\n\n\nclass ConversionOptions:\n NO_AUDIO = \"-an\"\n\n\ndef merge_video_and_audio(\n video_path, audio_path, output_file_path, output_format=\"mp4\"\n):\n final_output_file_path = increment_filename_if_exists(output_file_path)\n\n command = (\n f'{FFMPEG_BINARY} -i \"{video_path}\" -i \"{audio_path}\" '\n f\"-vcodec copy -f {output_format} \"\n f'\"{final_output_file_path}\"'\n )\n\n run_command_in_background(command)\n\n\ndef convert_media(file_path, output_file_path, output_format, options=[]):\n final_output_file_path = increment_filename_if_exists(output_file_path)\n\n command = (\n f'{FFMPEG_BINARY} -i \"{file_path}\" '\n + (\"-vcodec copy \" if output_format in VIDEO_FORMATS else \"\")\n + f\"-f {output_format} \"\n + f'\"{final_output_file_path}\" '\n + \" \".join(options)\n )\n\n run_command_in_background(command)\n"},"repo_name":{"kind":"string","value":"diego-aquino/mediakit"},"sub_path":{"kind":"string","value":"mediakit/media/convert.py"},"file_name":{"kind":"string","value":"convert.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1080,"string":"1,080"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":11,"string":"11"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114984,"cells":{"seq_id":{"kind":"string","value":"36424255650"},"text":{"kind":"string","value":"#http://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html#sphx-glr-auto-examples-model-selection-plot-grid-search-digits-py\nfrom __future__ import print_function\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nimport data_service\n\nprint(__doc__)\n\nscale_data = True\ntransform_data = True\nrandom_slice = 10000\nrandom_seed = 777\ndataset=\"kdd\"\ntest_size=0.5\n\nx_train, x_test, y_train, y_test = data_service.load_and_split_data(scale_data=scale_data,\n transform_data=transform_data,\n random_slice=random_slice, random_seed=random_seed,\n dataset=dataset, test_size=test_size)\n\n# Set the parameters by cross-validation\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.0001],\n 'C': [1, 10, 100, 1000], 'class_weight': ['balanced', None]},\n {'kernel': ['linear'], 'C': [1, 10, 100, 1000], 'class_weight': ['balanced', None]}]\n\n# neural_network.MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, solver=solver,\n# activation=activation, alpha=alpha, random_state=1, max_iter=max_iter, learning_rate=learning_rate,\n# learning_rate_init=learning_rate_init)\ntuned_parameters = [{\n 'solver': ['lbfgs'],\n 'learning_rate_init': [0.0001, 0.01, 1],\n 'hidden_layer_sizes': [(100,)],\n 'activation': ['relu'],\n 'alpha': [0.0001, 0.01, 1]\n },\n {\n 'solver': ['sgd'],\n 'learning_rate': ['constant', 'invscaling', 'adaptive'],\n 'learning_rate_init': [0.0001, 0.01, 1],\n 'hidden_layer_sizes': [(100,)],\n 'activation': ['relu'],\n 'alpha': [0.0001, 0.01, 1]\n }\n]\n\nestimator = SVC();\nestimator = MLPClassifier()\n\nscores = ['precision_macro', 'recall_macro', 'accuracy']\n\nfor score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n print()\n\n clf = GridSearchCV(estimator, tuned_parameters, cv=5,\n scoring=score, n_jobs=-1)\n clf.fit(x_train, y_train)\n\n print(\"Best parameters set 
found on development set:\")\n print()\n print(clf.best_params_)\n print()\n print(\"Grid scores on development set:\")\n print()\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\n print()\n\n print(\"Detailed classification report:\")\n print()\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n print()\n y_pred = clf.predict(x_test)\n print(classification_report(y_test, y_pred))\n print()"},"repo_name":{"kind":"string","value":"boyko11/ML1-SupervisedLearning"},"sub_path":{"kind":"string","value":"grid_search.py"},"file_name":{"kind":"string","value":"grid_search.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":3243,"string":"3,243"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114985,"cells":{"seq_id":{"kind":"string","value":"36046350963"},"text":{"kind":"string","value":"import os\n\n\nclass CustomValidator:\n\n @staticmethod\n def path_validate(path: str) -> str:\n \"\"\"\n try:\n path = validate_path(\" my /path /with spaces \")\n print(f\"The path {path} is valid.\")\n except FileNotFoundError as e:\n print(e)\n :param path:\n :return:\n \"\"\"\n # Remove spaces from the path\n path = path.replace(\" \", \"\")\n\n # Check if the path exists\n if not os.path.exists(path):\n raise FileNotFoundError(f\"The path {path} does not exist.\")\n\n return path\n"},"repo_name":{"kind":"string","value":"jerome-neo/Command-line-Data-Processor"},"sub_path":{"kind":"string","value":"validator/custom_validator.py"},"file_name":{"kind":"string","value":"custom_validator.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":579,"string":"579"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114986,"cells":{"seq_id":{"kind":"string","value":"3912202681"},"text":{"kind":"string","value":"from rk_diagram.models import RKPipeline, LocalizationAlgorithm, TransformNode\nfrom rk_diagram.visualize import RKModelVisualizer\nfrom rk_diagram.models.graph import EdgeType, Edge\n\nimport numpy as np\n\nclass HierarchicalFeatureExtractor1():\n '''\n Generates a heirarchical feature extractor\n TODO: Think about 2+ levels\n '''\n def __init__(self):\n self.children = {}\n\n def predict(self, X):\n self.children['range_measure'] = range_measure(X)\n self.children['max_measure'] = max_measure(X)\n\n def range_measure(self, X): # computes the range as a feature\n return np.max(X) - np.min(X)\n\n def max_measure(self, X): # computes the max as a measure\n return np.max(X)\n\n\nclass SimpleLinkage():\n '''\n Simple linkage:\n A simple linkage function.\n Compares the values of two nodes, draws a link if the euclidean distancd is\n less than the threshold.\n\n Sends back a list of edges\n '''\n def __init__(self, threshold):\n self.threshold = 5\n\n def link(self, nodes):\n edges = []\n for n, i in enumerate(nodes):\n l = n+1\n while 
l < len(nodes):\n if np.linalg.norm(nodes[i].value - nodes[l].values) < self.threshold:\n l_larger = nodes[i].value > nodes[l].value\n fid = nodes[l].from_id if l_larger else nodes[i].from_id\n tid = nodes[i].from_id if l_larger else nodes[j].from_id\n etype = EdgeType.DIRECTED\n if nodes[i].value == nodes[l].value:\n etype = EdgeType.UNDIRECTED\n edges.append(Edge(from_id=fid, to_id=tid, type=etype))\n l+=1\n\nclass MaxLocalizer(LocalizationAlgorithm):\n '''\n localizes the max position\n '''\n def localize(self, X):\n return np.argmax(X) # returns the max position of X\n\nclass MinMaxNormalizerNode(TransformNode):\n '''\n min max normalizer\n takes the max and min as a transform node\n and will normalize the data\n '''\n def __init__(self):\n self._fitted = False\n\n def fit(self, X):\n self._fitted = True\n self.min = np.min(X)\n self.max = np.max(X)\n\n def transform(self, X):\n return (X - self.min) / (self.max - self.min)\n\nclass StaticFilter():\n '''\n This static filter takes simple boundary conditions,\n a min and max, and provides a filter function over it\n '''\n def __init__(self, min=None, max=None):\n self._min = min\n self._max = max\n\n def filter(self, val):\n if self._min is not None and val < self._min:\n return False\n if self._max is not None and val > self._max:\n return False\n return True\n\ndef main(X):\n\n rk_models = []\n example_pipeline = RKPipeline(preprocess_nodes=[MinMaxNormalizerNode()],\n localization_algorithm=MaxLocalizer(),\n hierarchical_embedding_nodes= [\n {\n \"HFeatureExtractor1\": HierarchicalFeatureExtractor1()\n }\n ],\n filter_functions=[\n {\n \"HFeatureExtractor1\" :\n {\n 'range_measure': StaticFilter(min=.2, max=.8),\n 'max_measure': StaticFilter(min=0, max=1)\n }\n }\n ], # question: how to define which limits for which measure. 
Each filter and linkage has to be BY CLUSTER\n linkage_function=SimpleLinkage(threshold=.8))\n example_pipeline.build()\n example_pipeline.fit(X)\n rk_model = example_pipeline.transform(X)\n rk_models.append(rk_model)\n\n visualizer = RKModelVisualizer(method=\"circular\")\n visualizer.build(rk_models) # build requires a list of rk_models\n visualizer.show()\n\ndef parse_arguments():\n X = [1,2,3,4]\n main()\n"},"repo_name":{"kind":"string","value":"andorsk/rk_toolkit"},"sub_path":{"kind":"string","value":"example/example.py"},"file_name":{"kind":"string","value":"example.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4225,"string":"4,225"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":2,"string":"2"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114987,"cells":{"seq_id":{"kind":"string","value":"637859362"},"text":{"kind":"string","value":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom crud import crud\n\nimport sys\nsys.path.insert(1, './model')\nfrom algoritmo import algoritmoModel\n\n\nclass scraping:\n __obj_crud = None\n __obj_model = None\n def __init__(self):\n self.__obj_crud = crud() \n self.__obj_model = algoritmoModel() \n\n def switch(self, case, campeonato):\n if case == 'times':\n if campeonato == 'premier': \n return self.timesPremier()\n else:\n print(\"campeonato invalido\")\n elif case == 'rodadas':\n if campeonato == 'premier': \n return self.rodadasPremier()\n else:\n print(\"campeonato invalido\")\n else:\n print(\"metódo invalido\")\n\n\n def timesPremier(self):\n servico = Service(ChromeDriverManager().install())\n url = \"https://www.sofascore.com/tournament/football/england/premier-league/17#52186\" \n\n navegador = webdriver.Chrome(service=servico)\n WebDriverWait(navegador, timeout=10)\n navegador.get(url)\n\n click = navegador.find_element(By.XPATH, '//*[@id=\"__next\"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[3]')\n click.click()\n\n # TEMPO DE ESPERA PARA DISPARA AÇÃO\n navegador.implicitly_wait(20)\n\n dados = navegador.find_elements(By.CLASS_NAME, 'uTWnT') \n if(dados):\n time = []\n temporada = '2023/2024'\n for dado in dados:\n if dado.text and dado.text != '0':\n time.append(dado.text)\n else:\n print(\"Sem dados\") \n \n for t1 in time:\n print(t1) \n # self.__obj_crud.inserir(t1,temporada)\n\n\n \n def rodadasPremier(self):\n servico = Service(ChromeDriverManager().install())\n url = \"https://www.sofascore.com/tournament/football/england/premier-league/17#52186\" \n\n navegador = webdriver.Chrome(service=servico)\n WebDriverWait(navegador, timeout=10)\n navegador.get(url)\n\n \n click = navegador.find_element(By.XPATH, '//*[@id=\"__next\"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[2]')\n click.click()\n\n # TEMPO DE ESPERA PARA DISPARA AÇÃO\n navegador.implicitly_wait(20)\n \n click2 = navegador.find_element(By.XPATH, '//*[@id=\"__next\"]/main/div/div[2]/div[2]/div[2]/div/div/div[2]/div[2]')\n click2.click()\n\n # TEMPO DE ESPERA PARA DISPARA AÇÃO\n navegador.implicitly_wait(20)\n \n click3 = navegador.find_element(By.XPATH, '//*[@id=\"downshift-11-toggle-button\"]')\n click3.click()\n\n # TEMPO DE ESPERA PARA DISPARA 
AÇÃO\n navegador.implicitly_wait(20)\n \n click4 = navegador.find_element(By.XPATH, '//*[@id=\"downshift-11-item-0\"]')\n click4.click()\n \n lista = {}\n dados = navegador.find_elements(By.CLASS_NAME, 'bwUmPO') \n \n contador = 0\n for dado in dados: \n contador += 1\n div = contador % 2 \n \n if div == 1: \n lista[contador] = {'rodada':'1'}\n lista[contador]['campeonato'] = '1' \n time = self.__obj_model.getTimes(dado.text)\n if time:\n lista[contador]['mandante'] = time[0]\n else:\n lista[contador]['mandante'] = '?'\n else: \n cont = contador - 1 \n time = self.__obj_model.getTimes(dado.text)\n if time:\n lista[cont]['visitante'] = time[0]\n else:\n lista[cont]['visitante'] = '?'\n\n self.__obj_crud.setTable('rodadas')\n self.__obj_model.setTable('rodadas') \n if len(lista) > 0:\n for list in lista:\n self.__obj_crud.newInsert(lista[list]) \n \n \n \n\n\nscrap = scraping() \nscrap.switch('rodadas','premier')\n\n\n\n "},"repo_name":{"kind":"string","value":"CaioFreitas96/scraping"},"sub_path":{"kind":"string","value":"scraping.py"},"file_name":{"kind":"string","value":"scraping.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":4411,"string":"4,411"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"pt"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114988,"cells":{"seq_id":{"kind":"string","value":"29053233199"},"text":{"kind":"string","value":"x=10\r\ny=2\r\nprint(x//y)\r\n\r\nx=10\r\ny=3\r\nprint(x//y)\r\n\r\nx=10\r\ny=8.5\r\nprint(x//y)\r\n\r\n# algorithm\r\n# 10,1,8,3,6,5,4,7,x,y\r\n# Find the general solution of x and y\r\n# x-> 2 y->9\r\n# Step1: Start\r\n# Step2: Initialise a variable named n\r\n# Step3:x=n+1\r\n# Step4:a=x+2\r\n# Step5:b=x-2\r\n# Step6: if x%2=0, then x+a\r\n# Step7: if x%2!=0, then x+b\r\n# Step8: Stop\r\n"},"repo_name":{"kind":"string","value":"RiyaBaid/Python"},"sub_path":{"kind":"string","value":"floordivision.py"},"file_name":{"kind":"string","value":"floordivision.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":346,"string":"346"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114989,"cells":{"seq_id":{"kind":"string","value":"35880060544"},"text":{"kind":"string","value":"# O nome e a posição das colunas dos dados históricos e das estações são diferentes!\n# Esse dicionário vai nos auxiliar para pegar um determinado dado nas duas tabelas.\n# lista[0] -> Colunas como estão nos dados históricos.\n# lista[1] -> Colunas como estão nos dados das estações (website).\nd_dic = {\n \"Data\": ['DATA (YYYY-MM-DD)', 'DT_MEDICAO'],\n \"Hora\": ['HORA (UTC)', 'HR_MEDICAO'],\n \"Pressao\": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],\n \"Radiacao\": ['RADIACAO GLOBAL (KJ/m²)', 'RAD_GLO'],\n \"Temperatura\": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],\n \"Umidade\": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']\n}\n\nd_dic_2019 = {\n \"Data\": ['Data', 'DT_MEDICAO'],\n \"Hora\": ['Hora UTC', 'HR_MEDICAO'],\n \"Pressao\": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],\n \"Radiacao\": ['RADIACAO GLOBAL (KJ/m²)', 
'RAD_GLO'],\n \"Temperatura\": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],\n \"Umidade\": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']\n}\n\n# Alguém de lá teve a brilhante ideia de modificar o nome das colunas e a formatação dos\n# dados a partir de 2019.\nd_dic_2020_greater = {\n \"Data\": ['Data', 'DT_MEDICAO'],\n \"Hora\": ['Hora UTC', 'HR_MEDICAO'],\n \"Pressao\": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],\n \"Radiacao\": ['RADIACAO GLOBAL (Kj/m²)', 'RAD_GLO'],\n \"Temperatura\": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],\n \"Umidade\": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']\n}\n\n# Para download feitos através de web scraping no site do INMET.\nd_dic_inmet = {\n \"Data\": ['Data', 'DT_MEDICAO'],\n \"Hora\": ['Hora (UTC)', 'HR_MEDICAO'],\n \"Pressao\": ['Pressao Ins. (hPa)', 'PRE_INS'],\n \"Radiacao\": ['Radiacao (KJ/m²)', 'RAD_GLO'],\n \"Temperatura\": ['Temp. Ins. (C)', 'TEM_INS'],\n \"Umidade\": ['Umi. Ins. (%)', 'UMD_INS']\n}\n\nclass ID:\n MENU_SCROLL = 1\n LISTBOX = 2\n\n \n POPUP_CONCAT = 2002\n POPUP_UPDATE = 2003\n POPUP_CLEAN = 2004 \n POPUP_DELETE = 2005\n POPUP_SAVE = 2006"},"repo_name":{"kind":"string","value":"NeoFahrenheit/inmet-scraper"},"sub_path":{"kind":"string","value":"id.py"},"file_name":{"kind":"string","value":"id.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2115,"string":"2,115"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"pt"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114990,"cells":{"seq_id":{"kind":"string","value":"39518772432"},"text":{"kind":"string","value":"from random import random\n\nimport requests\nfrom flask import Flask, request\n\nfrom conf import (\n get_healthy_server,\n healthcheck,\n load_configuration,\n process_firewall_rules_flag,\n process_rules,\n process_rewrite_rules,\n transform_backends_from_config,\n)\n\nloadbalancer = Flask(__name__)\n\nMAIL_BACKENDS = ['localhost:8081', 'localhost:8082']\nYANDEX_BACKENDS = ['localhost:9081', 'localhost:9082']\n\nconfig = load_configuration('balancer.yaml')\nregister = transform_backends_from_config(config)\n\n\n@loadbalancer.route('/')\ndef router():\n host_header = request.headers['Host']\n if host_header == 'www.mail.ru':\n response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')\n return response.content, response.status_code\n elif host_header == 'www.yandex.ru':\n response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')\n return response.content, response.status_code\n else:\n return 'Not Found', 404\n\n@loadbalancer.route('/mail')\ndef mmail_path():\n response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')\n return response.content, response.status_code\n\n\n@loadbalancer.route('/yandex')\ndef yandex_path():\n response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')\n return response.content, 
response.status_code"},"repo_name":{"kind":"string","value":"leader8901/testServer"},"sub_path":{"kind":"string","value":"balancer.py"},"file_name":{"kind":"string","value":"balancer.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1317,"string":"1,317"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114991,"cells":{"seq_id":{"kind":"string","value":"1282240873"},"text":{"kind":"string","value":"import os\nimport numpy as np\nimport random\nfrom gym.envs.mujoco.pusher import PusherEnv\nfrom evaluation.eval import Eval\nfrom data import utils\n\nXML_FOLDER = \"/media/stephen/c6c2821e-ed17-493a-b35b-4b66f0b21ee7/MIL/gym/gym/envs/mujoco/assets\"\n\n\nclass EvalMilPush(Eval):\n\n def _load_env(self, xml):\n xml = xml[xml.rfind('pusher'):]\n xml_file = 'sim_push_xmls/test2_ensure_woodtable_distractor_%s' % xml\n xml_file = os.path.join(XML_FOLDER, xml_file)\n env = PusherEnv(**{'xml_file': xml_file, 'distractors': True})\n env.set_visibility(self.render)\n env.render()\n viewer = env.viewer\n viewer.autoscale()\n viewer.cam.trackbodyid = -1\n viewer.cam.lookat[0] = 0.4\n viewer.cam.lookat[1] = -0.1\n viewer.cam.lookat[2] = 0.0\n viewer.cam.distance = 0.75\n viewer.cam.elevation = -50\n viewer.cam.azimuth = -90\n return env\n\n def _eval_success(self, obs):\n obs = np.array(obs)\n target = obs[:, -3:-1]\n obj = obs[:, -6:-4]\n dists = np.sum((target - obj) ** 2, 1) # distances at each time step\n return np.sum(dists < 0.017) >= 10\n\n def evaluate(self, iter):\n\n print(\"Evaluating at iteration: %i\" % iter)\n iter_dir = os.path.join(self.record_gifs_dir, 'iter_%i' % iter)\n utils.create_dir(iter_dir)\n\n successes = []\n for i in range(self.num_tasks):\n\n # demo_selection will be an xml file\n env = self._load_env(self.demos[i][0]['demo_selection'])\n\n selected_demo_indexs = random.sample(\n range(len(self.demos[i])), self.supports)\n\n embedding = self.get_embedding(i, selected_demo_indexs)\n gifs_dir = self.create_gif_dir(iter_dir, i)\n\n for j in range(self.num_trials):\n env.reset()\n observations = []\n world_state = []\n for t in range(self.time_horizon):\n env.render()\n # Observation is shape (100,100,3)\n obs, state = env.get_current_image_obs()\n observations.append(obs)\n obs = ((obs / 255.0) * 2.) 
- 1.\n\n action = self.get_action(obs, state, embedding)\n ob, reward, done, reward_dict = env.step(np.squeeze(action))\n world_state.append(np.squeeze(ob))\n if done:\n break\n\n if self._eval_success(world_state):\n successes.append(1.)\n else:\n successes.append(0.)\n self.save_gifs(observations, gifs_dir, j)\n\n env.render(close=True)\n\n final_suc = np.mean(successes)\n print(\"Final success rate is %.5f\" % (final_suc))\n return final_suc\n"},"repo_name":{"kind":"string","value":"stepjam/TecNets"},"sub_path":{"kind":"string","value":"evaluation/eval_mil_push.py"},"file_name":{"kind":"string","value":"eval_mil_push.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":2852,"string":"2,852"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":40,"string":"40"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114992,"cells":{"seq_id":{"kind":"string","value":"17566333407"},"text":{"kind":"string","value":"n=int(input())\r\nk=n\r\nl=(n*(n+1))//2\r\nnum=0\r\nif(l)%2==0:\r\n\tl=l//2\r\n\tls=[i for i in range(1,n+1)]\r\n\tls1=[]\r\n\twhile(num!=l):\r\n\t\tif(l-num)\\n\")\n f.write(\"#include \\n\")\n f.write(\"#include \\n\\n\")\n f.write(\"void my_handler(const char * reason, const char * file, int line, int gsl_errno)\\n\")\n f.write(\"{\\n\")\n f.write(\"\\tfprintf(stderr, \\\"%s\\\\n\\\", reason);\\n\")\n f.write(\"}\\n\\n\")\n\n for funcName, args in signatures.items():\n\n driverName = \"{}_DRIVER\".format(funcName)\n thisDriver = Driver(driverName, funcName, libraryName, \"c\", len(args[0]), funcName)\n\n thisDriver.add_line(\"double {}(double * doubleInput, int * intInput)\\n\".format(driverName))\n thisDriver.add_line(\"{\\n\")\n thisDriver.add_line(\"\\tdouble out;\\n\")\n\n # tally up number of ints and doubles\n numberOfInts = 0\n numberOfDoubles = 0\n for i in range(len(args[0])):\n if \"int\" in args[0][i]:\n numberOfInts += 1\n elif \"double\" in args[0][i]:\n numberOfDoubles += 1\n\n thisDriver.set_numberOfDoubles(numberOfDoubles)\n thisDriver.set_numberOfInts(numberOfInts)\n\n # for each extracted function, save all of its test arguments for test migration\n k = 1\n for j in range(1, len(args)):\n\n ints = []\n doubles = []\n\n for i in range(len(args[0])):\n if \"int\" in args[0][i]:\n ints.append(int(args[j][i]))\n elif \"double\" in args[0][i]:\n doubles.append(float(args[j][i]))\n\n TEST_INPUTS[\"{}~input_num{:0>3}\".format(driverName, j-1)] = (doubles, ints)\n k += 1\n if driverName in all_functions.keys():\n for m in range(k, len(all_functions[driverName])):\n TEST_INPUTS[\"{}~input_num{:0>3}\".format(driverName, m-1)] = ([all_functions[driverName][m-k]], [])\n\n thisDriver.add_line(\"\\tgsl_error_handler_t * old_handler = gsl_set_error_handler (&my_handler);\\n\\n\")\n\n thisDriver.add_line(\"\\tout = {}(\".format(funcName))\n\n for i in range(numberOfInts):\n thisDriver.add_line('intInput[{}]'.format(i))\n if i + 1 != numberOfInts or numberOfDoubles != 0:\n thisDriver.add_line(\", \")\n\n for i in range(numberOfDoubles):\n thisDriver.add_line('doubleInput[{}]'.format(i))\n if i + 1 != numberOfDoubles:\n thisDriver.add_line(\", \")\n \n if numberOfDoubles + numberOfInts < len(args[0]):\n thisDriver.add_line(\", GSL_PREC_DOUBLE\")\n\n thisDriver.add_line(');\\n\\n')\n\n #thisDriver.add_line(\"\\tgsl_set_error_handler (old_handler);\\n\\n\")\n\n 
thisDriver.add_line(\"\\treturn out;\\n\")\n thisDriver.add_line(\"}} //END_DRIVER {}\\n\\n\".format(funcName))\n\n f.write(thisDriver.get_driverText())\n DRIVER_LIST[thisDriver.get_driverName()] = thisDriver\n\n\ndef pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS):\n\n with open(\"spFunDrivers/\" + libraryName + \"_drivers.py\", 'w') as f:\n\n # write all imports\n for x in imports:\n if len(x) == 1:\n f.write(\"import {}\\n\".format(x[0]))\n if len(x) == 2:\n f.write(\"import {} as {}\\n\".format(x[0], x[1]))\n for x in fromImports:\n f.write(\"from {} import {}\\n\".format(x[0], x[1]))\n\n # for each collected function signature\n for funcName, args in signatures.items():\n\n # for a varying number of integers...\n for numberOfInts in range(len(args[0])):\n\n # get the number of doubles\n numberOfDoubles = len(args[0]) - numberOfInts\n \n # form driverName\n driverName = \"{}_{}_DRIVER{}\".format(libraryName, funcName.replace('.', '_'), numberOfInts )\n\n # form unique funcName without \"_alt\" and namespace info\n temp = funcName\n callName = funcName\n if '_alt' in temp:\n temp = temp[:temp.index(\"_alt\")]\n callName = temp\n if '.' in temp:\n temp = temp[temp.index(\".\") + 1:]\n\n # construct driver object\n thisDriver = Driver(driverName, temp, libraryName, \"python\", len(args[0]), callName)\n\n thisDriver.add_line(\"def {}(doubleInput, intInput):\\n\".format(driverName))\n\n # for each extracted function, save all of its test arguments for test migration\n for j in range(len(args)):\n\n ints = []\n doubles = []\n\n for k in range(numberOfInts):\n ints.append(int(args[j][k]))\n for k in range(len(args[0]) - numberOfInts):\n doubles.append(float(args[j][k]))\n\n TEST_INPUTS[\"{}~inputs_num{:0>3}\".format(driverName,j)] = (doubles, ints)\n\n thisDriver.set_numberOfDoubles(numberOfDoubles)\n thisDriver.set_numberOfInts(numberOfInts)\n\n if \"_alt\" in funcName:\n thisDriver.add_line(\"\\tout = {}(\".format(funcName[:funcName.find(\"_alt\")]))\n else:\n thisDriver.add_line(\"\\tout = {}(\".format(funcName))\n\n for i in range(numberOfInts):\n thisDriver.add_line(\"intInput[{}]\".format(i))\n if i + 1 != numberOfInts or numberOfDoubles != 0:\n thisDriver.add_line(\", \")\n\n for i in range(numberOfDoubles):\n thisDriver.add_line(\"doubleInput[{}]\".format(i))\n if i + 1 != numberOfDoubles:\n thisDriver.add_line(\", \")\n\n thisDriver.add_line(\")\\n\\n\")\n\n thisDriver.add_line(\"\\treturn float(out) #END_DRIVER {}\\n\\n\".format(funcName))\n\n f.write(thisDriver.get_driverText())\n DRIVER_LIST[thisDriver.get_driverName()] = thisDriver\n\nif __name__ == \"__main__\":\n # python3 driverGenerator mpmath python\n\n libraryName = sys.argv[1]\n language = sys.argv[2]\n\n try:\n with open(\"__temp/__driverCollection\", 'rb') as fp:\n DRIVER_LIST = pickle.load(fp)\n except:\n DRIVER_LIST = {}\n\n try:\n with open(\"__temp/__testInputs\", 'rb') as fp:\n TEST_INPUTS = pickle.load(fp)\n except:\n TEST_INPUTS = {}\n\n # load information from signature extractor\n with open(\"__temp/__\" + libraryName + \"_signatures\", 'rb') as fp:\n signatures = pickle.load(fp)\n with open(\"__temp/__\" + libraryName + \"_imports\", 'rb') as fp:\n imports = pickle.load(fp)\n with open(\"__temp/__\" + libraryName + \"_fromImports\", 'rb') as fp:\n fromImports = pickle.load(fp)\n\n if language == 'c':\n gslGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS)\n subprocess.call(['make'], cwd=\"spFunDrivers/\")\n\n if language == 'python':\n 
pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports,TEST_INPUTS)\n\n with open(\"__temp/__testInputs\", 'wb') as fp:\n pickle.dump(TEST_INPUTS, fp)\n\n with open(\"__temp/__driverCollection\", 'wb') as fp:\n pickle.dump(DRIVER_LIST, fp)"},"repo_name":{"kind":"string","value":"Sherryhh/fpdiff_extend"},"sub_path":{"kind":"string","value":"fp-diff-testing/workspace/driverGenerator.py"},"file_name":{"kind":"string","value":"driverGenerator.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":8600,"string":"8,600"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114994,"cells":{"seq_id":{"kind":"string","value":"10070441282"},"text":{"kind":"string","value":"import datetime\nimport os\nimport sys\nfrom importlib import reload\n\nfrom antlr4 import *\n\nfrom CnfUtility import CnfUtility\nfrom CnfVcGenerator import CnfVcGenerator\nfrom MyCFG import MyCFG\nfrom MyHelper import MyHelper\nfrom MyUtility import MyUtility\nfrom MyVisitor import MyVisitor\nfrom PreProcessor import PreProcessor\nfrom WpcStringConverter import WpcStringConverter\nfrom gen.MySsaStringGenerator import MySsaStringGenerator\nfrom gen.PlSqlLexer import PlSqlLexer\nfrom gen.PlSqlParser import PlSqlParser\n\nfrom MyRawCfgToGraph import MyRawCfgToGraph\n\n\ndef executeSinglePlSqlFile(data, spec):\n f = open(data, 'r')\n linesOfCode = len(f.readlines())\n f.close()\n\n processor = PreProcessor(spec, data)\n tableInfo, assumeConstraint, assertConstraint, resultString = processor.start()\n\n file = open('cnf/upper_input.sql', \"w\")\n file.write(resultString)\n file.close()\n\n # recording startTime\n startTime = datetime.datetime.now()\n\n input = FileStream('cnf/upper_input.sql')\n lexer = PlSqlLexer(input)\n stream = CommonTokenStream(lexer)\n parser = PlSqlParser(stream)\n tree = parser.sql_script()\n # ast = tree.toStringTree(recog=parser)\n # print(str(MyPlSqlVisitor(parser).getRuleName(tree)))\n # print(\"\\n\\n\", signature(tree.toStringTree), \"\\n\")\n\n cfg = MyCFG()\n helper = MyHelper(parser)\n helper.updateTableDict(tableInfo)\n utility = MyUtility(helper)\n v = MyVisitor(parser, cfg, utility)\n v.visit(tree)\n\n print(\"\\nRaw CFG : \", v.rawCFG)\n\n # for key in v.cfg.nodes:\n # if v.cfg.nodes[key].ctx != None:\n # print(key, \" --> \", v.cfg.nodes[key].ctx.getText())\n\n res = MyRawCfgToGraph(v.rawCFG, cfg)\n res.execute()\n # cfg.printPretty()\n # cfg.dotToPng(cfg.dotGraph, \"cnf/raw_graph\") # TODO: make dot file in cnf form\n utility.generateDomSet(cfg)\n # print(\"Dominator set ended----------->\\n\\n\")\n utility.generateSDomSet(cfg)\n # print(\"Strictly Dominator set ended ----------->\\n\\n\")\n utility.generatIDom(cfg)\n # print(\"Immediate Dominator ended ----------->\\n\\n\")\n utility.generateDFSet(cfg)\n utility.insertPhiNode(cfg)\n\n utility.initialiseVersinosedPhiNode(cfg)\n utility.versioniseVariable(cfg)\n utility.phiDestruction(cfg)\n\n ssaString = MySsaStringGenerator(cfg, parser)\n ssaString.execute()\n\n # utility.generateFinalDotGraph(cfg)\n # for nodeId in cfg.nodes:\n # cfg.nodes[nodeId].printPretty()\n\n # cfg.dotToPng(cfg.dotGraph, \"cnf/raw_graph\")\n #\n # hello1 = utility.generateBeforeVersioningDotFile(cfg)\n # cfg.dotToPng(hello1, \"cnf/before_versioning_graph\")\n #\n # hello4 = 
utility.generateDestructedPhiNodeWalaDotFile(cfg)\n # cfg.dotToPng(hello4, \"cnf/destructed_phi_node_wala_graph\")\n\n\n\n cnfUtility = CnfUtility(helper)\n iCnfCfg = cnfUtility.copyCfg(cfg)\n reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg)\n cnfUtility.unvisit(iCnfCfg)\n cnfUtility.setParentBranching(iCnfCfg)\n\n cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg)\n cnfUtility.copyParentBranching(cnfCfg, iCnfCfg)\n # print(\"\\n\\n\\n\\n\\n\\t\\t\\tThe intermediate CNF form is ------------------------------>\\n\\n\\n\\n\")\n\n # for nodeId in iCnfCfg.nodes:\n # iCnfCfg.nodes[nodeId].printPretty()\n\n # print(\"\\n\\n\\n\\n\\n\\t\\t\\tThe CNF form is ------------------------------>\\n\\n\\n\\n\")\n\n cnfVcGenerator = CnfVcGenerator(cnfCfg, parser)\n\n cnfPath = []\n\n for nodeId in cnfCfg.nodes:\n cnfPath.append(nodeId)\n\n cnfVcGenerator.generateCnfVc(cnfPath)\n\n # print(\"\\n\\n\\n\\n\\t\\t\\tThe CNF VCs are : ------------------------------->\\n\\n\\n\")\n # print(cnfVcs)\n\n # for nodeId in cnfCfg.nodes:\n # cnfCfg.nodes[nodeId].printPretty()\n\n # cnfVc = cnfUtility.cnfVc(cnfCfg)\n #\n # print(\"\\n\\n\\t\\tThe CNF VCs are ----------------->\\n\\n\\n\")\n #\n # for str in cnfVc:\n # print(str)\n\n varSet, z3Str = cnfUtility.iZ3format(cnfCfg)\n\n # print(\"\\n\\n*******************\\n\\n\", z3Str, \"\\n\\n--------------\\n\\n\")\n # print(varSet)\n #\n # print(\"\\n\\n\")\n z3Str = z3Str.replace(\" \", \" \")\n z3Str = z3Str.replace(\" == \", \" = \")\n z3Str = z3Str.replace(\" = \", \" == \")\n\n print(\"\\n**** Final CNF VC in Well_Bracketted_Format:\\n\\n\", z3Str, \"\\n\")\n\n z3StringConvertor = WpcStringConverter(z3Str)\n z3StringConvertor.execute()\n\n # print(\"\\n**** Final CNF VC in Z3 Format:\\n\", z3StringConvertor.convertedWpc, \"\\n\")\n\n z3FileString = \"# This file was generated at runtime on \" + str(datetime.datetime.now()) + \"\\n\"\n z3FileString = z3FileString + \"from z3 import *\\n\\n\"\n z3FileString = z3FileString + \"class Z3RuntimeCnfFile():\\n\"\n z3FileString = z3FileString + \"\\t\" + \"def __init__(self):\\n\"\n z3FileString = z3FileString + \"\\t\\t\" + \"self.finalFormula = \\\"\\\"\\n\"\n z3FileString = z3FileString + \"\\t\\t\" + \"self.satisfiability = \\\"\\\"\\n\"\n z3FileString = z3FileString + \"\\t\\t\" + \"self.modelForViolation = \\\"\\\"\\n\\n\"\n\n z3FileString = z3FileString + \"\\t\" + \"def execute(self):\\n\"\n for i in varSet:\n z3FileString = z3FileString + \"\\t\\t\" + i + \" = Real(\\'\" + i + \"\\')\\n\"\n z3FileString = z3FileString + \"\\n\\t\\ts = Solver()\\n\"\n\n if len(z3StringConvertor.implies_p) > 0:\n for i in range(len(z3StringConvertor.implies_p)):\n z3FileString = z3FileString + \"\\t\\t\" + \"s.add(\" + z3StringConvertor.implies_p[i] + \")\\n\"\n if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]:\n z3FileString = z3FileString + \"\\t\\t\" + \"s.add(\" + z3StringConvertor.implies_p_q[i] + \")\\n\"\n # if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q:\n # z3FileString = z3FileString + \"\\t\\t\" + \"s.add(\" + z3StringConvertor.convertedWpc + \")\\n\"\n # else:\n # z3FileString = z3FileString + \"\\t\\t\" + \"s.add(\" + z3StringConvertor.convertedWpc + \")\\n\"\n z3FileString = z3FileString + \"\\t\\t\" + \"s.add( Not( \" + z3StringConvertor.convertedWpc + \" ) )\\n\"\n\n # z3FileString = z3FileString + \"\\n\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\" + \"print(\\\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\\\n\\\", s)\"\n z3FileString = 
z3FileString + \"\\n\\t\\t\" + \"self.finalFormula = str(s)\"\n # z3FileString = z3FileString + \"\\n\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\" + \"print(\\\"%%%%%%%%%% Satisfiability %%%%%%%%%%\\\")\\n\"\n z3FileString = z3FileString + \"\\n\\t\\t\" + \"self.satisfiability = str(s.check())\"\n\n z3FileString = z3FileString + \"\\n\\t\\t\" + \"if self.satisfiability == \\\"sat\\\":\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print(\\\"-------->> Violation Occurred...\\\")\"\n z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"self.satisfiability = \\\"violation\\\"\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print(\\\"%%%%%%%%%% An Instance for which Violation Occurred %%%%%%%%%%\\\\n\\\", s.model())\"\n z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"self.modelForViolation = str(s.model())\"\n\n z3FileString = z3FileString + \"\\n\\t\\t\" + \"elif self.satisfiability == \\\"unsat\\\":\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print(\\\"-------->> NO Violation Detected so far...\\\")\"\n z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"self.satisfiability = \\\"sat\\\"\"\n # z3FileString = z3FileString + \"\\n\\t\\t\\t\" + \"print()\"\n # z3FileString = z3FileString + \"\\n\\t\\t\" + \"print()\\n\"\n\n file = open('cnf/Z3RuntimeCnfFile.py', \"w\")\n file.write(z3FileString)\n file.close()\n\n import cnf.Z3RuntimeCnfFile\n from cnf.Z3RuntimeCnfFile import Z3RuntimeCnfFile\n # Reload after module's creation to avoid old module remain imported from disk...VVI...\n cnf.Z3RuntimeCnfFile = reload(cnf.Z3RuntimeCnfFile)\n\n z3Runtime = Z3RuntimeCnfFile()\n z3Runtime.execute()\n\n finishTime = datetime.datetime.now()\n timeDifference = (finishTime - startTime).total_seconds()\n\n return linesOfCode, timeDifference, z3StringConvertor.convertedWpc, z3Runtime.satisfiability, z3Runtime.modelForViolation\n\n\n\ndef main(argv):\n if len(argv) < 3:\n print(\"Not Enough Arguments. Exiting...\")\n elif len(argv) == 3:\n data = \"cnf/data/\" + argv[1]\n spec = \"cnf/spec/\" + argv[2]\n linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(data, spec)\n print(\"\\n\\n*** Equivalent VC :\")\n print(vcGenerated)\n print(\"\\n*** Satisfibality :\\t\", satisfiability, \"\\n\\n*** Model for Violation :\\t\", modelForViolation, \"\\n\")\n print(\"\\n////// Execution completed for file :\", argv[1])\n print(\"No. 
of VCs = 1\")\n print(\"Time Taken =\", executionTime)\n print(\"LinesOfCode =\", linesOfCode)\n elif len(argv) == 4:\n if argv[1] == \"-dataset\":\n dataList = os.listdir(argv[2])\n specList = os.listdir(argv[3])\n # print(dataList)\n # print(specList)\n mat = []\n counter = 0\n for dataFile in dataList:\n specFile = dataFile.split(\".\")[0].strip() + \".spec\"\n print(\"~~~~~~~~~~~~~~~~ For PlSQL FileName => \" + dataFile + \" ~~~~~~~~~~~~~~~~\")\n if specFile in specList:\n linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(\n argv[2] + \"/\" + dataFile, argv[3] + \"/\" + specFile)\n temp = []\n temp.append(dataFile)\n temp.append(linesOfCode)\n temp.append(executionTime)\n # temp.append(vcGenerated)\n temp.append(satisfiability)\n temp.append(modelForViolation)\n mat.append(temp)\n file = open('cnf/Z3RuntimeCnfFile.py', \"w\")\n file.write(\"# Cleared content of this File...\\n\\nclass Z3RuntimeCnfFile():\\n\\tdef __init__(self):\\n\\t\\tself.finalFormula = \\\"\\\"\\n\\t\\tself.satisfiability = \\\"\\\"\\n\\t\\tself.modelForViolation = \\\"\\\"\\n\\n\\tdef execute(self):\\n\\t\\tprint('+++++++++++++++++++++++++++++%%%%%^^^^^^^^####')\\n\")\n file.close()\n else:\n print(specFile + \" do not exist!!!\")\n counter = counter + 1\n print(\"Counter =\", counter)\n print(\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Filename\\t\\tLinesOfCode\\t\\tExecutionTime\\t\\tSatisfiability\\t\\tViolatingInstance\\n\")\n for i in range(len(mat)):\n for j in range(len(mat[i])):\n print(mat[i][j], end=\"\\t\\t\")\n print()\n elif len(argv) == 6:\n if argv[1] == \"-datafilename\" and argv[3] == \"-data_spec_filepaths\":\n linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(argv[4], argv[5])\n print(\" \"+argv[2], end=\"\\t\\t\\t\")\n print(linesOfCode, end=\"\\t\\t\")\n print(executionTime, end=\"\\t\")\n print(\"1\", end=\"\\t\")\n print(satisfiability, end=\"\\t\\t\")\n print(modelForViolation.replace(\"\\n\", \" \"), end=\"\")\n print()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # data = \"cnf/data/\" + argv[1]\n # spec = \"cnf/spec/\" + argv[2]\n # processor = PreProcessor(spec, data)\n # tableInfo, assumeConstraint, assertConstraint, resultString = processor.start()\n #\n # file = open('cnf/upper_input.sql', \"w\")\n # file.write(resultString)\n # file.close()\n #\n # input = FileStream('cnf/upper_input.sql')\n # lexer = PlSqlLexer(input)\n # stream = CommonTokenStream(lexer)\n # parser = PlSqlParser(stream)\n # tree = parser.sql_script()\n # # ast = tree.toStringTree(recog=parser)\n # # print(str(MyPlSqlVisitor(parser).getRuleName(tree)))\n # # print(\"\\n\\n\", signature(tree.toStringTree), \"\\n\")\n #\n # cfg = MyCFG()\n # helper = MyHelper(parser)\n # helper.updateTableDict(tableInfo)\n # utility = MyUtility(helper)\n # v = MyVisitor(parser, cfg, utility)\n # v.visit(tree)\n #\n #\n #\n # print(v.rawCFG)\n #\n # for key in v.cfg.nodes:\n # if v.cfg.nodes[key].ctx != None:\n # print(key, \" --> \", v.cfg.nodes[key].ctx.getText())\n #\n #\n # res = MyRawCfgToGraph(v.rawCFG, cfg)\n # res.execute()\n # cfg.printPretty()\n # cfg.dotToPng(cfg.dotGraph, \"cnf/raw_graph\") #TODO: make dot file in cnf form\n # utility.generateDomSet(cfg)\n # print(\"Dominator set ended----------->\\n\\n\")\n # utility.generateSDomSet(cfg)\n # print(\"Strictly Dominator set ended ----------->\\n\\n\")\n # 
utility.generatIDom(cfg)\n # print(\"Immediate Dominator ended ----------->\\n\\n\")\n # utility.generateDFSet(cfg)\n # utility.insertPhiNode(cfg)\n #\n #\n # utility.initialiseVersinosedPhiNode(cfg)\n # utility.versioniseVariable(cfg)\n # utility.phiDestruction(cfg)\n #\n #\n # ssaString = MySsaStringGenerator(cfg, parser)\n # ssaString.execute()\n #\n # #utility.generateFinalDotGraph(cfg)\n # # for nodeId in cfg.nodes:\n # # cfg.nodes[nodeId].printPretty()\n #\n # cnfUtility = CnfUtility(helper)\n # iCnfCfg = cnfUtility.copyCfg(cfg)\n # reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg)\n # cnfUtility.unvisit(iCnfCfg)\n # cnfUtility.setParentBranching(iCnfCfg)\n #\n # cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg)\n # cnfUtility.copyParentBranching(cnfCfg, iCnfCfg)\n # print(\"\\n\\n\\n\\n\\n\\t\\t\\tThe intermediate CNF form is ------------------------------>\\n\\n\\n\\n\")\n #\n # for nodeId in iCnfCfg.nodes:\n # iCnfCfg.nodes[nodeId].printPretty()\n #\n # print(\"\\n\\n\\n\\n\\n\\t\\t\\tThe CNF form is ------------------------------>\\n\\n\\n\\n\")\n #\n #\n #\n # cnfVcGenerator = CnfVcGenerator(cnfCfg, parser)\n #\n # cnfPath = []\n #\n # for nodeId in cnfCfg.nodes:\n # cnfPath.append(nodeId)\n #\n # cnfVcGenerator.generateCnfVc(cnfPath)\n #\n # # print(\"\\n\\n\\n\\n\\t\\t\\tThe CNF VCs are : ------------------------------->\\n\\n\\n\")\n # # print(cnfVcs)\n #\n # for nodeId in cnfCfg.nodes:\n # cnfCfg.nodes[nodeId].printPretty()\n #\n # # cnfVc = cnfUtility.cnfVc(cnfCfg)\n # #\n # # print(\"\\n\\n\\t\\tThe CNF VCs are ----------------->\\n\\n\\n\")\n # #\n # # for str in cnfVc:\n # # print(str)\n #\n # varSet, z3Str = cnfUtility.iZ3format(cnfCfg)\n #\n # print(\"\\n\\n*******************\\n\\n\", z3Str, \"\\n\\n--------------\\n\\n\")\n # print(varSet)\n #\n # print(\"\\n\\n\")\n # z3Str = z3Str.replace(\" \", \" \")\n # z3Str = z3Str.replace(\" == \", \" = \")\n # z3Str = z3Str.replace(\" = \", \" == \")\n # z3StringConvertor = WpcStringConverter(z3Str)\n # z3StringConvertor.execute()\n # print(\"\\n**** WPC String in Z3 Format:\\n\", z3StringConvertor.convertedWpc, \"\\n\")\n #\n # z3FileString = \"# This file was generated at runtime \" + \"\\n\"\n # z3FileString = z3FileString + \"from z3 import *\\n\\n\"\n # for i in varSet:\n # z3FileString = z3FileString + i + \" = Real(\\'\" + i + \"\\')\\n\"\n # z3FileString = z3FileString + \"\\ns = Solver()\\n\"\n #\n # if len(z3StringConvertor.implies_p) > 0:\n # for i in range(len(z3StringConvertor.implies_p)):\n # z3FileString = z3FileString + \"s.add(\" + z3StringConvertor.implies_p[i] + \")\\n\"\n # if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]:\n # z3FileString = z3FileString + \"s.add(\" + z3StringConvertor.implies_p_q[i] + \")\\n\"\n # # if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q:\n # # z3FileString = z3FileString + \"s.add(\" + z3StringConvertor.convertedWpc + \")\\n\"\n # # else:\n # # z3FileString = z3FileString + \"s.add(\" + z3StringConvertor.convertedWpc + \")\\n\"\n # z3FileString = z3FileString + \"s.add( Not( \" + z3StringConvertor.convertedWpc + \" ) )\\n\"\n #\n # z3FileString = z3FileString + \"\\nprint()\\n\"\n # z3FileString = z3FileString + \"\\nprint(\\\"------------------------------------------------------------------\\\\nRunning script in /wpc/z3FormatWpcFile.py ....\\\\n\\\")\\n\"\n # z3FileString = z3FileString + \"\\nprint(\\\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\\\n\\\", s)\\n\"\n # z3FileString = z3FileString + \"\\nprint()\\n\"\n # 
z3FileString = z3FileString + \"print(\\\"%%%%%%%%%% Satisfiability %%%%%%%%%%\\\\n\\\", s.check())\\n\"\n # z3FileString = z3FileString + \"\\nprint()\\n\"\n # z3FileString = z3FileString + \"print(\\\"%%%%%%%%%% Satisfiable Model %%%%%%%%%%\\\\n\\\", s.model())\\n\"\n # z3FileString = z3FileString + \"\\nprint()\\n\"\n #\n # file = open('cnf/z3FormatCnfFile.py', \"w\")\n # file.write(z3FileString)\n # file.close()\n #\n # # call([\"python3\", \"cnf/z3FormatWpcFile.py\"])\n #\n # #\n # # hello = utility.generateFinalDotGraph(cfg)\n # # print(hello)\n # # cfg.dotToPng(hello, \"versioned_graph\")\n #\n # #hello2 = utility.generateVersionedDotFile(cfg)\n # #print(hello2)\n # #cfg.dotToPng(hello2, \"se/versioned_graph\")\n #\n # #hello3 = utility.generateVersionedPhiNodeWalaDotFile(cfg)\n # #print(hello3)\n # #cfg.dotToPng(hello3, \"se/versioned_phi_node_wala_graph\")\n #\n # #hello4 = utility.generateDestructedPhiNodeWalaDotFile(cfg)\n # #print(hello4)\n # #cfg.dotToPng(hello4, \"se/destructed_phi_node_wala_graph\")\n\n\n\nif __name__ == '__main__':\n main(sys.argv)"},"repo_name":{"kind":"string","value":"NabeelQaiser/BTP_2k18-19"},"sub_path":{"kind":"string","value":"simulator_cnf.py"},"file_name":{"kind":"string","value":"simulator_cnf.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":17501,"string":"17,501"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114995,"cells":{"seq_id":{"kind":"string","value":"18659979731"},"text":{"kind":"string","value":"from configparser import ConfigParser\nfrom datetime import timedelta, datetime\nfrom discord_webhook import DiscordWebhook\nimport os, random, requests, re\nfrom typing import TypedDict, Union\n\nclass UserNameResponseDict(TypedDict):\n personaname:str\n name:str\n\ndef get_username(steam_id:int) -> Union[UserNameResponseDict,None]:\n \"\"\"\n Returns dict of persona name and name from Steam Player Summaries API\n \"\"\"\n try:\n response = requests.get(f'https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key={str(steam_api_key)}&format=json&steamids={str(steam_id)}')\n except requests.exceptions.RequestException as e:\n return None\n\n if 'realname' in response.json()['response']['players'][0]:\n name = response.json()['response']['players'][0]['realname']\n else:\n name = response.json()['response']['players'][0]['personaname']\n user_dict = {\n 'personaname': response.json()['response']['players'][0]['personaname'],\n 'name': name\n }\n return user_dict\n\ndef check_name(steam_id):\n \"\"\"\n Checks if user steam id is known, then returns user dict:\n {\n 'status': (Is user recognized),\n 'name': \n 'personaname': \n }\n If username is unknown, looks up user via steam ID\n \"\"\"\n \n if str(steam_id) in known_ids:\n name = known_ids[str(steam_id)][0]\n personaname = known_ids[str(steam_id)][1]\n status = True\n else:\n response = get_username(steam_id)\n if response is not None:\n name = response['name']\n personaname = response['personaname']\n status = False\n else:\n personaname = name = \"Unknown Mystery Person\"\n status=False\n user_dict = {\n 'status': status,\n 'name': name,\n 'personaname': personaname,\n }\n return user_dict\n\ndef generate_greeting(steam_id, incoming):\n \"\"\"\n Generates random greeting after looking up steam ID.\n\n args\n 
steam_id: \n incoming: (true if user joining server, else false)\n \n returns:\n greeting: \n \"\"\"\n\n user = check_name(steam_id)\n status = user['status']\n name = user['name']\n personaname = user['personaname']\n #print(status)\n\n if incoming == True:\n greetings = [\n f'Hello {name}, or is it {personaname}? I don\\'t know... I don\\'t get paid enough.',\n f'Hello {name}, toilet paper is on isle 24.',\n f'Welcome to Walmart, {name}!',\n f'Enjoy shopping at Walmart, {name}!',\n f'Hi, {name} how can-- HEY, NO RIDING ON THE CARTS!',\n f'What do you want, {personaname}?',\n f'Yo, {personaname}, want to hear about the time I ran over a cat?',\n f'We don\\'t sell them, but possums are super tasty, {name}',\n f'Hey {name}, Have you ever seen a grown Walmart Greeter Naked?',\n ]\n if status == True:\n greetings.append(f'Welcome back {name}!')\n greetings.append(f'Wonderful seeing you again, {name}!')\n greetings.append(f'Lookin\\' fly today, {name}')\n greetings.append(f'Welcome back {name}... I\\'m watching you...')\n else: \n greetings = [\n f'Goodbye {name}',\n f'Thank you, come again {name}',\n f'Thank you for shopping at Walmart, see you next time, {name}',\n f'You better not have anything you didn\\'t pay for {name}'\n ]\n if status == True:\n greetings.append(f'I hate to watch {name} go, but I love to watch {name} leave...')\n greetings.append(f'See ya {name}, wouldn\\'t wanna be ya though.')\n\n\n result = greetings[random.randint(0, len(greetings)-1)]\n return result\n\ndef extract_date(line):\n \"\"\"Return a datetime from a log line\"\"\"\n fmt = '%m/%d/%Y %H:%M:%S'\n return datetime.strptime(line[:19], fmt)\n\n\nif __name__ == \"__main__\":\n # parse config file for paths and known ids\n config = ConfigParser()\n config.read('greeter_config.ini')\n vhlog = config['Paths'].get('RECENT_LOG','./example.log')\n lastupdated = config['Paths'].get('LAST_UPDATED','./last_updated.txt')\n webhook_url = config['Discord'].get('WEBHOOK_URL',False)\n steam_api_key = config['Steam'].get('API_KEY',False)\n\n if not steam_api_key:\n raise ValueError(\"Steam API Key is required to look up users. Please add one to greeter_config.ini\")\n if not webhook_url:\n raise ValueError(\"Webhook URL is required to post to discord. 
Please add one to greeter_config.ini\")\n \n suppress_old = True if config['Settings']['SUPPRESS_OLD'] == 'True' else False\n known_ids = dict()\n if config['Known Users']:\n for key in config['Known Users']:\n known_ids[key] = [w.strip() for w in str(config['Known Users'][key]).split(',')]\n\n ## get current time and last updated time\n end_date = datetime.now()\n\n # create lastupdated file if none exists\n if not os.path.exists(os.path.abspath(lastupdated)):\n print('Creating last_updated.txt')\n os.makedirs(os.path.dirname(lastupdated),exist_ok=True)\n new_file = open(lastupdated, 'a').close()\n\n\n with open(lastupdated, 'r') as date_file:\n if os.stat(lastupdated).st_size > 0:\n data = date_file.read(19)\n start_date = datetime.strptime(data, '%m/%d/%Y %H:%M:%S')\n else:\n start_date = datetime(2019,1,1)\n changed = False\n\n ## Prevent posting status more than a minute old\n ## Useful if listener is started when there are a bunch of old logs\n if suppress_old:\n if end_date - start_date > timedelta(seconds=60):\n start_date = end_date - timedelta(seconds=60)\n\n ## check for updates and post to discord if any\n with open(vhlog) as f:\n # from https://stackoverflow.com/questions/18562479/what-is-the-quickest-way-to-extract-entries-in-a-log-file-between-two-dates-in-p\n for line in f:\n if start_date < extract_date(line) < end_date:\n client_id = re.search(r'\\d+$', line).group(0)\n if \"Closing socket\" in line:\n incoming = False\n elif \"Got handshake from client\" in line:\n incoming = True\n greeting = generate_greeting(client_id, incoming)\n \n if webhook_url:\n print('Sending webhook:',greeting)\n webhook = DiscordWebhook(url=webhook_url, content=greeting)\n response = webhook.execute()\n else:\n print('No Webhook_URL specified, didn\\'t send greeting:', greeting)\n changed = True\n\n\n ## set last_updated time to end_date\n if changed == True:\n date_file = open(lastupdated, \"w\")\n date_file.write(end_date.strftime('%m/%d/%Y %H:%M:%S'))\n date_file.close()\n else:\n print(f'No changes found. 
Suppress old messages: {suppress_old}')\n\n\n\n\n\n\n"},"repo_name":{"kind":"string","value":"lekjos/vhserver-walmart-greeter"},"sub_path":{"kind":"string","value":"discord_post.py"},"file_name":{"kind":"string","value":"discord_post.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":7031,"string":"7,031"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":1,"string":"1"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114996,"cells":{"seq_id":{"kind":"string","value":"28076502272"},"text":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\n @Author 坦克手贝塔\n @Date 2023/2/8 0:25\n\"\"\"\nfrom typing import List\n\n\"\"\"\n你是一位系统管理员,手里有一份文件夹列表 folder,你的任务是要删除该列表中的所有 子文件夹,并以 任意顺序 返回剩下的文件夹。\n如果文件夹 folder[i] 位于另一个文件夹 folder[j] 下,那么 folder[i] 就是 folder[j] 的 子文件夹 。\n文件夹的“路径”是由一个或多个按以下格式串联形成的字符串:'/' 后跟一个或者多个小写英文字母。\n例如,\"/leetcode\" 和 \"/leetcode/problems\" 都是有效的路径,而空字符串和 \"/\" 不是。\n\n示例 1:\n输入:folder = [\"/a\",\"/a/b\",\"/c/d\",\"/c/d/e\",\"/c/f\"]\n输出:[\"/a\",\"/c/d\",\"/c/f\"]\n解释:\"/a/b/\" 是 \"/a\" 的子文件夹,而 \"/c/d/e\" 是 \"/c/d\" 的子文件夹。\n\n示例 2:\n输入:folder = [\"/a\",\"/a/b/c\",\"/a/b/d\"]\n输出:[\"/a\"]\n解释:文件夹 \"/a/b/c\" 和 \"/a/b/d/\" 都会被删除,因为它们都是 \"/a\" 的子文件夹。\n\n示例 3:\n输入: folder = [\"/a/b/c\",\"/a/b/ca\",\"/a/b/d\"]\n输出: [\"/a/b/c\",\"/a/b/ca\",\"/a/b/d\"]\n\"\"\"\n\"\"\"\n思路:先排序再挨个扫描。如果当前的路径f不是以我们想要的t开头,说明他不是子路径,就把我们的t更新为f+'/'继续扫描。\n\"\"\"\n\n\nclass Solution:\n @staticmethod\n def removeSubfolders(folder: List[str]) -> List[str]:\n res, t = [], ' '\n for f in sorted(folder):\n if not f.startswith(t):\n res.append(f)\n t = f + '/'\n return res\n"},"repo_name":{"kind":"string","value":"TankManBeta/LeetCode-Python"},"sub_path":{"kind":"string","value":"problem1233_medium.py"},"file_name":{"kind":"string","value":"problem1233_medium.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1569,"string":"1,569"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"zh"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114997,"cells":{"seq_id":{"kind":"string","value":"705515631"},"text":{"kind":"string","value":"import math\nimport os\nimport time\nfrom copy import deepcopy\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\n\ndef init_seeds(seed=0):\n torch.manual_seed(seed)\n\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n if seed == 0: # slower, more reproducible\n cudnn.deterministic = True\n cudnn.benchmark = False\n else: # faster, less reproducible\n cudnn.deterministic = False\n cudnn.benchmark = True\n\n\ndef select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n cpu_request = device.lower() == 'cpu'\n if device and not cpu_request: # if device requested other than 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity\n\n cuda = False if cpu_request else torch.cuda.is_available()\n if cuda:\n c = 1024 ** 2 # bytes to MB\n ng = torch.cuda.device_count()\n if ng > 1 and 
batch_size: # check that batch_size is compatible with device_count\n assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)\n x = [torch.cuda.get_device_properties(i) for i in range(ng)]\n s = 'Using CUDA '\n for i in range(0, ng):\n if i == 1:\n s = ' ' * len(s)\n print(\"%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)\" %\n (s, i, x[i].name, x[i].total_memory / c))\n else:\n print('Using CPU')\n\n print('') # skip a line\n return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()\n\n\ndef is_parallel(model):\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}\n\n\ndef initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # Finds layer indices matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef sparsity(model):\n # Return global model sparsity\n a, b = 0., 0.\n for p in model.parameters():\n a += p.numel()\n b += (p == 0).sum()\n return b / a\n\n\ndef prune(model, amount=0.3):\n # Prune model to requested global sparsity\n import torch.nn.utils.prune as prune\n print('Pruning model... 
', end='')\n for name, m in model.named_modules():\n if isinstance(m, nn.Conv2d):\n prune.l1_unstructured(m, name='weight', amount=amount) # prune\n prune.remove(m, 'weight') # make permanent\n print(' %.3g global sparsity' % sparsity(model))\n\n\ndef fuse_conv_and_bn(conv, bn):\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n with torch.no_grad():\n # init\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n bias=True).to(conv.weight.device)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n # prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2\n fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS\n except:\n fs = ''\n\n print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = models.__dict__[name](pretrained=True)\n\n # Display model properties\n input_size = [3, 224, 224]\n input_space = 'RGB'\n input_range = [0, 1]\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n for x in [input_size, input_space, input_range, mean, std]:\n print(x + ' =', eval(x))\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio\n # scales img(bs,3,y,x) by ratio\n if ratio == 1.0:\n return img\n else:\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n gs = 128#64#32 # (pixels) grid size\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n # Copy attributes from b to a, options to only include [...] 
and to exclude [...]\n for k, v in b.__dict__.items():\n if (len(include) and k not in include) or k.startswith('_') or k in exclude:\n continue\n else:\n setattr(a, k, v)\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)\n"},"repo_name":{"kind":"string","value":"WongKinYiu/ScaledYOLOv4"},"sub_path":{"kind":"string","value":"utils/torch_utils.py"},"file_name":{"kind":"string","value":"torch_utils.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":8846,"string":"8,846"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":2013,"string":"2,013"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114998,"cells":{"seq_id":{"kind":"string","value":"72328303514"},"text":{"kind":"string","value":"import io\n\n# アクセスするときに使う\nimport requests\n\nimport zipfile\n\n\n# 普通に書いた場合\n# with open('/tmp/a.txt','w') as f:\n# f.write('test test')\n#\n# with open('/tmp/a.txt','r') as f:\n# print(f.read())\n#\n\nf =io.StringIO()\nf.write('string io test')\n# 最初に戻る\nf.seek(0)\n\nprint(f.read())\n\n\n# 使用例\n# zipfileをダウンロードをメモリ上で処理するときとかに使用する\n\nurl ='###########'\n\nf =io.BytesIO()\n\nr =requests.get(url)\nf.write(r.content)\n\nwith zipfile.ZipFile(f) as z:\n with z.open('ファイル指定') as r:\n 
print(r.read().decode())"},"repo_name":{"kind":"string","value":"magisystem0408/python_cord_dir"},"sub_path":{"kind":"string","value":"library/io.py"},"file_name":{"kind":"string","value":"io.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":609,"string":"609"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"ja"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}},{"rowIdx":1114999,"cells":{"seq_id":{"kind":"string","value":"70988579675"},"text":{"kind":"string","value":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom Mesh import *\nfrom Utils import *\nimport math\n\ndef F(Pi, Pj, k, r):\n return k * (torch.linalg.norm(Pi - Pj) - r) * (Pj - Pi) / torch.linalg.norm(Pi - Pj)\n\ndef force_magnitude_sum(mesh):\n l = 0\n for vIndex, this in enumerate(mesh.verts):\n force = torch.tensor([0.,0.])\n for key,edge in mesh.connected(vIndex).items():\n other = mesh.verts[key]\n force += F(this, other, k=edge.stiffness, r=edge.rest_length)\n # print(vIndex, force)\n l += torch.linalg.norm(force)\n return l\n\n \n\nmesh = generate_rectangle_mesh_grid((0,10), (10, 0), 5, 5)\n\n# fig, axs = plt.subplots(1,1)\n\n# axs.set_aspect('equal')\n# visualize_mesh(axs, mesh)\n\n# plt.show()\n\nverts = [torch.tensor(x, requires_grad=True) for x in mesh.verts]\ntInd = mesh.tInd\n\nspringMesh = SpringMesh(verts, tInd)\n\nT = [27]\nE = []\nfor t in T:\n E.append(tuple(sorted([tInd[t][0], tInd[t][1]])))\n E.append(tuple(sorted([tInd[t][1], tInd[t][2]])))\n E.append(tuple(sorted([tInd[t][0], tInd[t][2]])))\n\nfor k,v in springMesh.edges.items():\n if k in E:\n v.rest_length = v.length * 2\n v.stiffness = 1\n else:\n v.rest_length = v.length\n\nprint(force_magnitude_sum(springMesh))\n\noptimizer = optim.Adam(springMesh.verts, lr=0.0001)\n\ntheta = 1e-5\n\nhistory = []\nfor i in range(2000):\n optimizer.zero_grad() \n loss = force_magnitude_sum(springMesh)\n history.append(loss.item())\n if(history[-1] < theta):\n print(\"break early\")\n break\n loss.backward(retain_graph=True)\n optimizer.step()\n\nprint(history[-1])\n\nfig, axs = plt.subplots(1,3)\n\naxs[0].set_aspect('equal')\naxs[0].set_xlim((0,10))\naxs[0].set_ylim((0,10))\nvisualize_mesh(axs[0], mesh)\n\naxs[1].set_aspect('equal')\naxs[1].set_xlim((0,10))\naxs[1].set_ylim((0,10))\nvisualize_mesh(axs[1], 
springMesh)\n\naxs[2].plot(history)\n\nplt.show()"},"repo_name":{"kind":"string","value":"COMP0031VRProject/Framework"},"sub_path":{"kind":"string","value":"spring_mesh_example.py"},"file_name":{"kind":"string","value":"spring_mesh_example.py"},"file_ext":{"kind":"string","value":"py"},"file_size_in_byte":{"kind":"number","value":1950,"string":"1,950"},"program_lang":{"kind":"string","value":"python"},"lang":{"kind":"string","value":"en"},"doc_type":{"kind":"string","value":"code"},"stars":{"kind":"number","value":0,"string":"0"},"dataset":{"kind":"string","value":"github-code"},"pt":{"kind":"string","value":"50"}}}],"truncated":false,"partial":true},"paginationData":{"pageIndex":11149,"numItemsPerPage":100,"numTotalItems":1115872,"offset":1114900,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODM2MzA2OCwic3ViIjoiL2RhdGFzZXRzL2JjYi1pbnN0cnVjdC9iY2JfZGF0YSIsImV4cCI6MTc1ODM2NjY2OCwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.PsyyV5kTC8WwEksGrM7SEqnypzCgLt1z1kRnMLQE1NobWVf2KwzGon3JOBAIomCrlNNynbg6c3NfKeGk3RB1BA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
27674411948
""" Author: Carlos Fernando Castaneda Class : CS 2302 Date Modified: May 12, 2019 Instructor: Olac Fuentes Assingment: Lab 8 Algorithm Design Techniques TA: Anindita Nath & Maliheh Zaragan Purpose: to implement both randomized algorithms and backtracking teachniques learned in class to check if two algorithmic identities are the same, and to check the partitions of a new array. """ #Imports various tools to help us calculate the hash tables to be used in this lab import time import random import mpmath import numpy as np #Method that goes through all of the strings of a givrn list, and and checks if they are similar in value def similarities(S): #Starts a counter to keep track of all the count=0 #For i in range of the length of S for i in range(len(S)):#goes through all the strings #For i in range of the length of S for j in range(i,len(S)): #If S[i] is equal to S[j], then it will print both items, and add one to count if(same_values(S[i],S[j])): print(S[i],S[j]) count+=1 #Returns the value of count to the user return count #Method that calculates if two strings are similar in value to each other,=. def same_values(string_1, string_2,calls=1000,tolerance=0.0001): #For i in the range of calls for i in range(calls): #Assigns a random number to variable x x = random.uniform(-mpmath.pi,mpmath.pi) #Sets a new number value1 which takes the information from string_1, and evaluates it value1 = eval(string_1) #Sets a new number value2 which takes the information from string_2, and evaluates it value2 = eval(string_2) #If the absolute value of value1 - value2 is greater than the tolerance value, then it returns false if np.abs(value1-value2)>tolerance: return False #Returns true if the statement abive is incorrect return True #Method that checks if apartion can be made from the two parts of S def arrayPartition(S1,S2): #If the sum of S1 % by 2 is not 0, then there is no partition if sum(S1)%2!=0:#if summation of sum is odd then return error message return "No partition exists" else: #Creates a set needed for the next section res,s,= subset_summation(S1,len(S1)-1,sum(S1)//2) #If the length of s equals 0, then there is no partition if len(s)==0: return "No partition exists" #For every i in s for i in s: #New counter is created used to get the position counter=0 #For every j in S1 for j in S1: #If the value of i equals the value of j, then S1 pops a value if i == j: S1.pop(counter) #Adds one to the counter counter+=1 #Returns the value of s and S1 return s,S1 #Method that creates a new subse def subset_summation(S,last,goal): #If the value of goal equals 0, then it returns true with a new blank array if goal == 0: return True, [] #If the value of goal is less than or greater than 0, then it retrens false with a new blank array if goal<0 or last<0: return False, [] #Takes a new subset res, subset = subset_summation(S,last-1,goal-S[last]) #If res is true, then it will append S[last and retrun true with the subset if res: subset.append(S[last]) return True, subset #Otherwise, it will not take S[last from the list and move on else: return subset_summation(S,last-1,goal) #Starts the timer for the running time for part 1 startTime1=time.time() print('Importing algorithim equations to test: ') print() #Creates a new array called 'part1' which will import all of the functions that will be compared its equalities part1=['mpmath.sin(x)', 'mpmath.cos(x)', 'mpmath.tan(x)', 'mpmath.sec(x)', '-mpmath.sin(x)', '-mpmath.cos(x)', '-mpmath.tan(x)', 'mpmath.sin(-x)', 'mpmath.cos(-x)', 'mpmath.tan(-x)', 
'mpmath.sin(x)/mpmath.cos(x)', '2*mpmath.sin(x/2)*mpmath.cos(x/2)', 'mpmath.sin(x)**2', '1-mpmath.cos(x)**2', '(1-mpmath.cos(2*x))/2', '1/mpmath.cos(x)'] #The actual method t sim_count = similarities(part1) #Prints the count number found in the method similarities print() print('The number of similarities in the equations are a total of: ', sim_count) print() #Ends the timer for the running time for part 1 endTime1=time.time() #Creates the fianl time for the running time for part 1 finalTime1 = endTime1-startTime1 #Starts the timer for the running time for part 2 startTime2=time.time() #Creates a new array of integeres needed for part 2 of the lab part2=[2,4,5,9,12] print('Partition process using array', part2, 'commencing: ') print() #Sends the new array to method arrayPartition print(arrayPartition(part2,part2)) print() #Ends the timer for the running time for part 2 endTime2=time.time() #Creates the fianl time for the running time for part 2 finalTime2 = endTime2-startTime2 #Prints the running times of both part 1 and part 2 print('Running time for Part 1 in: ',finalTime1) print('Running time for Part 2 in: ',finalTime2)
cfcastaneda98/CS2302
Lab8/lab8.py
lab8.py
py
5,453
python
en
code
0
github-code
50
73961133915
import pymysql

from modules.cardgen import CardGen
import modules.config as cfg

if __name__ == "__main__":
    cardgen = CardGen()
    cards = cardgen.get_cards(1)
    print(cards)

    # Connect to the database
    conn = pymysql.connect(host=cfg.host,
                           user=cfg.user,
                           password=cfg.password,
                           db=cfg.db,
                           charset='utf8mb4',
                           cursorclass=pymysql.cursors.DictCursor)
    conn.autocommit(True)

    # Build one multi-row INSERT statement from the generated cards
    query = 'INSERT INTO card (expansion, name, clk, pow, hp, effs, img) VALUES'
    for c in cards:
        query += '\n({0}, "{1}", {2}, {3}, {4}, "{5}", "{6}"),'.format(
            0, c['name'], c['clk'], c['pow'], c['hp'], c['eff'], c['img'])
    query = query[:-1] + ';'
    print("QUERY:\n\n\n" + query + "\n\n\n")
    conn.cursor().execute(query)
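Because the INSERT above is assembled with string formatting, a card name or effect containing quotes would break the SQL. A hedged alternative sketch (not the script's own approach) is to let the pymysql driver do the quoting with a parameterized executemany:

# Parameterized variant; column names follow the table used above.
rows = [(0, c['name'], c['clk'], c['pow'], c['hp'], c['eff'], c['img']) for c in cards]
with conn.cursor() as cur:
    cur.executemany(
        'INSERT INTO card (expansion, name, clk, pow, hp, effs, img) '
        'VALUES (%s, %s, %s, %s, %s, %s, %s)',
        rows)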
TaylorAbraham/Uncharted-Realms-ML
main.py
main.py
py
870
python
en
code
1
github-code
50
37492925281
def main():
    fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']
    print(max(fruits))
    print(min(fruits))
    max_value = min_value = fruits[0]
    for elem in fruits:
        if elem > max_value:
            max_value = elem
        elif elem < min_value:
            min_value = elem
    print("Max:", max_value)
    print("Min:", min_value)


if __name__ == '__main__':
    main()
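For reference, both the built-ins and the manual loop compare the strings lexicographically, so on the list above they agree; a tiny check:

fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']
# String comparison is lexicographic, so 'waxberry' is the max and 'apple' the min.
assert max(fruits) == 'waxberry' and min(fruits) == 'apple'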
ymjrchx/python-demo
Day07/findmax.py
findmax.py
py
392
python
en
code
0
github-code
50
70832032154
'''
Main file

Run in terminal 'python3 main.py' to use project

Dependencies:
- sqlite3
- requests
'''
from api import *
from sql import *
from datetime import date, datetime

# User input start/end date in ISO8601 format
start_date = input("Start date (YYYY-MM-DD, default=[start of repo]): ") + "T"
if start_date == "T":
    start_date = date(2008, 1, 1).strftime('%Y-%m-%dT')
end_date = input("End date (YYYY-MM-DD, default=today): ") + "T"
if end_date == "T":
    end_date = datetime.now().strftime('%Y-%m-%dT')
# ensure dates are inclusive
start_date += "00:00:00Z"
end_date += "23:59:59Z"

# Repeat until a valid owner/name combination is given
while True:
    # User input to receive valid repo information
    repo_owner = input("Repository Owner (default = apache): ")
    if repo_owner == "":
        repo_owner = "apache"
    repo_name = input("Repository Name (default = hadoop): ")
    if repo_name == "":
        repo_name = "hadoop"
    # test if it is a valid repository
    commit_obj = test_repo_info(repo_owner, repo_name)
    if 'message' in commit_obj:  # error in request
        print(commit_obj['message'])
    else:  # valid repo information
        break

if init_db(f"{repo_owner}.{repo_name}") == "Created":
    # new db, have to load data
    page_num = 1
    commits_analyzed = 0
    print("\nStarting to access GitHub...")
    while len(commit_obj) > 0:
        commit_obj = get_commits(repo_owner, repo_name, start_date, end_date, page_num)
        if 'message' in commit_obj:
            print(commit_obj['message'])
            break
        add_commits_to_db(commit_obj)
        commits_analyzed += len(commit_obj)
        print(f"{commits_analyzed} commits analyzed", end="\r")
        page_num += 1
    print(f"{commits_analyzed} commits analyzed")

top_authors = get_top_authors()
print("\nTop Authors")
for i in range(1, min(4, len(top_authors))):
    print(f"{i}: {top_authors[i-1][0]} - {top_authors[i-1][1]}")

longest_window = get_longest_contribution_window()
print("\nLongest Window:")
print(f"{longest_window[0]} - {longest_window[1].days} days")

heatmap, maxnum = generate_heatmap()
days_of_week = ["M ", "T ", "W ", "Th", "F ", "S ", "Su"]
timings = ["12AM-3AM", "3AM-6AM ", "6AM-9AM ", "9AM-12PM", "12PM-3PM", "3PM-6PM ", "6PM-9PM ", "9PM-12AM"]
TABLE_DATA_MAX_LENGTH = max(2, len(str(maxnum)))
MAX_TIMING_LENGTH = 8
print("\nHeatmap:")
# table header row
header = " " * MAX_TIMING_LENGTH
for day in range(7):
    header += "|" + days_of_week[day] + " " * (TABLE_DATA_MAX_LENGTH - 2)
print(header)
# rest of the table rows
for row in range(8):
    row_divider = "-" * (MAX_TIMING_LENGTH + (1 + TABLE_DATA_MAX_LENGTH) * 7)
    print(row_divider)
    row_str = timings[row]  # row header (timing)
    for column in range(7):
        table_value = str(heatmap[column][row])
        row_str += "|" + table_value + " " * (TABLE_DATA_MAX_LENGTH - len(table_value))
    print(row_str)
print("\n")
ARtheboss/github-repo-analysis
main.py
main.py
py
2,967
python
en
code
0
github-code
50
40126941120
# used by cmsDriver when called like
# cmsDriver.py hlt -s HLT:@relval
autoHLT = {
    'fake'       : 'Fake',
    'fake1'      : 'Fake1',
    'fake2'      : 'Fake2',
    'relval50ns' : 'Fake',
    'relval25ns' : 'Fake1',
    'relval2016' : 'Fake2',
    'relval2017' : 'Fake2',
    'relval2018' : 'Fake2',
    'relval2022' : 'Fake2',
    'relval2023' : '2023v12',
    'relval2024' : 'GRun',
    'relval2026' : '75e33',
    'test'       : 'GRun',
}
cms-sw/cmssw
Configuration/HLT/python/autoHLT.py
autoHLT.py
py
424
python
en
code
985
github-code
50
73110306076
from pydub import AudioSegment


def split_mp3(file_name):
    sound = AudioSegment.from_mp3(file_name)
    halfway_point = len(sound) // 2
    # take the first half of the track and repeat it back to back
    first_half = sound[:halfway_point] + sound[:halfway_point]
    # export the result to a new file "first_half_twice.mp3"
    first_half.export("first_half_twice.mp3", format="mp3")
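A minimal usage sketch; "song.mp3" is a hypothetical input path, and pydub needs ffmpeg available on the system to decode MP3s:

# Placeholder input file; writes first_half_twice.mp3 to the working directory.
split_mp3("song.mp3")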
Algostu/chungyo
pose_diff/core/audio.py
audio.py
py
306
python
en
code
6
github-code
50
24360173200
# This can also be done with a bit mask.
arr = [1, 2, 3]
N = 3
sel = [0] * N  # holds the current selection


def perm(idx, check):
    if idx == N:
        print(sel)
        return
    for i in range(N):  # loop over every element
        if (check & (1 << i)) != 0:  # skip elements that were already used -> this is how reuse is checked
            continue
        sel[idx] = arr[i]
        # Because the bits are combined with OR, every bit already set in check
        # stays excluded in the next recursive call.
        perm(idx + 1, check | (1 << i))
        # ...


perm(0, 0)
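perm(0, 0) prints all 3! = 6 orderings of arr. A quick cross-check against the standard library (a different technique from the bitmask recursion above):

from itertools import permutations

assert sorted(permutations([1, 2, 3])) == [(1, 2, 3), (1, 3, 2), (2, 1, 3),
                                           (2, 3, 1), (3, 1, 2), (3, 2, 1)]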
phoenix9373/Algorithm
2020/SWEA_문제/순열_BitMask.py
순열_BitMask.py
py
580
python
ko
code
0
github-code
50
25156426576
import optuna from optuna.pruners import SuccessiveHalvingPruner from optuna.samplers import TPESampler from optuna.trial import TrialState from functools import partial import matplotlib.pyplot as plt from ai.lab.base import LabEntity from ai.lab.trial import Trial from ai.util import print_header class Experiment(LabEntity): def __init__(s, path, clean=False, direction='min', prune=False, **trial_kw): super().__init__(path, clean) assert direction in ['min', 'max'] if prune: assert 'val_data' in trial_kw or 'task' in trial_kw, ( 'Need val_data or task in trial kwargs if prune==True') assert 'task' not in trial_kw, 'TODO: task-based early stopping' s._trial_kw = trial_kw optuna.logging.set_verbosity(optuna.logging.WARNING) s._exp = optuna.create_study( study_name=str(s.path), storage=f'sqlite:///{s.path}/experiment.db', load_if_exists=True, direction='minimize' if direction == 'min' else 'maximize', sampler=TPESampler(), pruner=SuccessiveHalvingPruner() if prune else None, ) s._prune = prune @property def trial_data(s): return s._exp.trials @property def best_hparams(s): return s._exp.best_params def run(s, n, fn): print(f'\nRUNNING EXPERIMENT {s.path} (n={n})\n') s._exp.optimize(partial(s._run, fn), n_trials=n) def show_plot(s, hparam, show_pruned=False, only_best=None): trials = [] for t in s._exp.trials: if show_pruned or t.state == TrialState.COMPLETE: trials.append(t) if only_best is not None: idx = int(len(trials) * only_best) trials = sorted(trials, key=lambda t: t.value)[:idx] x, y = [], [] for t in trials: x.append(t.params[hparam]) y.append(t.value) plt.scatter(x, y) plt.show() def _run(s, fn, optuna_trial): id = str(optuna_trial.number) print_header(f'TRIAL {id}') trial = _ExpTrial( s.path / f'trials/{id}', optuna_trial, s._prune, **s._trial_kw, ) result = fn(trial) print(f'RESULT: {result:.4f}\n') return result class _ExpTrial(Trial): def __init__(s, path, optuna_trial, prune, **kw): super().__init__(path, val_stopper=s.pruner, **kw) s._optuna_trial = optuna_trial s._prune = prune s.hp = _HyperParams(optuna_trial) def pruner(s, step, val_loss): s._optuna_trial.report(val_loss, step) if s._prune and s._optuna_trial.should_prune(): print_header('') print('PRUNED\n') raise optuna.TrialPruned() return False class _HyperParams: def __init__(s, optuna_trial, prefix=None): s._optuna = optuna_trial s._prefix = prefix def lin(s, name, min_, max_, step=1): name = s._prefix_name(name) has_float = False for x in [min_, max_, step]: if isinstance(x, float): has_float = True break if has_float: val = s._optuna.suggest_float(name, min_, max_, step=step) else: for x in [min_, max_, step]: assert isinstance(x, int) val = s._optuna.suggest_int(name, min_, max_, step=step) print(f'{name}: {val}') return val def log(s, name, min_, max_): name = s._prefix_name(name) if isinstance(min_, float) or isinstance(max_, float): val = s._optuna.suggest_float(name, min_, max_, log=True) else: assert isinstance(min_, int) and isinstance(max_, int) val = s._optuna.suggest_int(name, min_, max_, log=True) print(f'{name}: {val}') return val def lst(s, name, items): name = s._prefix_name(name) val = s._optuna.suggest_categorical(name, items) print(f'{name}: {val}') return val def _prefix_name(s, name): if s._prefix is None: return name return f'{s._prefix}.{name}'
calvinpelletier/ai
lab/exp.py
exp.py
py
4,226
python
en
code
0
github-code
50
9349714998
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route('/')
def inicio():
    return render_template("formulario.html")


@app.route('/procesar', methods=['POST'])
def procesar():
    palabra = request.form.get("palabra")
    significado = request.form.get("significado")
    return render_template("mostrar.html", palabra=palabra, significado=significado)


if __name__ == "__main__":
    app.run(port=8088, debug=True)
Fersnake22/Sustitucion-de-CLI-por-Web
main.py
main.py
py
452
python
en
code
0
github-code
50
36817192015
from __future__ import print_function

import os

import yaml

from config import config

destination = os.path.expanduser("~/.exo")
if not os.path.exists(destination):
    os.mkdir(destination, 0o755)
    print("created configuration folder:", destination)

config_destination = os.path.join(destination, "template.yaml")
if not os.path.exists(config_destination):
    pwd = os.getcwd()
    cfg = config.load("config/config.yaml")
    cfg["template"]["channel"] = os.path.join(pwd, "config/2012.input.yaml")
    cfg["template"]["plot"] = os.path.join(pwd, "config/2012.plot.yaml")

    with open(config_destination, "w") as output_:
        yaml.dump(cfg, output_)

    print("the application configuration is saved in:", config_destination)

print("the system is setup for running")
baites/exo_plots
install.py
install.py
py
790
python
en
code
0
github-code
50
26895369081
from requests import request, exceptions as req_exceptions from .microsoft_api_auth import * from connectors.core.connector import get_logger, ConnectorError from connectors.core.utils import update_connnector_config logger = get_logger('azure-log-analytics') MANAGE_SERVER_URL = 'https://management.azure.com' MANAGE_API_VERSION = '2020-08-01' LOG_SERVER_URL = 'https://api.loganalytics.io' LOG_API_VERSION = '2022-10-27_Preview' class AzureLogAnalytics(object): def __init__(self, config): self.server_url = LOG_SERVER_URL self.manage_server_url = MANAGE_SERVER_URL self.verify_ssl = config.get('verify_ssl') self.ms_auth = MicrosoftAuth(config) self.tenant_id = config.get('tenant_id') self.connector_info = config.pop('connector_info', '') self.manage_token = self.ms_auth.validate_token(config, self.connector_info) self.log_token = self.ms_auth.validate_log_token(config, self.connector_info) self.api_version = MANAGE_API_VERSION def api_request(self, method, endpoint, config, params=None, data=None, headers={}, manage_api_endpoint=False): try: if manage_api_endpoint: headers = { 'Authorization': self.manage_token, 'Content-Type': 'application/json' } service_url = self.manage_server_url + endpoint params['api-version'] = MANAGE_API_VERSION else: headers = { 'Authorization': self.log_token, 'Content-Type': 'application/json' } service_url = self.server_url + endpoint params['api-version'] = LOG_API_VERSION try: response = request(method, service_url, headers=headers, params=params, json=data, verify=self.verify_ssl) logger.debug("Response Status Code: {0}".format(response.status_code)) logger.debug("Response: {0}".format(response.text)) logger.debug("API Header: {0}".format(response.headers)) if response.status_code in [200, 201, 204]: if response.text != "": return response.json() else: return True else: if response.text != "": err_resp = response.json() failure_msg = err_resp['error']['message'] error_msg = 'Response [{0}:{1} Details: {2}]'.format(response.status_code, response.reason, failure_msg if failure_msg else '') else: error_msg = 'Response [{0}:{1}]'.format(response.status_code, response.reason) logger.error(error_msg) raise ConnectorError(error_msg) except req_exceptions.SSLError: logger.error('An SSL error occurred') raise ConnectorError('An SSL error occurred') except req_exceptions.ConnectionError: logger.error('A connection error occurred') raise ConnectorError('A connection error occurred') except req_exceptions.Timeout: logger.error('The request timed out') raise ConnectorError('The request timed out') except req_exceptions.RequestException: logger.error('There was an error while handling the request') raise ConnectorError('There was an error while handling the request') except Exception as err: raise ConnectorError(str(err)) except Exception as err: raise ConnectorError(str(err)) def check_payload(payload): final_payload = {} for key, value in payload.items(): if isinstance(value, dict): nested = check_payload(value) if len(nested.keys()) > 0: final_payload[key] = nested elif value is not None and value != '': final_payload[key] = value return final_payload def build_payload(payload): payload = {k: v for k, v in payload.items() if v is not None and v != ''} return payload def execute_query(config, params): try: al = AzureLogAnalytics(config) endpoint = '/v1/workspaces/{0}/query'.format(config.get('workspace_id')) workspaces = config.get("workspace_name") if workspaces: workspaces = workspaces.split(",") payload = { 'query': params.get('query'), 'timespan': 
params.get('timespan'), 'workspaces': workspaces } payload = build_payload(payload) logger.debug("Payload: {0}".format(payload)) response = al.api_request("POST", endpoint, config=config, data=payload, params={}) return response except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def list_saved_searches(config, params): try: al = AzureLogAnalytics(config) endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches'.format( config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name')) response = al.api_request("GET", endpoint, config, manage_api_endpoint=True, params={}) return response except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def get_saved_searches(config, params): try: al = AzureLogAnalytics(config) endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format( config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'), params.get('savedSearchId')) response = al.api_request("GET", endpoint, config, manage_api_endpoint=True, params={}) return response except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def create_saved_searches(config, params): try: al = AzureLogAnalytics(config) endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format( config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'), params.get('savedSearchId')) additional_fields = params.get('additional_fields') payload = { "etag": params.get('etag'), "properties": { "category": params.get('category'), "displayName": params.get('displayName'), "query": params.get('query') } } if additional_fields: payload['properties'].update(additional_fields) payload = check_payload(payload) logger.debug("Payload: {0}".format(payload)) response = al.api_request("PUT", endpoint, config, data=payload, manage_api_endpoint=True, params={}) return response except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def update_saved_searches(config, params): try: al = AzureLogAnalytics(config) endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format( config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'), params.get('savedSearchId')) additional_fields = params.get('additional_fields') payload = { "etag": "*", "properties": { "category": params.get('category'), "displayName": params.get('displayName'), "query": params.get('query') } } if additional_fields: payload['properties'].update(additional_fields) payload = check_payload(payload) logger.debug("Payload: {0}".format(payload)) response = al.api_request("PUT", endpoint, config, data=payload, manage_api_endpoint=True, params={}) return response except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def delete_saved_search(config, params): try: al = AzureLogAnalytics(config) endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format( config.get('subscription_id'), config.get('resource_group_name'), 
config.get('workspace_name'), params.get('savedSearchId')) response = al.api_request("DELETE", endpoint, config, manage_api_endpoint=True, params={}) return {'result': 'Deleted Saved Search {0} successfully'.format(params.get('savedSearchId'))} except Exception as err: logger.exception("{0}".format(str(err))) raise ConnectorError("{0}".format(str(err))) def check(config, connector_info): try: ms = MicrosoftAuth(config) config_id = config['config_id'] if 'accessToken' in config and 'logAccessToken' in config: ms.validate_token(config, connector_info) and ms.validate_log_token(config, connector_info) elif 'accessToken' not in config and 'logAccessToken' in config: token_resp = ms.generate_token() config['accessToken'] = token_resp.get('accessToken') config['expiresOn'] = token_resp.get('expiresOn') config['refresh_token'] = token_resp.get('refresh_token') update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config, config['config_id']) and ms.validate_log_token(config, connector_info) elif 'accessToken' in config and 'logAccessToken' not in config: token_resp = ms.generate_token(LOG_SCOPE) config['logAccessToken'] = token_resp['accessToken'] config['logExpiresOn'] = token_resp['expiresOn'] config['logRefreshToken'] = token_resp.get('refresh_token') update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config, config['config_id']) and ms.validate_log_token(config, connector_info) else: token_resp = ms.generate_token() config['accessToken'] = token_resp.get('accessToken') config['expiresOn'] = token_resp.get('expiresOn') config['refresh_token'] = token_resp.get('refresh_token') token_resp = ms.generate_token(LOG_SCOPE) config['logAccessToken'] = token_resp['accessToken'] config['logExpiresOn'] = token_resp['expiresOn'] config['logRefreshToken'] = token_resp.get('refresh_token') update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config, config['config_id']) and ms.validate_log_token(config, connector_info) config['config_id'] = config_id return True except Exception as err: raise ConnectorError(str(err)) operations = { 'execute_query': execute_query, 'list_saved_searches': list_saved_searches, 'get_saved_searches': get_saved_searches, 'create_saved_searches': create_saved_searches, 'update_saved_searches': update_saved_searches, 'delete_saved_search': delete_saved_search }
fortinet-fortisoar/connector-azure-log-analytics
azure-log-analytics/operations.py
operations.py
py
11,944
python
en
code
0
github-code
50
11964147517
from collections import Counter, defaultdict
from typing import List


class Solution:
    def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
        array = sorted(set(nums), reverse=True)
        frequency = Counter(nums)
        hash_map = defaultdict(int)
        length = len(nums)
        # Walk the distinct values from largest to smallest; after subtracting a value's
        # frequency, `length` is the count of numbers strictly smaller than that value.
        for key in array:
            length -= frequency[key]
            hash_map[key] = length
        answer = []
        for item in nums:
            answer.append(hash_map[item])
        return answer
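A small usage check on the standard example for this problem (the solution runs in O(n log n) because of the sort of the distinct values):

assert Solution().smallerNumbersThanCurrent([8, 1, 2, 2, 3]) == [4, 0, 1, 1, 3]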
duressa-feyissa/A2SV_Programming
1365-how-many-numbers-are-smaller-than-the-current-number/1365-how-many-numbers-are-smaller-than-the-current-number.py
1365-how-many-numbers-are-smaller-than-the-current-number.py
py
469
python
en
code
0
github-code
50
33657136040
from django.urls import path, include
from rest_framework.routers import Route

from app.urls import router

from . import views

app_name = 'user'

router.routes += [
    # User View Route
    Route(
        url=r'^user{trailing_slash}$',
        mapping={
            'get': 'view_user',
            'post': 'create_user',
        },
        name='user-view',
        detail=False,
        initkwargs={'suffix': 'View'}
    ),
    # User Detail Route
    Route(
        url=r'user{trailing_slash}{lookup}{trailing_slash}$',
        mapping={
            'get': 'view_user_by_id',
            'patch': 'update_user_by_id',
            'delete': 'destroy_user_by_id'
        },
        name='user-detail',
        detail=True,
        initkwargs={'suffix': 'Detail'}
    ),
]

router.register('user', views.UserViewSet)
router.register('user', views.UserDetailViewSet)

urlpatterns = [
    path('', include(router.urls)),
    path('token/', views.AuthTokenViewSet.as_view(), name='auth-token')
]
Diaga/MARS-Server
app/user/urls.py
urls.py
py
997
python
en
code
1
github-code
50
35134699125
from .base import BaseCommand from app.controllers import Commands from app.utilities import typings, errors class Command(BaseCommand): name = "start" usage = "start <tournament_id>" description = "Start or Resume the tournament mode" def __init__(self) -> None: self.commands = Commands(package="app.commands.cmd_start") self.is_running = True def __quit(self): class Command(BaseCommand): name = "quit" usage = "quit" description = "quit the tournament mode" reload = False def run(_, context: typings.Context, **kwargs): self.is_running = False return Command() def _check_rounds(self, tournament): state = tournament.state try: tournament.round_instances[state.current_round] except IndexError: generated_round = tournament.generate_round() tournament.round_instances.append(generated_round) return tournament.save() return tournament def _check_commands(self, tournament): # Hide & unhide command based on state curr_round = tournament.state.current_round + 1 disable_previous = False if curr_round > 1 else True self.commands.cache["previous"].is_disabled = disable_previous disable_next = True if curr_round == tournament.rounds else False self.commands.cache["next"].is_disabled = disable_next disable_end = False if len(tournament.round_instances) == tournament.rounds else True self.commands.cache["end"].is_disabled = disable_end enable_commit = True if tournament.state.is_ongoing else False self.commands.cache["commit"].is_disabled = enable_commit def run(self, context: typings.Context, args: list): context = context.copy() # i don't want to modify the 'main' context self.commands.cache["quit"] = self.__quit() tournament_view = context["views"]["tournament"] tournament_model = context["models"]["Tournament"] tournament_id = self.pop_arg(args) tournament = tournament_model.find_one(tournament_id) if not tournament: raise errors.GenericError(f"Tournament with the id [{tournament_id}] doesn't exist") tournament = self._check_rounds(tournament) commands = self.commands.cache.values() self._check_commands(tournament) tournament_view.render_selected_tournament(tournament, commands) while self.is_running: try: input_content = input("-> : ").strip() args = input_content.split(" ") command_name = args.pop(0) self.commands.execute(command_name, args=args, context=context, tournament=tournament) tournament = tournament.save() except Exception as e: if not hasattr(e, "custom"): errors.GenericError(e) if not self.is_running: # Its not an error, but its a way out raise errors.GenericError("Tournament Mode has been closed", title="Note") tournament = self._check_rounds(tournament) self._check_commands(tournament) tournament_view.render_selected_tournament(tournament, commands)
Madscientiste/OpenClassrooms_P4
app/commands/start.py
start.py
py
3,357
python
en
code
0
github-code
50
7408239741
import os import re import pandas as pd import numpy as np import logging from time import strptime from collections import defaultdict from src.utils.files import save_json from src.utils.refs import aux_paths, params def read_csv(path, build_mode=False): _df = pd.read_csv(path) if build_mode: _df = _df[0:50] return _df def preprocess(data_path, args, load_if_avail=True, save_fe=True, save_all=True): """ Feature cleaning Feature engineering Feature selection (minimally) """ t_name = data_path.split('.csv')[0] all_fe_path = f'{args.out_folder}/{t_name}_df_fe_all.csv' logging.info(f'-- starting {t_name} fe') if load_if_avail and os.path.exists(all_fe_path): logging.info(f'Opening fe data for all...') df = read_csv(all_fe_path, args.build_mode) else: # Load or generate main features fe_path = f'{args.out_folder}/{t_name}_df_fe.csv' if load_if_avail and os.path.exists(fe_path): logging.info(f'Opening fe data for main...') df = read_csv(fe_path, args.build_mode) else: logging.info(f'Generating fe data for main...') df = read_csv(os.path.join(args.data_folder, data_path), args.build_mode) df = generate_main_fe(df, fe_path, save_fe=save_fe) # Load or generate auxiliary features aux_df = pd.DataFrame() aux_cols = params['aux_cols'] if args.with_hdb_data: aux_cols += params['hdb_cols'] for aux in aux_paths.keys(): # build hdb aux data only if opted in args if aux=='hdb_data' and not args.with_hdb_data: next aux_fe_path = f'{args.out_folder}/{t_name}_df_fe_{aux}.csv' if load_if_avail and os.path.exists(aux_fe_path): logging.info(f'Opening fe auxiliary data for "{aux}"...') _aux_df = read_csv(aux_fe_path, args.build_mode) else: logging.info(f'Generating fe auxiliary data for "{aux}"...') # load all auxiliary data (no build mode here) _aux_df = generate_aux_fe( df, aux, os.path.join(args.data_folder, aux_paths[aux]), aux_fe_path, save_fe=save_fe ) keep_columns = [i for i in _aux_df.columns if i in aux_cols] _aux_df = _aux_df[keep_columns] # concat to frame aux_df = pd.concat([aux_df, _aux_df], axis=1) logging.info(f'Auxiliary data has {len(aux_df)} rows, {len(aux_df.columns)} cols') # Combine features df = pd.concat([df, aux_df], axis=1) logging.info(f'Final data has {len(df)} rows, {len(df.columns)} cols') # Save frame if save_all: df.to_csv(all_fe_path, index=False) logging.info(f'-- complete {t_name} fe') return df def clean_flat_type(df): df['flat_type'] = df['flat_type'].apply(lambda x: str(x).lower()) df.loc[df['flat_type'] == "1-room", 'flat_type'] = "1 room" df.loc[df['flat_type'] == "2-room", 'flat_type'] = "2 room" df.loc[df['flat_type'] == "3-room", 'flat_type'] = "3 room" df.loc[df['flat_type'] == "4-room", 'flat_type'] = "4 room" df.loc[df['flat_type'] == "5-room", 'flat_type'] = "5 room" return df def generate_main_fe(df, fe_path, save_fe=True): """ Note that this section is manual, created by domain knowledge. 
""" # resale timing df[['resale_year', 'resale_month']] = df['month'].str.split('-', 1, expand=True) df['resale_quarter'] = df['resale_month'].apply(lambda m: (int(m)-1)//3 + 1) df['flat_age'] = df['resale_year'].astype(int)-df['lease_commence_date'].astype(int) # create alternative dep var if 'resale_price' in df.columns: df['resale_price_sqm'] = df['resale_price']/df['floor_area_sqm'] # flat type df = clean_flat_type(df) # count of 4 occurences in block no df['block'] = df['block'].apply(lambda x: x.count('4')) # convert to 01 to 06, 06 to 10, 10 to 15, 16 to 21, 21 to 25, 25 to 30, # 31 to 36, 36 to 40, 40 to 45, 46 to 51 # data is messy as it has lots of overlaps, so the partioning is to make # it more systematic # 01 to 06 df.loc[df['storey_range'] == "01 to 03", 'storey_range'] = "01 to 06" df.loc[df['storey_range'] == "01 to 05", 'storey_range'] = "01 to 06" df.loc[df['storey_range'] == "04 to 06", 'storey_range'] = "01 to 06" # 06 to 10 df.loc[df['storey_range'] == "07 to 09", 'storey_range'] = "06 to 10" # 10 to 15 df.loc[df['storey_range'] == "10 to 12", 'storey_range'] = "10 to 15" df.loc[df['storey_range'] == "11 to 15", 'storey_range'] = "10 to 15" df.loc[df['storey_range'] == "13 to 15", 'storey_range'] = "10 to 15" # 16 to 21 df.loc[df['storey_range'] == "16 to 18", 'storey_range'] = "16 to 21" df.loc[df['storey_range'] == "16 to 20", 'storey_range'] = "16 to 21" df.loc[df['storey_range'] == "19 to 21", 'storey_range'] = "16 to 21" # 21 to 25 df.loc[df['storey_range'] == "22 to 24", 'storey_range'] = "21 to 25" # 25 to 30 df.loc[df['storey_range'] == "25 to 27", 'storey_range'] = "25 to 30" df.loc[df['storey_range'] == "26 to 30", 'storey_range'] = "25 to 30" df.loc[df['storey_range'] == "28 to 30", 'storey_range'] = "25 to 30" # 31 to 36 df.loc[df['storey_range'] == "31 to 33", 'storey_range'] = "31 to 36" df.loc[df['storey_range'] == "31 to 35", 'storey_range'] = "31 to 36" df.loc[df['storey_range'] == "34 to 36", 'storey_range'] = "31 to 36" # 36 to 40 df.loc[df['storey_range'] == "37 to 39", 'storey_range'] = "36 to 40" # 40 to 45 df.loc[df['storey_range'] == "40 to 42", 'storey_range'] = "40 to 45" df.loc[df['storey_range'] == "43 to 45", 'storey_range'] = "40 to 45" # 46 to 51 df.loc[df['storey_range'] == "46 to 48", 'storey_range'] = "46 to 51" df.loc[df['storey_range'] == "49 to 51", 'storey_range'] = "46 to 51" # save frame if opted if save_fe: df.to_csv(fe_path, index=False) return df def generate_aux_hdb(df, aux_df, aux, save_fe=True): dnew_columns = defaultdict(dict) # median_resale_price sheet = 'median-resale-prices-for-regist' hdb = aux_df[sheet].copy() aux_df.pop(sheet) hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x) hdb = clean_flat_type(hdb) hdb = hdb.rename(columns={'quarter': 'resale_quarter', 'price': 'median_resale_price'}) hdb[['resale_year', 'resale_quarter']] = hdb['resale_quarter'].str.split('-', 1, expand=True) hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x)) hdb['resale_quarter'] = hdb['resale_quarter'].apply(lambda x: int(x[1])) hdb['town'] = hdb['town'].apply(lambda x: str(x).lower()) df['town'] = df['town'].apply(lambda x: str(x).lower()) df_x_aux = pd.merge(df, hdb, how='left')[['median_resale_price']] # no_of_resale_applications sheet = 'number-of-resale-applications-r' hdb = aux_df[sheet].copy() aux_df.pop(sheet) hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x) hdb = clean_flat_type(hdb) hdb = hdb.rename(columns={'quarter': 'resale_quarter'}) hdb[['resale_year', 
'resale_quarter']] = hdb['resale_quarter'].str.split('-', 1, expand=True) hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x)) hdb['resale_quarter'] = hdb['resale_quarter'].apply(lambda x: int(x[1])) df_x_aux = pd.concat([df_x_aux, pd.merge(df, hdb, how='left')[['no_of_resale_applications']]], axis=1) # resale_transactions sheet = 'resale-transactions-by-flat-typ' hdb = aux_df[sheet].copy() aux_df.pop(sheet) hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x) hdb = clean_flat_type(hdb) hdb = hdb.rename(columns={'financial_year': 'resale_year'}) hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x)) df_x_aux = pd.concat([df_x_aux, pd.merge(df, hdb, how='left')[['resale_transactions']]], axis=1) # construction status sheet = 'completion-status-of-hdb-reside' hdb = aux_df[sheet].copy() aux_df.pop(sheet) hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x) hdb = hdb.rename(columns={'financial_year': 'resale_year', 'town_or_estate': 'town'}) hdb = hdb.groupby(['resale_year', 'town', 'hdb_or_dbss', 'status'])['no_of_units'].sum().unstack(['hdb_or_dbss', 'status']).reset_index() hdb.columns = [i[0] if i[1]=='' else i[0]+'_'+i[1] for i in hdb.columns] hdb['town'] = hdb['town'].apply(lambda x: str(x).lower()) df_x_aux = pd.concat([ df_x_aux, pd.merge(df, hdb, how='left')[['HDB_Completed', 'HDB_Under Construction', 'DBSS_Completed', 'DBSS_Under Construction']]], axis=1) # new columns dnew_columns[aux] = [ 'median_resale_price', 'no_of_resale_applications', 'HDB_Completed', 'HDB_Under Construction', 'DBSS_Completed', 'DBSS_Under Construction', 'resale_transactions'] return df_x_aux, dnew_columns def generate_aux_fe(df, aux, aux_fe_in_path, aux_fe_out_path, save_fe=True): # load aux frame(s) if aux == 'macro' or aux == 'hdb_data': if aux == 'macro': sheets = ['annual', 'quarterly', 'monthly'] elif aux == 'hdb_data': sheets = ['median-resale-prices-for-regist', 'number-of-resale-applications-r', 'resale-transactions-by-flat-typ', 'completion-status-of-hdb-reside'] aux_df = {} for sheet in sheets: aux_df[sheet] = pd.read_excel( aux_fe_in_path, sheet_name=sheet, engine='openpyxl') else: aux_df = read_csv(aux_fe_in_path) # create features per aux type if aux == 'demographics': aux_df, dnew_columns = generate_aux_demographic(df, aux_df, aux) elif aux == 'commercial': aux_df, dnew_columns = generate_aux_commercial(df, aux_df, aux) elif aux == 'hawker': aux_df, dnew_columns = generate_aux_hawker(df, aux_df, aux) elif aux == 'station': aux_df, dnew_columns = generate_aux_station(df, aux_df, aux) elif aux == 'malls': aux_df, dnew_columns = generate_aux_malls(df, aux_df, aux) elif aux == 'prisch': aux_df, dnew_columns = generate_aux_prisch(df, aux_df, aux) elif aux == 'secsch': aux_df, dnew_columns = generate_aux_secsch(df, aux_df, aux) elif aux == 'macro': aux_df, dnew_columns = generate_aux_macro(df, aux_df, aux) elif aux == 'hdb_data': aux_df, dnew_columns = generate_aux_hdb(df, aux_df, aux) else: raise NotImplementedError # save frame if opted if save_fe: aux_df.to_csv(aux_fe_out_path, index=False) save_json(dnew_columns, aux_fe_out_path.split('.csv')[0]+'_cols.json') return aux_df def generate_aux_macro(df, aux_df, aux): dnew_columns = defaultdict(dict) for col in ['resale_year', 'resale_month', 'resale_quarter']: df[col] = df[col].astype(int) # annual sheet='annual' _aux_df = aux_df[sheet].copy() aux_df.pop(sheet) orig_col_names = _aux_df.columns new_col_names = [sheet+'_'+abbreviate_col_name(col) for col in orig_col_names] _aux_df.columns = 
new_col_names dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names)) _aux_df = _aux_df.applymap(lambda x: np.nan if pd.isnull(x) else float(re.sub("[^\d\.]", "", str(x)))) df = pd.merge(df, _aux_df, how='left', left_on='resale_year', right_on=sheet+'_Variables') df = df.drop(columns=sheet+'_Variables') # quarterly sheet='quarterly' _aux_df = aux_df[sheet].copy() aux_df.pop(sheet) merge_on = ['resale_year', 'resale_quarter'] _aux_df[merge_on] = _aux_df['Variables'].str.split(' ', 1, expand=True) _aux_df['resale_quarter'] = _aux_df['resale_quarter'].apply(lambda q: q[0]) _aux_df = _aux_df.drop(columns='Variables') orig_col_names = _aux_df.columns new_col_names = [sheet+'_'+abbreviate_col_name(col) if col not in merge_on else col for col in orig_col_names] for col in merge_on: _aux_df[col] = _aux_df[col].astype(int) df[col] = df[col].astype(int) _aux_df.columns = new_col_names dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names)) _aux_df = _aux_df.applymap(lambda x: float(re.sub("[^\d\.]", "", str(x)))) df = pd.merge(df, _aux_df, how='left', on=merge_on) # monthly sheet='monthly' _aux_df = aux_df[sheet].copy() aux_df.pop(sheet) merge_on = ['resale_year', 'resale_month'] _aux_df[merge_on] = _aux_df['Variables'].str.split(' ', 1, expand=True) _aux_df['resale_month'] = _aux_df['resale_month'].apply(lambda m: strptime(m,'%b').tm_mon) _aux_df = _aux_df.drop(columns='Variables') orig_col_names = _aux_df.columns new_col_names = [sheet+'_'+abbreviate_col_name(col) if col not in merge_on else col for col in orig_col_names] for col in merge_on: _aux_df[col] = _aux_df[col].astype(int) df[col] = df[col].astype(int) _aux_df.columns = new_col_names dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names)) _aux_df = _aux_df.applymap(lambda x: float(re.sub("[^\d\.]", "", str(x)))) df = pd.merge(df, _aux_df, how='left', on=merge_on) return df, dnew_columns def generate_aux_demographic(df, aux_df, aux): dnew_columns = defaultdict(dict) conv_dict = { 'kids': ['0-4', '5-9', '10-14'], # dependents 'youth': ['15-19', '20-24'], # students/ part-timers 'youngads': ['25-29', '30-34', '35-39'], # young families 'middle': ['40-44', '45-49', '50-54'], # older families 'older': ['55-59', '60-64'], # retirees 'elderly': ['65-69', '70-74','75-79', '80-84', '85+'] # older group } rev_dict = {} for k,v in conv_dict.items(): for i in v: rev_dict[i] = k aux_df['age_grp'] = aux_df['age_group'].apply(lambda x: rev_dict[x]) aux_df = aux_df.groupby(['subzone', 'age_grp'])['count'].sum().unstack('age_grp').reset_index() df_x_aux = pd.merge(df, aux_df, how='left', on='subzone').iloc[:,-6:] df_x_aux.columns = [aux+'_'+i for i in df_x_aux.columns] dnew_columns[aux] = list(df_x_aux.columns) # 'city hall' and 'gali batu' does not have some information # we assume this is because none of a certain age group lives in that vicinity # thus we will fillna with 0 df_x_aux = df_x_aux.fillna(0) return df_x_aux, dnew_columns def generate_aux_commercial(df, aux_df, aux): dnew_columns = defaultdict(dict) # distance from each location df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), aux_df, aux) # distance from nearest type (grouped/min) grp_col_name = 'type' df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False) return df_x_aux, dnew_columns def generate_aux_prisch(df, aux_df, aux): dnew_columns = defaultdict(dict) # distance from each location df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), aux_df, aux) # 
create top 50 variable aux_df['top50'] = [ '' if i>0 else None for i in aux_df[ ['KiasuRank', '2020over', '2019over', '2018over','2017over'] ].sum(axis=1)] # distance from nearest top school (grouped/min) grp_col_name = 'top50' df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False) # create dummies that permit phase applications for pri schools df_x_aux['prisch_top50_<=1km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if x<=1 else 0) df_x_aux['prisch_top50_1to2km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if (x>1 and x<=2) else 0) df_x_aux['prisch_top50_2to4km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if (x>2 and x<=4) else 0) return df_x_aux, dnew_columns def Haversine(lat1, lon1, lat2, lon2, roundoff=4): """ Code Source: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude This uses the ‘haversine’ formula to calculate the great-circle distance between two points – that is, the shortest distance over the earth’s surface – giving an ‘as-the-crow-flies’ distance between the points (ignoring any hills they fly over, of course!). Haversine formula: a = sin²(Δφ/2) + cos φ1 ⋅ cos φ2 ⋅ sin²(Δλ/2) c = 2 ⋅ atan2( √a, √(1−a) ) d = R ⋅ c where φ is latitude, λ is longitude, R is earth’s radius (mean radius = 6,371km); note that angles need to be in radians to pass to trig functions! """ R = 6371.0088 lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2]) dlat = lat2 - lat1 dlon = lon2 - lon1 a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2) ** 2 c = 2 * np.arctan2(a**0.5, (1-a)**0.5) d = R * c return round(d, roundoff) def get_distance_between_main_aux(df_row, aux_row, verbose=False): distance = Haversine( df_row['latitude'], df_row['longitude'], aux_row['lat'], aux_row['lng'] ) if verbose: subzone = df_row['subzone'] auxname = aux_row['name'] print(f'Distance between "{subzone}" and "{auxname}": {distance}') return distance def abbreviate_col_name(abrv_name): if ' ' in abrv_name: abrv_name = ''.join([s[0] if s[0].isupper() else ( s if s.isnumeric() else '')for s in abrv_name.split(' ')]) abrv_name = abrv_name.replace('_', '') return abrv_name def create_main_aux_dist_cols(df, _aux_df, aux='', aux_col_name='name', df_lat_name='latitude', df_lng_name='longitude', aux_lat_name='lat', aux_lng_name='lng', verbose=False, new_frame=True): """ Assumes the following column naming convensions: df: has columns ['latitude', 'longitude'] aux: has columns ['name', 'lat', 'lng'] """ out_df = pd.DataFrame() dcol_conversion = defaultdict(str) for aux_ix, aux_row in _aux_df.iterrows(): # generate new column names abrv_name = aux_row[aux_col_name] abrv_name = abbreviate_col_name(abrv_name) col_name = aux + '_' + abrv_name # store column conversion if col_name in dcol_conversion.values(): col_name += 'V' + str(aux_ix) # create a new unique column dcol_conversion[aux_row[aux_col_name]] = col_name # generate columns out_df[col_name] = Haversine( df[df_lat_name], df[df_lng_name], aux_row[aux_lat_name], aux_row[aux_lng_name]) # complete if verbose: print(f'Created new column "{col_name}"...') if new_frame: return out_df, dcol_conversion else: return pd.concat([df, out_df], axis=1), dcol_conversion def mmin(df): return df.min(axis=1) def create_grouped_cols( dnew_columns, df, _aux_df, aux='', grp_col_name='type', function=mmin, verbose=False, new_frame=True): out_df = pd.DataFrame() dcol_conversion = defaultdict(str) for grp_ix, grp in 
enumerate(_aux_df[grp_col_name].unique()): # we do not create new cols for missings if grp is None: continue # generate new column names col_name = aux + '_' + grp_col_name + '_' + grp # store column conversion if col_name in dcol_conversion.values(): col_name += '_' + str(grp_ix) # create a new unique column dcol_conversion[grp] = col_name relevant_columns = [dnew_columns[aux][old] for old in _aux_df[_aux_df[grp_col_name] == grp]['name']] out_df[col_name] = function(df[relevant_columns]) # complete if verbose: print(f'Created new column "{col_name}"...') if new_frame: return out_df, dcol_conversion else: return pd.concat([df, out_df], axis=1), dcol_conversion def label_rows_by_index(full_indexes, positive_indexes, positive_label, negative_label=None): """ create group tags """ return [positive_label if i in positive_indexes else negative_label for i in full_indexes] def generate_aux_secsch(df, aux_df, aux): dnew_columns = defaultdict(dict) aux_df[''] = '' grp_col_name = '' # distance from each secsch df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), aux_df, aux) # distance from nearest secsch df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False) return df_x_aux, dnew_columns def generate_aux_hawker(df, aux_df, aux): dnew_columns = defaultdict(dict) aux_df[''] = '' grp_col_name = '' # distance from each hawker df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), aux_df, aux) # distance from nearest hawker df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False) # reviews crawled_path = f'data/auxiliary-data/google_search_{aux}.csv' aux_df2 = pd.read_csv(crawled_path) aux_df2 = aux_df2.rename( columns={'Unnamed: 0': 'aux_ix', 'name': 'crawled_name'}) # construct local df version _aux_df = pd.concat([aux_df, aux_df2], axis=1) # distance from high ratings hawker grp_col_name = 'highrating' _aux_df[grp_col_name] = label_rows_by_index( full_indexes=_aux_df.index, positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & ( _aux_df['user_ratings_total'] > 5) & (_aux_df['rating'] > 4)].index, positive_label='' ) df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False) # distance from established hawker grp_col_name = 'established' _aux_df[grp_col_name] = label_rows_by_index( full_indexes=_aux_df.index, positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & ( _aux_df['user_ratings_total'] > 15)].index, positive_label='' ) df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False) return df_x_aux, dnew_columns def generate_aux_malls(df, aux_df, aux): dnew_columns = defaultdict(dict) # manual fix loyang point 1.3670, 103.9644 aux_ix = 94 aux_df.loc[aux_ix, 'lat'] = 1.3670 aux_df.loc[aux_ix, 'lng'] = 103.9644 aux_df.loc[aux_ix] aux_df[''] = '' grp_col_name = '' # distance from each mall df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), aux_df, aux) # distance from nearest mall df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False) # reviews crawled_path = f'data/auxiliary-data/google_search_{aux}.csv' aux_df2 = pd.read_csv(crawled_path) aux_df2 = aux_df2.rename( columns={'Unnamed: 0': 'aux_ix', 'name': 'crawled_name'}) # construct local df version _aux_df = 
pd.concat([aux_df, aux_df2], axis=1) # create grouping by ratings # rationale: malls differ in ranges 4.5-4.0 alot (Central to Local malls) grp_col_name = 'ratingsbin' _aux_df2 = _aux_df[(_aux_df['fuzzy_score'] > 70) & ( _aux_df['user_ratings_total'] > 5)].copy() _aux_df2[grp_col_name] = _aux_df2['rating'].apply( lambda x: None if pd.isnull(x) else ( '>=4.5' if x >= 4.5 else ( '4.4' if x >= 4.4 else ( '4.3' if x >= 4.3 else ( '4.2' if x >= 4.2 else ( '4.1' if x >= 4.1 else ( '4.0' if x >= 4.0 else ">4.0")))))) ) df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, _aux_df2, aux, grp_col_name, new_frame=False) # distance from established mall grp_col_name = 'established' _aux_df[grp_col_name] = label_rows_by_index( full_indexes=_aux_df.index, positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & ( _aux_df['user_ratings_total'] > 15)].index, positive_label='' ) df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False) return df_x_aux, dnew_columns def generate_aux_station(df, aux_df, aux): dnew_columns = defaultdict(dict) # manual fix botanic gardens is an mrt aux_ix = 139 aux_df.loc[aux_ix, 'type'] = 'mrt' aux_df.loc[aux_ix, 'opening_year'] = 2011 aux_df.loc[aux_ix] # fix for duplicate rows that exists in mrt data _aux_df = aux_df.copy() _aux_df = _aux_df.groupby(['name', 'type']).agg( {'codes': '/'.join, 'lat': np.mean, 'lng': np.mean, 'opening_year': np.min}).reset_index() # generate groupings _aux_df['numlines'] = _aux_df['codes'].apply(lambda x: x.count('/')+1) _aux_df['interchange'] = label_rows_by_index( full_indexes=_aux_df.index, positive_indexes=_aux_df[_aux_df['numlines'] > 1].index, positive_label='' ) # group by main lines for line in ['EW', 'NS', 'NE', 'CC', 'DT']: _aux_df[line] = label_rows_by_index( full_indexes=_aux_df.index, positive_indexes=[ix for ix, code in enumerate( _aux_df['codes']) if line in code], positive_label='' ) # distance from each mrt stn df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols( df.copy(), _aux_df, aux) # overwrite with NaN if MRT not opened then # aux_row = _aux_df[_aux_df['opening_year']>=2004].iloc[aux_ix] dcol_conversion = dnew_columns[aux] for aux_ix, aux_row in _aux_df[_aux_df['opening_year'] >= 2004].iterrows(): focus_yr, focus_name = aux_row['opening_year'], aux_row['name'] focus_col = dcol_conversion[focus_name] # create new column df_x_aux[focus_col+'_open'] = [ ds if yr > focus_yr-5 else np.nan for yr, ds in zip(df['resale_year'].astype(int), df_x_aux[focus_col].astype(float))] # new column naming dcol_conversion[focus_name] = focus_col+'_open' dnew_columns[aux] = dcol_conversion # distance from group type for grp_col_name in ['type', 'interchange', 'EW', 'NS', 'NE', 'CC', 'DT']: df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols( dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False) return df_x_aux, dnew_columns
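The Haversine helper defined in this module returns great-circle distance in kilometres (R = 6371.0088) and is used for every distance feature above. A hedged sanity-check sketch; the import path is inferred from the repository layout shown below and may differ:

# Hypothetical import path based on the file location src/steps/process.py.
from src.steps.process import Haversine
import numpy as np

# One degree of latitude along a meridian is about R * pi / 180 km.
print(Haversine(0.0, 0.0, 1.0, 0.0))        # roughly 111.195
print(round(6371.0088 * np.pi / 180, 4))    # same value, computed directly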
tanfiona/HDBResalePrice
src/steps/process.py
process.py
py
27,156
python
en
code
2
github-code
50
27211573123
# -*- coding: utf-8 -*-
# Problem description
"""
https://leetcode-cn.com/problems/er-cha-shu-de-shen-du-lcof/
"""
# Tags: tree, depth-first search, breadth-first search, binary tree

# Solution idea:
"""
Level-order traversal is used here.
The queue list stores the nodes of the current level,
the temp list stores the nodes of the next level,
and res accumulates the result.
"""

# Execution result: Accepted
"""
Runtime: 24 ms, faster than 80.39% of Python submissions.
Memory usage: 15.8 MB, less than 32.88% of Python submissions.
Test cases passed: 39 / 39
"""


# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution(object):
    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        queue = [root]
        res = 0
        while queue:  # each pass of the while loop walks down one level
            temp = []  # stores the nodes of the next level
            for value in queue:
                # print(value.val)
                if value.left:
                    temp.append(value.left)
                if value.right:
                    temp.append(value.right)
            queue = temp
            res += 1
        return res
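A small check of the level-order depth count, using a TreeNode class written to match the commented-out definition above and the classic tree [3, 9, 20, null, null, 15, 7], whose depth is 3:

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
print(Solution().maxDepth(root))  # 3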
zranguai/leetcode-solution
剑指Offer/easy/剑指Offer55-1-二叉树的深度.py
剑指Offer55-1-二叉树的深度.py
py
1,254
python
zh
code
1
github-code
50
7297029761
class Solution(object):
    def searchInsert(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        pos = -1
        for i in nums:
            if target == i:
                pos = nums.index(i)
        if pos == -1:
            nums.append(target)
            nums = sorted(nums)
            pos = nums.index(target)
        return int(pos)
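Since nums is already sorted in this problem, a hedged alternative (not the submission above) is bisect_left, which returns the same insertion index in O(log n):

from bisect import bisect_left

assert bisect_left([1, 3, 5, 6], 5) == 2
assert bisect_left([1, 3, 5, 6], 2) == 1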
DanielAlexanderMarcus/Python-Work
search_Insert.py
search_Insert.py
py
407
python
en
code
0
github-code
50
350833675
from django.contrib.auth.views import LoginView, LogoutView
from django.test import SimpleTestCase
from django.urls import reverse, resolve

from class_journal.views import TimetableView, JournalView, AddMarkView, DiaryView


class TestURLs(SimpleTestCase):

    def test_url_resolves(self):
        url_names_views = {
            'login': LoginView,
            'logout': LogoutView,
            'timetable': TimetableView,
            'journal': JournalView,
            'add_mark': AddMarkView,
        }
        for name, view in url_names_views.items():
            with self.subTest():
                self.assertEqual(resolve(reverse(name)).func.view_class, view)
Proximity42/Electronic-Diary
tests/test_urls.py
test_urls.py
py
675
python
en
code
0
github-code
50
71895360476
from configparser import SafeConfigParser global _WorkHeight global _StartxPosition global _ShakeHeight global _ShakeXDist global _ShakeStepDelay global _ShakeStepAngleRange global _servoFillAngle global _Zfeedrate global _Xfeedrate global _HWservoDelay # Delay after servo move (MUST BE SAME IN FIRMWARE (for MARLIN in config.h)!!!) global _ActualSyringeStatus global _SyringeHomeRest # sec to wait before synthesis start global _servosAttachPin global _LoadStepDelay global _UnloadStepDelay global _MaxLoadRange # ml global _DefaultLoadRange _WorkHeight = 60 _StartxPosition = 100 _Zfeedrate = 666 _Xfeedrate = 1600 _MaxLoadRange = 8.0 # ml _SyringeHomeRest = 20 # sec to wait before synthesis start _ShakeHeight = 60 _ShakeXDist = 150 _ShakeStepDelay = 800 _ShakeStepAngleRange = [5, 160] _servoFillAngle = 171 _HWservoDelay = 5 # Delay after servo move (MUST BE SAME IN FIRMWARE (for MARLIN in config.h)!!!) _servosAttachPin = 57 _LoadStepDelay = 230 _UnloadStepDelay = 140 _DefaultLoadRange=8.0 configs = {'WorkHeight':60, 'StartxPosition':100, 'Zfeedrate':666, 'Xfeedrate':1600, 'MaxLoadRange':8.0, 'SyringeHomeRest':20, 'ShakeHeight':60, 'ShakeXDist':150, 'ShakeStepDelay':800, 'ShakeTopAngle':5, 'ShakeBottomAngle':160, 'ServoFillAngle':171, 'HWservoDelay':5, 'ServosAttachPin':57, 'LoadStepDelay':230, 'UnloadStepDelay':140, 'DefaultLoadRange':8.0 } def init(): global _WorkHeight global _StartxPosition global _ShakeHeight global _ShakeXDist global _ShakeStepDelay global _ShakeStepAngleRange global _servoFillAngle global _Zfeedrate global _Xfeedrate global _HWservoDelay # Delay after servo move (MUST BE SAME IN FIRMWARE (for MARLIN in config.h)!!!) global _ActualSyringeStatus global _SyringeHomeRest # sec to wait before synthesis start global _servosAttachPin global _LoadStepDelay global _UnloadStepDelay global _MaxLoadRange # ml global _DefaultLoadRange config = SafeConfigParser() config.read('pypes.ini') #config.add_section('main') configs['WorkHeight'] = config.getint('main', 'workheight') configs['StartxPosition'] = config.getint('main', 'startxposition') configs['Zfeedrate'] = config.getint('main', 'zfeedrate') configs['Xfeedrate'] = config.getint('main', 'xfeedrate') configs['MaxLoadRange'] = config.getfloat('main', 'maxloadrange') configs['SyringeHomeRest'] = config.getint('main', 'syringehomerest') #config.add_section('shake') configs['ShakeHeight'] = config.getint('shake', 'shakeheight') configs['ShakeXDist'] = config.getint('shake', 'shakexdist') configs['ShakeStepDelay'] = config.getint('shake', 'shakestepdelay') configs['ShakeTopAngle'] = config.getint('shake', 'shaketopangle') configs['ShakeBottomAngle'] = config.getint('shake', 'shakebottomangle') #config.add_section('servo') configs['ServoFillAngle'] = config.getint('servo', 'servofillangle') configs['HWservoDelay'] = config.getint('servo', 'hwservodelay') configs['ServosAttachPin'] = config.getint('servo', 'servosattachpin') configs['LoadStepDelay'] = config.getint('servo', 'loadstepdelay') configs['UnloadStepDelay'] = config.getint('servo', 'unloadstepdelay') # config.add_section('gui') configs['DefaultLoadRange'] = config.getfloat('gui', 'defaultloadrange') def saveConfig(): config = SafeConfigParser() #config.read('pypes.ini') config.add_section('main') config.set('main', 'workheight', str(configs['WorkHeight'])) config.set('main', 'startxposition', str(configs['StartxPosition'])) config.set('main', 'zfeedrate', str(configs['Zfeedrate'])) config.set('main', 'xfeedrate', str(configs['Xfeedrate'])) config.set('main', 'maxloadrange', 
str(configs['MaxLoadRange'])) config.set('main', 'syringehomerest', str(configs['SyringeHomeRest'])) config.add_section('shake') config.set('shake', 'shakeheight', str(configs['ShakeHeight'])) config.set('shake', 'shakexdist', str(configs['ShakeXDist'])) config.set('shake', 'shakestepdelay', str(configs['ShakeStepDelay'])) config.set('shake', 'shaketopangle' , str(configs['ShakeTopAngle'])) config.set('shake', 'shakebottomangle', str(configs['ShakeBottomAngle'])) config.add_section('servo') config.set('servo', 'servofillangle', str(configs['ServoFillAngle'])) config.set('servo', 'hwservodelay', str(configs['HWservoDelay'])) config.set('servo', 'servosattachpin', str(configs['ServosAttachPin'])) config.set('servo', 'loadstepdelay', str(configs['LoadStepDelay'])) config.set('servo', 'unloadstepdelay', str(configs['UnloadStepDelay'])) config.add_section('gui') config.set('gui', 'defaultloadrange', str(configs['DefaultLoadRange'])) with open('pypes.ini', 'w') as f: config.write(f)
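init() reads pypes.ini with SafeConfigParser into the configs dict, and saveConfig() writes the dict back out, so a round trip looks roughly like the sketch below. This is a hedged usage sketch: the module name is assumed to be settings (from the file path shown below), and note that SafeConfigParser is a deprecated alias of ConfigParser in Python 3 that was removed in 3.12.

import settings

settings.saveConfig()                  # write the defaults out to pypes.ini
settings.configs['WorkHeight'] = 65    # tweak a value in memory
settings.saveConfig()                  # persist the change
settings.init()                        # reload from pypes.ini
print(settings.configs['WorkHeight'])  # 65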
JiriPrusa/PyPeS
python_GUI/settings.py
settings.py
py
5,036
python
en
code
0
github-code
50
29871407460
from torch import nn
import torch
import numpy as np


class Tacotron2Loss_VAE(nn.Module):
    def __init__(self, hparams):
        super(Tacotron2Loss_VAE, self).__init__()
        self.anneal_function = hparams.anneal_function
        self.lag = hparams.anneal_lag
        self.k = hparams.anneal_k
        self.x0 = hparams.anneal_x0
        self.upper = hparams.anneal_upper

    def kl_anneal_function(self, anneal_function, lag, step, k, x0, upper):
        if anneal_function == 'logistic':
            return float(upper / (upper + np.exp(-k * (step - x0))))
        elif anneal_function == 'linear':
            if step > lag:
                return min(upper, step / x0)
            else:
                return 0
        elif anneal_function == 'constant':
            return 0.001

    def forward(self, model_output, targets, step):
        mel_target, gate_target = targets[0], targets[1]
        mel_target.requires_grad = False
        gate_target.requires_grad = False
        gate_target = gate_target.view(-1, 1)

        mel_out, mel_out_postnet, gate_out, _, mu, logvar, _, _ = model_output
        gate_out = gate_out.view(-1, 1)

        mel_loss = nn.MSELoss()(mel_out, mel_target) + \
            nn.MSELoss()(mel_out_postnet, mel_target)
        gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)

        kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        kl_weight = self.kl_anneal_function(self.anneal_function, self.lag,
                                            step, self.k, self.x0, self.upper)

        recon_loss = mel_loss + gate_loss
        total_loss = recon_loss + kl_weight * kl_loss
        return total_loss, recon_loss, kl_loss, kl_weight
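The logistic schedule above weights the KL term by upper / (upper + exp(-k * (step - x0))): close to zero early in training, upper / (upper + 1) at step == x0, and saturating towards 1 for large steps. A small numeric sketch with made-up hyperparameter values (not the repository's actual settings):

import numpy as np

# Hypothetical values, just to visualise the shape of the schedule.
k, x0, upper = 0.0025, 10000, 1.0
for step in (0, 5000, 10000, 20000):
    w = float(upper / (upper + np.exp(-k * (step - x0))))
    print(step, w)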
jinhan/tacotron2-vae
loss_function.py
loss_function.py
py
1,671
python
en
code
162
github-code
50
21371042513
from copy import deepcopy import numpy as np import pdb from src.formula_parser import FormulaParser from src.extended_definition import ExtendedDefinition from src.logic_parser import LogicParser kg_parser = LogicParser(ExtendedDefinition(debug=True)) fm_parser = FormulaParser(ExtendedDefinition(debug=True)) kb = {} list_of_predicates = [] list_of_explored_rules = [] unproved_premise = [] unproved_query = [] unproved_single_chain = [] unproved_chain = [] def fetch_rules(goal): global kb global list_of_predicates print("fetch_rules for goal:- ", goal) list_of_rules = [] #predicate = goal.partition('(')[0] predicate = key_from_predicates(kg_parser.parse(goal)) predicate = predicate[:-1] print("\t", predicate, kb[predicate]['conc']) list_of_rules = list_of_rules + kb[predicate]['conc'] return list_of_rules def subst(theta, res): #print("\tsubst: ", theta, res) fact = "" nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii == 0): fact = fact + res[ii] + "(" elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): if variable(res[ii]) and (res[ii] in theta): fact = fact + theta[res[ii]] + "," else: fact = fact + res[ii] + "," elif isinstance(res[ii], str) and (ii == nl - 1): if variable(res[ii]) and (res[ii] in theta): fact = fact + theta[res[ii]] else: fact = fact + res[ii] elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): _fact = subst(theta, res[ii]) fact = fact + _fact + "," elif not isinstance(res[ii], str) and (ii == nl - 1): _fact = subst(theta, res[ii]) fact = fact + _fact fact = fact + ")" return fact """ def variable(x): if not isinstance(x, str): return False else: if x[0].islower(): return False else: return True """ def variable(x): if not isinstance(x, str): return False else: if x[0].isupper(): return True else: return False def compound(x): if not isinstance(x, str): return False else: if '(' in x and ')' in x: return True else: return False def list(x): if not isinstance(x, str): return True else: return False def key_from_predicates(res): nl = len(res) keys = "" for ii in range(nl): if isinstance(res[ii], str) and (ii == 0): keys = keys + res[ii] + "-" elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): _key = key_from_predicates(res[ii]) keys = keys + _key elif not isinstance(res[ii], str) and (ii == nl - 1): _key = key_from_predicates(res[ii]) keys = keys + _key return keys def unify_var(var, x, theta): #print("IN unify_var", var, x, theta) if var in theta: #print("var in theta", var, theta) return unify(theta[var], x, theta) elif x in theta: #print("x in theta", x, theta) return unify(var, theta[x], theta) else: theta[var] = x #print("not in theta", theta[var]) return theta def check_theta(theta): for entry in theta: if variable(theta[entry]): if theta[entry] in theta: print("in check_theta. 
theta changed") theta[entry] = theta[theta[entry]] return theta def reverse_parse(res): fact = "" nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii == 0): fact = fact + res[ii] + "(" elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): fact = fact + res[ii] + "," elif isinstance(res[ii], str) and (ii == nl - 1): fact = fact + res[ii] elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): _fact = reverse_parse(res[ii]) fact = fact + _fact + "," elif not isinstance(res[ii], str) and (ii == nl - 1): _fact = reverse_parse(res[ii]) fact = fact + _fact fact = fact + ")" return fact def unify(x, y, theta): #print("\tunify", x, y, theta) if theta == None: #print("\tin theta is None") return None elif x == y: #print("\tin x=y") return check_theta(theta) elif variable(x) is True: #print("\tin variable(x)") return unify_var(x, y, theta) elif variable(y) is True: #print("\tin variable(y)") return unify_var(y, x, theta) elif compound(x) and compound(y): #print("\tin compound") x_parse = kg_parser.parse(x) y_parse = kg_parser.parse(y) x_op = x_parse[0] y_op = y_parse[0] x_args = [] for item in range(len(x_parse) - 1): #temp.split(','): if isinstance(x_parse[item + 1], str): x_args.append(x_parse[item + 1]) else: x_args.append(reverse_parse(x_parse[item + 1])) y_args = [] for item in range(len(y_parse) - 1): #temp.split(','): if isinstance(y_parse[item + 1], str): y_args.append(y_parse[item + 1]) else: y_args.append(reverse_parse(y_parse[item + 1])) return unify(x_args, y_args, unify(x_op, y_op, theta)) elif list(x) and list(y) and x != [] and y != []: #print("\tin list") return unify(x[1:], y[1:], unify(x[0], y[0], theta)) elif x == [] or y == []: return None else: #print("\tin else") return None #_body_unify={} #_body_unify = unify('pointPosition(B,Xb,Yb)', 'pointPosition(n,1055,1060).', {}) #print(_body_unify) def var_exist(res): fact = False nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii > 0) and res[ii][0].isupper(): fact = True return fact elif not isinstance(res[ii], str): _fact = var_exist(res[ii]) fact = fact or _fact return fact def parse_match(res, fact): matched = True nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii == 0) and isinstance( fact[ii], str): if res[ii] == fact[ii]: matched = True else: matched = False return matched elif isinstance(res[ii], str) and ( ii > 0) and res[ii][0].islower() and isinstance(res[ii], str): if res[ii] == fact[ii]: matched = True else: matched = False return matched elif isinstance(res[ii], str) and ( ii > 0) and res[ii][0].isupper() and isinstance(res[ii], str): matched = True elif (not isinstance(res[ii], str)) and (not isinstance(fact[ii], str)): _matched = parse_match(res[ii], fact[ii]) matched = matched and _matched else: matched = False return matched return matched def inst(temp, facts): results = [] temp_parse = kg_parser.parse(temp) for fact in facts: if fact[:len(temp_parse[0])] == temp_parse[0]: fact_parse = kg_parser.parse(fact) if parse_match(temp_parse, fact_parse): results.append(fact) return results def filterInst(temp_list, list_of_premises): filterlist = [] temp = [] n = len(temp_list) nlist = [len(ii) for ii in temp_list] for ii in range(n): temp.append( temp_list[ii] * (int(np.prod(nlist[:ii])) * int(np.prod(nlist[(ii + 1):])))) for ii in range(np.prod(nlist)): each_query = [] query_str = "Predicate(" premise_str = "Predicate(" for jj in range(n): each_query.append(temp[jj][ii]) if temp[jj][ii] == "()" and list_of_premises[jj] == "": continue else: if 
temp[jj][ii][-1] == '.': temp[jj][ii] = temp[jj][ii][:-1] query_str = query_str + temp[jj][ii] + "," premise_str = premise_str + list_of_premises[jj] + "," #if query_str[-2]=='.': # query_str = query_str[:-2]+")" #else: query_str = query_str[:-1] + ")" #if premise_str[-2]=='.': # premise_str = premise_str[:-2]+")" #else: premise_str = premise_str[:-1] + ")" sucess = unify(premise_str, query_str, {}) if sucess != None: filterlist.append(each_query) return filterlist def existed(temp_list, facts): val = True for temp in temp_list: if not (temp in facts): val = False break return val def extract_var(res, variable_names, label): nl = len(res) fact = "" #label=0 for ii in range(nl): if isinstance(res[ii], str) and (ii == 0): fact = fact + res[ii][0].lower() + res[ii][1:] + "(" elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): item = res[ii].upper() if item not in variable_names: variable_names[item] = "X" + repr(label) item = "X" + repr(label) label = label + 1 else: item = variable_names[item] fact = fact + item + "," elif isinstance(res[ii], str) and (ii == nl - 1): item = res[ii].upper() if item not in variable_names: variable_names[item] = "X" + repr(label) item = "X" + repr(label) label = label + 1 else: item = variable_names[item] fact = fact + item elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1): _fact, variable_names, label = extract_var(res[ii], variable_names, label) fact = fact + _fact + "," elif not isinstance(res[ii], str) and (ii == nl - 1): _fact, variable_names, label = extract_var(res[ii], variable_names, label) fact = fact + _fact fact = fact + ")" return fact, variable_names, label def extract_premise_var(res, variable_names, vars): nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): item = res[ii] vars.append(item) if item not in variable_names: variable_names[item] = "unknown" else: item = variable_names[item] elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): variable_names, vars = extract_premise_var(res[ii], variable_names, vars) return variable_names, vars def extract_var_simple(res, vars): nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): item = res[ii] vars.append(item) elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): vars = extract_var_simple(res[ii], vars) return vars #print(extract_var_simple(kg_parser.parse('a(X, b(Y,Z))'), [])) def extract_element(res, vars): nl = len(res) for ii in range(nl): if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): item = res[ii] vars.append(item) elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1): vars = extract_element(res[ii], vars) return vars def matchUnify(premise, theta): #predicate, vars, theta2, hasUnified = matchUnify(premise[ii], theta) vars_ = [] parse_premise = kg_parser.parse(premise) theta2, vars_ = extract_premise_var(parse_premise, theta, vars_) hasUnifed = [] for key in vars_: if theta2[key] == 'unknown': hasUnifed.append(False) else: hasUnifed.append(True) return vars_, theta2, hasUnifed def parse_predicates(rule_str): left_index = [] right_index = [] left_count = 0 right_count = 0 strset = [] start_index = 0 for ii in range(len(rule_str) - 1): if rule_str[ii] == "(": left_index.append(ii) left_count = left_count + 1 elif rule_str[ii] == ")": right_index.append(ii) right_count = right_count + 1 if right_count == left_count and left_count != 0 and ( rule_str[ii + 1:].strip()[0] == ',' or rule_str[ii + 1:].strip()[0] == '.'): _tmp = 
rule_str[start_index:ii + 1].strip() #pdb.set_trace() if _tmp[0] == "," or _tmp[0] == ".": _tmp = _tmp[1:] strset.append(_tmp.strip()) start_index = ii + 1 #left_count=0 #right_count=0 #pdb.set_trace() return strset def new_parse_predicates(premises_str): splited_premises = [] start_premise = i = 0 parent_stack = [] while i < len(premises_str): if premises_str[i] not in ["(", ")"]: i += 1 continue if premises_str[i] == "(": parent_stack.append("(") elif premises_str[i] == ")": parent_stack.pop() if not parent_stack: splited_premises.append(premises_str[start_premise:i + 1].strip()) start_premise = i + 2 i += 1 return splited_premises def testparsing(): from src.input_reader import read_rules rules = read_rules("src/knowledge_base") for rule in rules: try: # print(new_parse_predicates(rule.split(":-")[1])) # print(parse_predicates(rule.split(":-")[1]), "\n\n\n") assert new_parse_predicates( rule.split(":-")[1]) == parse_predicates(rule.split(":-")[1]) except Exception as e: print(e) print(rule, "Failed") print(new_parse_predicates(rule.split(":-")[1])) print(parse_predicates(rule.split(":-")[1]), "\n\n\n") print( parse_predicates( "equals(measureOf(angle(A,B,D)),measureOf(angle(B,D,E))),quadrilateral(A,B,E,D)." )) # parse_predicates( # "parallel(line(A,B),line(D,E)) :- equals(measureOf(angle(A,B,D)),measureOf(angle(B,D,E))) , quadrilateral(A,B,E,D)." # ) #test_str ="equals(measureOf(angle(A,B,F)),measureOf(angle(E,D,F))) :- parallel(line(A,B),line(D,E)), line(B, D), on_same_line(B, D, F), pointPosition(A,Xa,Ya), pointPosition(B,Xb,Yb), pointPosition(D,Xd,Yd), pointPosition(E,Xe,Ye), pointPosition(F,Xf,Yf), (Xa-Xb)*(Xe-Xd)>0, (Ya-Yb)*(Ye-Yd)>0, (Xf-Xb)*(Xf-Xd)>0, (Yf-Yb)*(Yf-Yd)>0, not A==C." #strset = parse_predicates(test_str) #print(strset)
JiajunSong-Bigai/geometry_fc_bc
src/my_unification.py
my_unification.py
py
15,219
python
en
code
0
github-code
50
74858873756
"""Implementation for agents interface""" from typing import Any, Tuple, List, Union import numpy as np from numpy.linalg import norm from highrl.obstacle.single_obstacle import SingleObstacle from highrl.utils.action import ActionXY from highrl.utils.abstract import Position class Agent: """ Class that represents the agent interacting in the environment. Attributes: px (int): agent x position. gx (int): goal x position. gy (int): goal y position. gt (int): goal orientation angle. vx (int): agent x velocity. vy (int): agent y velocity. py (int): agent y position. w (int): agent angular velocity. theta (int): agent angle theta. radius (int): agent radius. goal_radius (int): goal radius. """ def __init__( self, pos=Position[float](0.0, 0.0), goal_pos=Position[float](0.0, 0.0), gt: float = 0.0, vx: float = 0.0, vy: float = 0.0, w: float = 0.0, theta: float = 0.0, radius: int = 20, goal_radius: int = 10, ) -> None: """Constructs an agent object. Args: pos (Position, optional): Position of agent. Defaults to (x=0, y=0). gpos (Position, optional): Position of the goal. Defaults to (x=0, y=0). gt (int, optional): Goal orientation angle. Defaults to 0. vx (int, optional): Agent x velocity. Defaults to 0. vy (int, optional): Agent y velocity. Defaults to 0. w (int, optional): Agent angular velocity. Defaults to 0. theta (int, optional): Agent angle theta. Defaults to 0. radius (int, optional): Agent radius. Defaults to 20. goal_radius (int, optional): Goal radius. Defaults to 10. """ self.radius = radius self.goal_radius = goal_radius self.pos = pos self.gpos = goal_pos self.gt = gt self.vx = vx self.vy = vy self.w = w self.theta = theta def set( self, pos=Position[float](0.0, 0.0), goal_pos=Position[float](0.0, 0.0), gt: float = 0, vx: float = 0, vy: float = 0, w: float = 0, theta: float = 0, radius: int = 20, goal_radius: int = 10, ) -> None: """Sets all agent attributes. Args: px (int, optional): agent x position. Defaults to 0. py (int, optional): agent y position. Defaults to 0. gx (int, optional): goal x position. Defaults to 0. gy (int, optional): goal y position. Defaults to 0. gt (int, optional): goal orientation angle. Defaults to 0. vx (int, optional): agent x velocity. Defaults to 0. vy (int, optional): agent y velocity. Defaults to 0. w (int, optional): agent angular velocity. Defaults to 0. theta (int, optional): agent angle theta. Defaults to 0. radius (int, optional): agent radius. Defaults to 20. goal_radius (int, optional): goal radius. Defaults to 10. """ self.pos = pos self.gpos = goal_pos self.gt = gt self.vx = vx self.vy = vy self.w = w self.theta = theta self.radius = radius self.goal_radius = goal_radius @property def x_pos(self) -> float: """Getter for x_coord""" return self.pos.x @property def y_pos(self) -> float: """Getter for y_coord""" return self.pos.y def get_position(self) -> Position: """Getter for agent postion""" return self.pos def set_position(self, position: Position) -> None: """Setter for agent position""" self.pos = position def set_goal_position(self, position: Position) -> None: """Setter for goal position""" self.gpos = position def get_goal_position(self) -> Position: """Getter for goal postion""" return self.gpos def get_velocity(self) -> Tuple[float, float, float]: """Getter agent velocity vector. Returns: Tuple[float, float, float]: (agent x velocity, agent y velocity, agent angular velocity) """ return self.vx, self.vy, self.w def set_velocity(self, velocity: Tuple[float, ...]): """Setter for agent linear and angular velocity. 
Args: velocity (Tuple[int, int]): (agent x velocity, agent y velocity, agent angular velocity) """ self.vx = velocity[0] self.vy = velocity[1] self.w = velocity[2] def set_radius(self, agent_radius: int, goal_radius: int) -> None: """Setter for the goal and agent radius""" self.radius = agent_radius self.goal_radius = goal_radius def check_validity(self, action: Any): """Checks if action is in right format. The right format is the object forman: ActionXY """ assert isinstance(action, ActionXY) def compute_position(self, action: Any, delta_t: float) -> Tuple: """Computes agent next position and orientation based on the agent action velocity. Before computing the agent next position, Checks if the action is in the ActionXY format. Args: action (Any): action decided by the agent model but in ActionXY object format delta_t (float): time difference between actions Returns: Tuple[int, int, int]: (agent x position, agent y posistion, agent orientation theta) """ self.check_validity(action) velocity = (action.vx**2 + action.vy**2) ** 0.5 angle = self.fix(np.arctan2(action.vy, action.vx), 2 * np.pi) x_pos = self.pos.x + velocity * np.cos(self.theta + angle) * delta_t y_pos = self.pos.y + velocity * np.sin(self.theta + angle) * delta_t theta = self.fix(self.theta + action.w * delta_t, 2 * np.pi) return x_pos, y_pos, theta def fix(self, base: Union[int, float], mod: Union[int, float]) -> Union[int, float]: """Change `base` rane to be [0:mod[. For example, if `base` is an angle and `mod` is 2*pi, then we want to ensure that the angle is always in the range [0:mod[. Since Python does not support modulus of floating/negative numbers, the modulus is implemented manually. """ while base < 0: base += mod while base >= mod: base -= mod return base def reached_destination(self) -> bool: """Determines if agent reached the goal postion. Returns: bool: whether the agent has reached the goal or not """ min_allowed_dist = self.radius + self.goal_radius return self.dist_to_goal() < min_allowed_dist def dist_to_goal(self) -> float: """Compute the distance from the agent to the goal""" return norm(self.pos.get_coords() - self.gpos.get_coords()).item() def step(self, action: ActionXY, delta_t: float) -> None: """Performs an action and update the agent state. Args: action (List): action decided by the agent model but in ActionXY object format delta_t (float): time difference between actions """ self.check_validity(action) pos = self.compute_position(action, delta_t) x_pos, y_pos, self.theta = pos self.pos.set_pos(x_pos, y_pos) self.vx = action.vx self.vy = action.vy self.w = action.w def is_overlapped(self, obstacle: SingleObstacle, check_target: str = "agent"): """Checks if there is an overlap between the agent/goal and a given obstacle. Args: obstacle (SingleObstacle): input obstalce to check overlap with check_target (str): target to be checked, either agent or goal Returns: bool: flag to check for overlap. Returns True if there is overlap. 
""" assert check_target in [ "goal", "agent", ], "Check target should be goal or agent" if check_target == "goal": min_x = self.pos.x - self.goal_radius min_y = self.pos.x - self.goal_radius max_x = self.gpos.x + self.goal_radius max_y = self.gpos.y + self.goal_radius else: min_x = self.pos.x - self.radius min_y = self.pos.y - self.radius max_x = self.pos.x + self.radius max_y = self.pos.y + self.radius dummy = [ [ min_x, min_y, max_x, max_y, ], [ int(obstacle.px), int(obstacle.py), int(obstacle.px + obstacle.width), int(obstacle.py + obstacle.height), ], ] is_overlap: bool = not self._overlap_handler(dummy) return is_overlap def is_robot_overlap_goal(self) -> bool: """Check if robot and goal overlap. Returns: bool: flag to check for overlap. Returns True if there is an overlap. """ dummy = [ [ self.gpos.x - self.goal_radius, self.gpos.x - self.goal_radius, self.gpos.x + self.goal_radius, self.gpos.x + self.goal_radius, ], [ self.pos.x - self.radius, self.pos.y - self.radius, self.pos.x + self.radius, self.pos.y + self.radius, ], ] is_overlap = not self._overlap_handler(dummy) return is_overlap def is_robot_close_to_goal(self, min_dist: int) -> bool: """Checks if the robot is closer than the min distannce to the goal. Returns ``True`` if the robot is too close and ``False`` if the robot-goal dist did not exceed the min allowed distance. Args: min_dist (int): min allowable distance for robot-goal dist Returns: bool: flag to determine if the robot is closer than the max allowed distance or not. """ distance = self.dist_to_goal() distance -= self.radius + self.goal_radius return distance <= min_dist def _overlap_handler(self, dummy: List[List]) -> bool: """Check overlap condition between two objects. Args: dummy (List[List]): objects coordinates Returns: bool: overlap flag for input objects """ for _ in range(2): if dummy[0][0] > dummy[1][2] or dummy[0][1] > dummy[1][3]: return True dummy[0], dummy[1] = dummy[1], dummy[0] return False
ahmedheakl/multi-level-rl-for-robotics
src/highrl/agents/agent.py
agent.py
py
10,865
python
en
code
6
github-code
50
73810841435
import logging import json import os import sys sys.path.append('../') import data_processing.confidence as cf import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import skimage.filters as filters from skimage.io import imread, imsave import itertools from scipy.optimize import least_squares from skimage.util import img_as_ubyte import matplotlib.ticker as ticker base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\thermal camera\calibration' image_file = 'ir_thermography_spot_size_20.8252px_per_mm.png' # base_path = r'G:\Shared drives\ARPA-E Project\Lab\Data\Laser Tests\CAMERA\BEAM_PROFILING_20221212' center = np.array([13.97, 12.03]) pixel_size = 20.8252 # pixels/mm diameter = 2.68 if __name__ == '__main__': img = imread(os.path.join(base_path, image_file)) with open('../plot_style.json', 'r') as file: json_file = json.load(file) plot_style = json_file['defaultPlotStyle'] mpl.rcParams.update(plot_style) img_shape = img.shape norm1 = plt.Normalize(vmin=0, vmax=255) fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(4.0, 3.0), constrained_layout=True) ax.imshow(img, interpolation='none', norm=norm1, extent=(0, img_shape[1]/pixel_size, 0, img_shape[0]/pixel_size)) circle = plt.Circle(center, 0.5*diameter, ec='r', fill=False, clip_on=False, ls=(0, (1, 1)), lw=1.0) ax.set_xlabel('x (mm)') ax.set_ylabel('y (mm)') ax.set_title('IR thermography spot size', fontweight='regular') wz_text = f'Ø: {diameter:.2f} mm' # ax.text( # 0.95, 0.05, wz_text, color='w', # transform=ax.transAxes, va='bottom', ha='right', # fontsize=11 # ) q = 45.0 x1 = center[0] + 0.5 * diameter * np.cos(q) y1 = center[1] - 0.5 * diameter * np.sin(q) x2 = center[0] + 2.0 * diameter * np.cos(q) + 1.0 y2 = center[1] - 2.0 * diameter * np.sin(q) connectionstyle = "angle,angleA=0,angleB=-90,rad=0" ax.annotate( wz_text, xy=(x1, y1), xycoords='data', xytext=(x2, y2), textcoords='data', color='w', ha='left', va='center', arrowprops=dict( arrowstyle="->", color="w", shrinkA=-30, shrinkB=2, patchA=None, patchB=None, connectionstyle=connectionstyle, ) ) ax.add_patch(circle) ax.set_xlim(0, img_shape[1]/pixel_size) ax.set_ylim(top=0, bottom=img_shape[0]/pixel_size) # ax.xaxis.set_major_locator(ticker.MultipleLocator(100)) # ax.yaxis.set_major_locator(ticker.MultipleLocator(100)) fig.savefig(os.path.join(base_path, 'result.png'), dpi=600) plt.show()
erickmartinez/relozwall
data_processing/camera/ir_thermography_spot.py
ir_thermography_spot.py
py
2,651
python
en
code
0
github-code
50
74164588634
import numpy as np


def linear_fit(x, y, fit_min, fit_max):
    """Fit x, y with a linear function y = mx + b

    Args:
        x: x variable
        y: y variable
        fit_min: minimal value of x to fit
        fit_max: maximal value of x to fit

    Returns:
        m: slope of the fitted line
        b: intercept of the fitted function
    """
    idx = (x > fit_min) & (x < fit_max)
    x = np.vstack([x[idx], np.ones_like(x[idx])]).T
    y = y[idx]
    m, b = np.linalg.lstsq(x, y, rcond=None)[0]
    return m, b
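A short usage sketch with synthetic data (not from the repo): fit a noisy line and recover its slope and intercept, restricting the fit to the window 0 < x < 10.

import numpy as np

x = np.linspace(-5, 15, 200)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.shape)

m, b = linear_fit(x, y, fit_min=0.0, fit_max=10.0)
print(m, b)  # expect roughly 2.0 and 1.0; only samples with 0 < x < 10 are used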
yqshao/tame
tame/fit.py
fit.py
py
533
python
en
code
0
github-code
50
41729304062
# -*- coding:utf-8 -*-
from Tkinter import *


class Red():
    def __init__(self, root, btn, label):
        self.label = label
        self.btn = btn
        self.root = root
        self.n = 0

    def gs(self):
        self.btn['command'] = self.cc

    def cc(self):
        if self.n == 0:
            self.label['bg'] = 'red'
        if self.n == 1:
            self.label['bg'] = 'yellow'
        if self.n == 2:
            self.label['bg'] = 'blue'
        self.n += 1
        if self.n >= 3:
            self.n = 0
        self.root.after(1000, self.cc)


if __name__ == '__main__':
    root = Tk()
    btn = Button(root, text='开始游戏')
    label = Label(root, text='颜色')
    r = Red(root, btn, label)
    r.gs()
    label.pack()
    btn.pack()
    root.mainloop()
sdabing/my-python-diary
tkinter/kapai jishu.py
kapai jishu.py
py
785
python
en
code
0
github-code
50
7979426307
from pydantic import BaseModel, ValidationError, validator
from typing import Any
from pydantic.networks import EmailStr
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String

SQLALCHEMY_DATABASE_URL = "postgresql://postgres: @localhost/topskill"

# check_same_thread is a SQLite-only connect argument; passing it to the
# PostgreSQL driver fails, so the engine is created without it.
engine = create_engine(SQLALCHEMY_DATABASE_URL)

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()


class BusinessSchema(BaseModel):
    # Renamed from `Business`: the original module defined two classes with the
    # same name, so the ORM model below silently shadowed this pydantic schema.
    businessName: str
    fullName: str
    businessEmail: EmailStr
    role: str
    service: str
    teamSize: int


class Business(Base):
    __tablename__ = "business"

    id = Column(String, primary_key=True, index=True)
    businessName = Column(String, index=True)
    fullName = Column(String, index=True)
    businessEmail = Column(String, index=True)
    role = Column(String, index=True)
    service = Column(String, index=True)
    teamSize = Column(Integer, index=True)


class Skill_up_africa(BaseModel):
    full_name: str
    phone_number: int
    email: EmailStr
    country: str
    career_path: str
    experience: str
    referal: str
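A hedged usage sketch (not in the repo) of how the session factory and ORM model above would typically be exercised; the row values are made up, and the PostgreSQL server from SQLALCHEMY_DATABASE_URL is assumed to be reachable.

# Create the "business" table if it does not exist yet.
Base.metadata.create_all(bind=engine)

db = SessionLocal()
row = Business(
    id="biz-001",
    businessName="Acme",
    fullName="Ada Lovelace",
    businessEmail="ada@example.com",
    role="founder",
    service="consulting",
    teamSize=5,
)
db.add(row)
db.commit()
db.close()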
ade2112/skillz
modules/form/model.py
model.py
py
1,264
python
en
code
0
github-code
50
36793390895
#! /usr/bin/env python
import copy


def readonly(value):
    return property(lambda self: value)


class A:
    def __init__(self, value):
        _value = copy.deepcopy(value)
        A.readonly = readonly(_value)


class B:
    def __init__(self, value):
        self._readonly = copy.deepcopy(value)

    @property
    def readonly(self):
        return self._readonly


a = A(42)
print(a.readonly)
A.readonly2 = readonly(1)
print(a.readonly2)

b = B(42)
print(b.readonly)
b._readonly = 43
print(b.readonly)

a.readonly = 43
print(a.readonly)
baites/examples
classes/python/ReadOnlyByClosure.py
ReadOnlyByClosure.py
py
543
python
en
code
4
github-code
50
585202611
from socket import *

port = 3333
BUF_SIZE = 1024

sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', port))

while True:
    data, addr = sock.recvfrom(BUF_SIZE)
    print('<- ', data.decode())
    msg = input('-> ')
    sock.sendto(msg.encode(), addr)
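For context, a minimal client sketch (an assumption, not part of the repo) that matches the lock-step server above: the client sends first, then waits for the reply. The 127.0.0.1 address is assumed; the server binds to all interfaces on port 3333.

from socket import *

SERVER = ('127.0.0.1', 3333)   # assumed server address
BUF_SIZE = 1024

sock = socket(AF_INET, SOCK_DGRAM)
while True:
    msg = input('-> ')
    sock.sendto(msg.encode(), SERVER)
    data, _ = sock.recvfrom(BUF_SIZE)
    print('<- ', data.decode())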
H43RO/Network-Programming
Example2/udp_chat_server.py
udp_chat_server.py
py
254
python
en
code
0
github-code
50
42082097565
#!/usr/bin/env python # coding: utf-8 # ![image.png](attachment:image.png) # In[1]: # Lets choose K-Means Clustering Unsupervised ML Algorithm # In[3]: # Step 1: Let us import the required Libraries # In[4]: import numpy as np import matplotlib.pyplot as plt import pandas as pd from sklearn import datasets import seaborn as sns # In[5]: # Step-2: Let us load the Iris Data set from sklearn # In[6]: iris=datasets.load_iris() # In[7]: iris # In[8]: iris_df = pd.DataFrame(iris.data, columns = iris.feature_names) # In[9]: iris_df # In[10]: # Step3: Let us do a Exploratory Data Analysis for the Iris Data Set #Displays the first 5 rows of the dataset iris_df.head() # In[11]: #Displays the dimensions of the dataset iris_df.shape # In[12]: #Displays the numerical insights on the dataset iris_df.describe() # In[13]: #Checking for null values in the dataset iris_df.isnull().sum() # In[14]: # Step 4: Visualize and Analyze the Dataset # In[15]: #Finding the corelation between the data corr_df= iris_df.corr() corr_df # In[16]: #Plotting a heat map for the dataset plt.figure(figsize= [10,6]) sns.heatmap(corr_df, cmap='Spectral', annot=True) # In[17]: #PLotting a graph by considering different attributes in pairs sns.pairplot(iris_df) # In[18]: # Step 5: Design the K-Means Clustering Algorithm for optimal clusters # In[19]: #Extracting the values of different attributes in the dataset such as sepal lenth, sepal width, petal length and petal width x = iris_df.iloc[:, [0, 1, 2, 3]].values # In[20]: x # In[ ]: # Step-6: We actually do not know the number of clusters. #There are several methods to select k that depends on the domain knowledge and rule of thumbs. # Elbow method is one of the robust one used to find out the optimal number of clusters. #In this method, the sum of distances of observations from their cluster centroids, called Within-Cluster-Sum-of-Squares (WCSS). #This is computed as the shown where Yi is centroid for observation Xi # ![image.png](attachment:image.png) # In[97]: #KMeans class from the sklearn library. # Using the elbow method to find out the optimal number of #clusters. # Now we will difine the K means clustering algorithm. As we do not know what is the optimum number of clusters. # The way to do this is using FOR loop by keeping the range from 1 to 10 since we dont want large number of clusters depiction # We want to find what is the optimum number of clusters # would be storing the value of each iterations in the list called WCSS(Within Cluster Sum of Squares) and we are using that to # plot our graph # Within Cluster Sum of Squares (WCSS) #i above is between 1-10 numbers. init parameter is the random #initialization method #we select kmeans++ method. max_iter parameter the maximum number of iterations there can be to #find the final clusters when the K-meands algorithm is running. we #enter the default value of 300 #the next parameter is n_init which is the number of times the #K_means algorithm will be run with #different initial centroid. 
# K-Means Clustering algorithm to find optimal clusters for classification #kmeans algorithm fits to the X dataset #appending the WCSS to the list (kmeans.inertia_ returns the WCSS value for an initialized cluster) # In[98]: # Using the elbow method to find the optimal number of clusters from sklearn.cluster import KMeans wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42) kmeans.fit(X) #appending the WCSS to the list (kmeans.inertia_ returns the WCSS value for an initialized cluster) wcss.append(kmeans.inertia_) # In[99]: # Step 7: Plot the K-Means Clustering graph and identify the optimal number of clusters from the graph. # In[100]: # kmeans inertia_ attribute is: Sum of squared distances of samples #to their closest cluster center. # Plotting the results onto a line graph, allowing us to observe 'The elbow' import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') # In[102]: plt.plot(range(1,11), wcss) plt.title('The elbow method to find optimal number of clusters') plt.xlabel('Number of clusters') plt.ylabel('WCSS') # Within cluster sum of squares plt.show() # In[103]: #From 'The Elbow Method' of graphical representation, the optimum clusters is where the elbow occurs. This is when the within cluster sum of squares (WCSS) doesn't decrease significantly with every iteration. #Therefore, from the above graph we choose the optimal number of clusters to be 3. # In[110]: # Step 8 # Applying kmeans to the dataset / Creating the kmeans classifier with optimal clusters to be 3 and than fitting the model to # do the predictions kmeans = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_kmeans = kmeans.fit_predict(x) # In[111]: y_kmeans # In[112]: # Step 9: Visualize the Clusters using a scatter plot # In[115]: # Visualising the clusters on the first two columns plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 80, c = 'red', label = 'Iris-setosa') plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 80, c = 'yellow', label = 'Iris-versicolour') plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s = 80, c = 'pink', label = 'Iris-virginica') # Plotting the centroids of the clusters plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 200, c = 'black', label = 'Centroids') plt.legend() # In[116]: # Step 6: Make Predictions # In[117]: #Defining the input and target variables X = iris.data[:,:2] #Contains sepal length and Sepal Width y = iris.target #Contains target species value # In[118]: X # In[119]: y # In[120]: # Visualizing X and Y variables in graphical form plt.scatter(X[:,0],X[:,1], c=y, cmap='gist_rainbow') plt.xlabel('Sepal Length', fontsize=14) plt.ylabel('Sepal Width', fontsize=14) plt.show() # In[121]: # Step 7: Evaluate the Model: Comparing Actual vs Predicted data values # In[122]: #This will tell us which cluster the data observation belongs to new_labels = kmeans.labels_ new_labels # In[123]: #Plotting the identified clusters and comparing with the results fig, axes = plt.subplots(1,2, figsize=(16,8)) axes[0].scatter(X[:,0],X[:,1], c=y, cmap='gist_rainbow', edgecolor = 'k',s=80) axes[1].scatter(X[:,0],X[:,1], c=new_labels, cmap='viridis', edgecolor = 'k',s=80) axes[0].set_xlabel('Speal Length',fontsize=18) axes[0].set_ylabel('Speal Width',fontsize=18) axes[1].set_xlabel('Speal Length',fontsize=18) axes[1].set_ylabel('Speal Width',fontsize=18) axes[0].tick_params(direction='in',length=10,width=5,colors='k',labelsize=20) 
axes[1].tick_params(direction='in',length=10,width=5,colors='k',labelsize=20) axes[0].set_title('Actual',fontsize=18) axes[1].set_title('Predicted',fontsize=18) plt.show() # In[44]: # 3-D Plotting # K means Clustering # In[124]: from sklearn.cluster import KMeans import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np get_ipython().run_line_magic('matplotlib', 'inline') from sklearn import datasets #Iris Dataset iris = datasets.load_iris() X = iris.data #KMeans km = KMeans(n_clusters=3) km.fit(X) km.predict(X) labels = km.labels_ #Plotting fig = plt.figure(1, figsize=(7,7)) ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134) ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float), edgecolor="k", s=50) ax.set_xlabel("Petal width") ax.set_ylabel("Sepal length") ax.set_zlabel("Petal length") plt.title("K Means", fontsize=14) # In[125]: # 3-D Plotting # Gaussian Mixture Model # In[126]: from sklearn.mixture import GaussianMixture import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np get_ipython().run_line_magic('matplotlib', 'inline') from sklearn import datasets #Iris Dataset iris = datasets.load_iris() X = iris.data #Gaussian Mixture Model gmm = GaussianMixture(n_components=3) gmm.fit(X) proba_lists = gmm.predict_proba(X) #Plotting colored_arrays = np.matrix(proba_lists) colored_tuples = [tuple(i.tolist()[0]) for i in colored_arrays] fig = plt.figure(1, figsize=(7,7)) ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134) ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=colored_tuples, edgecolor="k", s=50) ax.set_xlabel("Petal width") ax.set_ylabel("Sepal length") ax.set_zlabel("Petal length") plt.title("Gaussian Mixture Model", fontsize=14) # In[ ]:
SAMKOXXPACO/Prediction-using-Unsupervised-Algorithm
Unsupervised ML Task Clustering .py
Unsupervised ML Task Clustering .py
py
8,575
python
en
code
0
github-code
50
71756104794
import math from typing import * import numpy as np import torch import torchaudio.transforms as at from torch import nn from torch.distributions import Beta from torch.nn import functional as F from torch.nn.parameter import Parameter class GeMP(nn.Module): """from: https://github.com/knjcode/kaggle-seti-2021/blob/master/working/model.py referred at https://www.kaggle.com/c/seti-breakthrough-listen/discussion/266403 """ def __init__(self, p=3.0, eps=1e-6, learn_p=True): super().__init__() self._p = p self._learn_p = learn_p self.p = nn.Parameter(torch.ones(1) * p) self.eps = eps self.set_learn_p(flag=learn_p) def set_learn_p(self, flag): self._learn_p = flag self.p.requires_grad = flag def forward(self, x): # x = F.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2), x.size(-1))).pow(1.0 / self.p) x = F.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2), 1)).pow(1.0 / self.p) return x class SwinGeMP(nn.Module): """from: https://github.com/knjcode/kaggle-seti-2021/blob/master/working/model.py referred at https://www.kaggle.com/c/seti-breakthrough-listen/discussion/266403 """ def __init__(self, p=3.0, eps=1e-6, learn_p=True): super().__init__() self._p = p self._learn_p = learn_p self.p = nn.Parameter(torch.ones(1) * p) self.eps = eps self.set_learn_p(flag=learn_p) def set_learn_p(self, flag): self._learn_p = flag self.p.requires_grad = flag def forward(self, x): x = F.adaptive_avg_pool1d(x.clamp(min=self.eps).pow(self.p), 1).pow(1.0 / self.p) return x class GeM1d(nn.Module): """ Code modified from the 2d code in https://amaarora.github.io/2020/08/30/gempool.html """ def __init__(self, kernel_size=8, stride=None, p=3, eps=1e-6): super(GeM1d, self).__init__() self.p = nn.Parameter(torch.ones(1) * p) self.kernel_size = kernel_size self.eps = eps self.stride = stride def forward(self, x): return self.gem(x, p=self.p, eps=self.eps) def gem(self, x, p=3, eps=1e-6): return F.avg_pool1d(x.clamp(min=eps).pow(p), self.kernel_size, self.stride).pow(1.0 / p) def __repr__(self): return ( self.__class__.__name__ + "(" + "p=" + "{:.4f}".format(self.p.data.tolist()[0]) + ", " + "eps=" + str(self.eps) + ")" ) class Mixup(nn.Module): """from: https://www.kaggle.com/ilu000/2nd-place-birdclef2021-inference""" def __init__(self, mix_beta, label_mix_type="mix"): super(Mixup, self).__init__() self.beta_distribution = Beta(mix_beta, mix_beta) self.label_mix_type = label_mix_type def forward(self, X, Y, weight=None): bs = X.shape[0] n_dims = len(X.shape) perm = torch.randperm(bs) coeffs = self.beta_distribution.rsample(torch.Size((bs,))).type_as(X) if n_dims == 2: X = coeffs.view(-1, 1) * X + (1 - coeffs.view(-1, 1)) * X[perm] elif n_dims == 3: X = coeffs.view(-1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1)) * X[perm] else: X = coeffs.view(-1, 1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1, 1)) * X[perm] y_coeffs = coeffs if self.label_mix_type == "mix": Y = y_coeffs * Y + (1 - y_coeffs) * Y[perm] elif self.label_mix_type == "max": Y = Y + Y[perm] - Y * Y[perm] if weight is None: return X, Y else: weight = coeffs.view(-1) * weight + (1 - coeffs.view(-1)) * weight[perm] return X, Y, weight class PositionalFeaturesBlockV1(nn.Module): def __init__(self, pfb_params: Dict[str, Any]): super().__init__() self.mlp = nn.Sequential( nn.Linear(pfb_params["input_dim"], pfb_params["dim1"]), nn.LayerNorm(pfb_params["dim1"]), nn.ReLU(), nn.Dropout(pfb_params["drop_out_p"]), nn.Linear(pfb_params["dim1"], 128), nn.LayerNorm(128), nn.ReLU(), nn.Dropout(pfb_params["drop_out_p"]), ) def forward(self, x): x = self.mlp(x) return x 
class Conv3dBlock(nn.Module): def __init__(self, conv_params: Dict[str, Any]): super().__init__() out_ch = conv_params["out_channel_num"] self.conv1 = nn.Conv3d( in_channels=out_ch, out_channels=out_ch, kernel_size=3, padding=1, ) self.bn1 = nn.BatchNorm3d(out_ch) self.act1 = nn.ReLU() self.conv2 = nn.Conv3d( in_channels=out_ch, out_channels=out_ch, kernel_size=3, padding=1, ) self.bn2 = nn.BatchNorm3d(out_ch) self.act2 = nn.ReLU() self.conv3 = nn.Conv3d( in_channels=out_ch, out_channels=out_ch, kernel_size=3, padding=1, ) self.bn3 = nn.BatchNorm3d(out_ch) self.act3 = nn.ReLU() def forward(self, x): residual = x x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.conv3(x) x = self.bn3(x) x += residual x = self.act3(x) return x class Conv3dBlockV2(nn.Module): def __init__(self, mid_channle_num: int): super().__init__() self.mid_ch = mid_channle_num self.conv = torch.nn.Sequential( torch.nn.Conv3d(self.mid_ch, self.mid_ch, (3, 9, 9), padding=(1, 4, 4), padding_mode="replicate"), torch.nn.BatchNorm3d(self.mid_ch), torch.nn.LeakyReLU(), torch.nn.Conv3d(self.mid_ch, self.mid_ch, (3, 9, 9), padding=(1, 4, 4), padding_mode="replicate"), torch.nn.BatchNorm3d(self.mid_ch), torch.nn.LeakyReLU(), ) self.act = torch.nn.LeakyReLU() def forward(self, x): shortcut = x x = self.conv(x) x += shortcut x = self.act(x) return x
yoichi-yamakawa/kaggle-contrail-3rd-place-solution
scripts/training/model_util.py
model_util.py
py
6,295
python
en
code
1
github-code
50
23741005175
import transformers import torch from transformers import OpenAIGPTTokenizer, GPT2Tokenizer from transformers import PreTrainedTokenizer, PreTrainedModel from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForCausalLM import faulthandler faulthandler.enable() from transformers import (AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME) #model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') #tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small') print("><>before model") #model = AutoModelWithLMHead.from_pretrained('/Users/beeoladeji/Desktop/content/gpt-2/output') #model = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-small') model = AutoModelWithLMHead.from_pretrained('/Users/beeoladeji/Desktop/content/gpt-2/output') print("**after model") #/Users/beeoladeji/miniconda3/pkgs/pydotplus-2.0.2-py_3/site-packages/pydotplus-2.0.2.dist-info/METADATA def generate_answer(question): new_user_input_ids = tokenizer.encode(question + tokenizer.eos_token, return_tensors='pt') # append the new user input tokens to the chat history #bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) # generated a response while limiting the total chat history to 1000 tokens, chat_history_ids = model.generate(new_user_input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id, temperature=0.6, repetition_penalty=1.3) preds = [ tokenizer.decode(chat_history_ids[:, new_user_input_ids.shape[-1]:][0], skip_special_tokens=True)] # pretty print last ouput tokens from bot #print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))) return "".join(preds) if __name__ == "__main__": print("Let's chat! (type 'quit' to exit)") while True: # sentence = "do you use credit cards?" sentence = input("You: ") if sentence == "quit": break resp = generate_answer(sentence) print(resp) #/Users/beeoladeji/Desktop/output # import os # import pandas as pd # import openai # import openai # openai.Completion.create( # model=FINE_TUNED_MODEL, # prompt=YOUR_PROMPT) # def ask(question,chat_log = None): # if chat_log is None: # chat_log = start_chart_log # prompt = f'{chat_log}Human:{question}\nAI:' # print("prompt",prompt) # response = completion.create( # prompt = prompt,engine = "davinci",stop = ["\nHuman"],temperature = 0.9, # top_p =1,best_of=1, # max_tokens=150 # ) # answer = response.choices[0].text.strip() # return answer #export OPENAI_API_KEY="<OPENAI_API_KEY>" #openai tools fine_tunes.prepare_data -f <LOCAL_FILE>
BolanleOladeji/IS-Project
chat2.py
chat2.py
py
2,866
python
en
code
0
github-code
50
70782325915
'''
Practice Problem #2
Samuel Hulme

Problem:
Given a 2D array of numbers, determine the cheapest path from the
top left (0,0) node to the bottom right

example = [
    [6,   8,  1]
    [100, 2, 30]
    [1,   4,  2]
]

The cheapest path in this case would be the path of 6, 8, 2, 4, 2 = 22

Conditions:
1.) The movements can only be right or down
2.) The numbers can be negative or positive
3.) Return only the cheapest path value, not the actual path

My Solution:
We should use a DFS to determine the shortest path.
We can call the top [0, 0] and the bottom [w, h]
As we go through the list, we will create a dictionary/set of tuples that will
represent the cost from that location on the matrix. It will be calculated to
the value at that location plus the cheapest path that comes after it.
'''


def cheapest_path(matrix, hash=None, x=0, y=0):
    if hash is None:
        # Use a fresh memo per top-level call; a mutable default argument
        # would be shared across calls and could return stale results.
        hash = {}
    if x >= len(matrix[0]) or y >= len(matrix):
        return float('inf')  # If x or y are out of bounds
    elif x == len(matrix[0]) - 1 and y == len(matrix) - 1:
        return matrix[x][y]  # If we found the base
    elif (x, y) in hash:
        return hash[(x, y)]  # If this spot has already been visited

    overall_cheapest = float('inf')
    for dir in [(1, 0), (0, 1)]:
        current_cheapest = cheapest_path(matrix, hash, x + dir[0], y + dir[1])
        if current_cheapest < overall_cheapest:
            overall_cheapest = current_cheapest

    hash[(x, y)] = matrix[x][y] + overall_cheapest
    return hash[(x, y)]


def print_matrix(matrix):
    for row in matrix:
        row_string = "["
        for item in row:
            row_string += " " + "{:<3}".format(str(item))
        print(row_string + "]")


matrix = [
    [1, 5, 70, 10],
    [90, 4, -3, 70],
    [-14, 200, 6, 11],
    [-1, 15, 2, 3]
]

print("\nThis program will take in a 2D array of integers and determine the\ncheapest path from the top left to the bottom right index.\n")
print("The test matrix is shown below:\n")
print_matrix(matrix)
print("\nThe cheapest path is: " + str(cheapest_path(matrix)))
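A quick check of the function above against the 3x3 example from the docstring:

example = [
    [6, 8, 1],
    [100, 2, 30],
    [1, 4, 2],
]
print(cheapest_path(example))  # 22, via 6 -> 8 -> 2 -> 4 -> 2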
shulme33/Programming
Python/pp_2.py
pp_2.py
py
2,148
python
en
code
0
github-code
50
26636795524
from PyQt5.QtWidgets import * from PyQt5.QtGui import * from PyQt5.QtCore import * class Btn(QPushButton): pass class MyWindow(QWidget): def __init__(self): super(MyWindow, self).__init__() self.setWindowTitle('QDialog的学习') self.resize(500, 500) self.init_gui() def init_gui(self): box1 = QWidget(self) # box1.setStyleSheet("QPushButton {background-color: orange;}") label1 = QLabel("标签1", box1) label1.resize(200, 60) label1.setObjectName("pink") label1.setProperty("notice_level", "warning") label1.move(50, 50) btn1 = Btn("按钮1", box1) btn1.move(150, 50) btn1.setObjectName("btn1") cb = QCheckBox("python", box1) cb.move(150, 100) cb.resize(100, 50) cb.setTristate(True) box2 = QWidget(self) box2.setObjectName("box2") # box2.setStyleSheet("background-color: cyan;") btn2 = QPushButton("按钮2", box2) btn2.move(150, 50) btn2.setObjectName("btn2") label3 = QLabel("标签3", box2) label3.move(200, 200) box3 = QWidget(box2) box3.resize(150, 150) # box3.setStyleSheet("background-color: lightgray;") label2 = QLabel("标签2", box3) label2.resize(100, 60) label2.move(50, 50) v_layout = QVBoxLayout() self.setLayout(v_layout) v_layout.addWidget(box1) v_layout.addWidget(box2) btn2.setEnabled(False) self.other_btn = QPushButton("按钮3") self.other_btn.show() if __name__ == '__main__': import sys from imqssf_tool import QssFileDealer app = QApplication(sys.argv) QssFileDealer.set_app_qss(app,'qssfile1.qss') mywindow = MyWindow() mywindow.show() sys.exit(app.exec_())
PeterZhangxing/codewars
gui_test/test_pyqt/qss_test/learning_qss.py
learning_qss.py
py
1,856
python
en
code
0
github-code
50
35196294855
# -*- coding: utf-8 -*- """ Created on Thu Mar 31 00:21:52 2016 @edited by K Provost """ #Aligning sequences #Muscle software installed required: http://www.drive5.com/muscle/downloads.htm def align(filename,outpath,cwd,muscle_exe): import os from Bio.Align.Applications import MuscleCommandline from Bio import AlignIO muscle_exe = "/Users/kprovost/Documents/muscle3.8.31_i86darwin64" outname = "ALIGNED_"+filename print("ALIGNING: "+filename) with open(filename,"r") as infile: read = infile.read() count = read.count(">") if count <= 1: with open(outpath+outname,"w") as outfile: outfile.write(read) print("ONLY ONE SEQ, DONE") else: try: muscle_cline = MuscleCommandline(muscle_exe, input=filename, out=outname) stdout, stderr = muscle_cline() AlignIO.read(outname, "fasta") print("ALIGNED") except: print("??? ERROR") print(filename) def main(): from Bio.Align.Applications import MuscleCommandline from Bio import AlignIO import os import sys import glob import shutil cwd = os.getcwd() try: muscle_exe = sys.argv[1] print("\tMuscle path exists") except: print("Muscle defaulting to:") print("/Users/kprovost/Documents/muscle3.8.31_i86darwin64") #print("Muscle not given, quitting") muscle_exe = "/Users/kprovost/Documents/muscle3.8.31_i86darwin64" #quit() try: path = sys.argv[2] print("\tPath is: ",path) except: print("Path not given") path = os.getcwd()+"/7_readytoalign/" print("Path is current directory + 7_readytoalign") treepath = cwd+"/9_badalignments/" if not os.path.exists(treepath): print("creating folder: ",treepath) os.makedirs(treepath) outpath = cwd+"/8_goodalignments/" if not os.path.exists(outpath): print("creating folder: ",outpath) os.makedirs(outpath) os.chdir(path) for filename in glob.glob("*.fa*"): align(filename,outpath,cwd,muscle_exe) print("\n\nDONE") # from Bio import SeqIO # filename = "NC_005816.gb" # locus_to_gene = dict() # for record in SeqIO.parse(filename, "genbank"): # for f in record.features: # if f.type == "CDS": # if "gene" in f.qualifiers: # if "locus_tag" in f.qualifiers: # genes = f.qualifiers["gene"] # locus_tags = f.qualifiers["locus_tag"] # assert len(genes) == 1, genes # assert len(locus_tags) == 1, locus_tags # locus_to_gene[locus_tags[0]] = genes[0] # print("Mapped %i locus tags to genes" % len(locus_to_gene)) if __name__ == "__main__": main() #import os #os.chdir("/Users/kprovost/Documents/Publications/Parrots/ParrotPipelineRedo/OUTGROUPS/") #muscle_exe = "/Users/kprovost/Documents/Publications/Parrots/ParrotPipelineRedo/SCRIPTS/muscle3.8.31_i86darwin64" #align("Mascarinus_OLDANDNEW.fasta","ALIGNED_Mascarinus_OLDANDNEW.fasta",os.getcwd(),muscle_exe)
kaiyaprovost/misc_scripts
muscleAlign.py
muscleAlign.py
py
3,397
python
en
code
0
github-code
50
27622938759
from dataclasses import FrozenInstanceError from scipy import signal # type: ignore import matplotlib.pyplot as plt # type: ignore import pytest import numpy as np import pysmo.tools.noise as noise def test_NoiseModel() -> None: # create two random arrays for testing psd = np.random.rand(20) psd2 = np.random.rand(20) T = np.random.rand(20) # length of the arrays needs to be equal with pytest.raises(ValueError): noise.NoiseModel(psd[1:], T) # create a NoiseModel instance and verify it is immutable model = noise.NoiseModel(psd, T) assert isinstance(model, noise.NoiseModel) with pytest.raises(FrozenInstanceError): model.psd = psd2 # type: ignore with pytest.raises(ValueError): model.psd[3] *= 2 with pytest.raises(ValueError): model.T[3] *= 2 @pytest.mark.depends(on=["test_NoiseModel"]) @pytest.mark.mpl_image_compare( remove_text=True, baseline_dir="../baseline/", style="default" ) def test_peterson(): # type: ignore nlnm = noise.peterson(0) nhnm = noise.peterson(1) nm_03 = noise.peterson(0.3) with pytest.raises(ValueError): noise.peterson(1.34) assert nlnm == noise.NLNM assert nhnm == noise.NHNM assert all( nm_03.T == np.array( [ 0.10, 0.17, 0.22, 0.32, 0.40, 0.80, 1.24, 2.40, 3.80, 4.30, 4.60, 5.00, 6.00, 6.30, 7.90, 10.00, 12.00, 15.40, 15.60, 20.00, 21.90, 31.60, 45.00, 70.00, 101.00, 154.00, 328.00, 354.80, 600.00, 10**4, 10**5, ] ) ) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot(nlnm.T, nlnm.psd) ax.plot(nhnm.T, nhnm.psd) ax.plot(nm_03.T, nm_03.psd) ax.set_xscale("log") return fig @pytest.mark.depends(on=["test_NoiseModel"]) @pytest.mark.mpl_image_compare( remove_text=True, baseline_dir="../baseline/", style="default" ) def test_generate_noise(): # type: ignore npts = 10000 nperseg = npts / 4 nfft = npts / 2 srate = 0.1 sfrec = 1 / srate nhnm = noise.NHNM # velocity noise model from peterson paper nhnm_velo = noise.NoiseModel( psd=nhnm.psd + 20 * np.log10(nhnm.T / 2 / np.pi), T=nhnm.T ) nhnm_data_acc = noise.generate_noise( model=nhnm, npts=npts, delta=srate, seed=0 ).data nhnm_data_vel = noise.generate_noise( model=nhnm, npts=npts, delta=srate, return_velocity=True, seed=0 ).data freqs_acc, power_acc = signal.welch( nhnm_data_acc, sfrec, nperseg=nperseg, nfft=nfft, scaling="density" ) freqs_vel, power_vel = signal.welch( nhnm_data_vel, sfrec, nperseg=nperseg, nfft=nfft, scaling="density" ) freqs_acc, power_acc = freqs_acc[1:], power_acc[1:] freqs_vel, power_vel = freqs_vel[1:], power_vel[1:] fig = plt.figure() ax1 = fig.add_subplot(2, 1, 1) ax1.plot(1 / freqs_acc, 10 * np.log10(power_acc)) ax1.plot(nhnm.T, nhnm.psd, "k") ax1.set_xscale("log") ax2 = fig.add_subplot(2, 1, 2) ax2.plot(1 / freqs_vel, 10 * np.log10(power_vel)) ax2.plot(nhnm_velo.T, nhnm_velo.psd, "k") ax2.set_xscale("log") return fig
pysmo/pysmo
tests/tools/test_noise.py
test_noise.py
py
3,632
python
en
code
18
github-code
50
73179515356
# coding: utf-8 import argparse import time import math import torch import torch.nn as nn from torch.autograd import Variable import numpy as np import data import model import cPickle import glob, os import math import read_graph as rg parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model') parser.add_argument('--seed', type=int, default=1111, help='random seed') parser.add_argument('--lattice', type=str, default='', help='lattice path to rescore') parser.add_argument('--nbest', type=str, default='', help='nbest path to rescore') parser.add_argument('--job', type=int, default=30, help='number of job') parser.add_argument('--scale', type=int, default=10, help='scale of lm score') parser.add_argument('--beam', type=int, default=100, help='beam') parser.add_argument('--output', type=str, help='output file') args = parser.parse_args() fw = open(args.output,'w') cover_recall = [] for job in range(1,args.job+1): for file in glob.glob(args.lattice+'/'+str(job)+"/*.lat"): filename = file.replace(args.lattice+'/'+str(job)+'/','').replace('.lat','') # print file nodes, arcs = rg.read_graph(file) lattice_sents = rg.nbest_BFS(1000,nodes,args.scale,args.beam) nbest_sents=[] fr = open(args.nbest+'/'+str(job)+'/'+filename+'.onlytext') for i in fr: line = i.split() nbest_sents.append(line) # lattice_sent = sents[index][0] # lattice_sent = sents[index][0].split() # lattice_sent.pop(0) # lattice_sent.pop() cover_recall.append(0) for i in nbest_sents: for j in lattice_sents: if i==j[0]: cover_recall[-1]+=1 break cover_recall[-1] = cover_recall[-1]/float(len(nbest_sents)) fw.write(str(cover_recall[-1])+'\n') if len(cover_recall)%100==0: print(str(len(cover_recall))+'\tfinished') fw.write('average: '+str(sum(cover_recall)/len(cover_recall))+'\n') # print cover_recall # print len(nbest_sents) # import pdb;pdb.set_trace()
cliffchen123/language_model
compute_lattice_cover_rate.py
compute_lattice_cover_rate.py
py
2,375
python
en
code
1
github-code
50
42131499524
# Source: https://github.com/krrish94/nerf-pytorch

# Torch imports
import torch
from torch import nn
from torch.nn import functional as F


class VeryTinyNerfModel(torch.nn.Module):
    def __init__(self, hidden_size=128, num_encoders=6):
        super(VeryTinyNerfModel, self).__init__()
        self.layer1 = torch.nn.Linear(3 + 3 * 2 * num_encoders, hidden_size)
        self.layer2 = torch.nn.Linear(hidden_size, hidden_size)
        self.layer3 = torch.nn.Linear(hidden_size, 4)

    def forward(self, x):
        x = x.float()
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        return x


class ReplicateNeRFModel(torch.nn.Module):
    def __init__(self, hidden_size=256, num_encoding_fn_xyz=6, num_encoding_fn_dir=4):
        super(ReplicateNeRFModel, self).__init__()
        self.dim_xyz = 3 + 2 * 3 * num_encoding_fn_xyz
        self.dim_dir = (3 if num_encoding_fn_dir > 0 else 0) + 2 * 3 * max(num_encoding_fn_dir, 0)

        self.layer1 = torch.nn.Linear(self.dim_xyz, hidden_size)
        self.layer2 = torch.nn.Linear(hidden_size, hidden_size)
        self.layer3 = torch.nn.Linear(hidden_size, hidden_size)
        self.alpha = torch.nn.Linear(hidden_size, 1)

        self.layer4 = torch.nn.Linear(hidden_size + self.dim_dir, hidden_size // 2)
        self.layer5 = torch.nn.Linear(hidden_size // 2, hidden_size // 2)
        self.rgb = torch.nn.Linear(hidden_size // 2, 3)

    def forward(self, x):
        x = x.float()
        xyz, direction = x[..., :self.dim_xyz], x[..., self.dim_xyz:]

        # Pass only location first
        x = F.relu(self.layer1(xyz))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        alpha = self.alpha(x)

        # Add viewing direction
        x = F.relu(self.layer4(torch.cat((x, direction), dim=-1)))
        x = F.relu(self.layer5(x))
        rgb = self.rgb(x)

        return torch.cat((rgb, alpha), dim=-1)
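A hedged sketch (not from this file, and the exact frequency scheme of the source repo may differ) of the sinusoidal positional encoding these models expect on their inputs: with num_encoders=6 a 3-vector becomes the 3 + 3*2*6 = 39 features that VeryTinyNerfModel's first layer consumes.

import torch

def positional_encode(x, num_encoders=6):
    # x: (..., 3) points; returns (..., 3 + 3 * 2 * num_encoders) features.
    feats = [x]
    for i in range(num_encoders):
        for fn in (torch.sin, torch.cos):
            feats.append(fn((2.0 ** i) * x))
    return torch.cat(feats, dim=-1)

model = VeryTinyNerfModel()          # expects 39-dim inputs by default
pts = torch.rand(1024, 3)
out = model(positional_encode(pts))  # (1024, 4): raw rgb + density values
print(out.shape)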
anshuman64/nerf
src/main_model.py
main_model.py
py
2,028
python
en
code
0
github-code
50
14130660030
n = int(input())
coords = []
for _ in range(n):
    x1, x2 = map(int, input().split())
    coords.append((x1, x2))

total_cnt = 0
for i in range(n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            arr = [0] * 101
            is_bool = True
            for l in range(n):
                if l != i and l != j and l != k:
                    x1, x2 = coords[l][0], coords[l][1]
                    for m in range(x1, x2 + 1):
                        arr[m] += 1
            for ele in arr:
                if ele > 1:
                    is_bool = False
            if is_bool:
                total_cnt += 1

print(total_cnt)
hoonkiyeo/codetree-TILs
231205/선분 3개 지우기/remove-three-segments.py
remove-three-segments.py
py
666
python
en
code
0
github-code
50
34094024275
import math, time, random import numpy as np from vcopt import vcopt node = [ [23,39],[ 8,44],[34,36],[12,30],[42,37],[ 6,35],[ 1,15],[12,25], [ 4,39],[13,42],[23,13],[ 7,39],[11, 5],[ 6,44],[28,45],[20, 7], [ 3,16],[ 4,19],[ 3,39],[ 0, 2],[19,21],[ 3,43],[ 8,34],[20,39], [ 2,50],[20,26],[16,36],[24,30],[ 9,40],[ 5,22],[30,35],[ 2, 0], [21,36],[22,28],[ 3,33],[11,36],[14,34] ] def distance(cal_node): size = len(cal_node) return_table = [[0] * size for x in range(size)] for i in range(size): for j in range(size): if i != j: dx = cal_node[i][0] - cal_node[j][0] dy = cal_node[i][1] - cal_node[j][1] return_table[i][j] = math.sqrt(dx * dx + dy * dy) return return_table def path_length(path): global distance_table n = 0 for i in range(1, len(path)): n += distance_table[path[i - 1]][path[i]] n += distance_table[path[0]][path[-1]] return n def opt_2_solve(size, path): global distance_table total = 0 while True: count = 0 for i in range(size - 2): i1 = i + 1 for j in range(i + 2, size): if j == size - 1: j1 = 0 else: j1 = j + 1 if i != 0 or j1 != 0: l1 = distance_table[path[i]][path[i1]] l2 = distance_table[path[j]][path[j1]] l3 = distance_table[path[i]][path[j]] l4 = distance_table[path[i1]][path[j1]] if l1 + l2 > l3 + l4: new_path = path[i1:j+1] path[i1:j+1] = new_path[::-1] count += 1 total += count if count == 0: break return path, total def NN(size): global distance_table cal_path = np.delete(np.arange(size),0,0) return_path = np.array([0]) for i in range(int(size)-1): for j in range(len(cal_path)): if j==0 or (min_len > distance_table[return_path[i]][cal_path[j]]): del_idx = j min_idx = cal_path[j] min_len = distance_table[return_path[i]][cal_path[j]] return_path = np.insert(return_path,len(return_path),min_idx) cal_path = np.delete(cal_path,del_idx,0) return return_path def tsp_1_score(path): path_full = np.hstack((0, path)) return path_length(path_full) #各ノード間距離のテーブルを作成 distance_table = distance(node) node_len = len(node) print("\n--Nearest Neighber solve--") start_time = time.time() #NearestNeighber法で最適化 min_path = NN(node_len) min_length = path_length(min_path) min_path = np.hstack((min_path,0)) end_time = time.time() print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time)) print("Route : " + str(min_path)) print("\n--2 Opt solve--") start_time = time.time() #道順を作成 path = np.arange(node_len) min_length = 0 #2-opt法で最適化し、一番スコアの良かったものを記録 for i in range(10): #道順をランダムに並び替える np.random.shuffle(path) path, x = opt_2_solve(node_len, path) length = path_length(path) if i == 0 or min_length > length: min_length = length min_path = np.array(path) min_path = np.hstack((np.roll(min_path,-np.where(min_path == 0)[0][0]),0)) end_time = time.time() print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time)) print("Route : " + str(min_path)) print("\n--2 Opt solve (vcopt)--") start_time = time.time() #道順を作成 path = np.arange(1,node_len) min_length = 0 #2-opt法(vcopt)で最適化し、一番スコアの良かったものを記録 for i in range(10): #道順をランダムに並び替える np.random.shuffle(path) path, length = vcopt().opt2(path, tsp_1_score, 0.0, seed=None) if i == 0 or length < min_length: min_length = length min_path = np.array(path) min_path = np.hstack((0,min_path,0)) end_time = time.time() print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time)) print("Route : " + str(min_path))
UnknownSP/ProgrammingExercise
巡回セールスマン問題/比較/1TSP_Compare.py
1TSP_Compare.py
py
4,181
python
en
code
0
github-code
50
13928767500
APP_INTERFACE = 'tcp://127.0.0.1:5555'

DEFAULT_LIBRARY = 'mongodb://127.0.0.1/apps'
DEFAULT_COLLECTION = 'apps'

ERROR_SUCCESS = 0
ERROR_EXCEPTION = 1

METHOD_REGISTER = 'Register'
METHOD_UNREGISTER = 'UnRegister'
METHOD_UPDATE = 'Update'
METHOD_QUERY = 'Query'
Kimice/rpc-demo
origin/dataservice/common/constants.py
constants.py
py
262
python
en
code
0
github-code
50
29860050280
#domain_stats.py by Mark Baggett #Twitter @MarkBaggett from __future__ import print_function import BaseHTTPServer import threading import SocketServer import urlparse import re import argparse import sys import time import os import datetime try: import whois except Exception as e: print(str(e)) print("You need to install the Python whois module. Install PIP (https://bootstrap.pypa.io/get-pip.py). Then 'pip install python-whois' ") sys.exit(0) class domain_api(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() print(self.path) (ignore, ignore, urlpath, urlparams, ignore) = urlparse.urlsplit(self.path) cmdstr = tgtstr = None print(urlparams) if re.search("[\/](?:created|alexa|domain)[\/].*?", urlpath): cmdstr = re.search(r"[\/](created|alexa|domain)[\/].*$", urlpath) tgtstr = re.search(r"[\/](created|alexa|domain)[\/](.*)$", urlpath) if not cmdstr or not tgtstr: self.wfile.write('<html><body>API Documentation<br> http://%s:%s/cmd/tgt <br> cmd = domain, alexa or created <br> tgt = domain name </body></html>' % (self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1])) return params = {} params["cmd"] = cmdstr.group(1) params["tgt"] = tgtstr.group(2) else: cmdstr=re.search("cmd=(?:domain|alexa|created)",urlparams) tgtstr = re.search("tgt=",urlparams) if not cmdstr or not tgtstr: self.wfile.write('<html><body>API Documentation<br> http://%s:%s/?cmd=measure&tgt=&ltstring&gt <br> http://%s:%s/?cmd=normal&tgt=&ltstring&gt <br> http://%s:%s/?cmd=normal&tgt=&ltstring&gt&weight=&ltweight&gt </body></html>' % (self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1])) return params={} try: for prm in urlparams.split("&"): key,value = prm.split("=") params[key]=value except: self.wfile.write('<html><body>Unable to parse the url. </body></html>') return if params["cmd"] == "alexa": if self.server.verbose: self.server.safe_print ("Alexa Query:", params["tgt"]) if not self.server.alexa: if self.server.verbose: self.server.safe_print ("No Alexa data loaded. Restart program.") self.wfile.write("Alexa not loaded on server. 
Restart server with -a or --alexa and file path.") else: if self.server.verbose: self.server.safe_print ("Alexa queried for:%s" % (params['tgt'])) self.wfile.write(str(self.server.alexa.get(params["tgt"],"0"))) elif params["cmd"] == "domain" or params["cmd"] == "created": if params['tgt'] in self.server.cache: print("Found in cache!!") domain_info = self.server.cache.get(params['tgt']) else: try: print ("Querying the web", params['tgt']) domain_info = whois.whois(params['tgt']) if not domain_info.get('creation_date'): self.wfile.write(str("No whois record for %s" % (params['tgt']))) return except Exception as e: if self.server.verbose: self.server.safe_print ("Error querying whois server: %s" % (str(e))) return self.server.safe_print("Caching whois record %s" % (str(domain_info))) domain_info["time"] = time.time() if self.server.alexa: domain_info['alexa'] = self.server.alexa.get(params["tgt"],"0") try: self.server.cache_lock.acquire() self.server.cache[params['tgt']] = domain_info finally: self.server.cache_lock.release() if params["cmd"] == "created": self.wfile.write(domain_info.get('creation_date','not found').__str__()) elif params["cmd"] =="domain": self.wfile.write(str(domain_info)) return def log_message(self, format, *args): return class ThreadedDomainStats(SocketServer.ThreadingMixIn, SocketServer.TCPServer, BaseHTTPServer.HTTPServer): def __init__(self, *args,**kwargs): self.cache = {} self.cache_lock = threading.Lock() self.cache_time = 1 self.screen_lock = threading.Lock() self.alexa = "" self.verbose = False self.exitthread = threading.Event() self.exitthread.clear() BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs) def safe_print(self,*args,**kwargs): try: self.screen_lock.acquire() print(*args,**kwargs) finally: self.screen_lock.release() def clear_old_cache(self): if self.verbose: self.safe_print ( "Clearing old cache") try: self.cache_lock.acquire() for item in self.cache: if (self.cache[item].get('time', time.time()) - time.time()) > self.cache_time*60*60: del self.cache[item] finally: self.cache_lock.release() #Reschedule yourself to run again in 1 hour if not self.exitthread.isSet(): self.timer = threading.Timer(60*60, self.clear_old_cache, args = ()) self.timer.start() def main(): parser=argparse.ArgumentParser() parser.add_argument('-ip','--address',required=False,help='IP Address for the server to listen on. Default is 127.0.0.1',default='127.0.0.1') parser.add_argument('-c','--cache_time',type=float,required=False,help='Number of hours to hold a whois record in the cache. Default is 1 hour. Set to 0 to save forever.',default=1) parser.add_argument('port',type=int,help='You must provide a TCP Port to bind to') parser.add_argument('-v','--verbose',action='count',required=False,help='Print verbose output to the server screen. -vv is more verbose.') parser.add_argument('-a','--alexa',required=False,help='Provide a local file path to an Alexa top-1m.csv') #args = parser.parse_args("-s 1 -vv 8081 english_lowercase.freq".split()) args = parser.parse_args() #Setup the server. server = ThreadedDomainStats((args.address, args.port), domain_api) if args.alexa: if not os.path.exists(args.alexa): print("Alexa file not found %s" % (args.alexa)) else: try: server.alexa = dict([(a,b) for b,a in re.findall(r"^(\d+),(.*)", open(args.alexa).read(), re.MULTILINE)]) except Exception as e: print("Unable to parse alexa file:%s" % (str(e))) server.verbose = args.verbose server.cache_time = args.cache_time #Schedule the first save interval unless save_interval was set to 0. 
if args.cache_time: server.timer = threading.Timer(60 *args.cache_time, server.clear_old_cache, args = ()) server.timer.start() #start the server print('Server is Ready. http://%s:%s/?cmd=measure&tgt=astring' % (args.address, args.port)) print('[?] - Remember: If you are going to call the api with wget, curl or something else from the bash prompt you need to escape the & with \& \n\n') while True: try: server.handle_request() except KeyboardInterrupt: break server.timer.cancel() server.safe_print("Web API Disabled...") server.safe_print("Control-C hit: Exiting server. Please wait..") if __name__=="__main__": main()
HASecuritySolutions/Logstash
configfiles-setup_required/freq/domain_stats.py
domain_stats.py
py
8,009
python
en
code
248
github-code
50
3641922355
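domain_stats.py exposes a small HTTP API (created, alexa and domain commands, accepted either as path segments or as cmd/tgt query parameters). The server script itself targets Python 2 (BaseHTTPServer, SocketServer); a hedged client sketch, assuming the server was started locally with "python domain_stats.py 8000", could query it like this:

import requests

# Assumes domain_stats.py is listening on 127.0.0.1:8000.
base = "http://127.0.0.1:8000"

# Path-style query: creation date of a domain via whois.
print(requests.get(f"{base}/created/example.com").text)

# Query-string style: same lookup using cmd/tgt parameters.
print(requests.get(f"{base}/", params={"cmd": "domain", "tgt": "example.com"}).text)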
##
##pedir = True
##while pedir:
##    numero = int(input("Dame un numero del 1 al 100: "))
##    if numero < 100 and numero > 0:
##        pedir = False
from random import randint

print("Piensa un número del 1 al 100,¡voy a intentar advinarlo!")
print("Pulsa intro cuando estés listo...")
input()

aleatorio = randint(1, 100)
acierto = True
while acierto:
    respuesta = input("¿Es el {0} el número secreto? (s/n) ".format(aleatorio))
    if respuesta == "n":
        mayor_menor = input("¿Es el número secreto mayor o menor que {0}?. ".format(aleatorio))
        if mayor_menor == "mayor":
            aleatorio = randint(aleatorio + 1, 100)
        elif mayor_menor == "menor":
            # The game range is 1 to 100, so the lower bound is 1 rather than 0.
            aleatorio = randint(1, aleatorio - 1)
    elif respuesta == "s":
        print("Estoy de suerte, ¡He acertado!")
        acierto = False
    else:
        print("Lo siento no te he entendido")
emiliobort/python
Practica2_Past/Programas/Ejercicio10.py
Ejercicio10.py
py
896
python
es
code
0
github-code
50
22007133395
import sqlite3

# Create a connection instance to the database
connection = sqlite3.connect('records.db')

# Initialize the cursor
cursor = connection.cursor()

# Create the table only if it does not exist yet
create_table = "CREATE TABLE IF NOT EXISTS records (id INTEGER PRIMARY KEY, pontos int)"
cursor.execute(create_table)

connection.commit()
connection.close()
Murimaral/projeto_batalha_naval
criar_tabela.py
criar_tabela.py
py
361
python
en
code
0
github-code
50
27539293247
import json

import PySimpleGUI as sg

from src.handlers import login


def config(dificultad, ayuda, tarjeta, tiempo, color, alerta):
    """Saves the user's configuration to a JSON file."""
    datos_config = [dificultad, ayuda, tarjeta, tiempo, color, alerta]
    tiempo = str(tiempo)
    if tiempo.isdigit():
        configuraciones = leer_config()  # Load every saved configuration
        jugador_logueado = login.leer_sesion()
        configuraciones[jugador_logueado] = datos_config  # Update this user's configuration
        datos_json = json.dumps(configuraciones)
        # Persist the configuration as JSON
        with open("configuracion.json", "w", encoding="utf8") as archivoJSON:
            archivoJSON.write(datos_json)
        sg.SystemTray.notify('Éxito!', 'Cambios guardados')


def leer_config():
    """Returns every saved configuration."""
    configuraciones = {}
    with open("configuracion.json", "r", encoding="utf8") as archivoJSON:
        configuraciones = json.load(archivoJSON)
    #print(configuraciones)
    return configuraciones


def crear_configuracion_default(usuario):
    """Creates the default configuration when a player registers."""
    try:
        configuraciones = leer_config()
    except Exception:  # The file does not exist yet
        configuraciones = {}
    configuraciones[usuario] = ["Facil", "Con", "Texto", "60", "Topanga", "Ganaste, Perdiste"]
    datos_json = json.dumps(configuraciones)
    # Persist the configuration as JSON
    with open("configuracion.json", "w", encoding="utf8") as archivoJSON:
        archivoJSON.write(datos_json)
LauraCuenca/MempybyGrupo29
src/handlers/configuracion_h.py
configuracion_h.py
py
1,640
python
es
code
0
github-code
50
23363173038
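The handler stores one configuration list per registered player in configuracion.json. A small sketch of how the three functions fit together, assuming a hypothetical player name "ana", might look like this:

# Hypothetical usage of the configuration handlers (the user name "ana" is made up).
crear_configuracion_default("ana")   # write the default settings for a new player
todas = leer_config()                # read every stored configuration
print(todas["ana"])                  # ['Facil', 'Con', 'Texto', '60', 'Topanga', 'Ganaste, Perdiste']

# Saving new settings requires a logged-in session, since config() looks the player up
# through login.leer_sesion() before overwriting their entry.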
import random import math depth = 5 functions = ["xd", "*", "+", "-"] noFunctions = 3 terminals = ["mizerie", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9"] noTerminals = 9 class Individ(object): """docstring for Individ""" def __init__(self): self.values = [0 for i in range(2 ** depth)] for i in range(len(self.values)): if i < 3 or (i < 2 ** (depth - 1) - 1 and random.random() < 0.5): self.values[i] = -random.randint(1, len(functions) - 1) else: self.values[i] = random.randint(1, len(terminals) - 1) def fitness(self, problem): s = 0 for i in range(len(problem.inData)): rez = self.dfs(0, problem.inData[i]) s += (rez - problem.outData[i]) * (rez - problem.outData[i]) #print("Expected:", problem.outData[i], "Got: ", rez) return 100000.0 / s def eval(self, problem): s = 0 for i in range(len(problem.inData)): rez = self.dfs(0, problem.inData[i]) s += (rez - problem.outData[i]) * (rez - problem.outData[i]) print("Expected:", problem.outData[i], "Got: ", rez) return 100000.0 / s def dfs(self, pos, inputs): if (self.values[pos] > 0): return inputs[self.values[pos] - 1] if (functions[-self.values[pos]] == "*"): return self.dfs(2 * pos + 1, inputs) * self.dfs(2 * pos + 2, inputs) if (functions[-self.values[pos]] == "-"): return self.dfs(2 * pos + 1, inputs) - self.dfs(2 * pos + 2, inputs) if (functions[-self.values[pos]] == "+"): return self.dfs(2 * pos + 1, inputs) + self.dfs(2 * pos + 2, inputs) def mutate(self, probability): index1 = random.randint(0, (2 ** (depth - 1)) - 2) index2 = random.randint(0, (2 ** (depth - 1)) - 2) i = min(index1, index2) j = max(index1, index2) while i < j: self.values[i], self.values[j] = self.values[j], self.values[i] i += 1 j -= 1 def crossover(individ1, individ2, probability): offspring1 = Individ() offspring2 = Individ() index1 = random.randint(0, (2 ** (depth))) index2 = random.randint(0, (2 ** (depth))) i = min(index1, index2) j = max(index1, index2) for k in range(0, (2 ** (depth))): if k in range(i, j): offspring1.values[k] = individ1.values[k] offspring2.values[k] = individ2.values[k] else: offspring1.values[k] = individ2.values[k] offspring2.values[k] = individ1.values[k] return offspring1, offspring2 def __str__(self): return str(self.values) class Algorithm(object): """docstring for Algorithm""" def __init__(self, problem, populationSize = 40): self.__problem = problem self.readParameters() self.__populationSize = populationSize self.__population = Population(self.__populationSize) def getPopulation(self): return self.__population def iteration(self): self.__population.selection(self.__problem) self.__population.evaluate(self.__problem) def run(self): for i in range(100): self.iteration() print( self.__population.getBest(), self.__population.getBest().fitness(self.__problem), self.__population.getPopSize() ) self.__population.getBest().eval(self.__problem) def readParameters(self): self.__problem.loadData("slump_test.data") class Population(object): """docstring for Population""" def __init__(self, noIndivids): self.__noIndivids = noIndivids self.__individs = [] for i in range(noIndivids): self.__individs.append(Individ()) def evaluate(self, problem): for i in range(self.__noIndivids // 2 - 1): offspring1, offspring2 = Individ.crossover(self.__individs[i], self.__individs[i + 1], 0.5) self.__individs.append(offspring1) self.__individs.append(offspring2) offspring1, offspring2 = Individ.crossover(self.__individs[self.__noIndivids // 2 - 1], self.__individs[0], 0.5) self.__individs.append(offspring1) self.__individs.append(offspring2) for i in 
range(self.__noIndivids): self.__individs[i].mutate(0.08) self.__individs.sort(key = lambda x : x.fitness(problem), reverse = True) def getBest(self): return self.__individs[0] def getPopSize(self): return len(self.__individs) def selection(self, problem): s = 0 newPopulation = [] for i in range(self.__noIndivids): s += self.__individs[i].fitness(problem) for i in range(self.__noIndivids // 2): r = random.random() j = 0 percents = 0 while j < self.__noIndivids and percents < r: percents += self.__individs[j].fitness(problem) / s j += 1 j -= 1 newPopulation.append(self.__individs[j]) s -= self.__individs[j].fitness(problem) self.__individs.pop(j) self.__individs = newPopulation def __str__(self): s = "" for i in self.__individs: s += str(i) + "\n" return s class Problem(object): """docstring for Problem""" def __init__(self): self.inData = [] self.outData = [] def loadData(self, fileName): with open(fileName, "r") as f: while True: line = f.readline() if (line == ""): break line = line.split(",") crtIn = [] for i in range(1, 10): crtIn.append(float(line[i].strip())) self.outData.append(float(line[10].strip())) self.inData.append(crtIn) if __name__ == "__main__": p = Problem() a = Algorithm(p, 40) a.run()
ggaaggaabbii/University-work
ai/lab6_2.py
lab6_2.py
py
5,158
python
en
code
0
github-code
50
38534453006
import torch
from torch_geometric.nn import knn_graph, knn, CGConv


class GNNAttention(torch.nn.Module):
    '''Uses 2 graph layers. One for self attention and one for cross attention.
    Self-attention based on k-NN of coordinates. Cross-attention based on k-NN in feature space'''

    def __init__(self, dim, k):
        '''dim is the feature dimensions, k is the number of neighbours to consider'''
        super().__init__()
        self.k = k
        self.conv1 = CGConv(dim, aggr='max', batch_norm=True).cuda()
        self.conv2 = CGConv(dim, aggr='max', batch_norm=True).cuda()

    def forward(self, xyz0, xyz1, f0, f1):
        b, npts, d = f0.shape
        batch_idx = torch.arange(b).repeat_interleave(npts).to(xyz0.device)
        f0 = f0.reshape(-1, d)
        f1 = f1.reshape(-1, d)

        # create edge graphs from the coordinates
        edge_idx_c0 = knn_graph(xyz0.reshape(-1, 3), k=self.k, batch=batch_idx)
        edge_idx_c1 = knn_graph(xyz1.reshape(-1, 3), k=self.k, batch=batch_idx)

        # self-attention (layer 1)
        f0 = self.conv1(f0, edge_idx_c0)
        f1 = self.conv1(f1, edge_idx_c1)

        # cross-attention (layer 2)
        edge_idx_f = knn(f1, f0, k=self.k, batch_x=batch_idx, batch_y=batch_idx, cosine=True)
        edge_idx_f[1] += b * npts
        f = self.conv2(torch.cat([f0, f1], dim=0), edge_idx_f)
        f0, f1 = f[:(b * npts)], f[(b * npts):]

        # convert f0, f1 back to a dense representation
        f0 = f0.reshape(b, npts, d)
        f1 = f1.reshape(b, npts, d)
        return f0, f1
eduardohenriquearnold/fastreg
lib/models/attention.py
attention.py
py
1,537
python
en
code
52
github-code
50
14370519259
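GNNAttention expects batched coordinates of shape (B, N, 3) and features of shape (B, N, D) for both point clouds. A minimal forward-pass sketch, assuming a CUDA device is available (the module calls .cuda() on its CGConv layers) and using made-up tensor sizes:

import torch

# Hypothetical shapes: batch of 2 clouds, 128 points each, 32-dim features, 8 neighbours.
model = GNNAttention(dim=32, k=8)
xyz0 = torch.rand(2, 128, 3, device="cuda")
xyz1 = torch.rand(2, 128, 3, device="cuda")
f0 = torch.rand(2, 128, 32, device="cuda")
f1 = torch.rand(2, 128, 32, device="cuda")

f0_out, f1_out = model(xyz0, xyz1, f0, f1)  # both come back as (2, 128, 32)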
import pygame import math import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) WHITE = (255, 255, 255) BLACK = (0, 0, 0) f = open("dialog.txt", "r") rawText = f.read().split("\n") f.close() msg = rawText[0] options = rawText[1:] def finish(s): pygame.quit() f = open("dialog.txt", "w") f.write(s) f.close() exit() pygame.font.init() FONT = pygame.font.Font(pygame.font.get_default_font(), 30) msgRendered = FONT.render(msg, True, BLACK) msgWidth = msgRendered.get_width() msgHeight = msgRendered.get_height() SCREENSIZE = [msgWidth + 100, msgHeight + 50 + msgHeight] screen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE) # Loop running = True c = pygame.time.Clock() option_width = (SCREENSIZE[0] - ((len(options) - 1) * 5)) / len(options) while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.VIDEORESIZE: SCREENSIZE = [*event.dict["size"]] screen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE) elif event.type == pygame.MOUSEBUTTONUP: if pygame.mouse.get_pos()[1] < (SCREENSIZE[1] - msgHeight): continue pos = pygame.mouse.get_pos()[0] pos /= option_width pos = math.floor(pos) finish(options[pos]) # Message screen.fill(WHITE) screen.blit(msgRendered, ((SCREENSIZE[0] - msgWidth) / 2, ((SCREENSIZE[1] - msgHeight) - msgHeight) / 2)) # Options option_width = SCREENSIZE[0] / len(options) cum_x = 0 pygame.draw.rect(screen, BLACK, pygame.Rect(0, SCREENSIZE[1] - msgHeight, SCREENSIZE[0], msgHeight)) for o in options: oRendered = FONT.render(o, True, WHITE) screen.blit(oRendered, (cum_x + ((option_width - oRendered.get_width()) / 2), SCREENSIZE[1] - msgHeight)) cum_x += option_width pygame.draw.line(screen, WHITE, (cum_x, SCREENSIZE[1] - msgHeight), (cum_x, SCREENSIZE[1]), 5) # Flip pygame.display.flip() c.tick(60) # End finish("")
sillypantscoder/pygame_zip
dialog/dialog.py
dialog.py
py
1,907
python
en
code
0
github-code
50
16558290798
import io import re import time from collections import defaultdict import requests import requests_cache from imicrobe.util import grouper requests_cache.install_cache('kegg_api_cache') def get_kegg_annotations(kegg_ids): all_kegg_annotations = {} all_bad_kegg_ids = set() # the missing_accessions_groups_of_10 generator returns groups of 10 KEGG ids # that are not already in the database and that are not 'bad' KEGG ids # the last group will be padded with 'None' if there are fewer than 10 KEGG ids for group_of_10 in grouper(sorted(kegg_ids), n=10): t0 = time.time() kegg_id_list = [k for k in group_of_10 if k is not None] #print(kegg_id_list) print('requesting {} KEGG annotation(s)'.format(len(kegg_id_list))) kegg_annotations, bad_kegg_ids = get_10_kegg_annotations(kegg_id_list) print(' received {} in {:5.2f}s'.format(len(kegg_annotations), time.time()-t0)) all_kegg_annotations.update(kegg_annotations) all_bad_kegg_ids.update(bad_kegg_ids) return all_kegg_annotations, all_bad_kegg_ids kegg_orthology_field_re = re.compile(r'^(?P<field_name>[A-Z]+)?(\s+)(?P<field_value>.+)$') def get_10_kegg_annotations(kegg_ids): """ Request annotations for up to 10 KEGG ids. If a bad id is given there will be no response for it. The response from the KEGG API looks like this: ENTRY K01467 KO NAME ampC DEFINITION beta-lactamase class C [EC:3.5.2.6] PATHWAY ko01501 beta-Lactam resistance ko02020 Two-component system MODULE M00628 beta-Lactam resistance, AmpC system ... ENTRY K00154 KO NAME E1.2.1.68 DEFINITION coniferyl-aldehyde dehydrogenase [EC:1.2.1.68] BRITE Enzymes [BR:ko01000] 1. Oxidoreductases 1.2 Acting on the aldehyde or oxo group of donors 1.2.1 With NAD+ or NADP+ as acceptor 1.2.1.68 coniferyl-aldehyde dehydrogenase K00154 E1.2.1.68; coniferyl-aldehyde dehydrogenase DBLINKS COG: COG1012 GO: 0050269 GENES GQU: AWC35_21175 CED: LH89_09310 LH89_19560 SMW: SMWW4_v1c32370 SMAR: SM39_2711 SMAC: SMDB11_2482 ... return: a dictionary of dictionaries that looks like this { 'K01467': { 'ENTRY': 'K01467 KO', 'NAME': 'ampC', 'DEFINITION': '', 'PATHWAY': '', 'MODULE': '', ... }, 'K00154': { 'ENTRY': 'K00154 KO', 'NAME': 'E1.2.1.68', 'DEFINITION': '', 'PATHWAY': '', 'MODULE': '', ... } } and a (possibly empty) set of KEGG ids for which no annotation was returned """ debug = False ko_id_list = '+'.join(['ko:{}'.format(k) for k in kegg_ids]) response = requests.get('http://rest.kegg.jp/get/{}'.format(ko_id_list)) if response.status_code == 404: print('no annotations returned') all_entries = {} bad_kegg_ids = set(kegg_ids) return all_entries, bad_kegg_ids if response.status_code != 200: error_msg = 'ERROR: response to "{}" is {}'.format(response.url, response.status_code) print(error_msg) raise Exception(error_msg) else: all_entries = defaultdict(lambda: defaultdict(list)) kegg_id = None field_name = None for line in io.StringIO(response.text).readlines(): field_match = kegg_orthology_field_re.search(line.rstrip()) if field_match is None: # this line separates entries kegg_id = None field_name = None else: field_value = field_match.group('field_value') if 'field_name' in field_match.groupdict(): field_name = field_match.group('field_name') if field_name == 'ENTRY': kegg_id, *_ = field_value.split(' ') # print('KEGG id: "{}"'.format(kegg_id)) else: # just a field value is present pass all_entries[kegg_id][field_name].append(field_value) # were any of the KEGG ids bad? bad_kegg_ids = {k for k in kegg_ids} - {k for k in all_entries.keys()} return all_entries, bad_kegg_ids
hurwitzlab/imicrobe-data-loaders
imicrobe/util/kegg.py
kegg.py
py
4,889
python
en
code
0
github-code
50
70409103517
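get_kegg_annotations batches requests against the KEGG REST API in groups of ten and separately reports ids that returned nothing. An illustrative call, reusing the two KEGG ids from the docstring above:

annotations, bad_ids = get_kegg_annotations(["K01467", "K00154"])

for kegg_id, fields in annotations.items():
    # Each field name maps to the list of lines returned for it, e.g. fields["NAME"] == ["ampC"].
    print(kegg_id, fields.get("DEFINITION"))

print("ids with no annotation:", bad_ids)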
import os, random

import requests
from bs4 import BeautifulSoup

import NBA as nba


class ziz():

    def hello(self):
        print("---- Hello, my name is Ziz ----")

    def NBA(self, args):
        if args[0] == 'games':
            return self.stringfy(nba.getGames())

    def getGames(self):
        url = 'https://reddit.nbabite.com/'
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        # table_MN = pd.read_html(page)
        competitions = soup.find(id='competitions')
        heure_matchs = soup.find_all("div", {"class": "status"})
        team_names = soup.find_all("div", {"class": "team-name"})
        date = soup.find_all('div', {"class": "date d-sm-block d-none"})[0].text
        # print(matches)
        match = {}
        text = date + "\n"
        for i, heure in enumerate(heure_matchs):
            s = heure.text + " : " + team_names[i + 1].text + " @ " + team_names[i].text
            text += '\n' + s
        return self.stringfy(text)

    def stringfy(self, text):
        s = "```text\n"
        s += text + '\n'
        s += "```"
        return s
ahandan/discord_bot
bot/zizBot.py
zizBot.py
py
1,163
python
en
code
0
github-code
50
7764637836
import socket
import datetime

import certifi
from OpenSSL import SSL

hostname = 'services.bq.com'
port = 443
now = datetime.datetime.now()

context = SSL.Context(method=SSL.TLSv1_METHOD)
context.load_verify_locations(cafile=certifi.where())

conn = SSL.Connection(context, socket=socket.socket(socket.AF_INET, socket.SOCK_STREAM))
conn.settimeout(5)
conn.connect((hostname, port))
conn.setblocking(1)
# SNI must be set before the handshake so the server presents the matching certificate chain.
conn.set_tlsext_host_name(hostname.encode())
conn.do_handshake()

certs = conn.get_peer_cert_chain()
for (idx, cert) in enumerate(certs):
    formated_date_after = datetime.datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
    formated_date_before = datetime.datetime.strptime(cert.get_notBefore().decode('ascii'), '%Y%m%d%H%M%SZ')
    print(f'{idx} subject: {cert.get_subject()}')
    print(f'  issuer: {cert.get_issuer()}')
    print('  Valido-Desde :', formated_date_before)
    print('  Valido-Hasta :', formated_date_after)
    print('  --> Expira en :', formated_date_after - now)
    # if (now - formated_date_after) > now:
    #     print("ERROR Expired")
    # print(f'  fingerprint: {cert.digest("sha1")}')
    # print('----', formated_date_after)
    print()

conn.close()
dgardella/pys
check_cert.py
check_cert.py
py
1,215
python
en
code
0
github-code
50
4635654089
__author__ = "Younes Bouhadjar, Vincent Marois, Tomasz Kornuta" import torch import numpy as np from miprometheus.problems.seq_to_seq.algorithmic.algorithmic_seq_to_seq_problem import AlgorithmicSeqToSeqProblem class ScratchPadCommandLines(AlgorithmicSeqToSeqProblem): """ Class generating sequences of random bit-patterns and targets forcing the system to learn the scratch pad problem (overwriting the memory). Minor modification I: the target contains may contain random command lines. """ def __init__(self, params): """ Constructor - stores parameters. Calls parent class ``AlgorithmicSeqToSeqProblem``\ initialization. :param params: Dictionary of parameters (read from configuration ``.yaml`` file). """ # Set default number of bits for a given problem. # This has to be done before calling base class constructor! params.add_default_params({ 'control_bits': 2, 'data_bits': 8 }) # Call parent constructor - sets e.g. the loss function, dtype. # Additionally it extracts "standard" list of parameters for # algorithmic tasks, like batch_size, numbers of bits, sequences etc. super(ScratchPadCommandLines, self).__init__(params) self.name = 'ScratchPadCommandLines' assert self.control_bits >= 2, "Problem requires at least 2 control bits (currently %r)" % self.control_bits assert self.data_bits >= 1, "Problem requires at least 1 data bit (currently %r)" % self.data_bits # Number of subsequences. self.num_subseq_min = params["num_subseq_min"] self.num_subseq_max = params["num_subseq_max"] def generate_batch(self, batch_size): """ Generates a batch of samples of size ''batch_size'' on-the-fly. .. note:: The sequence length is drawn randomly between ``self.min_sequence_length`` and \ ``self.max_sequence_length``. .. warning:: All the samples within the batch will have the same sequence lengt. :param batch_size: Size of the batch to be returned. :return: DataDict({'sequences', 'sequences_length', 'targets', 'masks', 'num_subsequences'}), with: - sequences: [BATCH_SIZE, SEQ_LENGTH, CONTROL_BITS+DATA_BITS], - sequences_length: [BATCH_SIZE] (random value between self.min_sequence_length and self.max_sequence_length) - targets: [BATCH_SIZE, SEQ_LENGTH, DATA_BITS], - masks: [BATCH_SIZE, SEQ_LENGTH, 1] - num_subsequences: [BATCH_SIZE, 1] (number of subsequences) """ # Store marker. ctrl_store = np.zeros(self.control_bits) ctrl_store[self.store_bit] = 1 # [1, 0, 0] # Recall marker. ctrl_recall = np.zeros(self.control_bits) ctrl_recall[self.recall_bit] = 1 # [0, 1, 0] # Empty data marker. ctrl_data = np.zeros(self.control_bits) # [0, 0] # Define control lines. ctrl_aux = np.zeros(self.control_bits) if self.use_control_lines: if self.control_bits >= 3: if self.randomize_control_lines: # Randomly pick one of the bits to be set. ctrl_bit = np.random.randint(2, self.control_bits) ctrl_aux[ctrl_bit] = 1 else: # Set last. ctrl_aux[self.control_bits - 1] = 1 # Else: no control lines! 
# assign markers markers = ctrl_data, ctrl_store, ctrl_data # number sub sequences num_sub_seq = np.random.randint(self.num_subseq_min, self.num_subseq_max + 1) # set the sequence length of each marker seq_lengths = np.random.randint(low=self.min_sequence_length, high=self.max_sequence_length + 1, size=num_sub_seq) # generate subsequences for x and y x = [np.random.binomial(1, self.bias, (batch_size, n, self.data_bits)) for n in seq_lengths] # create the target seq_length_tdummies = sum(seq_lengths) + seq_lengths.shape[0] + 1 dummies_target = np.zeros([batch_size, seq_length_tdummies, self.data_bits], dtype=np.float32) targets = np.concatenate((dummies_target, x[-1]), axis=1) # data of x and dummies xx = [self.augment(seq, markers, ctrl_start=ctrl_store, add_marker_data=True, add_marker_dummy=False) for seq in x] # data of x data_1 = [arr for a in xx for arr in a[:-1]] # this is a marker between sub sequence x and dummies inter_seq = self.add_ctrl(np.zeros((batch_size, 1, self.data_bits)), ctrl_recall, ctrl_data) # dummies of x data_2 = [xx[-1][-1]] # concatenate all parts of the inputs inputs = np.concatenate(data_1 + [inter_seq] + data_2, axis=1) # Set control lines for recall items. inputs[:, inputs.shape[1]-seq_lengths[-1]:,0:self.control_bits] = np.tile( ctrl_aux,(batch_size,seq_lengths[-1],1)) # Generate 3D ByteTensor for mask. ptmasks = torch.zeros([batch_size, inputs.shape[1], 1]).type(torch.ByteTensor) ptmasks[:, inputs.shape[1]-seq_lengths[-1]:, 0] = 1 # Return data_dict. data_dict = self.create_data_dict() data_dict['sequences'] = torch.from_numpy(inputs).type(self.app_state.dtype) data_dict['targets'] = torch.from_numpy(targets).type(self.app_state.dtype) data_dict['masks'] = ptmasks data_dict['sequences_length'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * max(seq_lengths).item() data_dict['num_subsequences'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * num_sub_seq return data_dict if __name__ == "__main__": """ Tests sequence generator - generates and displays a random sample""" # "Loaded parameters". from miprometheus.utils.param_interface import ParamInterface params = ParamInterface() params.add_config_params({#'control_bits': 4, #'data_bits': 8, 'min_sequence_length': 1, 'max_sequence_length': 10, 'num_subseq_min': 2, 'num_subseq_max': 4}) batch_size = 10 # Create problem object. scratchpad = ScratchPad(params) # get a sample sample = scratchpad[0] print(repr(sample)) print('__getitem__ works.') # wrap DataLoader on top from torch.utils.data import DataLoader problem = DataLoader(dataset=scratchpad, batch_size=batch_size, collate_fn=scratchpad.collate_fn, shuffle=False, num_workers=0) # generate a batch import time s = time.time() for i, batch in enumerate(problem): #print('Batch # {} - {}'.format(i, type(batch))) pass print('Number of workers: {}'.format(problem.num_workers)) print('time taken to exhaust a dataset of size {}, with a batch size of {}: {}s' .format(scratchpad.__len__(), batch_size, time.time() - s)) # Display single sample (0) from batch. batch = next(iter(problem)) scratchpad.show_sample(batch, 0) print('Unit test completed.')
vincentalbouy/mi-prometheus
miprometheus/problems/seq_to_seq/algorithmic/recall/scratch_pad_cl.py
scratch_pad_cl.py
py
7,377
python
en
code
0
github-code
50
224890675
''' The purpose of the python code is as follows: 1) To load the trained classifier model to classify different hand signs 2) To capture the frames taken from users camera 3) Take the landmarks from the users hand 4) Load the landmark data into the model 5) Get the prediction from the model and print it in the frame ''' import pickle import cv2 import mediapipe as mp import numpy as np # Load the savel classifier model model_dict = pickle.load(open('./model2.p', 'rb')) model = model_dict['model'] cap = cv2.VideoCapture(0) # Get the hands part from the picture using mediapipe mp_hands = mp.solutions.hands mp_drawing = mp.solutions.drawing_utils mp_drawing_styles = mp.solutions.drawing_styles hands = mp_hands.Hands(static_image_mode=True, min_detection_confidence=0.3) # Kae the alphabet labels labels_dict = {} for i in range(26): labels_dict[i] = chr(65+i) while True: data_aux = [] x_ = [] y_ = [] ret, frame = cap.read() H, W, _ = frame.shape frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) #Capture the landmarks from the hand region results = hands.process(frame_rgb) if results.multi_hand_landmarks: for hand_landmarks in results.multi_hand_landmarks: mp_drawing.draw_landmarks( frame, # image to draw hand_landmarks, # model output mp_hands.HAND_CONNECTIONS, # hand connections mp_drawing_styles.get_default_hand_landmarks_style(), mp_drawing_styles.get_default_hand_connections_style()) for hand_landmarks in results.multi_hand_landmarks: for i in range(len(hand_landmarks.landmark)): x = hand_landmarks.landmark[i].x y = hand_landmarks.landmark[i].y x_.append(x) y_.append(y) for i in range(len(hand_landmarks.landmark)): x = hand_landmarks.landmark[i].x y = hand_landmarks.landmark[i].y data_aux.append(x - min(x_)) data_aux.append(y - min(y_)) x1 = int(min(x_) * W) - 10 y1 = int(min(y_) * H) - 10 x2 = int(max(x_) * W) - 10 y2 = int(max(y_) * H) - 10 # Get the prediction of model after loading the hand landmark data prediction = model.predict([np.asarray(data_aux)]) # Get the character predicted predicted_character = labels_dict[int(prediction[0])] # Plot a rectangle on the hand region and inscribe text above this hand region cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 0), 4) cv2.putText(frame, predicted_character, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 0, 0), 3, cv2.LINE_AA) # Show the frame cv2.imshow('frame', frame) cv2.waitKey(1) cap.release() cv2.destroyAllWindows()
RexSan0x/Sign-Language-and-Emotion-Detection
Sign_Language_Training/inferece_sign_lang.py
inferece_sign_lang.py
py
2,902
python
en
code
0
github-code
50
191147204
import sqlite3

# connecting to the db
con = sqlite3.connect('technical_test.db')
cur = con.cursor()

# printing each row in the db
for row in cur.execute('select * from famous_people;'):
    print(row)
print('')

# closing the connection to the db
con.close()
MickyCompanie/technical_test_sneldev
query_db.py
query_db.py
py
252
python
en
code
0
github-code
50
22593983762
#-*- coding: utf-8 -*- from cadproj.models import OrientadorOuMediador, Projeto, Curso, TipoDeProjeto, ModoDeApresentacao, Cidade, Recurso, Calouro, Turma from django.contrib import admin from django.utils.translation import ugettext_lazy as _ class EstudanteOptions(admin.ModelAdmin): list_display = ('nome','matricula') #date_hierarchy = 'data_e_hora' class OrientadorOptions(admin.ModelAdmin): list_display = ('nome',) #class CursoOptions(admin.ModelAdmin): #list_display = ('nome',) class ProjetoOptions(admin.ModelAdmin): #inlines = [Pendencia_Inline, Contatamento_Inline] list_display = ('estudante', 'titulo', 'orientador_ou_mediador') fieldsets = ( (None, { 'fields': ('estudante','matricula','titulo','descricao') }), ('Outros Componentes da Equipe', { 'fields': ('estudante2','estudante3','outros_componentes') }), ('Curso', { 'fields': ('curso','turma') }), ('Contato', { 'fields': ('cidade_onde_mora','fone','email') }), ('Projeto', { 'fields': ('orientador_ou_mediador','colaborador','tipo_de_projeto','outro_tipo_de_projeto','palavra_chave1','palavra_chave2','palavra_chave3','cidade_de_abrangencia','local_e_ou_instituicao_de_abrangencia') }), ('Apresentação', {'fields':('modo_de_apresentacao','outro_modo','recursos_para_a_apresentacao') }), ) list_per_page = 25 search_fields = ['estudante', 'titulo', 'descricao', 'matricula', 'fone'] list_filter = ('orientador_ou_mediador','curso','tipo_de_projeto','cidade_de_abrangencia') admin.site.register(Curso) admin.site.register(Projeto,ProjetoOptions) admin.site.register(TipoDeProjeto) admin.site.register(ModoDeApresentacao) admin.site.register(OrientadorOuMediador) admin.site.register(Cidade) admin.site.register(Recurso) admin.site.register(Calouro) admin.site.register(Turma)
jamur/Mostra-de-Projetos
cadproj/admin.py
admin.py
py
1,960
python
pt
code
1
github-code
50
25257312328
from __future__ import annotations import typing from flupy import flu from nebulo.config import Config from nebulo.gql.alias import FunctionPayloadType, MutationPayloadType, ObjectType, ResolveInfo, ScalarType from nebulo.gql.parse_info import parse_resolve_info from nebulo.gql.relay.node_interface import NodeIdStructure, to_node_id_sql from nebulo.gql.resolve.resolvers.claims import build_claims from nebulo.gql.resolve.transpile.mutation_builder import build_mutation from nebulo.gql.resolve.transpile.query_builder import sql_builder, sql_finalize from nebulo.sql.table_base import TableProtocol from sqlalchemy import literal_column, select async def async_resolver(_, info: ResolveInfo, **kwargs) -> typing.Any: """Awaitable GraphQL Entrypoint resolver Expects: info.context['engine'] to contain an sqlalchemy.ext.asyncio.AsyncEngine """ context = info.context engine = context["engine"] default_role = context["default_role"] jwt_claims = context["jwt_claims"] tree = parse_resolve_info(info) async with engine.begin() as trans: # Set claims for transaction if jwt_claims or default_role: claims_stmt = build_claims(jwt_claims, default_role) await trans.execute(claims_stmt) result: typing.Dict[str, typing.Any] if isinstance(tree.return_type, FunctionPayloadType): sql_function = tree.return_type.sql_function function_args = [val for key, val in tree.args["input"].items() if key != "clientMutationId"] func_call = sql_function.to_executable(function_args) # Function returning table row if isinstance(sql_function.return_sqla_type, TableProtocol): # Unpack the table row to columns return_sqla_model = sql_function.return_sqla_type core_table = return_sqla_model.__table__ func_alias = func_call.alias("named_alias") stmt = select([literal_column(c.name).label(c.name) for c in core_table.c]).select_from(func_alias) # type: ignore stmt_alias = stmt.alias() node_id_stmt = select([to_node_id_sql(return_sqla_model, stmt_alias).label("nodeId")]).select_from(stmt_alias) # type: ignore ((row,),) = await trans.execute(node_id_stmt) node_id = NodeIdStructure.from_dict(row) # Add nodeId to AST and query query_tree = next(iter([x for x in tree.fields if x.name == "result"]), None) if query_tree is not None: query_tree.args["nodeId"] = node_id base_query = sql_builder(query_tree) query = sql_finalize(query_tree.alias, base_query) ((stmt_result,),) = await trans.execute(query) else: stmt_result = {} else: stmt = select([func_call.label("result")]) (stmt_result,) = await trans.execute(stmt) maybe_mutation_id = tree.args["input"].get("clientMutationId") mutation_id_alias = next( iter([x.alias for x in tree.fields if x.name == "clientMutationId"]), "clientMutationId", ) result = {tree.alias: {**stmt_result, **{mutation_id_alias: maybe_mutation_id}}} elif isinstance(tree.return_type, MutationPayloadType): stmt = build_mutation(tree) ((row,),) = await trans.execute(stmt) node_id = NodeIdStructure.from_dict(row) maybe_mutation_id = tree.args["input"].get("clientMutationId") mutation_id_alias = next( iter([x.alias for x in tree.fields if x.name == "clientMutationId"]), "clientMutationId", ) node_id_alias = next(iter([x.alias for x in tree.fields if x.name == "nodeId"]), "nodeId") output_row_name: str = Config.table_name_mapper(tree.return_type.sqla_model) query_tree = next(iter([x for x in tree.fields if x.name == output_row_name]), None) sql_result = {} if query_tree: # Set the nodeid of the newly created record as an arg query_tree.args["nodeId"] = node_id base_query = sql_builder(query_tree) query = 
sql_finalize(query_tree.alias, base_query) ((sql_result,),) = await trans.execute(query) result = { tree.alias: {**sql_result, mutation_id_alias: maybe_mutation_id}, mutation_id_alias: maybe_mutation_id, node_id_alias: node_id, } elif isinstance(tree.return_type, (ObjectType, ScalarType)): base_query = sql_builder(tree) query = sql_finalize(tree.name, base_query) ((query_json_result,),) = await trans.execute(query) if isinstance(tree.return_type, ScalarType): # If its a scalar, unwrap the top level name result = flu(query_json_result.values()).first(None) else: result = query_json_result else: raise Exception("sql builder could not handle return type") # Stash result on context to enable dumb resolvers to not fail context["result"] = result return result
olirice/nebulo
src/nebulo/gql/resolve/resolvers/asynchronous.py
asynchronous.py
py
5,380
python
en
code
90
github-code
50
26212162918
from typing import List

from langchain.agents import Tool
from streamlit.components.v1 import html

from htmlTemplates import css, bot_template, user_template, disclaimer_text, box_template, user_img, bot_img
from agentFunctions import simple_report_search, report_summarizer, one_person_search, tearm_search


def create_tools():
    # define the Tools usable by the Agent
    tools = [
        Tool(
            name="TermSearch",
            func=tearm_search,
            description="use this tool if you are not sure about a term. Input the term"
        ),
        Tool(
            name="SimpleReportSearch",
            func=simple_report_search,
            description="useful if you think that you need just a little information from the report to answer the User Question. Input a question describing what information you need, plus keywords. Suitable for a keyword-based search in a vector space"
        ),
        Tool(
            name="ReportSummarizer",
            func=report_summarizer,
            description="useful if you think that you need a lot of information from the report to answer the User Question. Input a question describing what information you need, plus keywords. Suitable for a keyword-based search in a vector space"
        ),
        Tool(
            name="OnePersonSearch",
            func=one_person_search,
            description="useful if you think that you need personal information about a person in the MPI to answer the User Question. Input a question with the name of the person you search for. Suitable for a keyword-based search in a vector space"
        )
    ]
    return tools
kpister/prompt-linter
data/scraping/repos/HannesDiemerling~MinervasArchive/agentTools.py
agentTools.py
py
1,654
python
en
code
0
github-code
50
43709154819
# recommended way
admin_dict = {'1': 'scie/065p', '2': 'scii/890p'}

# getting the value for a key using [] brackets
print(admin_dict['1'])

# not recommended if a key is an integer
dict_func = dict(one='1', two='2')

# changing a value
admin_dict['1'] = 'steve/07'
print(admin_dict['1'])

# adding the key-value pairs from one dict to another
admin_name = {'name': 'steve', 'phone': '0756949393'}
admin_name.update(admin_dict)
print(admin_name)
steve-ryan/python-tutorial-for-beginners
dictionary.py
dictionary.py
py
424
python
en
code
0
github-code
50
17060406313
import json import openpyxl from case_study.models import Question from core.decorators import staff_required from django.db import IntegrityError from django.http import JsonResponse from django.shortcuts import render from .common import populate_data, delete_model, patch_model from ..forms import QuestionImportForm schema_question = { "endpoint": "/caseadmin/questions/", "fields": [ { "title": "Question", "key": "body", "widget": { "template": "w-text.html", }, "write": True, }, ] } def render_question_view(request, message=None, message_type=None): data = populate_data(schema_question, Question.objects.all()) c = { "title": "Question Admin", "model_name": "Question", "toolbar_new": True, "toolbar_import": True, "data": data, "import_form": QuestionImportForm(), "import_endpoint": "/caseadmin/questions/import", "schema": schema_question, "admin_message": message, "admin_message_type": message_type, "hard_delete_only": True, } return render(request, "case-admin.html", c) @staff_required def api_admin_question(request, question_id): if request.method == "PATCH": return patch_model(request, Question, schema_question, question_id) elif request.method == "DELETE": return delete_model(request, Question, question_id) else: return JsonResponse({ "success": False, "message": "Unsupported HTTP method: " + request.method, }) def question_import_txt(request, file, file_format): if file.content_type != "text/plain": return render_question_view(request, "Failed to import questions as text/plain. " "Please ensure your text file contains one question per line. ", "alert-danger") questions = [] for question in file.file.readlines(): q = question.decode("utf-8").strip() questions.append(Question(body=q)) try: Question.objects.bulk_create(questions, ignore_conflicts=True) return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success") except IntegrityError as e: return render_question_view(request, "Failed to import questions as text/plain. " "Please ensure your text file contains one question per line. " "Error: " + str(e.args[0]), "alert-danger") def question_import_csv(request, file, file_format): if file.content_type != "text/csv": return render_question_view(request, "Failed to import questions as text/csv. " "Please ensure your csv file contains one question per line. ", "alert-danger") questions = [] lines = file.read().decode("utf-8").split("\n") for line in lines: q = line.strip() questions.append(Question(body=q)) try: Question.objects.bulk_create(questions, ignore_conflicts=True) return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success") except IntegrityError as e: return render_question_view(request, "Failed to import questions as text/csv. " "Please ensure your csv file contains one question per line. " "Error: " + str(e.args[0]), "alert-danger") def question_import_json(request, file, file_format): if file.content_type != "application/json": return render_question_view(request, "Failed to import questions as application/json. " "Please ensure your json file contains a list of strings. 
", "alert-danger") questions = [] file_text = file.read().decode("utf-8") file_json = json.loads(file_text) for question in file_json: q = question.strip() questions.append(Question(body=q)) try: Question.objects.bulk_create(questions, ignore_conflicts=True) return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success") except IntegrityError as e: return render_question_view(request, "Failed to import questions as application/json. " "Please ensure your json file contains a list of strings. " "Error: " + str(e.args[0]), "alert-danger") def question_import_xlsx(request, file, file_format): if not (str(file.content_type) == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xlsx')): return render_question_view(request, "Failed to import questions as xlsx. " "Please ensure column A has a single question per cell. ", "alert-danger") questions = [] wb = openpyxl.load_workbook(file) sheet = wb.worksheets[0] for col in sheet.iter_cols(): for cel in col: q = str(cel.value).strip() questions.append(Question(body=q)) try: Question.objects.bulk_create(questions, ignore_conflicts=True) return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success") except IntegrityError as e: return render_question_view(request, "Failed to import questions as xlsx. " "Please ensure column A has a single question per cell. " "Error: " + str(e.args[0]), "alert-danger") @staff_required def api_admin_question_import(request): if request.method == "POST": form = QuestionImportForm(request.POST) file = request.FILES["file"] file_format = str(form["file_format"].value()) if file_format == "auto": if file.content_type == "text/csv": file_format = "csv" elif file.content_type == "application/json": file_format = "json" elif file.content_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xlsx'): file_format = "xlsx" elif file.content_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xls'): file_format = "xls" elif file.content_type == "text/plain": file_format = "txt" if file_format == "csv": return question_import_csv(request, file, file_format) elif file_format == "json": return question_import_json(request, file, file_format) elif file_format == "xlsx": return question_import_xlsx(request, file, file_format) elif file_format == "xls": return question_import_xlsx(request, file, file_format) elif file_format == "txt": return question_import_txt(request, file, file_format) else: return render_question_view(request, "Unknown file format: {}".format(str(file_format)), "alert-danger") else: return JsonResponse({ "success": False, "message": "Unsupported method: " + request.method, }) @staff_required def view_admin_question(request): if request.method == "GET": return render_question_view(request) elif request.method == "POST": try: body = json.loads(request.body) Question.objects.create(body=body["body"]) return JsonResponse({ "success": True, "message": "Question created", }) except Exception as e: return JsonResponse({ "success": False, "message": "Failed to create a question:\n" + str(e.args[0]), })
320011/case
core/case_admin/views/question.py
question.py
py
8,610
python
en
code
1
github-code
50
41390824167
import os
import json
import sqlite3

import requests

db_stored = os.path.join(os.path.dirname(__file__), 'qaset.db')  # r'D:\Archive\Voibot\qabot\data\qabot\data\qaset.db'
url = 'http://10.1.163.22:5000/encode'
headers = {'Content-Type': 'application/json'}


def generate_all_features(db_stored, begin_id, end_id):
    conn = sqlite3.connect(db_stored)
    cursor = conn.cursor()
    ques_cursor = cursor.execute('select question from qaset where id between ? and ?', (begin_id, end_id))
    questions = []
    for ques in ques_cursor:
        questions.append(ques[0])

    data = {
        'id': 123,
        'texts': questions
    }
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    result = json.loads(r.text)
    qvectors = result['result']

    current_id = begin_id
    while current_id <= end_id:
        cursor.execute('update qaset set feature = ? where id = ?',
                       (json.dumps(qvectors[current_id - begin_id]), current_id))
        current_id += 1

    conn.commit()
    conn.close()


if __name__ == "__main__":
    # begin_id = 1
    # while begin_id <= 36800:
    #     end_id = begin_id + 99
    #     generate_all_features(db_stored, begin_id, end_id)
    #     print('%d to %d is done.' % (begin_id, end_id))
    #     begin_id = end_id + 1
    begin_id = 36811
    end_id = 36843
    generate_all_features(db_stored, begin_id, end_id)
yaohsinyu/voibot
qabot/data/generate_all_feature.py
generate_all_feature.py
py
1,382
python
en
code
0
github-code
50
4149350510
"""empty message Revision ID: 96089780dc64 Revises: 45811f048651 Create Date: 2022-07-14 08:54:14.167701 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = '96089780dc64' down_revision = '45811f048651' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('rounds', sa.Column('funny_count', sa.SmallInteger(), nullable=True)) op.add_column('rounds', sa.Column('deeep_count', sa.SmallInteger(), nullable=True)) op.execute("UPDATE rounds SET funny_count = 0 WHERE true") op.execute("UPDATE rounds SET deeep_count = 0 WHERE true") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('rounds', 'deeep_count') op.drop_column('rounds', 'funny_count') # ### end Alembic commands ###
pamelafox/translation-telephone
migrations/versions/96089780dc64_.py
96089780dc64_.py
py
924
python
en
code
16
github-code
50
23275279979
from rest_framework import mixins, status from rest_framework.viewsets import GenericViewSet from rest_framework.response import Response from api.models import UploadImage, UploadRequest from api.serializers import UploadSerializer from api.serializers.image_serializer import ImageSerializer class UploadViewSet(mixins.CreateModelMixin, GenericViewSet): queryset = UploadRequest.objects.all() serializer_class = UploadSerializer def create(self, request, *args, **kwargs): serializer_data = {} processed_images = [] request_data = request.data images_to_process = 'images[]' in request_data and \ request.FILES.getlist('images[]') or request_data.get('images',[]) for image_to_process in images_to_process: transformed_image = UploadViewSet.process_image(image_to_process) processed_images.append(transformed_image['id']) serializer_data['images_id'] = bool(processed_images) and processed_images or None serializer = self.get_serializer(data=serializer_data) serializer.is_valid(raise_exception=True) self.perform_create(serializer) headers = self.get_success_headers(serializer.data) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) @classmethod def process_image(cls, image): serializer_data = {} image_to_process = image serializer_data['source'] = isinstance(image_to_process, str) and \ UploadImage.ImageSource.Remote or UploadImage.ImageSource.Upload serializer_data['source_path'] = '' if serializer_data['source'] == UploadImage.ImageSource.Remote: serializer_data['source_path'] = image_to_process serializer_data['original_file'] = image_to_process serializer_data['transformed_file'] = None image_serializer = ImageSerializer(data=serializer_data) image_serializer.is_valid(raise_exception=True) image_serializer.save() return image_serializer.data
ongtzewei/django-image-manipulation-webapp
api/views/upload.py
upload.py
py
2,050
python
en
code
0
github-code
50
17604501818
import numpy as np from scipy import misc import matplotlib.pyplot as plt def conv_single_step(a_slice_prev, W, b): """ Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation of the previous layer. Arguments: a_slice_prev -- slice of input data of shape (f, f, n_C_prev) W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev) b -- Bias parameters contained in a window - matrix of shape (1, 1, 1) Returns: Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data """ ### START CODE HERE ### (≈ 2 lines of code) # Element-wise product between a_slice_prev and W. Do not add the bias yet. s = a_slice_prev * W # Sum over all entries of the volume s. Z = np.sum(s) # Add bias b to Z. Cast b to a float() so that Z results in a scalar value. Z = np.float(np.add(b, Z)) ### END CODE HERE ### return Z def convolve2d(image, kernel, padding=False, striding=1): # This function which takes an image and a kernel # and returns the convolution of them # Args: # image: a numpy array of size [image_height, image_width]. # kernel: a numpy array of size [kernel_height, kernel_width]. # Returns: # a numpy array of size [image_height, image_width] (convolution output). assert kernel.shape[0] == kernel.shape[1], "kernel must be square" assert striding != 0, "striding cannot be zero" # The kernel is flipped so that we are not performing a "correlation" operation kernel = np.flipud(np.fliplr(kernel)) kernel_h = kernel.shape[0] kernel_w = kernel.shape[1] h = kernel_h // 2 w = kernel_w // 2 image_h = image.shape[0] image_w = image.shape[1] # if padding turned on (to fix border effect) then set for "same" padding if padding: pad = (kernel_h - 1) // 2 else: pad = 0 new_height = int(((image_h + 2*pad - kernel_h) / striding) + 1) new_width = int(((image_w + 2*pad - kernel_w) / striding) + 1) image_out = np.zeros(new_height, new_width) # Add padding to the input image image_padded = np.pad(image, ((0,0), (pad, pad), (pad, pad), (0,0)), 'constant', constant_values = (0,0)) for x in range(h, image_h - h): # Loop over every pixel of the image for y in range(w, image_w - w): sum = 0 for m in range(kernel_h): for n in range(kernel_w): sum += kernel[m][n] * image_padded[x-h+m][y-w+n] image_out[x,y] = sum return image_out img = misc.ascent() plt.grid(False) plt.gray() plt.axis('off') plt.imshow(img) plt.show() # This filter detects edges nicely # It creates a convolution that only passes through sharp edges and straight # lines. #Experiment with different values for fun effects. filter_edge = [[0, 1, 0], [1, -4, 1], [0, 1, 0]] image_sharpen = convolve2d(img, filter_edge) plt.imshow(image_sharpen, cmap=plt.cm.gray) plt.axis('off') plt.show() # A couple more filters to try for fun! 
filter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]] filter = [ [0, 1, 1, 0], [1, 3, 3, 1], [-1, -3, -3, -1], [0, -1, -1, 0]] weight = 1 #filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]] # If all the digits in the filter don't add up to 0 or 1, you # should probably do a weight to get it to do so # so, for example, if your weights are 1,1,1 1,2,1 1,1,1 # They add up to 10, so you would set a weight of .1 if you want to normalize them i_transformed = np.copy(i) size_x = i_transformed.shape[0] size_y = i_transformed.shape[1] print(size_x, size_y) weight = 1 for x in range(2,size_x-2): for y in range(2,size_y-2): convolution = 0.0 convolution = convolution + (i[x - 2, y-2] * filter[0][0]) convolution = convolution + (i[x - 1, y-2] * filter[0][1]) convolution = convolution + (i[x, y-2] * filter[0][2]) convolution = convolution + (i[x + 1, y-2] * filter[0][3]) convolution = convolution + (i[x-1, y] * filter[1][0]) convolution = convolution + (i[x, y] * filter[1][1]) convolution = convolution + (i[x+1, y] * filter[1][2]) convolution = convolution + (i[x + 1, y] * filter[1][3]) convolution = convolution + (i[x-1, y+1] * filter[2][0]) convolution = convolution + (i[x, y+1] * filter[2][1]) convolution = convolution + (i[x+1, y+1] * filter[2][2]) convolution = convolution + (i[x + 1, y + 1] * filter[2][3]) convolution = convolution + (i[x-1, y+1] * filter[3][0]) convolution = convolution + (i[x, y+1] * filter[3][1]) convolution = convolution + (i[x+1, y+1] * filter[3][2]) convolution = convolution + (i[x + 1, y + 1] * filter[3][3]) convolution = convolution * weight if(convolution<0): convolution=0 if(convolution>255): convolution=255 i_transformed[x, y] = convolution # Plot the image. Note the size of the axes -- they are 512 by 512 plt.gray() plt.grid(False) plt.imshow(i_transformed) #plt.axis('off') plt.show() plt.imshow(image_sharpen, cmap=plt.cm.gray) plt.axis('off') plt.show()
sheldon-wall/DLSpecCourse4
Week1.py
Week1.py
py
5,168
python
en
code
0
github-code
50
33948199381
import http.server import socketserver from .tools import HTTPTools class Handler(http.server.SimpleHTTPRequestHandler): """ Subclass of pex.proto.http module. This subclass of pex.proto.http module represents HTTP handler for web server. """ def log_request(self, fmt, *args) -> None: pass def send_status(self, code: int = 200) -> None: self.send_response(int(code)) self.send_header("Content-type", "text/html") self.end_headers() class HTTPListener(object): """ Subclass of pex.proto.http module. This subclass of pex.proto.http module represents Python implementation of HTTP listener. """ def __init__(self, host: str, port: int, methods: dict = {}) -> None: """ Start HTTP listener on socket pair. :param str host: host to listen :param int port: port to listen :param dict methods: methods, method names as keys and method handlers as items :return None: None """ super().__init__() self.http_tools = HTTPTools() self.handler = Handler self.host = host self.port = int(port) self.sock = None self.methods = methods def listen(self) -> None: """ Start HTTP listener. :return None: None :raises RuntimeError: with trailing error message """ try: for method in self.methods: setattr(self.handler, f"do_{method.upper()}", self.methods[method]) self.sock = socketserver.TCPServer((self.host, self.port), self.handler) except Exception: raise RuntimeError(f"Failed to start HTTP listener on port {str(self.port)}!") def stop(self) -> None: """ Stop HTTP listener. :return None: None :raises RuntimeError: with trailing error message """ try: self.sock.server_close() except Exception: raise RuntimeError(f"HTTP listener is not started!") def accept(self) -> None: """ Accept connection. :return None: None :raises RuntimeError: with trailing error message """ try: self.sock.handle_request() except Exception: raise RuntimeError(f"HTTP listener is not started!")
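A minimal usage sketch for HTTPListener above, assuming this module is importable as part of the pex.proto.http package implied by the relative import; the port and handler below are illustrative only. Because listen() assigns each handler function onto the Handler class, the function receives the request-handler instance as its single argument.

def handle_get(request):
    # 'request' is the Handler instance; send_status() is defined above.
    request.send_status(200)
    request.wfile.write(b"Hello from HTTPListener")

listener = HTTPListener(host="127.0.0.1", port=8080, methods={"get": handle_get})
listener.listen()   # bind the underlying TCPServer
listener.accept()   # serve a single request
listener.stop()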
EntySec/Pex
pex/proto/http/listener.py
listener.py
py
2,334
python
en
code
25
github-code
50
73961830874
from urllib.parse import urlencode import requests from dj_rest_auth.app_settings import api_settings from dj_rest_auth.jwt_auth import set_jwt_cookies from dj_rest_auth.models import get_token_model from dj_rest_auth.utils import jwt_encode from dj_rest_auth.views import LoginView from django.conf import settings from django.core.exceptions import ValidationError from django.shortcuts import redirect from rest_framework import serializers from db.repository.user import UserRepository def google_get_access_token(*, code: str, redirect_uri: str) -> str: # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#obtainingaccesstokens data = { "code": code, "client_id": settings.GOOGLE_OAUTH2_CLIENT_ID, "client_secret": settings.GOOGLE_OAUTH2_CLIENT_SECRET, "redirect_uri": redirect_uri, "grant_type": "authorization_code", } response = requests.post(settings.GOOGLE_ACCESS_TOKEN_OBTAIN_URL, data=data) if not response.ok: raise ValidationError("Failed to obtain access token from Google.") return response.json()["access_token"] def google_get_user_info(*, access_token: str): # Reference: https://developers.google.com/identity/protocols/oauth2/web-server#callinganapi response = requests.get( settings.GOOGLE_USER_INFO_URL, params={"access_token": access_token} ) if not response.ok: raise ValidationError("Failed to obtain user info from Google.") return response.json() class GoogleLoginApi(LoginView): permission_classes = () authentication_classes = () class InputSerializer(serializers.Serializer): code = serializers.CharField(required=False) error = serializers.CharField(required=False) def get(self, request, *args, **kwargs): user_repository = UserRepository() input_serializer = self.InputSerializer(data=request.GET) input_serializer.is_valid(raise_exception=True) validated_data = input_serializer.validated_data code = validated_data.get("code") error = validated_data.get("error") if error or not code: params = urlencode({"error": error}) return redirect(f"{settings.PLATFORM_URL}?{params}") # api_uri = reverse('api:v1:auth:login-with-google') api_uri = f"{settings.PLATFORM_URL}/api/v1/auth/login/google" access_token = google_get_access_token(code=code, redirect_uri=api_uri) user_data = google_get_user_info(access_token=access_token) profile_data = { "username": user_data["email"], "first_name": user_data.get("givenName", ""), "last_name": user_data.get("familyName", ""), } # We use get-or-create logic here for the sake of the example. # We don't have a sign-up flow. self.user = user_repository.get_or_create( email=user_data["email"], **profile_data ) token_model = get_token_model() if api_settings.USE_JWT: self.access_token, self.refresh_token = jwt_encode(self.user) elif token_model: self.token = api_settings.TOKEN_CREATOR( token_model, self.user, self.serializer ) if api_settings.SESSION_LOGIN: self.process_login() response = redirect(f"{settings.PLATFORM_URL}/courses") set_jwt_cookies(response, self.access_token, self.refresh_token) return response
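GoogleLoginApi.get() above only handles the redirect back from Google. For context, here is a sketch of the authorization URL a frontend would send users to first; the endpoint and parameter names are the standard Google OAuth2 web-server-flow values, but the scopes and the exact redirect path are assumptions and must match the api_uri used in the view.

from urllib.parse import urlencode
from django.conf import settings

GOOGLE_AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"  # standard Google endpoint

def google_login_redirect_url() -> str:
    # The redirect_uri must match the api_uri constructed in GoogleLoginApi.get() above.
    params = {
        "response_type": "code",
        "client_id": settings.GOOGLE_OAUTH2_CLIENT_ID,
        "redirect_uri": f"{settings.PLATFORM_URL}/api/v1/auth/login/google",
        "scope": "openid email profile",
        "access_type": "offline",
    }
    return f"{GOOGLE_AUTH_URL}?{urlencode(params)}"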
edu4ml/WSB-ML-PLATFORM-FORKED
api/apis/v1/auth/auth.py
auth.py
py
3,513
python
en
code
0
github-code
50
29905578649
# coding:utf-8 from unityagents import UnityEnvironment import numpy as np from network.DQN import DQNAgent import matplotlib.pyplot as plt import tensorflow as tf import time env = UnityEnvironment(file_name="../environment/Banana_Windows_x86_64/Banana.exe") path = "../result/banana/" # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state = env_info.vector_observations[0] state_size = len(state) total_scores = [] scores = [] batch_size = 64 mean = 0 count = 0 eps = 1.0 eps_end = 0.01 decay = 0.999 max_t = 1000 gamma = 0.99 alpha = 1e-4 tua = 1e-3 max_memory_size = 50000 train = False with tf.Session() as session: brain_agent = DQNAgent(session, state_size, action_size, max_memory_size, gamma, alpha, tua) session.run(tf.global_variables_initializer()) saver = tf.train.Saver() while mean < 13 and train: env_info = env.reset(train_mode=True)[brain_name] score = 0 time_b = time.time() loss = 0 for i in range(max_t): if np.random.random() > eps: action = np.argmax(brain_agent.choose_action(state), axis=1) else: action = np.random.choice(action_size) env_info = env.step(action)[brain_name] next_state = env_info.vector_observations[0] reward = env_info.rewards[0] done = env_info.local_done[0] score += reward brain_agent.store(state, action, reward, next_state, [done]) state = next_state if brain_agent.step % 4 == 0: loss += brain_agent.learn(batch_size) if done: break scores.append(score) total_scores.append(score) eps = max(eps * decay, eps_end) print("\rEpisode: {},\tCurr Score: {},\tAverage Score: {:.2f},\tLoss:{:.4},\tEPS:{:.4},\tTime: {:.4}".format(count, score, np.mean(scores), loss/250.0, eps, time.time()-time_b), end="") if count % 100 == 0 and count > 0: mean = np.mean(scores) print("\rEpisode: {}, \tAverage Score: {:.2f}".format(count, mean)) scores.clear() count += 1 if train: saver.save(session, path) fig = plt.figure() plt.plot(range(len(total_scores)), total_scores) plt.ylabel('Score') plt.xlabel('Episode #') plt.show() else: saver.restore(session, path) saver.restore(session, path) for _ in range(10): done = False env_info = env.reset(train_mode=False)[brain_name] score = 0 state = env_info.vector_observations[0] while not done: action = brain_agent.action = np.argmax(brain_agent.choose_action(state), axis=1) env_info = env.step(action)[brain_name] next_state = env_info.vector_observations[0] reward = env_info.rewards[0] done = env_info.local_done[0] score += reward state = next_state print("Score is ", score)
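The script passes tua=1e-3 into DQNAgent, which suggests a soft (Polyak) target-network update inside network/DQN.py; that file is not shown here, so the following is only a generic sketch of such an update over numpy parameter arrays, not the repository's actual implementation.

import numpy as np

def soft_update(target_params, local_params, tau=1e-3):
    """Polyak averaging: theta_target <- tau * theta_local + (1 - tau) * theta_target.

    Both arguments are lists of numpy arrays (one per layer); the names are
    illustrative and do not come from the repository's DQN.py.
    """
    return [tau * local + (1.0 - tau) * target
            for target, local in zip(target_params, local_params)]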
lebesgue125/reinforce_learning
banana/dqn_agent.py
dqn_agent.py
py
3,144
python
en
code
0
github-code
50
74910611675
import numpy as np import cv2 import pyrealsense2 as rs import math """INTIALIZING REALSENSE DATA""" # Initialize RealSense pipeline pipeline = rs.pipeline() config = rs.config() config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30) config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30) pipeline.start(config) # Initialize ORB detector orb = cv2.ORB_create() # Brute-force Matcher bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) # Previous frame data prev_gray = None prev_kps = None prev_descs = None prev_matched_coords = None current_matched_coords = None MAX_MATCH_DISTANCE = 40 # You can change this threshold based on your needs TOP_PERCENTAGE = 0.1 # Top 10% best matches # LIST OF DISTANCE VECTORS real_points = None distance_vectors = None euler_prediction = None def rotation_matrix(theta_x, theta_y, theta_z): Rx = np.array([[1, 0, 0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(theta_x)]]) Ry = np.array([[np.cos(theta_y), 0, -np.sin(theta_y)], # Note the negative sign for sin(theta_y) [0, 1, 0], [np.sin(theta_y), 0, np.cos(theta_y)]]) Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [0, 0, 1]]) R = np.dot(Rz, np.dot(Ry, Rx)) return R def euler_displacement(theta_x, theta_y, theta_z, point): return np.dot(rotation_matrix(theta_x, theta_y, theta_z), point) def distance_point(point): distance = math.sqrt(point[0] ** 2 + point[1] ** 2 + point[2] ** 2) return distance def average_vectors(vectors): if not vectors: return None # return None if the list is empty total_x = sum(vec[0] for vec in vectors) total_y = sum(vec[1] for vec in vectors) total_z = sum(vec[2] for vec in vectors) num_vectors = len(vectors) return [total_x / num_vectors, total_y / num_vectors, total_z / num_vectors] def average_list(list): return sum(list) / len(list) def vector_between_points(p1, p2): return [p2[i] - p1[i] for i in range(3)] """INTIATING BNO055 ROTATIONAL DATA""" import os import hid os.environ["BLINKA_MCP2221"] = "1" device = hid.device() device.open(0x04D8, 0x00DD) import board import adafruit_bno055 i2c = board.I2C() # uses board.SCL and board.SDA sensor = adafruit_bno055.BNO055_I2C(i2c) last_val = 0xFFFF """MAIN LOOP""" try: while True: """RGB AND DEPTH DATA PROCESSING""" # Create alignment align_to = rs.stream.color align = rs.align(align_to) # Get frameset of depth and color frames = pipeline.wait_for_frames() aligned_frames = align.process(frames) aligned_depth_frame = aligned_frames.get_depth_frame() depth_image = np.asanyarray(aligned_depth_frame.get_data()) depth_intrinsics = frames.profile.as_video_stream_profile().intrinsics color_frame = frames.get_color_frame() # Convert color frame to numpy array color_image = np.asanyarray(color_frame.get_data()) # Convert to grayscale for ORB gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY) # Detect ORB keypoints and descriptors kps, descs = orb.detectAndCompute(gray, None) depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET) depth_with_kps = cv2.drawKeypoints(depth_colormap, kps, None, color=(0, 255, 0), flags=0) cv2.imshow('Depth with Keypoints', depth_with_kps) # Match with previous frame's keypoints and descriptors, if available if prev_gray is not None: matches = bf.match(prev_descs, descs) if len(matches) > 0: # Sort the matches based on distance (lowest distance is better) matches = sorted(matches, key=lambda x: x.distance) # Filter matches based on a distance threshold good_matches = [m for m in matches 
if m.distance < MAX_MATCH_DISTANCE] """PERCENTAGE BASED FILTERING""" # 1. Percentage-based Filtering num_good_matches = int(len(matches) * TOP_PERCENTAGE) good_matches_percentage = matches[:num_good_matches] # Extract (x, y) coordinates of matched keypoints prev_matched_coords = [prev_kps[match.queryIdx].pt for match in good_matches_percentage] current_matched_coords = [kps[match.trainIdx].pt for match in good_matches_percentage] # Print matched coordinates (You can store or process them further based on your needs) print("Previous Frame Matched Coordinates:", prev_matched_coords) print("Current Frame Matched Coordinates:", current_matched_coords) print("Depth of current:", depth_image[int(current_matched_coords[0][1])][int(current_matched_coords[0][0])]) if len(good_matches) > 0: matched_image = cv2.drawMatches(prev_gray, prev_kps, gray, kps, good_matches_percentage, None) # or replace 'good_matches_percentage' with 'good_matches_ratio' cv2.imshow('Filtered Matched keypoints', matched_image) # Update the previous frame data prev_gray = gray prev_kps = kps prev_descs = descs # Exit on 'q' if cv2.waitKey(1) & 0xFF == ord('q'): break except KeyboardInterrupt: pass finally: pipeline.stop() cv2.destroyAllWindows()
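The loop above only prints the raw depth value at one matched pixel; to obtain the 3-D points that the unused real_points and distance_vectors variables hint at, librealsense can deproject a pixel plus its depth into camera coordinates. A sketch using the aligned_depth_frame and depth_intrinsics already computed in the loop; this helper is an addition, not part of the original script.

def deproject_matches(matched_coords, depth_frame, intrinsics):
    """Convert (x, y) pixel matches into 3-D camera-space points (metres)."""
    points = []
    for (px, py) in matched_coords:
        depth = depth_frame.get_distance(int(px), int(py))  # depth in metres at that pixel
        if depth > 0:
            point = rs.rs2_deproject_pixel_to_point(intrinsics, [float(px), float(py)], depth)
            points.append(point)  # [X, Y, Z] in camera coordinates
    return points

# e.g. inside the loop, after current_matched_coords is built:
# current_points = deproject_matches(current_matched_coords, aligned_depth_frame, depth_intrinsics)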
vpark915/The-GingerLens
LocalPythonIdeas/FundamentalScripts/ORBDepthPrimitive.py
ORBDepthPrimitive.py
py
5,665
python
en
code
1
github-code
50
2609538019
import matplotlib.pyplot as plt import scipy.optimize as optimize import scipy.sparse as sparse import scipy.sparse.linalg from math import ceil import numpy as np import sys def solve_one_time_step(u_0, mu_vec, temp_a=0, temp_b=0): print("h") def create_main_matrix(n_x_points, mu_vec): """ Matrix for theta method """ tri_diag = np.ones((3, n_x_points)) tri_diag[1] = -2 * tri_diag[1] for row in range(n_x_points): tri_diag[:, row] *= float(mu_vec[row]) a_matrix = sparse.spdiags(tri_diag, [-1, 0, 1], n_x_points, n_x_points) i_matrix = sparse.identity(n_x_points) return a_matrix, i_matrix u = u_0 bv = np.zeros_like(u_0) bv[0] = mu_vec[0] * temp_a bv[-1] = mu_vec[0] * temp_b D2, I = create_main_matrix(n_x_points=u_0.shape[0], mu_vec=mu_vec) lhs = (I - D2 / 2) rhs = (I + D2 / 2) * u + bv u = np.transpose(np.mat(sparse.linalg.spsolve(lhs, rhs))) return u def solve_heat_equation(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c, plot=False): """ This function approximates a solution to the generic heat equation u_0_func: function of x that returns the initial value. t_final: Latest time to simulate to [s] x_a: The lowest x-value of the domain [m] x_b: The highest x-value of the domain [m] temp_a: The temperature at x=a (Dirichlet BV) [deg C] temp_b: The temperature at x=b (Dirichlet BV) [deg C] n_x_points: The number of points required in the x-direction. c: The constant in the heat equation. """ mu = 1 # Arbitrarily chosen, pick a higher number to increase the time step. # This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme. dx = (x_b - x_a) / n_x_points dt = dx ** 2 * mu / c n_t_points = ceil(t_final / dt) x = np.linspace(x_a, x_b, n_x_points) t = np.arange(0, t_final, dt) u_0 = np.reshape(u_0_func(x), (100, 1)) data = [u_0] u = u_0 for t_i in range(n_t_points): u = solve_one_time_step(u_0=u, mu=mu, temp_a=temp_a - 1 + np.cos(t_i * dt), temp_b=temp_b - 1 + np.cos(t_i * dt)) data.append(u) if (t_i % 1000) == 0: print(".", end="") result = np.hstack(data) if plot: X, Y = np.meshgrid(x, t) fig = plt.figure() ax = plt.axes(projection='3d') # Creating plot ax.plot_surface(X, Y, result[:, :-1].T) ax.set_xlabel("X [m]") ax.set_ylabel("T [s]") plt.show() return result def initial_value(x): return -6 * np.sin(np.pi * (x - 0.5)) + 2 * (x - 0.5) def find_zeros(y_arr, a, b): """ Returns the x-values (assuming y_arr is on a linear interpolation mesh between a and b) where the y_arr mesh function changes sign. """ zeros_i = [] for i in range(len(y_arr) - 1): if y_arr[i] * y_arr[i + 1] < 0: # This means that there is a sign change. zeros_i.append(i) # We want to store the index # Let's now translate these indices into x values. dx = (b - a) / len(y_arr) zeros = [] for index in zeros_i: zeros.append((index + 0.5) * dx) # Adding half a step because the zero is between i and i+1. return zeros # def find_zeros_func(f: callable, a, b): # k = 1 # xs = np.linspace(a, b, 1000*k) # t_zero = f(xs) # sgn = np.sign(t_zero) # zbd = [] # # for i in range(0,len(sgn)-1): # if sgn[i] != sgn[i+1]: # zbd.append((xs[i]+xs[i+1])/2) # # while len(zbd) != 2 and k < 11: # k += 1 # xs = np.linspace(a, b, 1000 * k) # t_zero = f(xs) # sgn = np.sign(t_zero) # zbd = [] # for i in range(0, len(sgn) - 1): # if sgn[i] != sgn[i + 1]: # zbd.append((xs[i] + xs[i + 1]) / 2) # # if len(zbd) != 2: # sys.exit("The function u_0 might not be a suitable choice. 
The function u_0 must be continuous and have exactly two zeros in [x_a,x_b]") # h1 = zbd[0] # h2 = zbd[1] # h = [h1, h2] # # return h # def find_zeros_array(u, a, b, tol): # k = len(u) # xs = np.linspace(a, b, k) # sgn = np.sign(u) # zbd = [] # zbd_id = [] # h = [] # # for i in range(0,len(sgn)-1): # if sgn[i] != sgn[i+1]: # zbd.append(xs[i]) # zbd_id.append(i) # # if len(zbd) == 1: # if abs(u[zbd_id[0]]) < tol: # h.append(xs[zbd_id[0]]) # h.append(xs[zbd_id[0]]) # else: # h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2) # h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2) # elif len(zbd) == 2: # if abs(u[zbd_id[0]]) < tol: # h.append(xs[zbd_id[0]]) # else: # h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2) # if abs(u[zbd_id[1]]) < tol: # h.append(xs[zbd_id[1]]) # else: # h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2) # else: # h = [] # # return h def solve_model(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c1, c2, c3, tol, n_t_points, plot=False): """ u_0_func: function of x that returns the initial value. t_final: Latest time to simulate to [s] x_a: The lowest x-value of the domain [m], x_a = 0 x_b: The highest x-value of the domain [m] temp_a: The temperature at x=a (Dirichlet BV) [deg C] temp_b: The temperature at x=b (Dirichlet BV) [deg C] n_x_points: The number of points required in the x-direction. c1: The constant in the heat equation in the first part. tol: Tolerance for zero finding. """ # This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme. dx = (x_b - x_a) / n_x_points dt = t_final / n_t_points mu1 = c1 * dt / dx ** 2 mu2 = c2 * dt / dx ** 2 mu3 = c3 * dt / dx ** 2 x = np.linspace(x_a, x_b, n_x_points) t = np.arange(0, t_final, dt) u_0 = np.reshape(u_0_func(x), (100, 1)) data = [u_0] # bd1 = [] # bd2 = [] # u_0 = u_0_func() # h = find_zeros(u_0_func, x_a, x_b) # bd1.append(h[0]) # bd2.append(h[1]) h_1_arr = [] h_2_arr = [] h_data = find_zeros(u_0, a=x_a, b=x_b) print("Starting boundary points: ", h_data) h_1_arr.append(h_data[0]) h_2_arr.append(h_data[1]) u = u_0 for t_i in range(n_t_points): mu_vector = np.ones_like(u) mu_vector[[x < h_1_arr[-1]]] *= mu1 mu_vector[np.logical_and(h_1_arr[-1] <= x, x < h_2_arr[-1])] *= mu2 mu_vector[h_2_arr[-1] <= x] *= mu3 u = solve_one_time_step(u_0=u, mu_vec=mu_vector, temp_a=temp_a, temp_b=temp_b) h_data = find_zeros(u, a=x_a, b=x_b) if len(h_data) == 0: h_1_arr.append(h_data[0]) h_2_arr.append(h_data[1]) data.append(u) if (t_i % 1000) == 0: print(".", end="") result = np.hstack(data) if plot: X, Y = np.meshgrid(x, t) fig = plt.figure() ax = plt.axes(projection='3d') # Creating plot ax.plot_surface(X, Y, result[:, :-1].T) ax.set_xlabel("X [m]") ax.set_ylabel("T [s]") plt.show() solve_model(u_0_func=initial_value, t_final=50, x_a=0, x_b=2, temp_a=5, temp_b=9, n_x_points=100, c1=0.01, c2=0.04, c3=0.01, tol=10 ** (-10), n_t_points=500, plot=True) # solve_heat_equation(u_0_func=initial_value, # t_final=50, # x_a=-1, # x_b=2, # temp_a=-2, # temp_b=4, # n_x_points=100, # c=0.01, # plot=True)
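solve_one_time_step assembles (I - D2/2) on the left and (I + D2/2) on the right, i.e. the theta-method with theta = 1/2 (Crank-Nicolson). Written out, the scheme the sparse matrices encode is the following restatement of the code above, with D2 the tridiagonal second-difference matrix scaled row-wise by the mu_vec entries and b carrying the Dirichlet boundary temperatures:

\Bigl(I - \tfrac{1}{2}D_2\Bigr)\,u^{n+1} \;=\; \Bigl(I + \tfrac{1}{2}D_2\Bigr)\,u^{n} \;+\; b,
\qquad (D_2 u)_j = \mu_j\,\bigl(u_{j-1} - 2u_j + u_{j+1}\bigr),
\qquad \mu_j = \frac{c_j\,\Delta t}{\Delta x^{2}} .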
liorarueff/MathematicalIce
main.py
main.py
py
7,928
python
en
code
0
github-code
50
42680391223
import unittest from unittest.mock import patch from lotto.cities import Cities class TestCities(unittest.TestCase): def test_get_city_wrong_input(self): self.assertNotIn('vxvx', Cities.total_cities) self.assertNotIn(1, Cities.total_cities) with patch('builtins.input', return_value='Tom'): with self.assertRaises(ValueError): Cities.get_city_input() def test_get_city_correct_input(self): self.assertIn('bari', Cities.total_cities) with patch('builtins.input', return_value='BAri'): self.assertEqual(Cities.get_city_input(), 'bari') if __name__ == '__main__': unittest.main()
erydegio/lotto-game
test/test_cities.py
test_cities.py
py
693
python
en
code
0
github-code
50
35062260193
# This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. import bpy import string import pdb import time import json # import urllib.request import math import operator import ast try: import _pickle as pickle except: import pickle import os import base64 import zlib # from materials import * from bpy_extras.io_utils import ImportHelper from bpy.props import StringProperty dbg = False bl_info = { "name": "Minecraft motions import (*.mcmo)", "description": "This addon allows you to import minecraft worlds and mob motions", "author": "Aat Karelse", "version": (0, 4, 0), "blender": (2, 6, 3), #"api": ???, "location": "File > Import > minecraft stuff", "warning": "Alpha", "wiki_url": "https://github.com/aaps/MCmotions", # "tracker_url": "http://projects.blender.org/tracker/index.php?func=detail&aid=29552", "category": "Import-Export"} # This class initiates and starts the state machine and uses the gathered data # to construct the model in Blender. class DataImporter: def createMeshFromData(self, material, origin, verts, faces): # Create mesh and object mat = bpy.data.materials.new('TexMat') if material in self.materials: themat = self.materials else: themat = {material:{'name': 'Unknown - ' + str(material), 'color': (0, 0, 0), 'alpha':0, 'emittance':0 ,'textures':[]}} # print(themat[material]) if 'textures' in themat[material] and len(themat[material]['textures']) > 0: for texpath in themat[material]['textures']: mtex = mat.texture_slots.add() mtex.texture = self.textures[texpath] # print('ok' + ) me = bpy.data.meshes.new(themat[material]['name']+' Mesh') ob = bpy.data.objects.new(themat[material]['name'], me) ob.location = origin if len(themat[material]) >= 2: mat.diffuse_color = themat[material]['color'] if len(themat[material]) >= 3 and themat[material]['alpha'] != 0: mat.alpha = themat[material]['alpha'] mat.use_transparency = True mat.transparency_method = 'RAYTRACE' if len(themat[material]) >= 4 and themat[material]['emittance'] != 0: mat.emit = themat[material]['emittance'] ob.show_name = True ob.active_material = mat # Link object to scene and make active scn = bpy.context.scene scn.objects.link(ob) # Create mesh from given verts, faces. 
me.from_pydata(verts, [], faces) # Update mesh with new data me.update() return ob def run(self, filepath, context): start_time = time.time() handle = open(filepath, 'rb') total = pickle.loads(zlib.decompress(handle.read())) indexi = 0 vertices = total['vertices'] faces = total['faces'] entitys = total['allhistory'] origins = total['origins'] self.materials = total['materials'] self.textures = total['textures'] total = None extralist = {} self.tempdir = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'textures' try: os.makedirs(self.tempdir) except Exception: print('some dir error should be ok !') filelist = [ f for f in os.listdir(self.tempdir) ] for f in filelist: try: os.remove(f) except Exception: print('file removal trouble no biggy') if self.textures: for texture in self.textures: fileh = open(self.tempdir + os.sep + texture + ".png", "wb") fileh.write(base64.b64decode(self.textures[texture])) temp = {} for material in self.materials: if 'textures' in self.materials[material] and len(self.materials[material]['textures']) > 0: for texpath in self.materials[material]['textures']: img = bpy.data.images.load(self.tempdir + os.sep + texpath + '.png') cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE') cTex.image = img temp[texpath] = cTex self.textures = temp print(self.textures) for mat in vertices: if mat in vertices and mat in faces and mat in origins: self.createMeshFromData(mat, origins[mat], vertices[mat], faces[mat] ) faces[mat] = None vertices[mat] = None else: print(str(mat) + 'not in faces, vertices or origins !') for value in entitys: aentity = entitys[value] if len( aentity['positions']) > 0: firstloc = aentity['positions'][0]['pos'] firstloc = firstloc[0], firstloc[1]+2,firstloc[2] headloc = firstloc[0],firstloc[1]+1, firstloc[2] bpy.ops.mesh.primitive_cube_add(location=headloc) head = bpy.context.object head.rotation_mode = 'XYZ' head.scale = (0.25, 0.25, 0.25) bpy.ops.mesh.primitive_cube_add(location=firstloc) ob = bpy.context.object ob.rotation_mode = 'XYZ' ob.scale = (0.25, 0.75, 0.25) mat = bpy.data.materials.new("PKHG") mobtype = aentity['type'] if mobtype == '50': ob.name = "creeper" mat.diffuse_color = (0.0, 1.0, 0.0) elif mobtype == '51': ob.name = "skeleton" mat.diffuse_color = (1.0, 1.0, 1.0) elif mobtype == '52': ob.name = "spider" mat.diffuse_color = (0.2, 0.1, 0.1) elif mobtype == '54': ob.name = "zombol" mat.diffuse_color = (0.0, 0.3, 0.0) elif mobtype == '55': ob.name = "slime" mat.diffuse_color = (0.5, 1, 0.5) elif mobtype == '58': ob.name = "enderman" mat.diffuse_color = (0.5, 0.0, 0.5) elif mobtype == '90': ob.name = "pig" mat.diffuse_color = (0.5, 0.4, 0.4) elif mobtype == '65': ob.name = "bat" mat.diffuse_color = (1, 0.5, 0.2) elif mobtype == '91': ob.name = "sheep" mat.diffuse_color = (1, 1, 1) elif mobtype == '92': ob.name = "cow" mat.diffuse_color = (1, 0.2, 0.1) elif mobtype == '94': ob.name = "squid" mat.diffuse_color = (0.2, 0.2, 1) elif mobtype == '101': ob.name = "rabbit" mat.diffuse_color = (0.5, 0.1, 0.05) elif len(mobtype) > 10 or mobtype == 'player': if mobtype == 'player': ob.name = "player: RECORDER" mat.diffuse_color = (1, 0, 0) else: if 'type' in aentity: ob.name = "player: " + aentity['type'] else: ob.name = "player: unknown" mat.diffuse_color = (1, 0.6, 0.4) else: mat.diffuse_color = (0.0, 0.0, 0.0) ob.name = str(mobtype) ob.active_material = mat bpy.ops.object.select_all(action='DESELECT') ob.select = True head.select = True put_on_layers = lambda x: tuple((i in x) for i in range(20)) bpy.context.scene.objects.active = ob 
bpy.ops.object.parent_set() maincam = bpy.data.cameras.new("Camera") maincam.clip_start = 1 maincam.clip_end = 5000 cam_ob = bpy.data.objects.new("Camera", maincam) cam_ob.rotation_euler = (0, math.radians(180), 0) selfycam = bpy.data.cameras.new("Camera") selfycam.clip_start = 1 selfycam.clip_end = 5000 selfy_cam_ob = bpy.data.objects.new("Camera", selfycam) selfy_cam_ob.rotation_euler = (0, 0, 0) selfy_cam_ob.location = (0, 0, 25) selfy_cam_ob.layers[:] = put_on_layers({2}) cam_ob.layers[:] = put_on_layers({2}) ob.layers[:] = put_on_layers({2}) head.layers[:] = put_on_layers({2}) selfy_cam_ob.parent = head cam_ob.parent = head bpy.context.scene.objects.link(cam_ob) bpy.context.scene.objects.link(selfy_cam_ob) for posses in aentity['positions'][1:]: frame_num = int((posses['time'] / 20) * 25) bpy.context.scene.frame_set(frame_num) ob.location = (posses['pos'][0], posses['pos'][2], posses['pos'][1]+0.75) yaw = posses['yawpichhead'][1] head.rotation_euler = (math.radians(posses['yawpichhead'][1]), 0, 0) ob.rotation_euler = (math.radians(90), 0, math.radians(posses['yawpichhead'][0]) ) ob.hide = not bool(posses['alive']) ob.hide_render = not bool(posses['alive']) ob.keyframe_insert("hide") ob.keyframe_insert("hide_render") ob.keyframe_insert(data_path="location") ob.keyframe_insert(data_path="rotation_euler") if ob.animation_data: for fc in ob.animation_data.action.fcurves: fc.extrapolation = 'LINEAR' for kp in fc.keyframe_points: kp.interpolation = 'LINEAR' print("Script finished after {} seconds".format(time.time() - start_time)) return {'FINISHED'} # This is the import operator. class MineCraftImport(bpy.types.Operator, ImportHelper): '''Import form minecraft netrecorder some format (.mcmo)''' bl_idname = "minecraft.importminecraftdump" bl_label = "MineCraft EntityPaths" # mc ep filename_ext = ".mcmo" filter_glob = StringProperty( default="*.mcmo", options={'HIDDEN'} ) @classmethod def poll(cls, context): return True def execute(self, context): di = DataImporter() return di.run(self.filepath, context) def menu_func_import(self, context): self.layout.operator(MineCraftImport.bl_idname, text="Mcmo import (.mcmo)") def register(): bpy.utils.register_class(MineCraftImport) bpy.types.INFO_MT_file_import.append(menu_func_import) def unregister(): bpy.utils.unregister_class(MineCraftImport) bpy.types.INFO_MT_file_import.remove(menu_func_import) if __name__ == "__main__": register() bpy.ops.something.minecraft('INVOKE_DEFAULT')
aaps/MCmotions
minecraftimport.py
minecraftimport.py
py
12,057
python
en
code
8
github-code
50
1391672757
# N = input('enter N: ') # M = input('enter M: ') import timeit def draw_board(): global board for line in transpose(board): print(*line) def transpose(matr): res=[] n=len(matr) m=len(matr[0]) for j in range(m): tmp=[] for i in range(n): tmp=tmp+[matr[i][j]] res=res+[tmp] return res N, M = 10, 10 board = [['.' for _ in range(M)] for _ in range(N)] turn = True def fill(y1, y2, x): for y in range(y1 + 1, y2): global board, turn if turn: rock_color = 'W' else: rock_color = 'B' if board[x][y] != rock_color: board[x][y] = rock_color def check_column(rock_color, x_placed, y_placed): y1, y2 = None, None for y, rock in enumerate(board[x_placed]): if rock == rock_color: if y1 == None: y1 = y elif y2 == None: y2 = y fill(y1, y2, x_placed) break x1, x2 = None, None for y, rock in enumerate(board[y_placed]): if rock == rock_color: if x1 == None: x1 = y elif x2 == None: x2 = y fill(x1, x2, y_placed) break def chech_rock(x, y): global board, turn if turn: rock_color = 'W' else: rock_color = 'B' if board[x][y] == '.': board[x][y] = rock_color board = transpose(board) check_column(rock_color, x, y) board = transpose(board) check_column(rock_color, x, y) draw_board() turn = not turn else: print('error: field is taken') def input_xy(): return [int(number) - 1 for number in input('Enter: ').split(' ')] def count(): global board white_count = 0 black_count = 0 for column in board: for n in column: if n == 'W': white_count += 1 elif n == 'B': black_count += 1 print(f'white has {white_count - black_count} more rocks') def main(): x, y = input_xy() while x != -1 and y != -1: chech_rock(x, y) count() x, y = input_xy() main()
matbitilya/rocks
2.py
2.py
py
2,328
python
en
code
0
github-code
50
27653685459
import collections import os import sys import openpyxl import database from truckmate_email import TruckmateEmail REPORT_EMAILS = [ '[email protected]' ] class Rate(object): def __init__(self, tariff, customers, origin, destination, break_value, is_min, rate): self.tariff = tariff self.customers = customers self.origin = origin self.destination = destination self.break_value = break_value self.is_min = (is_min.strip() == 'True') self.rate = rate def __repr__(self): return 'Rate(tariff=%s, origin=%s, dest=%s, break=%s, rate=%s)' % ( self.tariff, self.origin, self.destination, self.rate_break, self.rate ) @property def three_digit_zip(self): if self.destination.isdigit(): if 600 <= int(self.destination[:3]) <= 606: return 'CHICOMM' else: return self.destination[:3] elif self.destination == 'CHICOMM': return 'CHICOMM' elif self.destination in ['497LP', '497UP']: return '497' else: return 'OTHER' @property def rate_break(self): if self.is_min: return 'MIN' else: rounded_break = round(self.break_value / 100.0) * 100.0 return rounded_break class RateReport(object): def __init__(self, file_name, datab): sql_file_path = os.path.join(sys.path[0], file_name) self.sql_query = self.load_query_from_file(sql_file_path) self.dataset = self.fetch_data_from_db(self.sql_query, datab) self.split_data = self.split_dataset(self.dataset) def load_query_from_file(self, file_path): with open(file_path, 'r') as sql_file: return sql_file.read() def fetch_data_from_db(self, query, db): with db as datab: with datab.connection.cursor() as cursor: cursor.execute(query) return cursor.fetchall() def split_dataset(self, dataset): split_data = collections.defaultdict( lambda: { 'breaks': set(), 'rates': collections.defaultdict(list) } ) for rate in dataset: for origin in self.get_origins(rate): rate_obj = Rate(rate.TARIFF, rate.CUSTOMERS, origin, rate.DESTINATION, rate.BREAK, rate.IS_MIN, rate.RATE) if rate_obj.rate_break not in split_data[rate_obj.three_digit_zip]['breaks']: if not rate_obj.is_min: split_data[rate_obj.three_digit_zip]['breaks'].add(rate_obj.rate_break) rate_tup = (rate_obj.tariff, rate_obj.customers, rate_obj.origin, rate_obj.destination) split_data[rate_obj.three_digit_zip]['rates'][rate_tup].append(rate_obj) return split_data def get_origins(self, rate): origins = [] if rate.ORIGIN_MS: for origin in rate.ORIGIN_MS.split(', '): origins.append(origin) if rate.ORIGIN: origins.append(rate.ORIGIN) return origins def export_as_xlsx(self): wb = openpyxl.Workbook() wb.remove_sheet(wb.active) for zone in sorted(self.split_data.keys()): ws = wb.create_sheet(zone) self._excel_insert_titles(ws, zone) self._excel_insert_data(ws, zone) virtual_wb = openpyxl.writer.excel.save_virtual_workbook(wb) return virtual_wb def _excel_insert_titles(self, worksheet, zone): titles = { 'A1': 'TARIFF', 'B1': 'CUSTOMER', 'C1': 'ORIGIN', 'D1': 'DESTINATION', 'E1': 'MIN' } row = 'F' for b in sorted(self.split_data[zone]['breaks']): cellname = row + str(1) titles[cellname] = b row = chr(ord(row) + 1) for cell, title in titles.items(): worksheet[cell] = title def _excel_insert_data(self, worksheet, zone): current_row = 2 for tariff_tup, rates in sorted(self.split_data[zone]['rates'].iteritems()): tariff, customers, origin, destination = tariff_tup worksheet.cell(row=current_row, column=1).value = tariff worksheet.cell(row=current_row, column=2).value = customers worksheet.cell(row=current_row, column=3).value = origin worksheet.cell(row=current_row, column=4).value = destination for rate in rates: current_column = 
self.find_column(worksheet, rate.rate_break) current_cell = worksheet.cell(row=current_row, column=current_column) current_cell.value = rate.rate current_cell.number_format = '#,##0.00' current_row += 1 def find_column(self, worksheet, header): for cell in worksheet[1]: if cell.value == header: return cell.col_idx else: raise ValueError('No header found for %s' % header) def main(): rate_report = RateReport('ratereport.sql', database.truckmate) email_message = TruckmateEmail( REPORT_EMAILS, subject='Rate Report', attachments=[ ('rate_report.xlsx', rate_report.export_as_xlsx()) ] ) email_message.send() if __name__ == '__main__': main()
jwaltzjr/truckmate
truckmate/ratereport.py
ratereport.py
py
5,480
python
en
code
2
github-code
50
86734481945
import pygame class Ui: def __init__(self, screen, player) -> None: self.screen = screen self.player = player self.font = pygame.font.SysFont('Arial', 18) self.big_font = pygame.font.SysFont('Arial', 32) def render(self, score): score_text = self.big_font.render(str(score), 1, (255, 255, 255)) hp_text = self.font.render(str(self.player.hp), 1, (255, 255, 255)) self.screen.blit(score_text, (self.screen.get_width() / 2 - score_text.get_width() / 2, score_text.get_height())) self.screen.blit(hp_text, (self.player.rect.x + self.player.width / 2 - hp_text.get_width() / 2, self.player.rect.y + self.player.height))
JustThomi/SpaceShooter
ui.py
ui.py
py
692
python
en
code
0
github-code
50
71994975835
c50=0 c20 = 0 c10 = 0 c1 = 0 print('Banco dos Crias') saque = int(input('Valor a ser sacado:')) while saque !=0: if saque - 50 >= 0: c50 += 1 saque = saque -50 else: break while saque !=0: if saque - 20 >= 0: c20 += 1 saque = saque -20 else: break while saque !=0: if saque - 10 >= 0: c10 += 1 saque = saque -10 else: break while saque !=0: if saque - 1 >= 0: c1 += 1 saque = saque -1 else: break print(f'{c50} Cedulas(a) de 50R$') print(f'{c20} Cedulas(a) de 20R$') print(f'{c10} Cedulas(a) de 10R$') print(f'{c1} Cedulas(a) de 1R$') ###if saque % 20 >= 0: ##print(f'{} Cedulas de 20R$')
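The four nearly identical while-loops above each peel one denomination off the remaining amount; the same withdrawal logic can be written once with divmod. An alternative sketch using the same denominations as the script:

def count_notes(amount, denominations=(50, 20, 10, 1)):
    """Return {denomination: number of notes} for a withdrawal amount."""
    notes = {}
    for value in denominations:
        notes[value], amount = divmod(amount, value)
    return notes

# Example: 186 -> {50: 3, 20: 1, 10: 1, 1: 6}
print(count_notes(186))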
ArthPx/learning-code
d 71.py
d 71.py
py
769
python
en
code
0
github-code
50
70896170076
import os import subprocess import tempfile from typing import Dict import requests from . import errors from snapcraft.file_utils import calculate_hash, get_tool_path from snapcraft.internal.cache import FileCache from snapcraft.internal.indicators import download_requests_stream class _Image: def __init__( self, *, base: str, snap_arch: str, url: str, checksum: str, algorithm: str ) -> None: self.base = base self.snap_arch = snap_arch self.url = url self.checksum = checksum self.algorithm = algorithm self._image_cache = FileCache(namespace="build-images-{}".format(self.base)) def _download_and_cache(self) -> str: request = requests.get(self.url, stream=True, allow_redirects=True) if not request.ok: raise errors.BuildImageRequestError( base=self.base, status_code=request.status_code ) # First create the prefix as tempfile.TemporaryDirectory does not do that for you os.makedirs(self._image_cache.file_cache, exist_ok=True) with tempfile.TemporaryDirectory( prefix=self._image_cache.file_cache ) as tmp_dir: download_file = os.path.join(tmp_dir, "{}-vm".format(self.base)) download_requests_stream(request, download_file) calculated_digest = calculate_hash(download_file, algorithm=self.algorithm) if self.checksum != calculated_digest: raise errors.BuildImageChecksumError( expected=self.checksum, calculated=calculated_digest, algorithm=self.algorithm, ) return self._image_cache.cache( filename=download_file, algorithm=self.algorithm, hash=self.checksum ) def get(self): cached_file = self._image_cache.get( hash=self.checksum, algorithm=self.algorithm ) if not cached_file: cached_file = self._download_and_cache() # TODO verify nothing is using this as a backing store before implementing. # image_cache.prune(keep_hash=image.checksum) return cached_file def _get_build_images(base: str) -> Dict[str, _Image]: if base == "core16": return dict( amd64=_Image( base="core16", snap_arch="amd64", url="https://cloud-images.ubuntu.com/releases/16.04/release-20180703/ubuntu-16.04-server-cloudimg-amd64-disk1.img", # noqa: E501 checksum="79549e87ddfc61b1cc8626a67ccc025cd7111d1af93ec28ea46ba6de70819f8c", # noqa: E501 algorithm="sha256", ) ) elif base == "core18": return dict( amd64=_Image( base="core18", snap_arch="amd64", url="https://cloud-images.ubuntu.com/releases/18.04/release-20180724/ubuntu-18.04-server-cloudimg-amd64.img", # noqa: E501 checksum="6d663a8fd5eddd916f4aef4fd06d0f7f4cf0bb191f170b8c84cd2adf297bc5c3", # noqa: E501 algorithm="sha256", ) ) else: raise KeyError(base) def setup(*, base: str, snap_arch: str, size: str, image_path: str) -> None: """Setup a build image for base and snap_arch of size at image_path. Example usage: >>> from snapcraft.internal.build_providers import _images >>> _images.setup(base="core18", snap_arch="amd64", size="10G", image_path="images/core18.qcow2") :param str base: the base of the build image to setup. :param str snap_arch: the architecture of the base for the build image. :param str size: the size of the disk for the build image. :param str image_path: the path to create the build image. :raises errors.BuildImageForBaseMissing: if there is no build image defined for the requested base or snap architecture. :raises errors.BuildImageRequestError: upon a network related issue that prevents download of the build image. :raises errors.BuildImageChecksumError: if the resulting downloaded build image does not match the expected checksum. :raises errors.BuildImageSetupError: if a build image cannot be created due to tooling or other system issues (e.g.; space issues). 
""" try: image = _get_build_images(base)[snap_arch] except KeyError as key_error: raise errors.BuildImageForBaseMissing( base=base, snap_arch=snap_arch ) from key_error cached_file = image.get() if os.path.dirname(image_path): os.makedirs(os.path.dirname(image_path), exist_ok=True) qemu_img_cmd = get_tool_path("qemu-img") # qemu-img parameters: # -q: quiet. # -f: first image format. # -b: backing file. try: subprocess.check_call( [ qemu_img_cmd, "create", "-q", "-f", "qcow2", "-b", cached_file, image_path, size, ] ) except subprocess.CalledProcessError as process_error: raise errors.BuildImageSetupError( exit_code=process_error.returncode ) from process_error
Tymbur/Archive_Encrypted.zip
snapcraft/data/usr/lib/python3/dist-packages/snapcraft/internal/build_providers/_images.py
_images.py
py
5,347
python
en
code
0
github-code
50
31526327859
#"D:\UCLA+USC\OPT\fetch\fetch_run.py" import json import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import ScalarFormatter import torch import torch.nn as nn import torch.nn.functional as F import streamlit as st import os from collections import defaultdict import torchvision.models as models start_date_2021 = pd.to_datetime("2021-01-01") # Start date for 2022 end_date_2022 = pd.to_datetime("2022-12-31") # End date for 2022 date_range_2021_2022 = pd.date_range(start_date_2021, end_date_2022, freq='D') x_new = pd.DataFrame({'# Date': date_range_2021_2022}) script_directory = os.path.dirname(os.path.abspath(__file__)) model_path = os.path.join(script_directory, 'fetch_LSTM_model.pth') seq_length=90 input_size = seq_length hidden_size = 64 num_layers = 2 output_size = seq_length fetch_data_path= os.path.join(script_directory, 'data_daily.csv') monthly_sums = defaultdict(float) days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] monthly_sum_2022 = {month: 0 for month in range(1, 13)} class LSTM(nn.Module): def __init__(self, input_size, hidden_size, output_size): super().__init__() self.lstm = nn.LSTM(input_size, hidden_size) self.linear = nn.Linear(hidden_size, output_size) def forward(self, input): x, _ = self.lstm(input) x = self.linear(x) return x def normalize_column(column): normalized = (column - column.min()) / (column.max() - column.min()) return torch.tensor(normalized.values, dtype=torch.float32) def revert(x,Y_min,Y_max): return Y_min+x*(Y_max-Y_min) def main(): #Load the trained LSTM model model = LSTM(input_size, hidden_size, output_size) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) #Load the original data raw = pd.read_csv(fetch_data_path) raw['# Date'] = pd.to_datetime(raw['# Date']) Y_min = raw['Receipt_Count'].values.astype(float).min() Y_max = raw['Receipt_Count'].values.astype(float).max() Y = raw['Receipt_Count'].values.astype(float) Y = normalize_column(raw['Receipt_Count']) Y = Y.reshape(-1, 1) Y_new = Y.detach().reshape(-1) #Use the loaded model to make predictions for 2022 for j in range(365): with torch.no_grad(): prediction = model(Y_new[-seq_length:].view(-1,seq_length)) prediction = torch.tensor(prediction[0,-1].item()).view(1) Y_new = torch.cat((Y_new, prediction)) output = revert(Y_new,Y_min,Y_max) output2 = output.detach().tolist() daily_number_of_receipts_2022 = output2[365:] start_date= 0 for i in monthly_sum_2022.keys(): monthly_sum = sum(daily_number_of_receipts_2022[start_date:(start_date +(days_in_month[i-1]))]) monthly_sum_2022[i] +=monthly_sum start_date += days_in_month[i-1] x_to_be_plotted = monthly_sum_2022.keys() Y_to_be_plotted = [monthly_sum_2022[key] for key in monthly_sum_2022.keys() ] #Visualization plt.figure(figsize=(10, 6)) #plt.plot(x_new['# Date'].tolist(), tensor_list, label='Predicted Number of Receipts per month', color='green', marker='o', linestyle='-') plt.plot(x_to_be_plotted, Y_to_be_plotted, label='Predicted Number of Receipts per month in 2022', color='green', marker='o', linestyle='-') plt.xlabel('Month for 2022') plt.ylabel('Number of Receipts') plt.title('Line Plot of Monthly Number of Receipts Over Time in 2022') plt.legend() plt.grid(True) y_formatter = ScalarFormatter(useMathText=True) y_formatter.set_scientific(False) # Disable scientific notation y_formatter.set_powerlimits((0, 0)) # Set the exponent range to (0, 0) plt.gca().yaxis.set_major_formatter(y_formatter) #plt.show() #Show the result using streamlit: 
st.title("LSTM model App for fetch analysis By Xiaoshu Luo") selected_month = st.number_input("Please select a month (1-12) in 2022", min_value=1, max_value=12, step=1, value=1) plt.scatter(selected_month, Y_to_be_plotted[selected_month - 1], color='red', marker='o', s=100, label='Selected Month') st.text(f"The month you selected is: {selected_month}") st.text(f"The predicted monthly number of receipts in 2022 is: {int(monthly_sum_2022[selected_month])}") st.pyplot(plt) if __name__ == "__main__": main()
tree2601/Fetch_LSTM_model
fetch_run.py
fetch_run.py
py
4,500
python
en
code
0
github-code
50
26211678718
import os import json import subprocess from transformers import AutoTokenizer, AutoModelForCausalLM from openai import OpenAI import requests import torch import tiktoken import argparse commit_schema = { "name": "git_commit", "description": 'Performs a git commit by calling `git commit -m "commit_message"`', "parameters": { "type": "object", "properties": { "commit_message": { "description": "A short but descriptive commit message", "type": "string" } }, "required": ["commit_message"] } } def generate_commit_message_mistral(diff): """Generate commit message using Mistral AI.""" tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") tokens = tokenizer.encode(diff) tokens = tokens[:7999] diff = tokenizer.decode(tokens) prompt = "You are given the output of a git diff. Your task is to create a descriptive commit message based on this diff, max 15 words\n\n" + diff data = { "system": "You generate commit messages from a git diff that is provided to you. It is your job to create a descriptive commit message based on this diff. Do not include the diff in your commit message. Only include the commit message. The most important thing is to ensure you are only describing the changes that are marked with + or - in the diff. Do not include any other changes in your commit message.", "model": "mistral", "prompt": "{prompt}".format(prompt=prompt), "stream": False, } response = requests.post("http://localhost:11434/api/generate", json=data) json_strings = response.text.strip().split('\n') responses = [json.loads(js)["response"] for js in json_strings] result = "".join(responses) return result def generate_commit_message_globe_server(diff): data = {"diff": diff} response = requests.post("http://globe.engineer/api/scommit-server", json=data) commit_message = response.text.strip() return commit_message def format_diff(diff): added = [] removed = [] lines = diff.split('\n') for line in lines: if line.startswith('+'): added.append(line) elif line.startswith('-'): removed.append(line) formatted_diff = 'ADDED:\n' + '\n'.join(added) + '\nREMOVED:\n' + '\n'.join(removed) return formatted_diff def generate_commit_message_gpt(diff): """Generate commit message using OpenAI's ChatGPT.""" client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo') if len(diff) == 0: return 'default commit message' tokens = tokenizer.encode(diff) tokens = tokens[:15900] diff = tokenizer.decode(tokens) prompt = "Can you commit this diff for me:\n\n" + diff response = client.chat.completions.create(messages=[ {'role': 'system', 'content': "You call the git commit function with short and informative commit messages"}, {'role': 'user', 'content': prompt}, ], functions=[commit_schema], function_call={'name': 'git_commit'}, model='gpt-3.5-turbo-16k', temperature=0.5) args = json.loads(response.choices[0].message.function_call.arguments) commit_message = args['commit_message'] return commit_message def scommit(): """Perform a git commit with a generated or provided message.""" parser = argparse.ArgumentParser() parser.add_argument('-m', type=str, help='Commit message') parser.add_argument('-mi', action='store_true', help='Using mistral') parser.add_argument('-globe-server', action='store_true', help='Using globe server') args, unknown = parser.parse_known_args() try: # Check if there are any commits subprocess.check_output(['git', 'rev-parse', '--verify', 'HEAD'], text=True).strip() commits_exist = True except subprocess.CalledProcessError: commits_exist = False if 
commits_exist and args.mi: diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip() formatted_diff = format_diff(diff) message = generate_commit_message_mistral(formatted_diff) message = message.replace('"', '\\"') elif commits_exist and args.globe_server: diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip() formatted_diff = format_diff(diff) message = generate_commit_message_globe_server(formatted_diff) message = message.replace('"', '\\"') elif args.m is None and commits_exist: diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip() formatted_diff = format_diff(diff) message = generate_commit_message_gpt(formatted_diff) else: message = args.m if args.m is not None else 'Initial commit' cmd = f'git commit {" ".join(unknown)} -m "{message}"' os.system(cmd) if __name__ == '__main__': scommit()
kpister/prompt-linter
data/scraping/repos/Globe-Engineer~semantic-commit/scommit~scommit.py
scommit~scommit.py
py
5,025
python
en
code
0
github-code
50
25249831556
import cv2 import pandas as pd import time # Can take a video file as input or video stream from the webcam cap = cv2.VideoCapture("C:/Users/harsh/Downloads/video (1080p).mp4") #cap = cv2.VideoCapture(0) index = ["color", "color_name", "hex", "R", "G", "B"] csv = pd.read_csv("C:/Users/harsh/Downloads/colors.csv", names=index, header=None) r = g = b = x_pos = y_pos = 0 # Function to get the Color name from the dataset for which the RGB value is the closest. def get_color_name(R, G, B): minimum = 10000 for i in range(len(csv)): d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"])) if d <= minimum: minimum = d cname = csv.loc[i, "color_name"] return cname # Function to get x,y coordinates of mouse double click which will also give the RGB values def draw_function(event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDBLCLK: global b, g, r, x_pos, y_pos, clicked clicked = True x_pos = x y_pos = y b, g, r = frame[y, x] b = int(b) g = int(g) r = int(r) # Outer Loop To keep the Video Stream on while True: ret, frame = cap.read() clicked = False cv2.namedWindow('Video') # draw_function will be called when the mouse event occured cv2.setMouseCallback('Video', draw_function) cv2.imshow('Video', frame) key = cv2.waitKey(1) # Inner Loop will be executed when key(p) is clicked which will pause the video stream to a single frame # This loop is used for the main task which is Color detection if cv2.waitKey(1) == ord("p"): while True: cv2.imshow('Video', frame) # Display the color name once draw function is called and clicked is true if clicked: # cv2.rectangle(image, start point, endpoint, color, thickness)-1 fills entire rectangle cv2.rectangle(frame, (20, 20), (750, 60), (b, g, r), -1) # Creating text string to display( Color name and RGB values ) text = get_color_name(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b) # cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType ) cv2.putText(frame, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA) # For very light colours we will display text in black colour if r + g + b >= 600: cv2.putText(frame, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA) clicked = False # Key to get out of the loops # Key(p) to resume the video stream and Key(esc) to get out of both the loops and end the execution key = cv2.waitKey(1) if key == ord("p"): break if key == 27: break if key == 27: break cap.release() cv2.destroyAllWindows()
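get_color_name above rescans the CSV row by row on every double-click; since the colours are already in a pandas DataFrame, the same nearest-colour (minimum Manhattan distance) lookup can be done in one vectorised pass. An optional sketch using the csv variable and column names from the script:

def get_color_name_vectorized(R, G, B):
    # Manhattan distance from the clicked pixel to every colour in the dataset.
    distances = (
        (csv["R"].astype(int) - R).abs()
        + (csv["G"].astype(int) - G).abs()
        + (csv["B"].astype(int) - B).abs()
    )
    return csv.loc[distances.idxmin(), "color_name"]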
Harshil-Agrawal/RealTime_Color_Detection
Color_detection.py
Color_detection.py
py
2,995
python
en
code
0
github-code
50
40559915413
import random # def rotto(): # num = [0, 0, 0, 0, 0, 0] # for i in range(0, 6): # num[i] = random.randint(1, 46) # for j in num: # if j == num[i]: # i -= 1 # return num # print(rotto()) lotto_number = [] def getRandomNumber(): number = random.randint(1, 45) return number while True: if len(lotto_number) == 6: break random_number = getRandomNumber() if random_number not in lotto_number: lotto_number.append(random_number) while True: bonus_number = getRandomNumber() if bonus_number not in lotto_number: break print(lotto_number, '+', bonus_number)
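The rejection loop above draws numbers one at a time and discards duplicates; random.sample performs the same draw without replacement in a single call. An equivalent sketch for comparison (six main numbers plus one distinct bonus):

def draw_lotto():
    numbers = random.sample(range(1, 46), 7)   # 7 distinct numbers from 1..45
    return sorted(numbers[:6]), numbers[6]

main_numbers, bonus = draw_lotto()
print(main_numbers, '+', bonus)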
Getver/StartCoding
00_BasicLecture/09_로또번호.py
09_로또번호.py
py
700
python
en
code
0
github-code
50
21652314287
import sklearn
from sklearn.linear_model import Perceptron
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = [
    'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
]
# iloc[row_start:row_end, col_start:col_end] excludes the end index, so +1 is needed
# if the end row/column should be included.
data = np.array(df.iloc[:100, [0, 1, -1]])
X, y = data[:, :-1], data[:, -1]
y = np.array([1 if i == 1 else -1 for i in y])

# Perceptron
clf = Perceptron(fit_intercept=True, max_iter=1000, shuffle=True)
clf.fit(X, y)
print(clf.coef_)
print(clf.intercept_)

plt.figure(figsize=(10, 10))
# Chinese plot title ("Iris linear data example"); SimHei is set so the CJK characters render.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('鸢尾花线性数据示例')
plt.scatter(data[:50, 0], data[:50, 1], c='b', label='Iris-setosa')
plt.scatter(data[50:100, 0], data[50:100, 1], c='orange', label='Iris-versicolor')

# Plot the perceptron's decision line.
x_points = np.arange(4, 8)
y_ = -(clf.coef_[0][0] * x_points + clf.intercept_) / clf.coef_[0][1]
plt.plot(x_points, y_)

# Remaining plot elements.
plt.legend()        # show the legend
plt.grid(False)     # hide the grid
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.show()
yishishizi/machinelearning
sk.py
sk.py
py
1,453
python
en
code
0
github-code
50
3132557366
import torch from torch import nn, reshape from torch import device as torch_device class Simple(nn.Module): """ Simple model use mlp to do denoise """ def __init__(self, samples, chunk_size, channels, device): super().__init__() self.chunk_size = chunk_size self.channels = channels self.linear = nn.Linear(self.chunk_size*self.channels, self.chunk_size*self.channels, bias=False, device = device) def forward(self, state, _input): """ Parameters ---------- x : Tensor Input tensor of shape (batch_size, samples, channels) state : Tensor Input tensor of shape (batch_size, hidden_dim, channels) Returns ------- Tensor State tensor of shape (batch_size, hidden_dim, channels) Tensor Output tensor of shape (batch_size, samples, channels) """ if len(_input.shape)==3: batch_size = _input.shape[0] else: batch_size = 1 shape_saved = _input.shape std = torch.std(_input) _input = _input/std _res = reshape(_input, (batch_size,-1)) _res = self.linear(_res) _res = reshape(_res, shape_saved) return None, (_input+_res)*std @property def is_recursive(self): return True
zhouxinyu0723/audio-denoise-addon-v2
ZENNet/model/simple.py
simple.py
py
1,156
python
en
code
1
github-code
50
7408358101
#import tool import sys inputfile_1=sys.argv[1] inputfile_2=sys.argv[2] #create dictionary def list2dict(s): d={} for i in s: if i in d.keys(): d[i]=d[i]+1 else: d[i]=1 return d #define a function to match key and value between 2 files def cmplist (s1,s2): d1=list2dict(s1) d2=list2dict(s2) d1_keys=set(d1.keys()) d2_keys=set(d2.keys()) intersect_keys=d1_keys.intersection(d2_keys) added={} for i in (d2_keys-d1_keys): added[i]=d2[i] removed={} for i in (d1_keys-d2_keys): removed[i]=d1[i] modified={} for i in intersect_keys: if d1[i]!=d2[i]: modified[i]=d1[i],d2[i] same={} for i in intersect_keys: if d1[i]==d2[i]: same[i]=d1[i] return added, removed, modified,same with open(inputfile_1,"r") as f1: result=f1.read().split('\n') print(result[0]) f1list=[] for line in result: f1list.append(line) with open(inputfile_2,"r") as f2: result2=f2.read().split('\n') f2list=[] for line in result2: f2list.append(line) added_out, removed_out, modified_out, same_out=cmplist(f2list,f1list) #print out the results on screen print("Number of observations in spec but not in output ----------") print(len(added_out)) print("Number of observations in both docs--------") print(len(same_out)) print("Number of observations in the output but not in spec---------") print(len(removed_out)) print("Number of observations were modified in the output---------") print(len(modified_out)) print("Below is the records in spec but not in the output-----") print(added_out) print("Below is the records in the output but not in spec------") print(removed_out)
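list2dict and the set arithmetic in cmplist re-implement a multiset comparison by hand; collections.Counter gives the same counts directly. An equivalent sketch producing the same added/removed/modified/same split as above:

from collections import Counter

def cmplist_counter(s1, s2):
    c1, c2 = Counter(s1), Counter(s2)
    added = {k: c2[k] for k in c2.keys() - c1.keys()}
    removed = {k: c1[k] for k in c1.keys() - c2.keys()}
    common = c1.keys() & c2.keys()
    modified = {k: (c1[k], c2[k]) for k in common if c1[k] != c2[k]}
    same = {k: c1[k] for k in common if c1[k] == c2[k]}
    return added, removed, modified, same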
Becky2012/Large-file-discrepancy-checks
check.py
check.py
py
1,755
python
en
code
0
github-code
50
16325613872
#!/usr/bin/env python
# coding: utf-8

# ECON 280A
#
# PS 1
#
# By Yi-Fan, Lin

# In[1]:
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
from sympy import symbols, Eq, solve, nsolve
import matplotlib.pyplot as plt

# In[2]:
df = pd.read_excel("/Users/ricky/Documents/椰林大學/Berkeley/International Econ/Data for PS 1.xls",
                   sheet_name="trade flows mfg", header=0, nrows=20, usecols="B:T")
df = df[1:]  # exports from i to j

# In[3]:
df.head(5)

# In[4]:
# name of countries
print(df.columns)

# In[149]:
theta = 5    # the median theta as in slides
nc = 19      # number of countries
t_cost = np.ones([nc, nc])   # change in trade costs
prod = np.ones(nc)           # change in productivities
labor = np.ones(nc)          # change in labors
total_prod = df.sum(axis=1)  # total production, Y_n
total_cons = df.sum(axis=0)  # total consumption
deficit = [total_cons.iloc[i] - total_prod.iloc[i] for i in range(nc)]
df_share = df.divide(total_cons.iloc[0], axis=0)  # pi_n
def_share = deficit / np.array(total_prod)

# In[190]:
def gen_denom(ncoun, w, theta, t_cost):
    # ncoun for the index of country
    denom = 0
    for k in range(nc):
        temp = df_share.iloc[ncoun, k] * (w[k] * t_cost[ncoun, k]) ** (-theta)
        denom = denom + temp
    return denom

def obj(w_vec, theta, t_cost):
    eq_vec = [0 for i in range(nc)]
    w_vec = w_vec
    for i in range(nc):
        rhs = 0
        for n in range(nc):
            denom = gen_denom(n, w_vec, theta, t_cost)
            rhs = rhs + df_share.iloc[n, i] * (w_vec[i] * t_cost[n, i]) ** (-theta) * (w_vec[n] * total_cons.iloc[n]) / denom
        eq_vec[i] = w_vec[i] * total_cons.iloc[i] - rhs
    return eq_vec

# In[151]:
def welfare(ncoun, w, theta, t_cost):
    c_share = (w[ncoun] * t_cost[ncoun, ncoun]) ** (-theta) / gen_denom(ncoun, w, theta, t_cost)
    return c_share ** (-1 / theta)

def output(theta, t_cost):
    wage = fsolve(obj, np.ones(nc), (theta, t_cost))
    real_wage = np.array([welfare(n, wage, theta, t_cost) for n in range(nc)])
    price = wage / real_wage
    return [wage, real_wage, price]

# In[191]:
base = output(theta, t_cost)

# In[192]:
for i in range(nc):
    print(def_share[i] * 100, (base[0])[i], (base[1])[i], (base[2])[i])

# In[154]:
t_cost_dec = t_cost * (1 / 1.3)
for i in range(nc):
    t_cost_dec[i, i] = 1  # except for own

# In[155]:
# tariff cut
tarcut = output(theta, t_cost_dec)

# In[173]:
for i in range(nc):
    print((df.columns)[i], def_share[i] * 100, tarcut[0][i], tarcut[1][i], tarcut[2][i])

# In[159]:
# US-Canada FTA
# Canada: 3
# US: 18
t_cost_FTA = t_cost
t_cost_FTA[3, 18] = t_cost_FTA[3, 18] * (1 / 1.3)
t_cost_FTA[18, 3] = t_cost_FTA[18, 3] * (1 / 1.3)

# In[160]:
tarFTA = output(theta, t_cost_FTA)

# In[174]:
for i in range(nc):
    print((df.columns)[i], def_share[i] * 100, tarFTA[0][i], tarFTA[1][i], tarFTA[2][i])

# In[162]:
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams['figure.dpi'] = 100

# In[179]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[0], c='blue', label='Relative')
ax.scatter(def_share, base[1], c='green', label='Real')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('Baseline (graph 1.)')
plt.show()

# In[180]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, tarcut[0], c='blue', label='Relative')
ax.scatter(def_share, tarcut[1], c='green', label='Real')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('Overall Tariff Cut (graph 2.)')
plt.show()

# In[181]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, tarFTA[0], c='blue', label='Relative')
ax.scatter(def_share, tarFTA[1], c='green', label='Real')
plt.text(def_share[3], tarFTA[0][3] + 0.01, 'CAN')
plt.text(def_share[18], tarFTA[0][18] + 0.01, 'USA')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('US-Canada FTA (graph 3.)')
plt.show()

# In[182]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[1], c='blue', label='Base')
ax.scatter(def_share, tarcut[1], c='green', label='Tariff cut')
ax.scatter(def_share, tarFTA[1], c='brown', label='FTA')
plt.text(def_share[3], tarFTA[1][3] + 0.01, 'CAN')
plt.text(def_share[18], tarFTA[1][18] + 0.01, 'USA')
ax.legend()
plt.ylabel('Change in Real Wage')
plt.xlabel('Trade deficit')
plt.title('Comparison (graph 4.)')
plt.show()

# In[183]:
table1 = pd.DataFrame({'Deficit': def_share * 100, 'Baseline': base[1], 'Tariff Cut': tarcut[1], 'FTA': tarFTA[1]})
table1.index = df.columns
print("Change in Real wage (Table 1)")
print(table1)

# In[184]:
table2 = pd.DataFrame({'Deficit': def_share * 100, 'Baseline': base[0], 'Tariff Cut': tarcut[0], 'FTA': tarFTA[0]})
table2.index = df.columns
print("Change in Relative wage (Table 2)")
print(table2)

# In[185]:
table3 = pd.DataFrame({'Deficit': def_share * 100, 'Baseline': base[2], 'Tariff Cut': tarcut[2], 'FTA': tarFTA[2]})
table3.index = df.columns
print("Change in Price index (Table 3)")
print(table3)

# In[193]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[2], c='blue', label='Base')
ax.scatter(def_share, tarcut[2], c='green', label='Tariff cut')
ax.scatter(def_share, tarFTA[2], c='brown', label='FTA')
plt.text(def_share[3], tarFTA[2][3] + 0.01, 'CAN')
plt.text(def_share[18], tarFTA[2][18] + 0.01, 'USA')
ax.legend()
plt.ylabel('Change in Price index')
plt.xlabel('Trade deficit')
plt.title('Comparison (graph 5.)')
plt.show()

# In[ ]:
Yifan3018/Armington-model-in-international-trade
PS1.py
PS1.py
py
5,541
python
en
code
1
github-code
50
15892916608
from collections import defaultdict


def solution(dirs):
    d = defaultdict(list)
    cur_x = 0
    cur_y = 0
    x = [0, 0, 1, -1]
    y = [1, -1, 0, 0]
    cnt = 0
    for e in dirs:
        to_x = cur_x
        to_y = cur_y
        if e == 'U':
            to_x += x[0]
            to_y += y[0]
        elif e == 'D':
            to_x += x[1]
            to_y += y[1]
        elif e == 'R':
            to_x += x[2]
            to_y += y[2]
        elif e == 'L':
            to_x += x[3]
            to_y += y[3]
        if -5 <= to_x <= 5 and -5 <= to_y <= 5:
            flag = False
            if d[(cur_x, cur_y)]:
                for pos in d[(cur_x, cur_y)]:
                    dx, dy = pos
                    if to_x == dx and to_y == dy:
                        flag = True
                        break
            if d[(to_x, to_y)]:
                for to_pos in d[(to_x, to_y)]:
                    to_dx, to_dy = to_pos
                    if cur_x == to_dx and cur_y == to_dy:
                        flag = True
                        break
            d[(cur_x, cur_y)].append((to_x, to_y))
            cur_x = to_x
            cur_y = to_y
            if flag is False:
                cnt += 1
    return cnt


print(solution("ULURRDLLU"))
print(solution("LULLLLLLU"))
print(solution("LLLLRRRRRRRRRRLLLLUUUUUUUUULLLLLLL"))
print(solution("ULURRDLLUL"))
print(solution("LLLLLLL"))
print(solution("LLLLLLLDRU"))
print(solution("LLLLLLLDRUD"))
print(solution("URULDD"))

# Explanation
# Solved by using a dictionary and checking two cases:
# whether the next coordinate was already visited from the current position,
# and whether the current position was already visited from the destination.
# In both cases the segment has already been traversed, so it is not a path walked for the first time.
hyunsoolee991/cs
algorithm/programmers/방문 길이.py
방문 길이.py
py
1,866
python
ko
code
0
github-code
50
71346745115
from os import path

from mediakit.utils.files import increment_filename_if_exists
from mediakit.utils.commands import run_command_in_background
from mediakit.constants import FFMPEG_BINARY

VIDEO_FORMATS = {"mp4"}


class ConversionOptions:
    NO_AUDIO = "-an"


def merge_video_and_audio(
    video_path, audio_path, output_file_path, output_format="mp4"
):
    final_output_file_path = increment_filename_if_exists(output_file_path)

    command = (
        f'{FFMPEG_BINARY} -i "{video_path}" -i "{audio_path}" '
        f"-vcodec copy -f {output_format} "
        f'"{final_output_file_path}"'
    )
    run_command_in_background(command)


def convert_media(file_path, output_file_path, output_format, options=[]):
    final_output_file_path = increment_filename_if_exists(output_file_path)

    command = (
        f'{FFMPEG_BINARY} -i "{file_path}" '
        + ("-vcodec copy " if output_format in VIDEO_FORMATS else "")
        + f"-f {output_format} "
        + f'"{final_output_file_path}" '
        + " ".join(options)
    )
    run_command_in_background(command)
diego-aquino/mediakit
mediakit/media/convert.py
convert.py
py
1,080
python
en
code
11
github-code
50
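A minimal usage sketch for the mediakit conversion helpers in convert.py above. The file paths are made up for illustration, and the calls assume ffmpeg is reachable through FFMPEG_BINARY; both functions only spawn a background command, so they return before the output file exists.

# Hypothetical usage of mediakit.media.convert (paths are illustrative only).
from mediakit.media.convert import merge_video_and_audio, convert_media, ConversionOptions

# Mux a video-only stream with a separate audio stream into an .mp4 container.
merge_video_and_audio("video_only.mp4", "audio_only.m4a", "merged.mp4")

# Re-containerize to mp4; ConversionOptions.NO_AUDIO appends "-an" to drop the audio track.
convert_media("clip.webm", "clip.mp4", "mp4", options=[ConversionOptions.NO_AUDIO])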
36424255650
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html#sphx-glr-auto-examples-model-selection-plot-grid-search-digits-py
from __future__ import print_function

from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier

import data_service

print(__doc__)

scale_data = True
transform_data = True
random_slice = 10000
random_seed = 777
dataset = "kdd"
test_size = 0.5

x_train, x_test, y_train, y_test = data_service.load_and_split_data(
    scale_data=scale_data, transform_data=transform_data, random_slice=random_slice,
    random_seed=random_seed, dataset=dataset, test_size=test_size)

# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.0001],
                     'C': [1, 10, 100, 1000], 'class_weight': ['balanced', None]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000],
                     'class_weight': ['balanced', None]}]

# neural_network.MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, solver=solver,
#     activation=activation, alpha=alpha, random_state=1, max_iter=max_iter,
#     learning_rate=learning_rate, learning_rate_init=learning_rate_init)
tuned_parameters = [{
    'solver': ['lbfgs'],
    'learning_rate_init': [0.0001, 0.01, 1],
    'hidden_layer_sizes': [(100,)],
    'activation': ['relu'],
    'alpha': [0.0001, 0.01, 1]
}, {
    'solver': ['sgd'],
    'learning_rate': ['constant', 'invscaling', 'adaptive'],
    'learning_rate_init': [0.0001, 0.01, 1],
    'hidden_layer_sizes': [(100,)],
    'activation': ['relu'],
    'alpha': [0.0001, 0.01, 1]
}]

estimator = SVC()
estimator = MLPClassifier()

scores = ['precision_macro', 'recall_macro', 'accuracy']

for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()

    clf = GridSearchCV(estimator, tuned_parameters, cv=5, scoring=score, n_jobs=-1)
    clf.fit(x_train, y_train)

    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
    print()

    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_pred = clf.predict(x_test)
    print(classification_report(y_test, y_pred))
    print()
boyko11/ML1-SupervisedLearning
grid_search.py
grid_search.py
py
3,243
python
en
code
0
github-code
50
36046350963
import os


class CustomValidator:

    @staticmethod
    def path_validate(path: str) -> str:
        """
        try:
            path = validate_path(" my /path /with spaces ")
            print(f"The path {path} is valid.")
        except FileNotFoundError as e:
            print(e)

        :param path:
        :return:
        """
        # Remove spaces from the path
        path = path.replace(" ", "")

        # Check if the path exists
        if not os.path.exists(path):
            raise FileNotFoundError(f"The path {path} does not exist.")

        return path
jerome-neo/Command-line-Data-Processor
validator/custom_validator.py
custom_validator.py
py
579
python
en
code
0
github-code
50
3912202681
from rk_diagram.models import RKPipeline, LocalizationAlgorithm, TransformNode
from rk_diagram.visualize import RKModelVisualizer
from rk_diagram.models.graph import EdgeType, Edge

import numpy as np


class HierarchicalFeatureExtractor1():
    '''
    Generates a hierarchical feature extractor.
    TODO: Think about 2+ levels
    '''
    def __init__(self):
        self.children = {}

    def predict(self, X):
        self.children['range_measure'] = self.range_measure(X)
        self.children['max_measure'] = self.max_measure(X)

    def range_measure(self, X):
        # computes the range as a feature
        return np.max(X) - np.min(X)

    def max_measure(self, X):
        # computes the max as a measure
        return np.max(X)


class SimpleLinkage():
    '''
    Simple linkage: a simple linkage function.
    Compares the values of two nodes and draws a link if the euclidean
    distance is less than the threshold. Sends back a list of edges.
    '''
    def __init__(self, threshold):
        self.threshold = threshold

    def link(self, nodes):
        edges = []
        for i in range(len(nodes)):
            l = i + 1
            while l < len(nodes):
                if np.linalg.norm(nodes[i].value - nodes[l].value) < self.threshold:
                    l_larger = nodes[i].value > nodes[l].value
                    fid = nodes[l].from_id if l_larger else nodes[i].from_id
                    tid = nodes[i].from_id if l_larger else nodes[l].from_id
                    etype = EdgeType.DIRECTED
                    if nodes[i].value == nodes[l].value:
                        etype = EdgeType.UNDIRECTED
                    edges.append(Edge(from_id=fid, to_id=tid, type=etype))
                l += 1
        return edges


class MaxLocalizer(LocalizationAlgorithm):
    '''
    Localizes the max position.
    '''
    def localize(self, X):
        return np.argmax(X)  # returns the max position of X


class MinMaxNormalizerNode(TransformNode):
    '''
    Min-max normalizer: takes the max and min as a transform node
    and will normalize the data.
    '''
    def __init__(self):
        self._fitted = False

    def fit(self, X):
        self._fitted = True
        self.min = np.min(X)
        self.max = np.max(X)

    def transform(self, X):
        return (X - self.min) / (self.max - self.min)


class StaticFilter():
    '''
    This static filter takes simple boundary conditions, a min and max,
    and provides a filter function over it.
    '''
    def __init__(self, min=None, max=None):
        self._min = min
        self._max = max

    def filter(self, val):
        if self._min is not None and val < self._min:
            return False
        if self._max is not None and val > self._max:
            return False
        return True


def main(X):
    rk_models = []
    example_pipeline = RKPipeline(
        preprocess_nodes=[MinMaxNormalizerNode()],
        localization_algorithm=MaxLocalizer(),
        hierarchical_embedding_nodes=[
            {
                "HFeatureExtractor1": HierarchicalFeatureExtractor1()
            }
        ],
        filter_functions=[
            {
                "HFeatureExtractor1": {
                    'range_measure': StaticFilter(min=.2, max=.8),
                    'max_measure': StaticFilter(min=0, max=1)
                }
            }
        ],  # question: how to define which limits for which measure. Each filter and linkage has to be BY CLUSTER
        linkage_function=SimpleLinkage(threshold=.8))
    example_pipeline.build()
    example_pipeline.fit(X)
    rk_model = example_pipeline.transform(X)
    rk_models.append(rk_model)

    visualizer = RKModelVisualizer(method="circular")
    visualizer.build(rk_models)  # build requires a list of rk_models
    visualizer.show()


def parse_arguments():
    X = [1, 2, 3, 4]
    main(X)
andorsk/rk_toolkit
example/example.py
example.py
py
4,225
python
en
code
2
github-code
50
637859362
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.wait import WebDriverWait
from crud import crud
import sys

sys.path.insert(1, './model')
from algoritmo import algoritmoModel


class scraping:
    __obj_crud = None
    __obj_model = None

    def __init__(self):
        self.__obj_crud = crud()
        self.__obj_model = algoritmoModel()

    def switch(self, case, campeonato):
        if case == 'times':
            if campeonato == 'premier':
                return self.timesPremier()
            else:
                print("invalid championship")
        elif case == 'rodadas':
            if campeonato == 'premier':
                return self.rodadasPremier()
            else:
                print("invalid championship")
        else:
            print("invalid method")

    def timesPremier(self):
        servico = Service(ChromeDriverManager().install())
        url = "https://www.sofascore.com/tournament/football/england/premier-league/17#52186"
        navegador = webdriver.Chrome(service=servico)
        WebDriverWait(navegador, timeout=10)
        navegador.get(url)

        click = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[3]')
        click.click()

        # wait before triggering the next action
        navegador.implicitly_wait(20)

        dados = navegador.find_elements(By.CLASS_NAME, 'uTWnT')
        if dados:
            time = []
            temporada = '2023/2024'
            for dado in dados:
                if dado.text and dado.text != '0':
                    time.append(dado.text)
        else:
            print("No data")

        for t1 in time:
            print(t1)
            # self.__obj_crud.inserir(t1, temporada)

    def rodadasPremier(self):
        servico = Service(ChromeDriverManager().install())
        url = "https://www.sofascore.com/tournament/football/england/premier-league/17#52186"
        navegador = webdriver.Chrome(service=servico)
        WebDriverWait(navegador, timeout=10)
        navegador.get(url)

        click = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[2]')
        click.click()

        # wait before triggering the next action
        navegador.implicitly_wait(20)

        click2 = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[2]/div/div/div[2]/div[2]')
        click2.click()

        # wait before triggering the next action
        navegador.implicitly_wait(20)

        click3 = navegador.find_element(By.XPATH, '//*[@id="downshift-11-toggle-button"]')
        click3.click()

        # wait before triggering the next action
        navegador.implicitly_wait(20)

        click4 = navegador.find_element(By.XPATH, '//*[@id="downshift-11-item-0"]')
        click4.click()

        lista = {}
        dados = navegador.find_elements(By.CLASS_NAME, 'bwUmPO')
        contador = 0
        for dado in dados:
            contador += 1
            div = contador % 2
            if div == 1:
                lista[contador] = {'rodada': '1'}
                lista[contador]['campeonato'] = '1'
                time = self.__obj_model.getTimes(dado.text)
                if time:
                    lista[contador]['mandante'] = time[0]
                else:
                    lista[contador]['mandante'] = '?'
            else:
                cont = contador - 1
                time = self.__obj_model.getTimes(dado.text)
                if time:
                    lista[cont]['visitante'] = time[0]
                else:
                    lista[cont]['visitante'] = '?'

        self.__obj_crud.setTable('rodadas')
        self.__obj_model.setTable('rodadas')
        if len(lista) > 0:
            for list in lista:
                self.__obj_crud.newInsert(lista[list])


scrap = scraping()
scrap.switch('rodadas', 'premier')
CaioFreitas96/scraping
scraping.py
scraping.py
py
4,411
python
pt
code
0
github-code
50
29053233199
x = 10
y = 2
print(x // y)

x = 10
y = 3
print(x // y)

x = 10
y = 8.5
print(x // y)

# algorithm
# Sequence: 10, 1, 8, 3, 6, 5, 4, 7, x, y
# Find the general solution of x and y
# x -> 2, y -> 9
# Step1: Start
# Step2: Initialise a variable named n
# Step3: x = n + 1
# Step4: a = x + 2
# Step5: b = x - 2
# Step6: if x % 2 = 0, then x + a
# Step7: if x % 2 != 0, then x + b
# Step8: Stop
RiyaBaid/Python
floordivision.py
floordivision.py
py
346
python
en
code
0
github-code
50
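The comment block at the end of floordivision.py above sketches, in prose only, how to continue the sequence 10, 1, 8, 3, 6, 5, 4, 7, x, y. A small hedged sketch of one way to generate it, assuming the pattern is an interleaving of a decreasing even run (10, 8, 6, 4, 2) with an increasing odd run (1, 3, 5, 7, 9); the function name is made up for illustration.

def interleaved_sequence(n_terms=10, start_even=10, start_odd=1):
    """Interleave a decreasing even run with an increasing odd run.

    Assumed pattern: 10, 1, 8, 3, 6, 5, 4, 7, 2, 9 -> so x = 2 and y = 9.
    """
    seq = []
    even, odd = start_even, start_odd
    for i in range(n_terms):
        if i % 2 == 0:        # even positions take the decreasing even run
            seq.append(even)
            even -= 2
        else:                 # odd positions take the increasing odd run
            seq.append(odd)
            odd += 2
    return seq


print(interleaved_sequence())  # [10, 1, 8, 3, 6, 5, 4, 7, 2, 9]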
35880060544
# The column names and positions differ between the historical data and the station data!
# This dictionary helps us fetch a given field from both tables.
# lista[0] -> Columns as they appear in the historical data.
# lista[1] -> Columns as they appear in the station data (website).
d_dic = {
    "Data": ['DATA (YYYY-MM-DD)', 'DT_MEDICAO'],
    "Hora": ['HORA (UTC)', 'HR_MEDICAO'],
    "Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
    "Radiacao": ['RADIACAO GLOBAL (KJ/m²)', 'RAD_GLO'],
    "Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
    "Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}

d_dic_2019 = {
    "Data": ['Data', 'DT_MEDICAO'],
    "Hora": ['Hora UTC', 'HR_MEDICAO'],
    "Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
    "Radiacao": ['RADIACAO GLOBAL (KJ/m²)', 'RAD_GLO'],
    "Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
    "Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}

# Someone over there had the brilliant idea of changing the column names and the data
# formatting starting in 2019.
d_dic_2020_greater = {
    "Data": ['Data', 'DT_MEDICAO'],
    "Hora": ['Hora UTC', 'HR_MEDICAO'],
    "Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
    "Radiacao": ['RADIACAO GLOBAL (Kj/m²)', 'RAD_GLO'],
    "Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
    "Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}

# For downloads made via web scraping on the INMET website.
d_dic_inmet = {
    "Data": ['Data', 'DT_MEDICAO'],
    "Hora": ['Hora (UTC)', 'HR_MEDICAO'],
    "Pressao": ['Pressao Ins. (hPa)', 'PRE_INS'],
    "Radiacao": ['Radiacao (KJ/m²)', 'RAD_GLO'],
    "Temperatura": ['Temp. Ins. (C)', 'TEM_INS'],
    "Umidade": ['Umi. Ins. (%)', 'UMD_INS']
}


class ID:
    MENU_SCROLL = 1
    LISTBOX = 2
    POPUP_CONCAT = 2002
    POPUP_UPDATE = 2003
    POPUP_CLEAN = 2004
    POPUP_DELETE = 2005
    POPUP_SAVE = 2006
NeoFahrenheit/inmet-scraper
id.py
id.py
py
2,115
python
pt
code
0
github-code
50
39518772432
import random

import requests
from flask import Flask, request

from conf import (
    get_healthy_server,
    healthcheck,
    load_configuration,
    process_firewall_rules_flag,
    process_rules,
    process_rewrite_rules,
    transform_backends_from_config,
)

loadbalancer = Flask(__name__)

MAIL_BACKENDS = ['localhost:8081', 'localhost:8082']
YANDEX_BACKENDS = ['localhost:9081', 'localhost:9082']

config = load_configuration('balancer.yaml')
register = transform_backends_from_config(config)


@loadbalancer.route('/')
def router():
    host_header = request.headers['Host']
    if host_header == 'www.mail.ru':
        response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')
        return response.content, response.status_code
    elif host_header == 'www.yandex.ru':
        response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')
        return response.content, response.status_code
    else:
        return 'Not Found', 404


@loadbalancer.route('/mail')
def mmail_path():
    response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')
    return response.content, response.status_code


@loadbalancer.route('/yandex')
def yandex_path():
    response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')
    return response.content, response.status_code
leader8901/testServer
balancer.py
balancer.py
py
1,317
python
en
code
0
github-code
50
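A quick sketch of how the Host-header routing in balancer.py above could be exercised from a client. The port and the availability of the backends are assumptions; requests simply overrides the Host header on a localhost call.

# Hypothetical client-side check of the load balancer's Host-header routing.
# Assumes the Flask app above is running on localhost:5000 and that the
# MAIL_BACKENDS / YANDEX_BACKENDS servers are up.
import requests

resp = requests.get("http://localhost:5000/", headers={"Host": "www.mail.ru"})
print(resp.status_code)   # proxied response from one of the MAIL_BACKENDS

resp = requests.get("http://localhost:5000/", headers={"Host": "www.other.ru"})
print(resp.status_code)   # 404: an unknown Host header falls through to 'Not Found'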
1282240873
import os
import numpy as np
import random

from gym.envs.mujoco.pusher import PusherEnv

from evaluation.eval import Eval
from data import utils

XML_FOLDER = "/media/stephen/c6c2821e-ed17-493a-b35b-4b66f0b21ee7/MIL/gym/gym/envs/mujoco/assets"


class EvalMilPush(Eval):

    def _load_env(self, xml):
        xml = xml[xml.rfind('pusher'):]
        xml_file = 'sim_push_xmls/test2_ensure_woodtable_distractor_%s' % xml
        xml_file = os.path.join(XML_FOLDER, xml_file)
        env = PusherEnv(**{'xml_file': xml_file, 'distractors': True})
        env.set_visibility(self.render)
        env.render()
        viewer = env.viewer
        viewer.autoscale()
        viewer.cam.trackbodyid = -1
        viewer.cam.lookat[0] = 0.4
        viewer.cam.lookat[1] = -0.1
        viewer.cam.lookat[2] = 0.0
        viewer.cam.distance = 0.75
        viewer.cam.elevation = -50
        viewer.cam.azimuth = -90
        return env

    def _eval_success(self, obs):
        obs = np.array(obs)
        target = obs[:, -3:-1]
        obj = obs[:, -6:-4]
        dists = np.sum((target - obj) ** 2, 1)  # distances at each time step
        return np.sum(dists < 0.017) >= 10

    def evaluate(self, iter):
        print("Evaluating at iteration: %i" % iter)

        iter_dir = os.path.join(self.record_gifs_dir, 'iter_%i' % iter)
        utils.create_dir(iter_dir)

        successes = []
        for i in range(self.num_tasks):
            # demo_selection will be an xml file
            env = self._load_env(self.demos[i][0]['demo_selection'])

            selected_demo_indexs = random.sample(
                range(len(self.demos[i])), self.supports)

            embedding = self.get_embedding(i, selected_demo_indexs)
            gifs_dir = self.create_gif_dir(iter_dir, i)

            for j in range(self.num_trials):
                env.reset()
                observations = []
                world_state = []
                for t in range(self.time_horizon):
                    env.render()
                    # Observation is shape (100,100,3)
                    obs, state = env.get_current_image_obs()
                    observations.append(obs)
                    obs = ((obs / 255.0) * 2.) - 1.

                    action = self.get_action(obs, state, embedding)
                    ob, reward, done, reward_dict = env.step(np.squeeze(action))
                    world_state.append(np.squeeze(ob))
                    if done:
                        break

                if self._eval_success(world_state):
                    successes.append(1.)
                else:
                    successes.append(0.)
                self.save_gifs(observations, gifs_dir, j)

            env.render(close=True)

        final_suc = np.mean(successes)
        print("Final success rate is %.5f" % (final_suc))
        return final_suc
stepjam/TecNets
evaluation/eval_mil_push.py
eval_mil_push.py
py
2,852
python
en
code
40
github-code
50
17566333407
n = int(input())
k = n
l = (n * (n + 1)) // 2
num = 0
if l % 2 == 0:
    l = l // 2
    ls = [i for i in range(1, n + 1)]
    ls1 = []
    while num != l:
        if (l - num) < n:
            ls1.append(l - num)
            break
        else:
            num += n
            n -= 1
            ls1.append(n + 1)
    print("YES")
    print(len(ls1))
    print(*ls1)
    print(k - len(ls1))
    s2 = set(ls) - set(ls1)
    ls2 = list(s2)
    ls2.sort(reverse=True)
    print(*ls2)
else:
    print("NO")
SaranSaiChava/Problem_Solving
CSES/twosets.py
twosets.py
py
382
python
en
code
0
github-code
50
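For twosets.py above (the CSES "Two Sets" problem), a split of 1..n into two equal-sum sets can exist only when the total 1 + 2 + ... + n = n(n+1)/2 is even, which is exactly the parity check in the code. A small hedged verification sketch, using a hypothetical helper name, that reruns the same greedy idea (take the largest remaining number while it fits) and asserts both halves have equal sums:

def split_two_sets(n):
    """Greedy split of 1..n into two equal-sum sets, or None if impossible."""
    total = n * (n + 1) // 2
    if total % 2:
        return None
    target, first = total // 2, []
    for v in range(n, 0, -1):       # take the largest values first
        if target >= v:
            first.append(v)
            target -= v
        if target == 0:
            break
    second = [v for v in range(1, n + 1) if v not in set(first)]
    return first, second


for n in (3, 4, 6, 7):
    res = split_two_sets(n)
    if res is None:
        print(n, "NO")
    else:
        a, b = res
        assert sum(a) == sum(b)
        print(n, "YES", a, b)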
13917300027
import pickle import os import sys import ast from header import Driver import struct import subprocess import re from pprint import pprint import pandas as pd from collections import defaultdict # set working directory WD = os.path.dirname(os.path.abspath(__file__)) os.chdir(WD) d_p = "../../AutoRNP/experiments/testing_results/DEMC/" def convert(filename): xsl_file = pd.read_excel(d_p+filename, usecols="C") l_val = xsl_file['input'].tolist() uni_val = set() for val in l_val: uni_val.add(round(val, 1)) return list(uni_val) def autoRNP(): directory = os.fsencode(d_p) all_functions = defaultdict() for file in os.listdir(directory): filename = os.fsdecode(file) if filename.endswith(".xls"): all_functions[filename[:-4]+'_DRIVER'] = convert(filename) else: continue return all_functions def gslGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS): all_functions = autoRNP() with open("spFunDrivers/" + libraryName + "_drivers.c", 'w') as f: f.write("#include <gsl/gsl_sf.h>\n") f.write("#include <gsl/gsl_errno.h>\n") f.write("#include <stdio.h>\n\n") f.write("void my_handler(const char * reason, const char * file, int line, int gsl_errno)\n") f.write("{\n") f.write("\tfprintf(stderr, \"%s\\n\", reason);\n") f.write("}\n\n") for funcName, args in signatures.items(): driverName = "{}_DRIVER".format(funcName) thisDriver = Driver(driverName, funcName, libraryName, "c", len(args[0]), funcName) thisDriver.add_line("double {}(double * doubleInput, int * intInput)\n".format(driverName)) thisDriver.add_line("{\n") thisDriver.add_line("\tdouble out;\n") # tally up number of ints and doubles numberOfInts = 0 numberOfDoubles = 0 for i in range(len(args[0])): if "int" in args[0][i]: numberOfInts += 1 elif "double" in args[0][i]: numberOfDoubles += 1 thisDriver.set_numberOfDoubles(numberOfDoubles) thisDriver.set_numberOfInts(numberOfInts) # for each extracted function, save all of its test arguments for test migration k = 1 for j in range(1, len(args)): ints = [] doubles = [] for i in range(len(args[0])): if "int" in args[0][i]: ints.append(int(args[j][i])) elif "double" in args[0][i]: doubles.append(float(args[j][i])) TEST_INPUTS["{}~input_num{:0>3}".format(driverName, j-1)] = (doubles, ints) k += 1 if driverName in all_functions.keys(): for m in range(k, len(all_functions[driverName])): TEST_INPUTS["{}~input_num{:0>3}".format(driverName, m-1)] = ([all_functions[driverName][m-k]], []) thisDriver.add_line("\tgsl_error_handler_t * old_handler = gsl_set_error_handler (&my_handler);\n\n") thisDriver.add_line("\tout = {}(".format(funcName)) for i in range(numberOfInts): thisDriver.add_line('intInput[{}]'.format(i)) if i + 1 != numberOfInts or numberOfDoubles != 0: thisDriver.add_line(", ") for i in range(numberOfDoubles): thisDriver.add_line('doubleInput[{}]'.format(i)) if i + 1 != numberOfDoubles: thisDriver.add_line(", ") if numberOfDoubles + numberOfInts < len(args[0]): thisDriver.add_line(", GSL_PREC_DOUBLE") thisDriver.add_line(');\n\n') #thisDriver.add_line("\tgsl_set_error_handler (old_handler);\n\n") thisDriver.add_line("\treturn out;\n") thisDriver.add_line("}} //END_DRIVER {}\n\n".format(funcName)) f.write(thisDriver.get_driverText()) DRIVER_LIST[thisDriver.get_driverName()] = thisDriver def pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS): with open("spFunDrivers/" + libraryName + "_drivers.py", 'w') as f: # write all imports for x in imports: if len(x) == 1: f.write("import {}\n".format(x[0])) if len(x) == 2: f.write("import {} as 
{}\n".format(x[0], x[1])) for x in fromImports: f.write("from {} import {}\n".format(x[0], x[1])) # for each collected function signature for funcName, args in signatures.items(): # for a varying number of integers... for numberOfInts in range(len(args[0])): # get the number of doubles numberOfDoubles = len(args[0]) - numberOfInts # form driverName driverName = "{}_{}_DRIVER{}".format(libraryName, funcName.replace('.', '_'), numberOfInts ) # form unique funcName without "_alt" and namespace info temp = funcName callName = funcName if '_alt' in temp: temp = temp[:temp.index("_alt")] callName = temp if '.' in temp: temp = temp[temp.index(".") + 1:] # construct driver object thisDriver = Driver(driverName, temp, libraryName, "python", len(args[0]), callName) thisDriver.add_line("def {}(doubleInput, intInput):\n".format(driverName)) # for each extracted function, save all of its test arguments for test migration for j in range(len(args)): ints = [] doubles = [] for k in range(numberOfInts): ints.append(int(args[j][k])) for k in range(len(args[0]) - numberOfInts): doubles.append(float(args[j][k])) TEST_INPUTS["{}~inputs_num{:0>3}".format(driverName,j)] = (doubles, ints) thisDriver.set_numberOfDoubles(numberOfDoubles) thisDriver.set_numberOfInts(numberOfInts) if "_alt" in funcName: thisDriver.add_line("\tout = {}(".format(funcName[:funcName.find("_alt")])) else: thisDriver.add_line("\tout = {}(".format(funcName)) for i in range(numberOfInts): thisDriver.add_line("intInput[{}]".format(i)) if i + 1 != numberOfInts or numberOfDoubles != 0: thisDriver.add_line(", ") for i in range(numberOfDoubles): thisDriver.add_line("doubleInput[{}]".format(i)) if i + 1 != numberOfDoubles: thisDriver.add_line(", ") thisDriver.add_line(")\n\n") thisDriver.add_line("\treturn float(out) #END_DRIVER {}\n\n".format(funcName)) f.write(thisDriver.get_driverText()) DRIVER_LIST[thisDriver.get_driverName()] = thisDriver if __name__ == "__main__": # python3 driverGenerator mpmath python libraryName = sys.argv[1] language = sys.argv[2] try: with open("__temp/__driverCollection", 'rb') as fp: DRIVER_LIST = pickle.load(fp) except: DRIVER_LIST = {} try: with open("__temp/__testInputs", 'rb') as fp: TEST_INPUTS = pickle.load(fp) except: TEST_INPUTS = {} # load information from signature extractor with open("__temp/__" + libraryName + "_signatures", 'rb') as fp: signatures = pickle.load(fp) with open("__temp/__" + libraryName + "_imports", 'rb') as fp: imports = pickle.load(fp) with open("__temp/__" + libraryName + "_fromImports", 'rb') as fp: fromImports = pickle.load(fp) if language == 'c': gslGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS) subprocess.call(['make'], cwd="spFunDrivers/") if language == 'python': pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports,TEST_INPUTS) with open("__temp/__testInputs", 'wb') as fp: pickle.dump(TEST_INPUTS, fp) with open("__temp/__driverCollection", 'wb') as fp: pickle.dump(DRIVER_LIST, fp)
Sherryhh/fpdiff_extend
fp-diff-testing/workspace/driverGenerator.py
driverGenerator.py
py
8,600
python
en
code
1
github-code
50
10070441282
import datetime import os import sys from importlib import reload from antlr4 import * from CnfUtility import CnfUtility from CnfVcGenerator import CnfVcGenerator from MyCFG import MyCFG from MyHelper import MyHelper from MyUtility import MyUtility from MyVisitor import MyVisitor from PreProcessor import PreProcessor from WpcStringConverter import WpcStringConverter from gen.MySsaStringGenerator import MySsaStringGenerator from gen.PlSqlLexer import PlSqlLexer from gen.PlSqlParser import PlSqlParser from MyRawCfgToGraph import MyRawCfgToGraph def executeSinglePlSqlFile(data, spec): f = open(data, 'r') linesOfCode = len(f.readlines()) f.close() processor = PreProcessor(spec, data) tableInfo, assumeConstraint, assertConstraint, resultString = processor.start() file = open('cnf/upper_input.sql', "w") file.write(resultString) file.close() # recording startTime startTime = datetime.datetime.now() input = FileStream('cnf/upper_input.sql') lexer = PlSqlLexer(input) stream = CommonTokenStream(lexer) parser = PlSqlParser(stream) tree = parser.sql_script() # ast = tree.toStringTree(recog=parser) # print(str(MyPlSqlVisitor(parser).getRuleName(tree))) # print("\n\n", signature(tree.toStringTree), "\n") cfg = MyCFG() helper = MyHelper(parser) helper.updateTableDict(tableInfo) utility = MyUtility(helper) v = MyVisitor(parser, cfg, utility) v.visit(tree) print("\nRaw CFG : ", v.rawCFG) # for key in v.cfg.nodes: # if v.cfg.nodes[key].ctx != None: # print(key, " --> ", v.cfg.nodes[key].ctx.getText()) res = MyRawCfgToGraph(v.rawCFG, cfg) res.execute() # cfg.printPretty() # cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph") # TODO: make dot file in cnf form utility.generateDomSet(cfg) # print("Dominator set ended----------->\n\n") utility.generateSDomSet(cfg) # print("Strictly Dominator set ended ----------->\n\n") utility.generatIDom(cfg) # print("Immediate Dominator ended ----------->\n\n") utility.generateDFSet(cfg) utility.insertPhiNode(cfg) utility.initialiseVersinosedPhiNode(cfg) utility.versioniseVariable(cfg) utility.phiDestruction(cfg) ssaString = MySsaStringGenerator(cfg, parser) ssaString.execute() # utility.generateFinalDotGraph(cfg) # for nodeId in cfg.nodes: # cfg.nodes[nodeId].printPretty() # cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph") # # hello1 = utility.generateBeforeVersioningDotFile(cfg) # cfg.dotToPng(hello1, "cnf/before_versioning_graph") # # hello4 = utility.generateDestructedPhiNodeWalaDotFile(cfg) # cfg.dotToPng(hello4, "cnf/destructed_phi_node_wala_graph") cnfUtility = CnfUtility(helper) iCnfCfg = cnfUtility.copyCfg(cfg) reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg) cnfUtility.unvisit(iCnfCfg) cnfUtility.setParentBranching(iCnfCfg) cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg) cnfUtility.copyParentBranching(cnfCfg, iCnfCfg) # print("\n\n\n\n\n\t\t\tThe intermediate CNF form is ------------------------------>\n\n\n\n") # for nodeId in iCnfCfg.nodes: # iCnfCfg.nodes[nodeId].printPretty() # print("\n\n\n\n\n\t\t\tThe CNF form is ------------------------------>\n\n\n\n") cnfVcGenerator = CnfVcGenerator(cnfCfg, parser) cnfPath = [] for nodeId in cnfCfg.nodes: cnfPath.append(nodeId) cnfVcGenerator.generateCnfVc(cnfPath) # print("\n\n\n\n\t\t\tThe CNF VCs are : ------------------------------->\n\n\n") # print(cnfVcs) # for nodeId in cnfCfg.nodes: # cnfCfg.nodes[nodeId].printPretty() # cnfVc = cnfUtility.cnfVc(cnfCfg) # # print("\n\n\t\tThe CNF VCs are ----------------->\n\n\n") # # for str in cnfVc: # print(str) varSet, z3Str = cnfUtility.iZ3format(cnfCfg) # 
print("\n\n*******************\n\n", z3Str, "\n\n--------------\n\n") # print(varSet) # # print("\n\n") z3Str = z3Str.replace(" ", " ") z3Str = z3Str.replace(" == ", " = ") z3Str = z3Str.replace(" = ", " == ") print("\n**** Final CNF VC in Well_Bracketted_Format:\n\n", z3Str, "\n") z3StringConvertor = WpcStringConverter(z3Str) z3StringConvertor.execute() # print("\n**** Final CNF VC in Z3 Format:\n", z3StringConvertor.convertedWpc, "\n") z3FileString = "# This file was generated at runtime on " + str(datetime.datetime.now()) + "\n" z3FileString = z3FileString + "from z3 import *\n\n" z3FileString = z3FileString + "class Z3RuntimeCnfFile():\n" z3FileString = z3FileString + "\t" + "def __init__(self):\n" z3FileString = z3FileString + "\t\t" + "self.finalFormula = \"\"\n" z3FileString = z3FileString + "\t\t" + "self.satisfiability = \"\"\n" z3FileString = z3FileString + "\t\t" + "self.modelForViolation = \"\"\n\n" z3FileString = z3FileString + "\t" + "def execute(self):\n" for i in varSet: z3FileString = z3FileString + "\t\t" + i + " = Real(\'" + i + "\')\n" z3FileString = z3FileString + "\n\t\ts = Solver()\n" if len(z3StringConvertor.implies_p) > 0: for i in range(len(z3StringConvertor.implies_p)): z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.implies_p[i] + ")\n" if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]: z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.implies_p_q[i] + ")\n" # if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q: # z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.convertedWpc + ")\n" # else: # z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.convertedWpc + ")\n" z3FileString = z3FileString + "\t\t" + "s.add( Not( " + z3StringConvertor.convertedWpc + " ) )\n" # z3FileString = z3FileString + "\n\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t" + "print(\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\n\", s)" z3FileString = z3FileString + "\n\t\t" + "self.finalFormula = str(s)" # z3FileString = z3FileString + "\n\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t" + "print(\"%%%%%%%%%% Satisfiability %%%%%%%%%%\")\n" z3FileString = z3FileString + "\n\t\t" + "self.satisfiability = str(s.check())" z3FileString = z3FileString + "\n\t\t" + "if self.satisfiability == \"sat\":" # z3FileString = z3FileString + "\n\t\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t\t" + "print(\"-------->> Violation Occurred...\")" z3FileString = z3FileString + "\n\t\t\t" + "self.satisfiability = \"violation\"" # z3FileString = z3FileString + "\n\t\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t\t" + "print(\"%%%%%%%%%% An Instance for which Violation Occurred %%%%%%%%%%\\n\", s.model())" z3FileString = z3FileString + "\n\t\t\t" + "self.modelForViolation = str(s.model())" z3FileString = z3FileString + "\n\t\t" + "elif self.satisfiability == \"unsat\":" # z3FileString = z3FileString + "\n\t\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t\t" + "print(\"-------->> NO Violation Detected so far...\")" z3FileString = z3FileString + "\n\t\t\t" + "self.satisfiability = \"sat\"" # z3FileString = z3FileString + "\n\t\t\t" + "print()" # z3FileString = z3FileString + "\n\t\t" + "print()\n" file = open('cnf/Z3RuntimeCnfFile.py', "w") file.write(z3FileString) file.close() import cnf.Z3RuntimeCnfFile from cnf.Z3RuntimeCnfFile import Z3RuntimeCnfFile # Reload after module's creation to avoid old module remain imported from disk...VVI... 
cnf.Z3RuntimeCnfFile = reload(cnf.Z3RuntimeCnfFile) z3Runtime = Z3RuntimeCnfFile() z3Runtime.execute() finishTime = datetime.datetime.now() timeDifference = (finishTime - startTime).total_seconds() return linesOfCode, timeDifference, z3StringConvertor.convertedWpc, z3Runtime.satisfiability, z3Runtime.modelForViolation def main(argv): if len(argv) < 3: print("Not Enough Arguments. Exiting...") elif len(argv) == 3: data = "cnf/data/" + argv[1] spec = "cnf/spec/" + argv[2] linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(data, spec) print("\n\n*** Equivalent VC :") print(vcGenerated) print("\n*** Satisfibality :\t", satisfiability, "\n\n*** Model for Violation :\t", modelForViolation, "\n") print("\n////// Execution completed for file :", argv[1]) print("No. of VCs = 1") print("Time Taken =", executionTime) print("LinesOfCode =", linesOfCode) elif len(argv) == 4: if argv[1] == "-dataset": dataList = os.listdir(argv[2]) specList = os.listdir(argv[3]) # print(dataList) # print(specList) mat = [] counter = 0 for dataFile in dataList: specFile = dataFile.split(".")[0].strip() + ".spec" print("~~~~~~~~~~~~~~~~ For PlSQL FileName => " + dataFile + " ~~~~~~~~~~~~~~~~") if specFile in specList: linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile( argv[2] + "/" + dataFile, argv[3] + "/" + specFile) temp = [] temp.append(dataFile) temp.append(linesOfCode) temp.append(executionTime) # temp.append(vcGenerated) temp.append(satisfiability) temp.append(modelForViolation) mat.append(temp) file = open('cnf/Z3RuntimeCnfFile.py', "w") file.write("# Cleared content of this File...\n\nclass Z3RuntimeCnfFile():\n\tdef __init__(self):\n\t\tself.finalFormula = \"\"\n\t\tself.satisfiability = \"\"\n\t\tself.modelForViolation = \"\"\n\n\tdef execute(self):\n\t\tprint('+++++++++++++++++++++++++++++%%%%%^^^^^^^^####')\n") file.close() else: print(specFile + " do not exist!!!") counter = counter + 1 print("Counter =", counter) print( "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") print("Filename\t\tLinesOfCode\t\tExecutionTime\t\tSatisfiability\t\tViolatingInstance\n") for i in range(len(mat)): for j in range(len(mat[i])): print(mat[i][j], end="\t\t") print() elif len(argv) == 6: if argv[1] == "-datafilename" and argv[3] == "-data_spec_filepaths": linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(argv[4], argv[5]) print(" "+argv[2], end="\t\t\t") print(linesOfCode, end="\t\t") print(executionTime, end="\t") print("1", end="\t") print(satisfiability, end="\t\t") print(modelForViolation.replace("\n", " "), end="") print() # data = "cnf/data/" + argv[1] # spec = "cnf/spec/" + argv[2] # processor = PreProcessor(spec, data) # tableInfo, assumeConstraint, assertConstraint, resultString = processor.start() # # file = open('cnf/upper_input.sql', "w") # file.write(resultString) # file.close() # # input = FileStream('cnf/upper_input.sql') # lexer = PlSqlLexer(input) # stream = CommonTokenStream(lexer) # parser = PlSqlParser(stream) # tree = parser.sql_script() # # ast = tree.toStringTree(recog=parser) # # print(str(MyPlSqlVisitor(parser).getRuleName(tree))) # # print("\n\n", signature(tree.toStringTree), "\n") # # cfg = MyCFG() # helper = MyHelper(parser) # helper.updateTableDict(tableInfo) # utility = MyUtility(helper) # v = MyVisitor(parser, cfg, utility) # v.visit(tree) # # # # 
print(v.rawCFG) # # for key in v.cfg.nodes: # if v.cfg.nodes[key].ctx != None: # print(key, " --> ", v.cfg.nodes[key].ctx.getText()) # # # res = MyRawCfgToGraph(v.rawCFG, cfg) # res.execute() # cfg.printPretty() # cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph") #TODO: make dot file in cnf form # utility.generateDomSet(cfg) # print("Dominator set ended----------->\n\n") # utility.generateSDomSet(cfg) # print("Strictly Dominator set ended ----------->\n\n") # utility.generatIDom(cfg) # print("Immediate Dominator ended ----------->\n\n") # utility.generateDFSet(cfg) # utility.insertPhiNode(cfg) # # # utility.initialiseVersinosedPhiNode(cfg) # utility.versioniseVariable(cfg) # utility.phiDestruction(cfg) # # # ssaString = MySsaStringGenerator(cfg, parser) # ssaString.execute() # # #utility.generateFinalDotGraph(cfg) # # for nodeId in cfg.nodes: # # cfg.nodes[nodeId].printPretty() # # cnfUtility = CnfUtility(helper) # iCnfCfg = cnfUtility.copyCfg(cfg) # reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg) # cnfUtility.unvisit(iCnfCfg) # cnfUtility.setParentBranching(iCnfCfg) # # cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg) # cnfUtility.copyParentBranching(cnfCfg, iCnfCfg) # print("\n\n\n\n\n\t\t\tThe intermediate CNF form is ------------------------------>\n\n\n\n") # # for nodeId in iCnfCfg.nodes: # iCnfCfg.nodes[nodeId].printPretty() # # print("\n\n\n\n\n\t\t\tThe CNF form is ------------------------------>\n\n\n\n") # # # # cnfVcGenerator = CnfVcGenerator(cnfCfg, parser) # # cnfPath = [] # # for nodeId in cnfCfg.nodes: # cnfPath.append(nodeId) # # cnfVcGenerator.generateCnfVc(cnfPath) # # # print("\n\n\n\n\t\t\tThe CNF VCs are : ------------------------------->\n\n\n") # # print(cnfVcs) # # for nodeId in cnfCfg.nodes: # cnfCfg.nodes[nodeId].printPretty() # # # cnfVc = cnfUtility.cnfVc(cnfCfg) # # # # print("\n\n\t\tThe CNF VCs are ----------------->\n\n\n") # # # # for str in cnfVc: # # print(str) # # varSet, z3Str = cnfUtility.iZ3format(cnfCfg) # # print("\n\n*******************\n\n", z3Str, "\n\n--------------\n\n") # print(varSet) # # print("\n\n") # z3Str = z3Str.replace(" ", " ") # z3Str = z3Str.replace(" == ", " = ") # z3Str = z3Str.replace(" = ", " == ") # z3StringConvertor = WpcStringConverter(z3Str) # z3StringConvertor.execute() # print("\n**** WPC String in Z3 Format:\n", z3StringConvertor.convertedWpc, "\n") # # z3FileString = "# This file was generated at runtime " + "\n" # z3FileString = z3FileString + "from z3 import *\n\n" # for i in varSet: # z3FileString = z3FileString + i + " = Real(\'" + i + "\')\n" # z3FileString = z3FileString + "\ns = Solver()\n" # # if len(z3StringConvertor.implies_p) > 0: # for i in range(len(z3StringConvertor.implies_p)): # z3FileString = z3FileString + "s.add(" + z3StringConvertor.implies_p[i] + ")\n" # if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]: # z3FileString = z3FileString + "s.add(" + z3StringConvertor.implies_p_q[i] + ")\n" # # if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q: # # z3FileString = z3FileString + "s.add(" + z3StringConvertor.convertedWpc + ")\n" # # else: # # z3FileString = z3FileString + "s.add(" + z3StringConvertor.convertedWpc + ")\n" # z3FileString = z3FileString + "s.add( Not( " + z3StringConvertor.convertedWpc + " ) )\n" # # z3FileString = z3FileString + "\nprint()\n" # z3FileString = z3FileString + "\nprint(\"------------------------------------------------------------------\\nRunning script in /wpc/z3FormatWpcFile.py ....\\n\")\n" # z3FileString = z3FileString + 
"\nprint(\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\n\", s)\n" # z3FileString = z3FileString + "\nprint()\n" # z3FileString = z3FileString + "print(\"%%%%%%%%%% Satisfiability %%%%%%%%%%\\n\", s.check())\n" # z3FileString = z3FileString + "\nprint()\n" # z3FileString = z3FileString + "print(\"%%%%%%%%%% Satisfiable Model %%%%%%%%%%\\n\", s.model())\n" # z3FileString = z3FileString + "\nprint()\n" # # file = open('cnf/z3FormatCnfFile.py', "w") # file.write(z3FileString) # file.close() # # # call(["python3", "cnf/z3FormatWpcFile.py"]) # # # # # hello = utility.generateFinalDotGraph(cfg) # # print(hello) # # cfg.dotToPng(hello, "versioned_graph") # # #hello2 = utility.generateVersionedDotFile(cfg) # #print(hello2) # #cfg.dotToPng(hello2, "se/versioned_graph") # # #hello3 = utility.generateVersionedPhiNodeWalaDotFile(cfg) # #print(hello3) # #cfg.dotToPng(hello3, "se/versioned_phi_node_wala_graph") # # #hello4 = utility.generateDestructedPhiNodeWalaDotFile(cfg) # #print(hello4) # #cfg.dotToPng(hello4, "se/destructed_phi_node_wala_graph") if __name__ == '__main__': main(sys.argv)
NabeelQaiser/BTP_2k18-19
simulator_cnf.py
simulator_cnf.py
py
17,501
python
en
code
1
github-code
50
18659979731
from configparser import ConfigParser from datetime import timedelta, datetime from discord_webhook import DiscordWebhook import os, random, requests, re from typing import TypedDict, Union class UserNameResponseDict(TypedDict): personaname:str name:str def get_username(steam_id:int) -> Union[UserNameResponseDict,None]: """ Returns dict of persona name and name from Steam Player Summaries API """ try: response = requests.get(f'https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key={str(steam_api_key)}&format=json&steamids={str(steam_id)}') except requests.exceptions.RequestException as e: return None if 'realname' in response.json()['response']['players'][0]: name = response.json()['response']['players'][0]['realname'] else: name = response.json()['response']['players'][0]['personaname'] user_dict = { 'personaname': response.json()['response']['players'][0]['personaname'], 'name': name } return user_dict def check_name(steam_id): """ Checks if user steam id is known, then returns user dict: { 'status': <bool> (Is user recognized), 'name': <str> 'personaname': <str> } If username is unknown, looks up user via steam ID """ if str(steam_id) in known_ids: name = known_ids[str(steam_id)][0] personaname = known_ids[str(steam_id)][1] status = True else: response = get_username(steam_id) if response is not None: name = response['name'] personaname = response['personaname'] status = False else: personaname = name = "Unknown Mystery Person" status=False user_dict = { 'status': status, 'name': name, 'personaname': personaname, } return user_dict def generate_greeting(steam_id, incoming): """ Generates random greeting after looking up steam ID. args steam_id: <int> incoming: <bool> (true if user joining server, else false) returns: greeting: <str> """ user = check_name(steam_id) status = user['status'] name = user['name'] personaname = user['personaname'] #print(status) if incoming == True: greetings = [ f'Hello {name}, or is it {personaname}? I don\'t know... I don\'t get paid enough.', f'Hello {name}, toilet paper is on isle 24.', f'Welcome to Walmart, {name}!', f'Enjoy shopping at Walmart, {name}!', f'Hi, {name} how can-- HEY, NO RIDING ON THE CARTS!', f'What do you want, {personaname}?', f'Yo, {personaname}, want to hear about the time I ran over a cat?', f'We don\'t sell them, but possums are super tasty, {name}', f'Hey {name}, Have you ever seen a grown Walmart Greeter Naked?', ] if status == True: greetings.append(f'Welcome back {name}!') greetings.append(f'Wonderful seeing you again, {name}!') greetings.append(f'Lookin\' fly today, {name}') greetings.append(f'Welcome back {name}... 
I\'m watching you...') else: greetings = [ f'Goodbye {name}', f'Thank you, come again {name}', f'Thank you for shopping at Walmart, see you next time, {name}', f'You better not have anything you didn\'t pay for {name}' ] if status == True: greetings.append(f'I hate to watch {name} go, but I love to watch {name} leave...') greetings.append(f'See ya {name}, wouldn\'t wanna be ya though.') result = greetings[random.randint(0, len(greetings)-1)] return result def extract_date(line): """Return a datetime from a log line""" fmt = '%m/%d/%Y %H:%M:%S' return datetime.strptime(line[:19], fmt) if __name__ == "__main__": # parse config file for paths and known ids config = ConfigParser() config.read('greeter_config.ini') vhlog = config['Paths'].get('RECENT_LOG','./example.log') lastupdated = config['Paths'].get('LAST_UPDATED','./last_updated.txt') webhook_url = config['Discord'].get('WEBHOOK_URL',False) steam_api_key = config['Steam'].get('API_KEY',False) if not steam_api_key: raise ValueError("Steam API Key is required to look up users. Please add one to greeter_config.ini") if not webhook_url: raise ValueError("Webhook URL is required to post to discord. Please add one to greeter_config.ini") suppress_old = True if config['Settings']['SUPPRESS_OLD'] == 'True' else False known_ids = dict() if config['Known Users']: for key in config['Known Users']: known_ids[key] = [w.strip() for w in str(config['Known Users'][key]).split(',')] ## get current time and last updated time end_date = datetime.now() # create lastupdated file if none exists if not os.path.exists(os.path.abspath(lastupdated)): print('Creating last_updated.txt') os.makedirs(os.path.dirname(lastupdated),exist_ok=True) new_file = open(lastupdated, 'a').close() with open(lastupdated, 'r') as date_file: if os.stat(lastupdated).st_size > 0: data = date_file.read(19) start_date = datetime.strptime(data, '%m/%d/%Y %H:%M:%S') else: start_date = datetime(2019,1,1) changed = False ## Prevent posting status more than a minute old ## Useful if listener is started when there are a bunch of old logs if suppress_old: if end_date - start_date > timedelta(seconds=60): start_date = end_date - timedelta(seconds=60) ## check for updates and post to discord if any with open(vhlog) as f: # from https://stackoverflow.com/questions/18562479/what-is-the-quickest-way-to-extract-entries-in-a-log-file-between-two-dates-in-p for line in f: if start_date < extract_date(line) < end_date: client_id = re.search(r'\d+$', line).group(0) if "Closing socket" in line: incoming = False elif "Got handshake from client" in line: incoming = True greeting = generate_greeting(client_id, incoming) if webhook_url: print('Sending webhook:',greeting) webhook = DiscordWebhook(url=webhook_url, content=greeting) response = webhook.execute() else: print('No Webhook_URL specified, didn\'t send greeting:', greeting) changed = True ## set last_updated time to end_date if changed == True: date_file = open(lastupdated, "w") date_file.write(end_date.strftime('%m/%d/%Y %H:%M:%S')) date_file.close() else: print(f'No changes found. Suppress old messages: {suppress_old}')
lekjos/vhserver-walmart-greeter
discord_post.py
discord_post.py
py
7,031
python
en
code
1
github-code
50
28076502272
# -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2023/2/8 0:25
"""
from typing import List

"""
You are a system administrator with a list of folders `folder`. Your task is to delete all
sub-folders in the list and return the remaining folders in any order.
If folder[i] is located inside another folder folder[j], then folder[i] is a sub-folder of folder[j].
A folder "path" is a string of one or more parts concatenated in the form: '/' followed by one
or more lowercase English letters.
For example, "/leetcode" and "/leetcode/problems" are valid paths, while the empty string and "/" are not.

Example 1:
Input: folder = ["/a","/a/b","/c/d","/c/d/e","/c/f"]
Output: ["/a","/c/d","/c/f"]
Explanation: "/a/b" is a sub-folder of "/a", and "/c/d/e" is a sub-folder of "/c/d".

Example 2:
Input: folder = ["/a","/a/b/c","/a/b/d"]
Output: ["/a"]
Explanation: folders "/a/b/c" and "/a/b/d" are removed because they are sub-folders of "/a".

Example 3:
Input: folder = ["/a/b/c","/a/b/ca","/a/b/d"]
Output: ["/a/b/c","/a/b/ca","/a/b/d"]
"""
"""
Idea: sort first, then scan. If the current path f does not start with the tracked prefix t,
it is not a sub-folder, so keep it and update t to f + '/' before continuing the scan.
"""


class Solution:
    @staticmethod
    def removeSubfolders(folder: List[str]) -> List[str]:
        res, t = [], ' '
        for f in sorted(folder):
            if not f.startswith(t):
                res.append(f)
                t = f + '/'
        return res
TankManBeta/LeetCode-Python
problem1233_medium.py
problem1233_medium.py
py
1,569
python
zh
code
0
github-code
50
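A quick check of Solution.removeSubfolders above against the three examples quoted in its docstring; the result order follows the sorted scan, which happens to match the expected outputs here.

print(Solution.removeSubfolders(["/a", "/a/b", "/c/d", "/c/d/e", "/c/f"]))  # ['/a', '/c/d', '/c/f']
print(Solution.removeSubfolders(["/a", "/a/b/c", "/a/b/d"]))                # ['/a']
print(Solution.removeSubfolders(["/a/b/c", "/a/b/ca", "/a/b/d"]))           # ['/a/b/c', '/a/b/ca', '/a/b/d']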
705515631
import math import os import time from copy import deepcopy import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision.models as models def init_seeds(seed=0): torch.manual_seed(seed) # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html if seed == 0: # slower, more reproducible cudnn.deterministic = True cudnn.benchmark = False else: # faster, less reproducible cudnn.deterministic = False cudnn.benchmark = True def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' cpu_request = device.lower() == 'cpu' if device and not cpu_request: # if device requested other than 'cpu' os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity cuda = False if cpu_request else torch.cuda.is_available() if cuda: c = 1024 ** 2 # bytes to MB ng = torch.cuda.device_count() if ng > 1 and batch_size: # check that batch_size is compatible with device_count assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng) x = [torch.cuda.get_device_properties(i) for i in range(ng)] s = 'Using CUDA ' for i in range(0, ng): if i == 1: s = ' ' * len(s) print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" % (s, i, x[i].name, x[i].total_memory / c)) else: print('Using CPU') print('') # skip a line return torch.device('cuda:0' if cuda else 'cpu') def time_synchronized(): torch.cuda.synchronize() if torch.cuda.is_available() else None return time.time() def is_parallel(model): return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) def intersect_dicts(da, db, exclude=()): # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} def initialize_weights(model): for m in model.modules(): t = type(m) if t is nn.Conv2d: pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]: m.inplace = True def find_modules(model, mclass=nn.Conv2d): # Finds layer indices matching module class 'mclass' return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] def sparsity(model): # Return global model sparsity a, b = 0., 0. for p in model.parameters(): a += p.numel() b += (p == 0).sum() return b / a def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune print('Pruning model... 
', end='') for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): prune.l1_unstructured(m, name='weight', amount=amount) # prune prune.remove(m, 'weight') # make permanent print(' %.3g global sparsity' % sparsity(model)) def fuse_conv_and_bn(conv, bn): # https://tehnokv.com/posts/fusing-batchnorm-and-conv/ with torch.no_grad(): # init fusedconv = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, bias=True).to(conv.weight.device) # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) # prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) return fusedconv def model_info(model, verbose=False): # Plots a line-by-line description of a PyTorch model n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) try: # FLOPS from thop import profile flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2 fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS except: fs = '' print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs)) def load_classifier(name='resnet101', n=2): # Loads a pretrained model reshaped to n-class output model = models.__dict__[name](pretrained=True) # Display model properties input_size = [3, 224, 224] input_space = 'RGB' input_range = [0, 1] mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] for x in [input_size, input_space, input_range, mean, std]: print(x + ' =', eval(x)) # Reshape output to n classes filters = model.fc.weight.shape[1] model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) model.fc.out_features = n return model def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio # scales img(bs,3,y,x) by ratio if ratio == 1.0: return img else: h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img gs = 128#64#32 # (pixels) grid size h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith('_') or k in exclude: continue else: setattr(a, k, v) class ModelEMA: """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models Keep a moving average of everything in the model state_dict (parameters and buffers). 
This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, updates=0): # Create EMA self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) for p in self.ema.parameters(): p.requires_grad_(False) def update(self, model): # Update EMA parameters with torch.no_grad(): self.updates += 1 d = self.decay(self.updates) msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d v += (1. - d) * msd[k].detach() def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes copy_attr(self.ema, model, include, exclude)
WongKinYiu/ScaledYOLOv4
utils/torch_utils.py
torch_utils.py
py
8,846
python
en
code
2,013
github-code
50
72328303514
import io
# used for making HTTP requests
import requests
import zipfile

# The plain file-based version:
# with open('/tmp/a.txt', 'w') as f:
#     f.write('test test')
#
# with open('/tmp/a.txt', 'r') as f:
#     print(f.read())

f = io.StringIO()
f.write('string io test')

# go back to the beginning
f.seek(0)
print(f.read())

# Usage example:
# useful when you want to download a zip file and process it entirely in memory
url = '###########'

f = io.BytesIO()
r = requests.get(url)
f.write(r.content)

with zipfile.ZipFile(f) as z:
    with z.open('specify a file') as r:
        print(r.read().decode())
magisystem0408/python_cord_dir
library/io.py
io.py
py
609
python
ja
code
0
github-code
50
70988579675
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from Mesh import *
from Utils import *
import math


def F(Pi, Pj, k, r):
    return k * (torch.linalg.norm(Pi - Pj) - r) * (Pj - Pi) / torch.linalg.norm(Pi - Pj)


def force_magnitude_sum(mesh):
    l = 0
    for vIndex, this in enumerate(mesh.verts):
        force = torch.tensor([0., 0.])
        for key, edge in mesh.connected(vIndex).items():
            other = mesh.verts[key]
            force += F(this, other, k=edge.stiffness, r=edge.rest_length)
            # print(vIndex, force)
        l += torch.linalg.norm(force)
    return l


mesh = generate_rectangle_mesh_grid((0, 10), (10, 0), 5, 5)

# fig, axs = plt.subplots(1, 1)
# axs.set_aspect('equal')
# visualize_mesh(axs, mesh)
# plt.show()

verts = [torch.tensor(x, requires_grad=True) for x in mesh.verts]
tInd = mesh.tInd
springMesh = SpringMesh(verts, tInd)

T = [27]
E = []
for t in T:
    E.append(tuple(sorted([tInd[t][0], tInd[t][1]])))
    E.append(tuple(sorted([tInd[t][1], tInd[t][2]])))
    E.append(tuple(sorted([tInd[t][0], tInd[t][2]])))

for k, v in springMesh.edges.items():
    if k in E:
        v.rest_length = v.length * 2
        v.stiffness = 1
    else:
        v.rest_length = v.length

print(force_magnitude_sum(springMesh))

optimizer = optim.Adam(springMesh.verts, lr=0.0001)
theta = 1e-5
history = []
for i in range(2000):
    optimizer.zero_grad()
    loss = force_magnitude_sum(springMesh)
    history.append(loss.item())
    if history[-1] < theta:
        print("break early")
        break
    loss.backward(retain_graph=True)
    optimizer.step()

print(history[-1])

fig, axs = plt.subplots(1, 3)
axs[0].set_aspect('equal')
axs[0].set_xlim((0, 10))
axs[0].set_ylim((0, 10))
visualize_mesh(axs[0], mesh)
axs[1].set_aspect('equal')
axs[1].set_xlim((0, 10))
axs[1].set_ylim((0, 10))
visualize_mesh(axs[1], springMesh)
axs[2].plot(history)
plt.show()
COMP0031VRProject/Framework
spring_mesh_example.py
spring_mesh_example.py
py
1,950
python
en
code
0
github-code
50
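The objective minimized by Adam in spring_mesh_example.py above is the summed magnitude of the residual spring forces at each vertex, where each edge contributes the Hookean force implemented by F. Restated in LaTeX (a transcription of the code, not an addition to it):

% Force on vertex i from the spring (i, j) with stiffness k and rest length r,
% as implemented by F(Pi, Pj, k, r):
F_{ij} = k\,\bigl(\lVert P_i - P_j\rVert - r\bigr)\,\frac{P_j - P_i}{\lVert P_i - P_j\rVert}
% Loss minimized by the optimizer, as implemented by force_magnitude_sum:
L = \sum_{i} \Bigl\lVert \sum_{j \in \mathcal{N}(i)} F_{ij} \Bigr\rVert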