##### MAIN #########
fs = cgi.FieldStorage()
fn = DATAPATH + str(fs["fn"].value)
mn = str(fs["mn"].value)
hd = get_plots(fn)
print_page_meas(str(fs["fn"].value), hd, mn)


# =============================================================================
# File: codenames/models.py  (repo: Schluggi/codenames)
# =============================================================================
from . import db


class Game(db.Model):
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(255), unique=True, nullable=False)
    mode = db.Column(db.String(255), nullable=False)
    images = db.Column(db.Text, nullable=False)
    cards = db.Column(db.Text, nullable=False)
    score_red = db.Column(db.Integer)
    score_blue = db.Column(db.Integer)
    members_red = db.Column(db.Text, nullable=False, default='[]')
    members_blue = db.Column(db.Text, nullable=False, default='[]')
    start_score_red = db.Column(db.Integer)
    start_score_blue = db.Column(db.Integer)
    fields = db.relationship('Field', backref='game', lazy='dynamic')


class Field(db.Model):
    __tablename__ = 'fields'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'), nullable=False, primary_key=True)
    hidden = db.Column(db.Boolean, nullable=False, default=True)
    type = db.Column(db.String(8), nullable=False)


# =============================================================================
# File: emails/admin.py  (repo: vasudeveloper001/mvc_python_django)
# =============================================================================
from django.contrib import admin

# Register your models here.
from emails.models import EmailEntry

admin.site.register(EmailEntry)


# =============================================================================
# File: paginas/migrations/0011_players_lobby_slug.py  (repo: igor-pontes/Dolex)
# =============================================================================
# Generated by Django 2.1.5 on 2019-02-06 17:36

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('paginas', '0010_players_lobby_slot'),
    ]

    operations = [
        migrations.AddField(
            model_name='players_lobby',
            name='slug',
            field=models.CharField(default=None, max_length=110),
        ),
    ]


# =============================================================================
# File: script_python/csvAll.py  (repo: goldleaf3i/generativeCMLgraphs)
# =============================================================================
#!/usr/bin/python

# Opens the folder where the script lives or, alternatively, argv[1].
# Parses everything:
# takes every text file, treating each as the adjacency matrix of a graph,
# and loads the matrices it finds into an igraph graph.

### Copied from the Dropbox development folder, to be merged back into the original project - finished 28/9/14
# In particular, add to the library the metrics for printing the various graph characteristics.

# TODO: move the helper functions into utils
from sys import argv
import re
import sys
import math
from loadGraph import *
import numpy as Math
import os
import glob
from multiprocessing import Process

mylabelschema = 'office.xml'

#for i in matrix:
#M.append( [int(j) for j in i.split(',')[:-1] +[i.split(',')[-1].split('\')[0]]])
def parseEverything(direct):
    global mylabelschema
    for filename in glob.glob(direct + "/*.xml"):
        #try:
        print("opening file", filename)
        loadXML(filename, mylabelschema)
        #except Exception as e:
        #    print(str(e))
        #    print("cannot process", filename)
        #    exit()
    p = []
    i = 0
    for directories in glob.glob(direct + "/*/"):
        #p.append(Process(target = parseEverything, args =(directories,)))
        parseEverything(directories)
        #p[i].start()
        i += 1
        print("opening folder", directories)

    #for j in range(i-1):
    #    p[j].join()
    return True


def plotAdiacency(filename):
    myfile = open(filename)
    # initialise the data structure
    matrix = []
    for line in myfile:
        matrix.append([int(i) for i in line.split(',')])
    myfile.close()
    topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
    graph = topologicalmap.graph
    print(".".join(filename.split(".")[:-1]) + ".png")
    print(graph.vs["label"])
    vertex_shape = ['rect' if i == 'C' or i == 'H' or i == 'L' or i == 'E' or i == 'N' or i == 'Q' else 'circle'
                    for i in graph.vs["label"]]
    plot(graph, ".".join(filename.split(".")[:-1]) + ".png", vertex_label_size=0,
         vertex_shape=vertex_shape, bbox=(700, 700), layout='kk')
def evaluateGraphs(direct, myformat=None):
    # computes all the igraph metrics and then prints them
    graphStats = dict()
    metrics = ['nodes', 'R', 'C', 'path_len', 'diameter', 'density', 'articulation_points', 'betweenness',
               'mu_betweenness', 'scaled_betweenness', 'mu_scaled_betweenness', 'Rbetweenness', 'mu_Rbetweenness',
               'Cbetweenness', 'mu_Cbetweenness', 'closeness', 'mu_closeness', 'Rcloseness', 'mu_Rcloseness',
               'Ccloseness', 'mu_Ccloseness', 'eig', 'mu_eig', 'Reig', 'mu_Reig', 'Ceig', 'mu_Ceig'
               ]
    for filename in glob.glob(direct + "/*.txt"):
        #try:
        print("opening file", filename)
        graphStats[filename] = analyzeGraph(filename, myformat)
        #except Exception as e:
        #    print(str(e))
        #    print("cannot process", filename)
        #    exit()

    data = aggrateMetrics(graphStats, metrics)
    if data:
        text_file = open(direct + "/aggregate_graph_data.log", "w")
        text_file.write(str(data))
        text_file.close()

    for directories in glob.glob(direct + "/*/"):
        evaluateGraphs(directories, myformat=myformat)
        print("opening folder", directories)
    return True


def analyzeGraph(filename, myformat='adjacency'):
    # format: 'adjacency' is the 0/1 matrix, values separated by "," and rows terminated by ";" (DEFAULT)
    # the 'matlab' format is instead the one matlab uses to write matrices

    myfile = open(filename)
    # initialise the data structure
    matrix = []
    for line in myfile:
        print(line)
        if myformat == 'matlab':
            line = line.replace('[', '')
            line = line.replace(']', '')
            line = line.replace(';', '')
            print(line)
        matrix.append([int(i) for i in line.split(',')])
    myfile.close()
    topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
    g = topologicalmap.graph
    Cs = g.vs.select(RC_label='C')
    Rs = g.vs.select(RC_label='R')
    indexC = [i.index for i in Cs]
    indexR = [i.index for i in Rs]
    data = dict()
    # number of nodes
    data['nodes'] = len(g.vs())
    # number of R nodes
    data['R'] = len(indexR)
    # number of C nodes
    data['C'] = len(indexC)
    # average path length
    data['path_len'] = g.average_path_length()
    # diameter
    data['diameter'] = g.diameter()
    # average degree (density)
    data['density'] = g.density()
    # articulation points, how many there are
    data['articulation_points'] = len(g.articulation_points())
    # betweenness
    betweenness = g.betweenness()
    data['betweenness'] = betweenness
    # mean betweenness
    data['mu_betweenness'] = avg(betweenness)
    # scaled betweenness
    scaled_b = [float(i) / (float(len(betweenness) - 1)) / (float(len(betweenness)) - 2) for i in betweenness]
    data['scaled_betweenness'] = scaled_b
    # mean scaled betweenness
    data['mu_scaled_betweenness'] = avg(scaled_b)
    # scaled betweenness, R nodes only
    data['Rbetweenness'] = selectLabelArray(scaled_b, indexR)
    # average scaled betweenness, R nodes only
    print(data['Rbetweenness'])
    data['mu_Rbetweenness'] = avg(data['Rbetweenness'])
    # scaled betweenness, C nodes only
    data['Cbetweenness'] = selectLabelArray(scaled_b, indexC)
    # average scaled betweenness, C nodes only
    data['mu_Cbetweenness'] = avg(data['Cbetweenness'])
    # closeness
    closeness = g.closeness()
    data['closeness'] = closeness
    # average closeness
    data['mu_closeness'] = avg(closeness)
    # closeness, R nodes only
    data['Rcloseness'] = selectLabelArray(closeness, indexR)
    # average closeness, R nodes only
    data['mu_Rcloseness'] = avg(data['Rcloseness'])
    # closeness, C nodes only
    data['Ccloseness'] = selectLabelArray(closeness, indexC)
    # average closeness, C nodes only
    data['mu_Ccloseness'] = avg(data['Ccloseness'])
    # eigenvector centrality
    eigenvec = g.eigenvector_centrality()
    data['eig'] = eigenvec
    # mean eigenvector centrality
    data['mu_eig'] = avg(eigenvec)
    # eigenvector centrality, R nodes only
    data['Reig'] = selectLabelArray(eigenvec, indexR)
    # mean eigenvector centrality, R nodes only
    data['mu_Reig'] = avg(data['Reig'])
    # eigenvector centrality, C nodes only
    data['Ceig'] = selectLabelArray(eigenvec, indexC)
    # mean eigenvector centrality, C nodes only
    data['mu_Ceig'] = avg(data['Ceig'])

    #print(".".join(filename.split(".")[:-1]) + ".png")
    #plot(graph, ".".join(filename.split(".")[:-1]) + ".png")
    stringa = str()
    for i in data.keys():
        stringa += str(i) + ":\n"
        stringa += str(data[i]) + "\n"
    text_file = open(".".join(filename.split(".")[:-1]) + "_aggregate_data.log", "w")
    text_file.write(str(stringa))
    text_file.close()
    return data


def selectLabelArray(array, indexes):
    # returns the elements of 'array' whose index is contained in 'indexes'
    tmp = []
    for i in indexes:
        tmp.append(array[i])
    return tmp


def averageLabel(array, indexes):
    # returns the mean of the elements of 'array' whose index is contained in 'indexes'
    tmp = []
    for i in indexes:
        tmp.append(array[i])
    return sum(tmp) / float(len(indexes))


def avg(array):
    return sum(array) / float(len(array))


def aggrateMetrics(dictionary, list_of_metrics):
    # for now, aggregate statistics are not computed over arrays
    # takes an array of arrays and recomputes everything
    mydict = dict()
    # initialise the variables
    for i in list_of_metrics:
        mydict[i] = variable(i)
    # for each graph, walk the dictionary and feed the variables
    for i in dictionary.keys():
        for j in dictionary[i].keys():
            if type(dictionary[i][j]) is list:
                # for now, aggregate statistics are not computed over arrays.
                pass
            else:
                mydict[j].add(dictionary[i][j])
    ret_str = str()
    for i in list_of_metrics:
        if mydict[i].n > 0:
            ret_str += mydict[i].printVar()
    return ret_str


# recursively opens every TXT file it finds; uses the current folder unless an alternative start folder is given
current = os.getcwd()
#try:
#    current = argv[1]
#except:
#    print("you did not specify the current folder")
#print("starting to parse folder", current, "what devilry is this?")
#parseEverything(current)
#print("done!")

count = 0
btypename = 'zoffice.xml'
#btypename = 'zoffice.xml'
for filename in glob.glob(current + "/*.xml"):
    count += 1
    print(filename)
    # loadXML loads the TOPOLOGICAL files; loadXML2 loads standard XML files
    if not btypename in filename:
        matrix = loadXML2(filename, btypename)
        Math.savetxt("graph_" + str(count) + ".csv", matrix, fmt='%s', delimiter=",")
print("done")
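# ---- input format sketch (illustrative; the sample row is an assumption,
# ---- not a file from the original project) ----
# analyzeGraph() accepts two row formats. The 'matlab' branch only strips the
# brackets and row terminator before the same comma split is applied:
line = "[0,1,0;"          # one row of a 'matlab'-style matrix
line = line.replace('[', '').replace(']', '').replace(';', '')
row = [int(i) for i in line.split(',')]   # -> [0, 1, 0]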
# =============================================================================
# File: tests/utils/test_time.py  (repo: SatelCreative/toolip)
# =============================================================================
from datetime import datetime, timezone

import pytz

from toolip.utils.time import make_time_aware, now, now_epoch, now_epoch_ms


def test_now():
    assert now().tzinfo == timezone.utc


def test_now_epoch():
    now = datetime.now(timezone.utc).timestamp()
    assert now_epoch() == int(now)


def test_now_epoch_ms():
    now = datetime.now(timezone.utc).timestamp() * 1000
    assert now_epoch_ms() == int(now)


def test_make_time_aware():
    dtime = datetime.now()
    assert dtime.tzinfo != pytz.utc
    assert make_time_aware(dtime).tzinfo == pytz.utc


# =============================================================================
# File: app/robot/types.py  (repo: mogenson/tubers)
# =============================================================================
from dataclasses import dataclass
from enum import Enum
from struct import unpack

from .packet import Packet


@dataclass
class Bumper:
    left: bool
    right: bool

    @classmethod
    def from_packet(cls, packet: Packet):
        return Bumper(packet.payload[4] & 0x80 != 0, packet.payload[4] & 0x40 != 0)


@dataclass
class Color:
    WHITE = 0
    BLACK = 1
    RED = 2
    GREEN = 3
    BLUE = 4
    ORANGE = 5
    YELLOW = 6
    MAGENTA = 7
    NONE = 15
    ANY = -1

    colors: list[int]

    @classmethod
    def from_packet(cls, packet: Packet):
        return Color([c >> i & 0xF for c in packet.payload for i in range(4, -1, -4)])


@dataclass
class Light:
    DARKER = 4
    RIGHT_BRIGHTER = 5
    LEFT_BRIGHTER = 6
    LIGHTER = 7

    state: int
    left: int = 0
    right: int = 0

    @classmethod
    def from_packet(cls, packet: Packet):
        return Light(
            packet.payload[4],
            unpack(">H", packet.payload[5:7])[0],
            unpack(">H", packet.payload[7:9])[0],
        )


@dataclass
class Touch:
    front_left: bool
    front_right: bool
    back_right: bool
    back_left: bool

    @classmethod
    def from_packet(cls, packet: Packet):
        return Touch(
            packet.payload[4] & 0x80 != 0,
            packet.payload[4] & 0x40 != 0,
            packet.payload[4] & 0x20 != 0,
            packet.payload[4] & 0x10 != 0,
        )


def note(note: str, A4=440) -> float:
    """Convert a note name into frequency in hertz: e.g. 'C#5'"""
    notes = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    octave = int(note[-1])
    step = notes.index(note[0:-1])
    step += ((octave - 1) * 12) + 1
    return A4 * 2 ** ((step - 46) / 12)


class Marker(Enum):
    UP = 0
    DOWN = 1
    ERASE = 2


class Animation(Enum):
    OFF = 0
    ON = 1
    BLINK = 2
    SPIN = 3


class ColorSensors(Enum):
    SENSORS_0_TO_7 = 0
    SENSORS_8_TO_15 = 1
    SENSORS_16_TO_23 = 2
    SENSORS_24_TO_31 = 3


class ColorLighting(Enum):
    OFF = 0
    RED = 1
    GREEN = 2
    BLUE = 3
    ALL = 4


class ColorFormat(Enum):
    ADC_COUNTS = 0
    MILLIVOLTS = 1


class ModulationType(Enum):
    DISABLED = 0
    VOLUME = 1
    PULSE_WIDTH = 2
    FREQUENCY = 3
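# ---- worked example (illustrative; values follow from the formula above) ----
# note() counts semitone steps from C1 and anchors step 46 to the reference A4:
#   note("A4")  -> step = 9 + 36 + 1 = 46 -> 440 * 2**(0/12)   = 440.0 Hz
#   note("C#5") -> step = 1 + 48 + 1 = 50 -> 440 * 2**(4/12)  ~= 554.37 Hz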
# =============================================================================
# File: split_data.py  (repo: smtnkc/gcn4epi)
# =============================================================================
import os
import random
import pickle as pkl
import argparse
from sklearn.model_selection import train_test_split
from prepare_data import getTuples

def trainTestSplit(cell_line, cross_cell_line, id_dict, cross_begin_id, label_rate, seed):

    def getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed):

        """
        Returns ID portions for train, test, validation split.

        Label rate is the number of labeled nodes (x) that are used
        for training divided by the total number of nodes in dataset.

        Example: Label rate = 0.1
                 10% labeled training (x)
                 60% unlabeled training (ux)
                 10% validation (vx)
                 20% test (tx) !!! 20% of the same or cross cell-line !!!

        allx = x + ux + vx
        """

        idx = list(id_dict.values())[0:cross_begin_id]  # do not include cross cell-line elements
        idx_allx, idx_tx = train_test_split(idx, test_size=0.2, random_state=seed)
        idx_x_vx, idx_ux = train_test_split(idx_allx, test_size=1-(label_rate*2/0.8), random_state=seed)
        idx_x, idx_vx = train_test_split(idx_x_vx, test_size=0.5, random_state=seed)

        if cross_begin_id == len(id_dict):
            # No cross cell-line specified. Use same cell-line for testing.
            print('SAME CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
                  .format(len(idx_x), len(idx_vx), len(idx_tx), cell_line, len(idx_ux)))
        else:
            # Use cross cell-line for testing. Overwrite idx_tx.
            cross_idx = list(id_dict.values())[cross_begin_id:]
            _, idx_tx = train_test_split(cross_idx, test_size=0.2, random_state=seed)
            print('CROSS CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
                  .format(len(idx_x), len(idx_vx), len(idx_tx), cross_cell_line, len(idx_ux)))

        return idx_x, idx_ux, idx_vx, idx_tx


    # TRAIN / TEST / VALIDATION SPLIT
    idx_x, idx_ux, idx_vx, idx_tx = getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed)
    print('Writing index files for train/test/validation split...')

    if (args.cross_cell_line != None) and (args.cross_cell_line != args.cell_line):
        dump_dir = 'data/{}/'.format(cell_line + '_' + cross_cell_line)
    else:
        dump_dir = 'data/{}/'.format(cell_line)

    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    lr = '{:.2f}'.format(label_rate).split('.')[1]

    idx_x_file = open('{}/x_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_x, idx_x_file)
    idx_x_file.close()

    idx_ux_file = open('{}/ux_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_ux, idx_ux_file)
    idx_ux_file.close()

    idx_vx_file = open('{}/vx_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_vx, idx_vx_file)
    idx_vx_file.close()

    idx_tx_file = open('{}/tx_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_tx, idx_tx_file)
    idx_tx_file.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='gcn4epi')
    parser.add_argument('--cell_line', default='GM12878', type=str)
    parser.add_argument('--cross_cell_line', default=None, type=str)  # set to run cross cell-line testing
    parser.add_argument('--k_mer', default=5, type=int)
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--label_rate', default=0.2, type=float)  # [0.2, 0.1, 0.05]
    parser.add_argument('--frag_len', default=200, type=int)  # set 0 to disable fragmentation and use full sequences
    args = parser.parse_args()
    random.seed(args.seed)

    _, id_dict, cross_begin_id = getTuples(args.cell_line, args.cross_cell_line, args.k_mer)  # requires successful run of prepare_gcn_data.py

    trainTestSplit(args.cell_line, args.cross_cell_line, id_dict, cross_begin_id, args.label_rate, args.seed)
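# ---- worked example (illustrative) ----
# With 1000 ids and label_rate = 0.1, the nested splits above give exactly
# the portions promised in the docstring:
#   allx, tx = 800, 200                        (test_size = 0.2)
#   x_vx, ux = 200, 600    since 1 - (0.1*2/0.8) = 0.75
#   x,    vx = 100, 100                        (test_size = 0.5)
# i.e. 10% labeled, 60% unlabeled, 10% validation, 20% test.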
# =============================================================================
# File: muk_autovacuum/__init__.py  (repo: Yousif-Mobark/odoo11_cutom)
# =============================================================================
###################################################################################
#
#    Copyright (C) 2018 MuK IT GmbH
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################

from odoo import api, SUPERUSER_ID

from . import models

def _get_value(env, model):
    model_model = env['ir.model']
    model_fields = env['ir.model.fields']
    model = model_model.search([('model', '=', model)], limit=1)
    if model.exists():
        field_domain = [
            ('model_id', '=', model.id),
            ('ttype', '=', 'datetime'),
            ('name', '=', 'create_date')]
        field = model_fields.search(field_domain, limit=1)
        return model, field
    return None

def _init_default_rules(cr, registry):
    env = api.Environment(cr, SUPERUSER_ID, {})
    rule = env['muk_autovacuum.rules']
    values = _get_value(env, 'mail.message')
    if values:
        rule.create({
            'name': "Delete Message Attachments after 6 Months",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'months',
            'time': 6,
            'only_attachments': True})
        rule.create({
            'name': "Delete Messages after 1 Year",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'years',
            'time': 1})
    values = _get_value(env, 'ir.logging')
    if values:
        rule.create({
            'name': "Delete Logs after 2 Weeks",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'weeks',
            'time': 2,
            'protect_starred': False})
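# ---- usage sketch (illustrative) ----
# In an Odoo module, an init function like _init_default_rules is typically
# wired up as a post-init hook in the module's __manifest__.py:
#
#   'post_init_hook': '_init_default_rules',
#
# so the default vacuum rules are created once, right after installation.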
# =============================================================================
# File: src/tvl/transforms.py  (repo: hyperfraise/tvl)
# =============================================================================
"""Functions for transforming image data stored in PyTorch tensors.

This module is necessary since most of the transformations provided by the `torchvision` package
are applicable for PIL.Image images only. Since tvl may load video frames on the GPU, we want
to be able to take the computation to the data rather than moving the images to and from main
memory.

As an additional benefit, these functions are defined such that they also work in batched mode,
which is especially useful for videos.
"""

import math
from typing import Sequence

import torch
from torch.nn.functional import interpolate
from torchgeometry import warp_affine


def normalise(tensor, mean, stddev, inplace=False):
    """Normalise the image with channel-wise mean and standard deviation.

    Args:
        tensor (torch.Tensor): The image tensor to be normalised.
        mean (Sequence of float): Means for each channel.
        stddev (Sequence of float): Standard deviations for each channel.
        inplace (bool): Perform normalisation in-place.

    Returns:
        Tensor: The normalised image tensor.
    """
    mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None]
    stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None]

    if inplace:
        tensor.sub_(mean)
    else:
        tensor = tensor.sub(mean)

    tensor.div_(stddev)
    return tensor


def denormalise(tensor, mean, stddev, inplace=False):
    """Denormalise the image with channel-wise mean and standard deviation.

    Args:
        tensor (torch.Tensor): The image tensor to be denormalised.
        mean (Sequence of float): Means for each channel.
        stddev (Sequence of float): Standard deviations for each channel.
        inplace (bool): Perform denormalisation in-place.

    Returns:
        Tensor: The denormalised image tensor.
    """
    mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None]
    stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None]

    if inplace:
        return tensor.mul_(stddev).add_(mean)
    else:
        return torch.addcmul(mean, tensor, stddev)
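# ---- usage sketch (illustrative; assumes the two functions above are in scope) ----
# normalise() and denormalise() are inverses, so a round trip reproduces the input:
#
#   img = torch.rand(3, 8, 8)
#   mean, stddev = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
#   restored = denormalise(normalise(img, mean, stddev), mean, stddev)
#   assert torch.allclose(img, restored, atol=1e-6)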
def resize(tensor, size, mode='bilinear'):
    """Resize the image.

    Args:
        tensor (torch.Tensor): The image tensor to be resized.
        size (tuple of int): Size of the resized image (height, width).
        mode (str): The pixel sampling interpolation mode to be used.

    Returns:
        Tensor: The resized image tensor.
    """
    assert len(size) == 2

    # If the tensor is already the desired size, return it immediately.
    if tensor.shape[-2] == size[0] and tensor.shape[-1] == size[1]:
        return tensor

    if not tensor.is_floating_point():
        dtype = tensor.dtype
        tensor = tensor.to(torch.float32)
        tensor = resize(tensor, size, mode)
        return tensor.to(dtype)

    out_shape = (*tensor.shape[:-2], *size)
    if tensor.ndimension() < 3:
        raise Exception('tensor must be at least 2D')
    elif tensor.ndimension() == 3:
        tensor = tensor.unsqueeze(0)
    elif tensor.ndimension() > 4:
        tensor = tensor.view(-1, *tensor.shape[-3:])
    align_corners = None
    if mode in {'linear', 'bilinear', 'trilinear'}:
        align_corners = False
    resized = interpolate(tensor, size=size, mode=mode, align_corners=align_corners)
    return resized.view(*out_shape)


def crop(tensor, t, l, h, w, padding_mode='constant', fill=0):
    """Crop the image, padding out-of-bounds regions.

    Args:
        tensor (torch.Tensor): The image tensor to be cropped.
        t (int): Top pixel coordinate.
        l (int): Left pixel coordinate.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        padding_mode (str): Padding mode (currently "constant" is the only valid option).
        fill (float): Fill value to use with constant padding.

    Returns:
        Tensor: The cropped image tensor.
    """
    # If the crop region is wholly within the image, simply narrow the tensor.
    if t >= 0 and l >= 0 and t + h <= tensor.size(-2) and l + w <= tensor.size(-1):
        return tensor[..., t:t+h, l:l+w]

    if padding_mode == 'constant':
        result = torch.full((*tensor.size()[:-2], h, w), fill,
                            device=tensor.device, dtype=tensor.dtype)
    else:
        raise Exception('crop only supports "constant" padding currently.')

    sx1 = l
    sy1 = t
    sx2 = l + w
    sy2 = t + h
    dx1 = 0
    dy1 = 0

    if sx1 < 0:
        dx1 = -sx1
        w += sx1
        sx1 = 0

    if sy1 < 0:
        dy1 = -sy1
        h += sy1
        sy1 = 0

    if sx2 >= tensor.size(-1):
        w -= sx2 - tensor.size(-1)

    if sy2 >= tensor.size(-2):
        h -= sy2 - tensor.size(-2)

    # Copy the in-bounds sub-area of the crop region into the result tensor.
    if h > 0 and w > 0:
        src = tensor.narrow(-2, sy1, h).narrow(-1, sx1, w)
        dst = result.narrow(-2, dy1, h).narrow(-1, dx1, w)
        dst.copy_(src)

    return result
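# ---- usage sketch (illustrative) ----
# crop() pads out-of-bounds regions instead of failing; a 3x3 window starting
# one pixel above and left of a 4x4 image fills the new border with `fill`:
#
#   x = torch.arange(16, dtype=torch.float32).view(1, 4, 4)
#   y = crop(x, t=-1, l=-1, h=3, w=3)
#   # y[0] == [[0, 0, 0],
#   #          [0, 0, 1],
#   #          [0, 4, 5]]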
def flip(tensor, horizontal=False, vertical=False):
    """Flip the image.

    Args:
        tensor (torch.Tensor): The image tensor to be flipped.
        horizontal: Flip horizontally.
        vertical: Flip vertically.

    Returns:
        Tensor: The flipped image tensor.
    """
    if horizontal:
        tensor = tensor.flip(-1)
    if vertical:
        tensor = tensor.flip(-2)
    return tensor


def affine(tensor, matrix):
    """Apply an affine transformation to the image.

    Args:
        tensor (torch.Tensor): The image tensor to be warped.
        matrix (torch.Tensor): The 2x3 affine transformation matrix.

    Returns:
        Tensor: The warped image.
    """
    is_unbatched = tensor.ndimension() == 3
    if is_unbatched:
        tensor = tensor.unsqueeze(0)
    warped = warp_affine(tensor, matrix, tensor.size()[-2:])
    if is_unbatched:
        warped = warped.squeeze(0)
    return warped


def rotate(tensor, degrees):
    """Rotate the image anti-clockwise about the centre.

    Args:
        tensor (torch.Tensor): The image tensor to be rotated.
        degrees (float): The angle through which to rotate.

    Returns:
        Tensor: The rotated image tensor.
    """
    rads = math.radians(degrees)
    h, w = tensor.size()[-2:]
    c = math.cos(rads)
    s = math.sin(rads)
    x = (w - 1) / 2
    y = (h - 1) / 2
    # Transformation matrix for clockwise rotation about the centre of the image.
    matrix = torch.tensor([[
        [ c, s, -c * x - s * y + x],
        [-s, c,  s * x - c * y + y],
    ]], dtype=torch.float32, device=tensor.device)
    return affine(tensor, matrix)


def fit(tensor, size, fit_mode='cover', resize_mode='bilinear', *, fill=0):
    """Fit the image within the given spatial dimensions.

    Args:
        tensor (torch.Tensor): The image tensor to be fit.
        size (tuple of int): Size of the output (height, width).
        fit_mode (str): 'fill', 'contain', or 'cover'. These behave in the same way as CSS's
            `object-fit` property.
        fill (float): padding value (only applicable in 'contain' mode).

    Returns:
        Tensor: The resized image tensor.
    """
    # Modes are named after CSS object-fit values.
    assert fit_mode in {'fill', 'contain', 'cover'}

    if fit_mode == 'fill':
        return resize(tensor, size, mode=resize_mode)
    elif fit_mode == 'contain':
        ih, iw = tensor.shape[-2:]
        k = min(size[-1] / iw, size[-2] / ih)
        oh = round(k * ih)
        ow = round(k * iw)
        resized = resize(tensor, (oh, ow), mode=resize_mode)
        result = tensor.new_full((*tensor.size()[:-2], *size), fill)
        y_off = (size[-2] - oh) // 2
        x_off = (size[-1] - ow) // 2
        result[..., y_off:y_off + oh, x_off:x_off + ow] = resized
        return result
    elif fit_mode == 'cover':
        ih, iw = tensor.shape[-2:]
        k = max(size[-1] / iw, size[-2] / ih)
        oh = round(k * ih)
        ow = round(k * iw)
        resized = resize(tensor, (oh, ow), mode=resize_mode)
        y_trim = (oh - size[-2]) // 2
        x_trim = (ow - size[-1]) // 2
        result = crop(resized, y_trim, x_trim, size[-2], size[-1])
        return result

    raise Exception('This code should not be reached.')


# =============================================================================
# File: TE-1/PL-1/OSD/2. Socket(py)/server.py  (repo: Adityajn/College-Codes)
# =============================================================================
import socket, sys

s = socket.socket()
#host = socket.gethostname()
port = 28901  # port between 1024 and 49151
s.bind((sys.argv[1], port))
s.listen(4)
c, addr = s.accept()
print("Connected to:", addr)

f1 = open(sys.argv[2], "rb")  # open file in binary read mode
bytes = f1.read(1024)         # read 1024 bytes
while bytes:
    c.send(bytes)             # send the bytes just read
    bytes = f1.read(1024)     # read the next 1024 bytes
f1.close()
c.close()


# =============================================================================
# File: master_django/intensity/register/context_processors.py  (repo: kripken/intensityengine)
# =============================================================================

# Copyright 2010 ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.

from intensity.models import UserAccount
import intensity.conf as intensity_conf

def account(request):
    '''
    A context processor that provides 'my_account', the Intensity Engine account info for a user,
    and shows messages for that account
    '''
    ret = {
        'my_account': request.account if request.user.is_authenticated() else None,
        'message': request.session.get('message'),
    }
    request.session['message'] = None
    return ret

def toplevel(request):
    '''
    Gives a redirect URL for the toplevel
    '''
    return { 'toplevel_root': intensity_conf.get('Sites', 'toplevel_root') }
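# ---- usage sketch (illustrative; settings layout assumes a modern Django project) ----
# Context processors like the two above take effect once listed in the
# project's template settings, e.g.:
#
#   TEMPLATES = [{
#       # ...
#       'OPTIONS': {
#           'context_processors': [
#               'django.template.context_processors.request',
#               'intensity.register.context_processors.account',
#               'intensity.register.context_processors.toplevel',
#           ],
#       },
#   }]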
# =============================================================================
# File: manifest/mixins.py  (repo: ozgurgunes/django-manifest)
# =============================================================================
# -*- coding: utf-8 -*-
""" Manifest View Mixins
"""

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.generic import FormView, View

from manifest import decorators, defaults
from manifest.utils import get_protocol


class MessageMixin:
    """
    View mixin adding messages to response.
    """

    success_message = ""
    error_message = ""
    extra_context = None

    def set_success_message(self, message):
        if defaults.MANIFEST_USE_MESSAGES:
            messages.success(self.request, message, fail_silently=True)

    def set_error_message(self, message):
        if defaults.MANIFEST_USE_MESSAGES:
            messages.error(self.request, message, fail_silently=True)


class SendMailMixin:
    """
    Mixin that sends an email to given recipients.
    """

    from_email = None
    email_subject_template_name = None
    email_message_template_name = None
    email_html_template_name = None

    def create_email(self, context, recipient):
        if not self.email_subject_template_name:
            raise ImproperlyConfigured(
                "No template name for subject. "
                "Provide an email_subject_template_name."
            )
        if not self.email_message_template_name:
            raise ImproperlyConfigured(
                "No template name for message. "
                "Provide an email_message_template_name."
            )

        subject = "".join(
            render_to_string(
                self.email_subject_template_name, context
            ).splitlines()
        )
        message = render_to_string(self.email_message_template_name, context)

        return EmailMultiAlternatives(
            subject, message, self.from_email, [recipient]
        )

    def send_mail(self, recipient, opts):
        """
        Send a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        context = {
            "protocol": get_protocol(),
            "site": Site.objects.get_current(),
        }
        context.update(opts)

        email = self.create_email(context, recipient)

        if self.email_html_template_name is not None:
            html_email = render_to_string(
                self.email_html_template_name, context
            )
            email.attach_alternative(html_email, "text/html")

        return email.send()


class SendActivationMailMixin(SendMailMixin):
    def send_activation_mail(self, user):
        context = {
            "user": user,
            "activation_days": defaults.MANIFEST_ACTIVATION_DAYS,
            "activation_key": user.activation_key,
        }
        self.send_mail(user.email, context)


class EmailChangeMixin(SendMailMixin):

    email_subject_template_name_old = (
        "manifest/emails/confirmation_email_subject_old.txt"
    )
    email_message_template_name_old = (
        "manifest/emails/confirmation_email_message_old.txt"
    )
    email_html_template_name_old = None
    email_subject_template_name_new = (
        "manifest/emails/confirmation_email_subject_new.txt"
    )
    email_message_template_name_new = (
        "manifest/emails/confirmation_email_message_new.txt"
    )
    email_html_template_name_new = None

    def send_confirmation_mail(self, user):

        context = {
            "user": user,
            "new_email": user.email_unconfirmed,
            "confirmation_key": user.email_confirmation_key,
        }

        self.email_subject_template_name = self.email_subject_template_name_old
        self.email_message_template_name = self.email_message_template_name_old
        self.email_html_template_name = self.email_html_template_name_old
        self.send_mail(user.email, context)

        self.email_subject_template_name = self.email_subject_template_name_new
        self.email_message_template_name = self.email_message_template_name_new
        self.email_html_template_name = self.email_html_template_name_new
        self.send_mail(user.email_unconfirmed, context)


class SecureRequiredMixin(View):
    """
    Mixin that switches URL from http to https if
    ``MANIFEST_USE_HTTPS`` setting is ``True``.

    """

    @method_decorator(decorators.secure_required)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)


class LoginRequiredMixin(View):
    """
    Mixin that redirects user to login form if not authenticated yet.

    """

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)


# pylint: disable=bad-continuation
class UserFormMixin(
    FormView, SecureRequiredMixin, LoginRequiredMixin, MessageMixin
):
    """
    Mixin that sets the form's user argument to ``request.user``.
    """

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["user"] = self.request.user
        return kwargs
# =============================================================================
# File: scripts/size_msgs_test.py  (repo: UCY-LINC-LAB/Self-Stabilization-Edge-Simulator)
# =============================================================================
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import io
import base64
import os
import sys
import argparse

# See https://matplotlib.org/3.1.0/users/dflt_style_changes.html
plt.style.use('seaborn-ticks')
mpl.rcParams['grid.color'] = 'grey'
mpl.rcParams['grid.linestyle'] = ':'
mpl.rcParams['grid.linewidth'] = 0.5
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['font.size'] = 15
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.titlesize'] = 'large'


def build_graph(f, export):
    if export:
        f.savefig(export, format='png')
        return
    img = io.BytesIO()
    f.set_size_inches(11.7, 8.27)
    f.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    return graph_url
    # return 'data:image/png;base64,{}'.format(graph_url)


def load_data(file, period):
    data = []
    last_time = 0
    partial = [0., 0., 0., 0.]
    with open(file, 'r') as fp:
        for line in fp:
            line = line.strip()
            if len(line) == 0:
                continue
            if line.startswith('time'):
                continue
            toks = line.split(',')
            t = int(toks[0])
            control_count = int(toks[1])
            control_size = int(toks[2])
            data_count = int(toks[5])
            data_size = int(toks[6])

            control_size *= (1000 / period)
            data_size *= (1000 / period)

            partial[0] += control_count
            partial[1] += control_size
            partial[2] += data_count
            partial[3] += data_size

            if t - last_time > period:
                last_time = t
                data.append([t, partial[0], partial[1] / 1024, partial[2], partial[3] / 1024])
                partial = [0., 0., 0., 0.]
    return np.array(data)


def compute_graph2(data):
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 12), sharex=False)
    controlColor = 'xkcd:bright blue'
    dataColor = 'xkcd:light orange'
    # Time is in ms...
    df_time = data[:, 0] / 1000
    df_control_msgs_count = data[:, 1]
    df_control_msgs_size = data[:, 2] / 1024
    df_data_msgs_count = data[:, 3]
    df_data_msgs_size = data[:, 4] / 1024

    ax1.fill_between(x=df_time, y1=df_data_msgs_size, y2=0, color=dataColor, alpha=1, label="Data Plane")
    ax1.plot(df_time, df_data_msgs_size, color=dataColor, marker='o', markersize=2, alpha=1, linewidth=1)

    ax1.fill_between(x=df_time, y1=df_control_msgs_size, y2=0, color=controlColor, alpha=0.55, label="Control Plane")
    ax1.plot(df_time, df_control_msgs_size, color=controlColor, marker='D', markersize=2, alpha=0.85, linewidth=1)

    ax1.legend()
    # ax1.set_title('Traffic Transmitted')
    ax1.set_ylabel('Network Traffic (MB/s)')
    ax1.set_xlabel('Time (s)')
    ax1.grid()
    # Now to MBs
    #df['control_msgs_sz'] /= 1024
    #df['data_msgs_sz'] /= 1024

    ax2.plot(df_time, df_data_msgs_size.cumsum(), color=dataColor, alpha=1, label="Data Plane")
    ax2.plot(df_time, df_control_msgs_size.cumsum(), color=controlColor, alpha=1, label="Control Plane")
    ax2.legend()
    ax2.grid()
    ax2.set_ylabel('Total Network Traffic (MB)')
    ax2.set_xlabel('Time (s)')
    return fig


if __name__ == '__main__':

    root = os.getenv('RESULTS_ROOT', "../results/small")
    scenario = os.getenv('SCENARIO', "all_failures")

    experiments = os.listdir(os.path.join(os.path.abspath(root), scenario))
    print("Existing experiments: " + str(experiments))
    experiment = experiments[0]
    print("Using experiment: " + str(experiment))
    file = "stats/network/msgs.csv"

    # In ms
    period = 200
    path = os.path.join(root, scenario, experiment, file)
    data = load_data(path, period)
    fig = compute_graph2(data)
    plt.show()

    #build_graph(fig, export=None)
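# ---- input format sketch (illustrative; column layout inferred from load_data) ----
# load_data() expects stats/network/msgs.csv rows shaped like:
#
#   time,control_count,control_size,_,_,data_count,data_size
#   200,12,3480,0,0,7,51200
#
# Columns 3 and 4 are skipped; sizes are rescaled to per-second rates using
# `period` and accumulated until `period` ms have elapsed.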
# =============================================================================
# File: src/azure-cli/azure/cli/command_modules/cognitiveservices/tests/latest/test_network_rules.py
#       (repo: xaliciayang/azure-cli)
# =============================================================================
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import unittest

from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer


class CognitiveServicesNetworkRulesTests(ScenarioTest):
    @ResourceGroupPreparer()
    def test_cognitiveservices_network_rules(self, resource_group):
        sname = self.create_random_name(prefix='cs_cli_test_', length=16)
        customdomain = self.create_random_name(prefix='csclitest', length=16)

        self.kwargs.update({
            'sname': sname,
            'vnetname': sname,
            'kind': 'Face',
            'sku': 'S0',
            'location': 'westus',
            'customdomain': customdomain,
        })

        self.cmd('network vnet create --resource-group {rg} --name {vnetname}')

        subnet1 = self.cmd('network vnet subnet create --resource-group {rg} --name default'
                           ' --vnet-name {vnetname} --address-prefixes 10.0.0.0/24').get_output_in_json()
        subnet2 = self.cmd('network vnet subnet create --resource-group {rg} --name subnet'
                           ' --vnet-name {vnetname} --address-prefixes 10.0.1.0/24').get_output_in_json()

        self.cmd('az cognitiveservices account create -n {sname} -g {rg} --kind {kind} --sku {sku} -l {location}'
                 ' --custom-domain {customdomain} --yes',
                 checks=[self.check('name', '{sname}'),
                         self.check('location', '{location}'),
                         self.check('sku.name', '{sku}'),
                         self.check('properties.provisioningState', 'Succeeded')])

        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.58.3"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 1)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet ' + subnet1['id'])
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 1)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet ' + subnet2['name'] +
                 ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.58.3"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 1)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet1['id'])
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 1)
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet2['name'] +
                 ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        # Remove something that doesn't exist in the rules
        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet2['name'] +
                 ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        # delete the cognitive services account
        ret = self.cmd('az cognitiveservices account delete -n {sname} -g {rg}')
        self.assertEqual(ret.exit_code, 0)


if __name__ == '__main__':
    unittest.main()
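# ---- note (illustrative) ----
# ScenarioTest interpolates `{...}` placeholders in self.cmd() from self.kwargs,
# so 'az cognitiveservices account create -n {sname} -g {rg}' expands using the
# values registered via self.kwargs.update(...) above ({rg} is injected by the
# ResourceGroupPreparer decorator).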
# =============================================================================
# File: nbcelltests/define.py  (repo: timkpaine/nbcelltests)
# =============================================================================
# *****************************************************************************
#
# Copyright (c) 2019, the nbcelltests authors.
#
# This file is part of the nbcelltests library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum


class LintType(Enum):
    LINES_PER_CELL = "lines_per_cell"
    CELLS_PER_NOTEBOOK = "cells_per_notebook"
    FUNCTION_DEFINITIONS = "function_definitions"
    CLASS_DEFINITIONS = "class_definitions"
    LINTER = "linter"
    KERNELSPEC = "kernelspec"
    MAGICS = "magics"


class TestType(Enum):
    CELL_COVERAGE = "cell_coverage"
    CELL_TEST = "cell_test"


class LintMessage(object):
    def __init__(self, cell, message, type, passed=False):
        self.cell = cell
        self.message = message
        self.type = type
        self.passed = passed

    def __repr__(self):
        ret = "PASSED: " if self.passed else "FAILED: "
        ret += self.message
        ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
        return ret

    def to_html(self):
        ret = (
            'PASSED&nbsp;'
            if self.passed
            else 'FAILED&nbsp;'
        )
        ret += self.message
        ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
        return ret


class TestMessage(object):
    def __init__(self, cell, message, type, passed=0):
        self.cell = cell
        self.message = message
        self.type = type
        self.passed = passed

    def __repr__(self):
        ret = (
            "PASSED: "
            if self.passed > 0
            else "FAILED: "
            if self.passed < 0
            else "NOT RUN: "
        )
        ret += self.message
        ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
        return ret

    def to_html(self):
        ret = (
            'PASSED&nbsp;'
            if self.passed
            else 'FAILED&nbsp;'
        )
        ret += self.message
        ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
        return ret
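# ---- usage sketch (illustrative) ----
# The message classes render straight from their state; TestMessage treats
# `passed` as a tri-state (positive, negative, zero):
#
#   repr(TestMessage(cell=3, message="cell coverage 80%",
#                    type=TestType.CELL_COVERAGE, passed=1))
#   # -> "PASSED: cell coverage 80% (Cell 3)"
#
#   repr(TestMessage(0, "notebook coverage", TestType.CELL_COVERAGE))
#   # -> "NOT RUN: notebook coverage"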
the_payload['kind'] == 'youtube#commentThread':\n the_text = the_payload['snippet']['topLevelComment']['snippet']['textOriginal']\n\n elif the_payload['kind'] == 'youtube#comment':\n the_text = the_payload['snippet']['textOriginal']\n\n cleaned_toks = clean_text(the_text)\n the_payload['cleaned_tokens'] = cleaned_toks\n cmts.append(the_payload)\n\n with open(dest_path, 'a') as handle:\n for cmt in cmts:\n handle.write(json.dumps(cmt))\n handle.write('\\n')\n \n"},"size":{"kind":"number","value":1781,"string":"1,781"}}},{"rowIdx":344,"cells":{"max_stars_repo_path":{"kind":"string","value":"unused/mlat/dim1/stringABEdge.py"},"max_stars_repo_name":{"kind":"string","value":"yoongun/topological-edge-modes-of-mechanical-lattice"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023695"},"content":{"kind":"string","value":"import numpy as np\nfrom numpy import linalg as la\nfrom typing import List, Tuple\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport pandas as pd\n\n\nclass StringABLatticeEdge:\n \"\"\"\n Reference:\n \"A study of topological effects in 1D and 2D mechanical lattices\" (2018), et. al.\n from 'Journal of the Mechanics and Physics of Solids', Volum 117, Aug 2018, 22-36,\n https://www.sciencedirect.com/science/article/abs/pii/S0022509618301820\n \"\"\"\n\n def __init__(self, k: List[float], m: List[float], precision: float = .01) -> None:\n \"\"\"\n Represents dynamic system of 1 dimensional mechanical lattice.\n e.g.) ABABAB...\n\n :param k: Spring constants (2)\n :param m: Mass (2)\n :param precision: Precision for wavenumber q\n \"\"\"\n if len(k) != len(m):\n raise ValueError(\n f\"The length of k={len(k)} and m={len(m)} does not match.\")\n self.k = k\n self.M = np.diag(m)\n self.qs = np.arange(-np.pi, np.pi, precision)\n\n def H(self, q):\n \"\"\"\n Hamiltonian \n\n :return: Hamiltonian defined given k and q\n \"\"\"\n k = self.k\n Q = np.exp(1.j * q)\n return np.array([[k[0] + k[1], -k[0] - k[1] * Q.conj()],\n [-k[0] - k[1] * Q, k[0] + k[1]]])\n\n def dispersion(self) -> List[Tuple[float, float]]:\n \"\"\"\n Calculate the dispersion relation\n\n :return: List of angular frequency omega for each q (wavenumber) and its eigenvectors\n \"\"\"\n M_inv = la.inv(self.M)\n eigenvals = []\n eigenvecs = []\n for q in self.qs:\n eigen_val, eigen_vec = self._min_eigen(M_inv.dot(self.H(q)))\n eigenvals.append(eigen_val)\n eigenvecs.append(eigen_vec)\n ws = np.sqrt(np.array(eigenvals).real)\n evs = np.array(eigenvecs)\n return ws, evs\n\n def _min_eigen(self, mat: np.ndarray) -> Tuple[float, float]:\n \"\"\"\n Return eigenvalue, eigenvector pair of minimum eigenvalue.\n\n :return: eigenvalue, eigenvector\n \"\"\"\n eigenvals, eigenvecs = la.eig(mat)\n min_idx = np.argsort(eigenvals)\n return eigenvals[min_idx], eigenvecs[min_idx]\n\n def beta(self) -> float:\n \"\"\"\n Calculate varying contrast beta with given spring constants\n\n :return: Varying contrast beta\n \"\"\"\n k = self.k\n return (k[0] - k[1]) / (k[0] + k[1])\n\n def animate(self, q: float, N: int, mode: int, *, fps: int = 30, s: int = 3):\n \"\"\"\n :param q: Wavenumber to animate [-pi, pi]\n :param N: Number of unit cells\n :param mode: Mode to animate (0 for acoustic, 1 for optical)\n :param fps: (Optional) Frame per second (/s) (default: 30 /s)\n :param s: (Optional) Animation duration (s) (default: 3 s)\n \"\"\"\n ws, evs = self.dispersion()\n\n # Parameters\n idx = min(range(len(self.qs)), key=lambda i: abs(self.qs[i] - q))\n w = ws[idx, mode] # /s\n\n # 
Construct frames\n frames = []\n for t in range(int(s * fps)):\n dt = t / fps\n dphase = dt * w * 2 * np.pi\n y = []\n for i in range(N):\n y.append(evs[idx, mode, 0] * np.exp(1.j * (q * i + dphase)))\n y.append(evs[idx, mode, 1] * np.exp(1.j * (q * i + dphase)))\n y = np.array(y)\n frames.append(\n go.Frame(data=[go.Scatter(y=y.real, line_shape='spline')]))\n\n # Figure components\n start_button = dict(\n label=\"Play\",\n method=\"animate\",\n args=[\n None,\n {\n \"frame\": {\"duration\": 1000 / fps, \"redraw\": False},\n \"fromcurrent\": True,\n \"transition\": {\"duration\": 100}\n }])\n pause_button = dict(\n label=\"Pause\",\n method=\"animate\",\n args=[\n [None],\n {\n \"frame\": {\"duration\": 0, \"redraw\": False},\n \"mode\": \"immediate\",\n \"transition\": {\"duration\": 0}\n }])\n\n # Plot\n fig = go.Figure(\n data=frames[0].data,\n layout=go.Layout(\n title=\"Dispersion relation animation\",\n yaxis=dict(range=[-1., 1.], autorange=False),\n updatemenus=[\n dict(\n type=\"buttons\",\n buttons=[start_button, pause_button\n ])\n ]\n ),\n frames=frames[1:])\n fig.show()\n\n def plot_dispersion_relation(self):\n ws, _ = self.dispersion()\n w0 = ws[:, 0]\n w1 = ws[:, 1]\n ws = np.append(w0, w1)\n\n x = np.append(self.qs, self.qs)\n y = ws\n index = np.append(np.repeat(0, len(self.qs)),\n np.repeat(1, len(self.qs)))\n\n df = pd.DataFrame({\n \"q\": x,\n \"w\": y,\n \"index\": index,\n })\n\n fig = px.line(df, x=\"q\", y=\"w\", color='index')\n fig.show()\n"},"size":{"kind":"number","value":5196,"string":"5,196"}}},{"rowIdx":345,"cells":{"max_stars_repo_path":{"kind":"string","value":"Lesson 4/website_alive/make_request.py"},"max_stars_repo_name":{"kind":"string","value":"arechesk/PythonHW"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023763"},"content":{"kind":"string","value":"import requests\n\nOK = requests.codes.ok\n\n\ndef request(url):\n r = requests.get(url)\n return r\n\n"},"size":{"kind":"number","value":100,"string":"100"}}},{"rowIdx":346,"cells":{"max_stars_repo_path":{"kind":"string","value":"ddi_search_engine/Bio/dbdefs/embl.py"},"max_stars_repo_name":{"kind":"string","value":"dbmi-pitt/DIKB-Evidence-analytics"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2023274"},"content":{"kind":"string","value":"# Copyright 2002 by . All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. 
Please see the LICENSE file that should have been included\n# as part of this package.\n\nfrom Bio.config.DBRegistry import CGIDB, DBGroup\nfrom _support import *\n\nembl_xembl_cgi = CGIDB(\n name=\"embl-xembl-cgi\",\n doc=\"Query XEMBL for EMBL sequence data in XML format.\",\n cgi=\"http://www.ebi.ac.uk/cgi-bin/xembl/XEMBL.pl\",\n url=\"http://www.ebi.ac.uk/xembl/\",\n delay=5.0,\n params=[(\"format\", \"Bsml\")],\n key=\"id\",\n failure_cases=[(has_str(\"NOT EXIST\"), \"id does not exist\")],\n )\n\nembl_dbfetch_cgi = CGIDB(\n name=\"embl-dbfetch-cgi\",\n cgi=\"http://www.ebi.ac.uk/cgi-bin/dbfetch\",\n url=\"http://www.ebi.ac.uk/cgi-bin/dbfetch\",\n doc=\"dbfetch provides EMBL, Genbank, and SWALL sequences\",\n delay=5.0,\n params=[(\"db\", \"embl\"),\n (\"style\", \"raw\"),\n (\"format\", \"embl\"),\n ],\n key=\"id\",\n failure_cases=[(has_str(\"not found in database\"), \"id does not exist\")]\n )\n\nembl_ebi_cgi = CGIDB(\n name=\"embl-ebi-cgi\",\n cgi=\"http://www.ebi.ac.uk/cgi-bin/emblfetch\",\n url=\"http://www.ebi.ac.uk/cgi-bin/emblfetch\",\n doc=\"Retrieve many kinds of sequences from EBI\",\n delay=5.0,\n params=[(\"db\", \"EMBL\"),\n (\"format\", \"default\"), # also Fasta, bsml, agave available\n (\"style\", \"raw\")\n ],\n key=\"id\",\n failure_cases=[(blank_expr, \"No results returned\")]\n )\n\nembl = DBGroup(\n name=\"embl\",\n behavior=\"serial\",\n## cache=\"XXX\"\n )\nembl.add(embl_dbfetch_cgi)\nembl.add(embl_ebi_cgi)\n\nembl_xml = DBGroup(\n name = \"embl-xml\",\n behavior = \"serial\")\n\nembl_fast = DBGroup(\n name=\"embl-fast\",\n behavior=\"concurrent\",\n )\nembl_fast.add(embl_dbfetch_cgi)\nembl_fast.add(embl_ebi_cgi)\n"},"size":{"kind":"number","value":1844,"string":"1,844"}}},{"rowIdx":347,"cells":{"max_stars_repo_path":{"kind":"string","value":"reverse_proxy/proxies/admin.py"},"max_stars_repo_name":{"kind":"string","value":"optimor/reverse-proxy"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023991"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom jet.admin import CompactInline\n\nfrom .models import ProxySite, ProxyRewrite, ProxyHeader\nfrom .forms import ProxySiteForm\n\n\nclass ProxyRewriteInline(CompactInline):\n model = ProxyRewrite\n extra = 1\n fieldsets = (\n (\n None,\n {\n \"fields\": (\"from_regex\", \"to_regex\"),\n \"description\": \"A list of tuples in the style (from, to) where from \"\n \"must by a valid regex expression and to a valid URL. If \"\n \"request.get_full_path matches the from expression the \"\n \"request will be redirected to to with an status code 302. 
\"\n \"Matches groups can be used to pass parts from the from \"\n \"URL to the to URL using numbered groups.\",\n },\n ),\n )\n\n\nclass ProxyHeaderInline(CompactInline):\n model = ProxyHeader\n extra = 1\n fieldsets = (\n (\n None,\n {\n \"fields\": (\"header_name\", \"header_value\"),\n \"description\": \"A list of tuples in the style (key, value) where key \"\n \"must by a valid HEADER and key a valid header value.\",\n },\n ),\n )\n\n\n@admin.register(ProxySite)\nclass ProxySiteAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"upstream\",\n \"subdomain_name\",\n \"subdomain_full_url\",\n \"add_remote_user\",\n \"default_content_type\",\n \"retries\",\n )\n fieldsets = (\n (None, {\"fields\": (\"name\", \"upstream\", \"thumbnail\")}),\n (\n \"Subdomain\",\n {\n \"fields\": (\"subdomain_name\", \"subdomain_full_url\"),\n \"description\": \"Specify those to setup proxy that redirects based on \"\n \"the subdomain of the current URL\",\n },\n ),\n (\"Extra\", {\"fields\": (\"add_remote_user\", \"default_content_type\", \"retries\")}),\n )\n form = ProxySiteForm\n inlines = (ProxyRewriteInline, ProxyHeaderInline)\n"},"size":{"kind":"number","value":2024,"string":"2,024"}}},{"rowIdx":348,"cells":{"max_stars_repo_path":{"kind":"string","value":"preprocess.py"},"max_stars_repo_name":{"kind":"string","value":"Cyna298/hifi-gan"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023535"},"content":{"kind":"string","value":"import glob\nimport os\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom TTS.utils.audio import AudioProcessor\n\n\ndef preprocess_wav_files(out_path, config, ap):\n os.makedirs(os.path.join(out_path, \"quant\"), exist_ok=True)\n os.makedirs(os.path.join(out_path, \"mel\"), exist_ok=True)\n wav_files = find_wav_files(config.data_path)\n for path in tqdm(wav_files):\n wav_name = Path(path).stem\n quant_path = os.path.join(out_path, \"quant\", wav_name + \".npy\")\n mel_path = os.path.join(out_path, \"mel\", wav_name + \".npy\")\n y = ap.load_wav(path)\n mel = ap.melspectrogram(y)\n np.save(mel_path, mel)\n if isinstance(config.mode, int):\n quant = (\n ap.mulaw_encode(y, qc=config.mode)\n if config.mulaw\n else ap.quantize(y, bits=config.mode)\n )\n np.save(quant_path, quant)\n\n\ndef find_wav_files(data_path):\n wav_paths = glob.glob(os.path.join(data_path, \"**\", \"*.wav\"), recursive=True)\n return wav_paths\n\n"},"size":{"kind":"number","value":1051,"string":"1,051"}}},{"rowIdx":349,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"NineteenPeriod/django-bulk-update-or-create"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023217"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nfrom setuptools import setup\n\nsetup(\n name='django-bulk-update-or-create',\n)\n\n"},"size":{"kind":"number","value":103,"string":"103"}}},{"rowIdx":350,"cells":{"max_stars_repo_path":{"kind":"string","value":"artap/tests/test_benchmark_robust.py"},"max_stars_repo_name":{"kind":"string","value":"tamasorosz/artap"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2023499"},"content":{"kind":"string","value":"import unittest\nfrom ..individual import Individual\nfrom ..benchmark_robust import Synthetic1D, Synthetic2D, Synthetic5D, Synthetic10D\n\n\nclass TestSynthetic1D(unittest.TestCase):\n\n def test_synthetic1d(self):\n test = Synthetic1D()\n 
self.assertAlmostEqual(test.evaluate(Individual([11.0]))[0], 3.23, 3)\n self.assertAlmostEqual(test.evaluate(Individual([1.6]))[0], 3.205, 2)\n\n\nclass TestSynthetic2D(unittest.TestCase):\n\n def test_synthetic2d(self):\n test = Synthetic2D()\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0]))[0], 1.21112, 4)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0]))[0], 1.00096, 4)\n\n\nclass TestSynthetic5D(unittest.TestCase):\n\n def test_synthetic5d(self):\n test = Synthetic5D()\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.200000000, 4)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.000, 4)\n\n self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0]))[0], .7)\n self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0]))[0], .75)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2, 5)\n\n self.assertAlmostEqual(test.evaluate(Individual([5.0, 2.0, 9.6, 7.3, 8.6]))[0], 1.0)\n self.assertAlmostEqual(test.evaluate(Individual([7.5, 8.0, 9.0, 3.2, 4.6]))[0], .6, 4)\n self.assertAlmostEqual(test.evaluate(Individual([5.7, 9.3, 2.2, 8.4, 7.1]))[0], .5)\n self.assertAlmostEqual(test.evaluate(Individual([5.5, 7.2, 5.8, 2.3, 4.5]))[0], .2, 4)\n\n self.assertAlmostEqual(test.evaluate(Individual([4.7, 3.2, 5.5, 7.1, 3.3]))[0], 0.4)\n self.assertAlmostEqual(test.evaluate(Individual([9.7, 8.4, 0.6, 3.2, 8.5]))[0], 0.1)\n\n\nclass TestSynthetic10D(unittest.TestCase):\n\n def test_synthetic10d(self):\n test = Synthetic10D()\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0],\n 1.200000000, 4)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.000,\n 4)\n self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0, 1.0, 1.0, 6.0, 7.0, 8.0]))[0], 0.7)\n self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0, 1.0, 3.0, 8.0, 9.5, 2.0]))[0], 0.75)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)\n self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2)\n"},"size":{"kind":"number","value":2815,"string":"2,815"}}},{"rowIdx":351,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/data/write_data.py"},"max_stars_repo_name":{"kind":"string","value":"lzmch/framequery"},"max_stars_count":{"kind":"number","value":66,"string":"66"},"id":{"kind":"string","value":"2023577"},"content":{"kind":"string","value":"from __future__ import print_function, division, absolute_import\n\nimport os.path\n\nimport pandas as pd\n\ndf = pd.DataFrame({\n 'g': [0, 0, 0, 1, 1, 2],\n 'i': [1, 2, 3, 4, 5, 6],\n 'f': [7.0, 8.0, 9.0, 0.0, 1.0, 2.0],\n})\n\ndf.to_csv(\n os.path.join(os.path.dirname(__file__), 'test.csv'),\n sep=';',\n index=False,\n)\n"},"size":{"kind":"number","value":326,"string":"326"}}},{"rowIdx":352,"cells":{"max_stars_repo_path":{"kind":"string","value":"projects/crawl_taobao_goods_migrate/model/result.py"},"max_stars_repo_name":{"kind":"string","value":"kingking888/crawler-pyspider"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2022702"},"content":{"kind":"string","value":"from crawl_taobao_goods_migrate.model.task import Task\nfrom pyspider.core.model.mongo_base 
import *\nfrom pyspider.helper.date import Date\n\n\nclass Result(ResultBase):\n def __init__(self):\n super(Result, self).__init__()\n\n def find_by_goods_id(self, goods_id):\n \"\"\"\n 从 goods image 库查找商品\n :param goods_id:\n :return:\n \"\"\"\n return self.find_one({\"taskid\": Task.get_task_id_goods_image(goods_id)})\n\n def find_complete_goods(self, goods_id):\n \"\"\"\n 从 goods image 和 goods details 两个库中同时查找数据,返回更新时间较新的结果;\n 如果能查到两条记录,说明有一个商品已经下架了\n :param goods_id:\n :return:\n \"\"\"\n image_goods = self.find_one({\"taskid\": Task.get_task_id_goods_image(goods_id)})\n detail_goods = self.find_one({\"taskid\": Task.get_task_id_goods_detail(goods_id)})\n\n img_result = image_goods.get('result') if image_goods else \"1970-01-01\"\n detail_result = detail_goods.get('result') if detail_goods else \"1970-01-01\"\n\n img_date = img_result.get('update_time') if isinstance(img_result, dict) else \"1970-01-01\"\n detail_date = detail_result.get('update_time') if isinstance(detail_result, dict) else \"1970-01-01\"\n\n if img_date is None:\n img_date = \"1970-01-01\"\n if detail_date is None:\n detail_date = \"1970-01-01\"\n\n return detail_goods if Date(img_date) < Date(detail_date) else image_goods\n\n def find_all_goods(self, shop_id=''):\n \"\"\"\n 查询 goods 库里的所有商品;\n 有 shop_id 则查询该 shop_id 下的所有商品,否则就返回所有的商品;\n :param shop_id: 店铺ID\n :return:\n \"\"\"\n builder = {\n 'goods_id': {'$exists': 'true'},\n }\n if shop_id:\n builder['shop_id'] = shop_id\n return self.find(builder)\n\n def find_all_shop_goods(self, shop_list: list):\n \"\"\"\n 获取所有的店铺商品ID\n :param shop_list: str list\n :return:\n \"\"\"\n builder = {\n \"goods_id\": {\"$exists\": 'true'}\n }\n if shop_list:\n shop_list = [str(item) for item in shop_list]\n builder[\"shop_id\"] = {\"$in\": shop_list}\n return self.find(builder)\n\n def find_filter_goods(self, shop_ids: list, update_time=0):\n \"\"\"\n 过滤查询商品数据\n :param shop_ids: int list\n :param update_time: 如果有更新时间,则获取小于更新时间的商品\n :return:\n \"\"\"\n builder = {\n 'result.goods_id': {'$exists': 'true'},\n }\n if shop_ids:\n shop_ids = [int(item) for item in shop_ids]\n builder['result.shop_id'] = {\"$in\": shop_ids}\n if update_time > 0:\n builder['updatetime'] = {\"$gte\": update_time}\n return self.find(builder)\n\n def find_all_shop_id(self):\n \"\"\"\n 获取所有的店铺ID\n :return:\n \"\"\"\n return self.find({\n 'result.shop_id': {'$exists': 'true'},\n 'result.shop_url': {'$exists': 'true'},\n 'result.banner_imgs': {'$exists': 'true'},\n })\n\n def find_shop_by_id(self, shop_id):\n \"\"\"\n 从 shop details 库查找店铺详情\n :param shop_id:\n :return:\n \"\"\"\n return self.find_one({\"taskid\": Task.get_task_id_shop_details(shop_id)})\n\n def update_shop_crawled_status(self, shop_id, status):\n \"\"\"\n 更改店铺的被抓取的状态\n :param shop_id:\n :param status:\n :return:\n \"\"\"\n return self.update_many({'taskid': Task.get_task_id_shop_details(shop_id)},\n {\"$set\": {\"result.crawled\": status}})\n\n def insert_or_update_goods(self, doc):\n \"\"\"\n 写入或者更新天猫商品\n :param doc:\n :return:\n \"\"\"\n goods_id = doc.get(\"goods_id\", \"\")\n goods_name = doc.get(\"goods_name\", \"\")\n shop_id = doc.get(\"shop_id\", \"\")\n update_time = doc.get(\"update_time\", 0)\n if goods_id:\n re = self.find_one({\"goods_id\": goods_id})\n if re:\n return self.update(\n {'goods_id': goods_id},\n {\"$set\": {\"goods_id\": goods_id, \"goods_name\": goods_name, \"shop_id\": shop_id,\n \"update_time\": update_time}})\n else:\n return self.insert(doc)\n else:\n return 
self.insert(doc)\n"},"size":{"kind":"number","value":4288,"string":"4,288"}}},{"rowIdx":353,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/Tokenize.py"},"max_stars_repo_name":{"kind":"string","value":"ttrung149/turquoise"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2023117"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -----------------------------------------------------------------------------\n# Turquoise - VHDL linter and compilation toolchain\n# Copyright (c) 2020-2021: Turquoise team\n#\n# File name: Tokenize.py\n#\n# Description: Implementation of tokenizer class\n#\n# -----------------------------------------------------------------------------\nfrom pyVHDLParser.Token.Parser import Tokenizer\nfrom pyVHDLParser.Blocks import TokenToBlockParser\nfrom pyVHDLParser.Base import ParserException\n\nclass Tokenize():\n\n def __init__(self, filename=None):\n self._filename = filename\n\n def get_token_stream(self):\n with open (self._filename, 'r') as handle:\n content = handle.read()\n\n stream = Tokenizer.GetVHDLTokenizer(content)\n return stream\n\n def get_token_iter(self):\n stream = self.get_token_stream()\n token_iter = iter(stream)\n return token_iter"},"size":{"kind":"number","value":925,"string":"925"}}},{"rowIdx":354,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/oneflow/framework/docstr/unbind.py"},"max_stars_repo_name":{"kind":"string","value":"L-Net-1992/oneflow"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023764"},"content":{"kind":"string","value":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport oneflow\nfrom oneflow.framework.docstr.utils import add_docstr\n\nadd_docstr(\n oneflow.unbind,\n \"\"\"\n This function is equivalent to PyTorch's unbind function.\n Removes a tensor dimension.\n\n Returns a tuple of all slices along a given dimension, already without it.\n \n Args:\n x(Tensor): the tensor to unbind\n dim(int): dimension to remove\n\n For example:\n\n .. 
code-block:: python\n \n >>> import oneflow as flow\n \n >>> x = flow.tensor(range(12)).reshape([3,4])\n >>> flow.unbind(x)\n (tensor([0, 1, 2, 3], dtype=oneflow.int64), tensor([4, 5, 6, 7], dtype=oneflow.int64), tensor([ 8, 9, 10, 11], dtype=oneflow.int64))\n >>> flow.unbind(x, 1)\n (tensor([0, 4, 8], dtype=oneflow.int64), tensor([1, 5, 9], dtype=oneflow.int64), tensor([ 2, 6, 10], dtype=oneflow.int64), tensor([ 3, 7, 11], dtype=oneflow.int64))\n\n \"\"\",\n)\n"},"size":{"kind":"number","value":1513,"string":"1,513"}}},{"rowIdx":355,"cells":{"max_stars_repo_path":{"kind":"string","value":"examples/routes/resequence_multiple_stops.py"},"max_stars_repo_name":{"kind":"string","value":"route4me/route4me-python-sdk"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"id":{"kind":"string","value":"2023170"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport argparse\nimport json\n\nfrom route4me import Route4Me\n\n\ndef load_json(filename):\n data = []\n with open(filename, 'rt') as datafile:\n data = json.load(datafile)\n datafile.close()\n return data\n\n\ndef main(args):\n r4m = Route4Me(args.api_key)\n route_data = load_json(args.route_data_filename)\n route = r4m.route\n print(f'Route ID: {args.route_id}')\n print(\"Addresses to be Re-sequence\")\n for address in route_data['addresses']:\n print(f'Address Sequence: {address[\"sequence_no\"]:6} - '\n f'Route Destination ID: {address[\"route_destination_id\"]:9}')\n print(f\"After Resequence the Route {args.route_id}\")\n response_data = route.resequence_multiple_stops(args.route_id, route_data)\n for address in response_data['addresses']:\n print(f'Address Sequence: {address[\"sequence_no\"]:6} - '\n f'Route Destination ID: {address[\"route_destination_id\"]:9} - Address: {address[\"address\"]} ')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Resequence a Route')\n parser.add_argument('--api_key', dest='api_key', help='Route4Me API KEY',\n type=str, required=True)\n parser.add_argument('--route_id', dest='route_id', help='Route ID',\n type=str, required=True)\n parser.add_argument('--route_data_filename', dest='route_data_filename',\n help='JSON file name with Route Addresses ID and Sequence',\n type=str, required=True)\n args = parser.parse_args()\n main(args)\n"},"size":{"kind":"number","value":1595,"string":"1,595"}}},{"rowIdx":356,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/plot_automaton.py"},"max_stars_repo_name":{"kind":"string","value":"BurnySc2/rust-python-pyo3-test"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023359"},"content":{"kind":"string","value":"import sys\nimport os\nimport lzma\nimport pickle\n\n\nimport numpy as np\n\nnp.set_printoptions(threshold=sys.maxsize)\nimport matplotlib.pyplot as plt\n\nfrom typing import Tuple, List, Iterable\n\nfrom sc2.game_data import GameData\nfrom sc2.game_info import GameInfo\nfrom sc2.game_state import GameState\nfrom sc2.bot_ai import BotAI\n\n\ndef get_map_specific_bots() -> Iterable[BotAI]:\n folder = os.path.dirname(__file__)\n subfolder_name = \"pickle_data\"\n pickle_folder_path = os.path.join(folder, subfolder_name)\n files = os.listdir(pickle_folder_path)\n for file in (f for f in files if f.endswith(\".xz\")):\n with lzma.open(os.path.join(folder, subfolder_name, file), \"rb\") as f:\n raw_game_data, raw_game_info, raw_observation = pickle.load(f)\n\n # Build fresh bot object, and load the pickle'd data into the bot object\n bot = BotAI()\n game_data = 
GameData(raw_game_data.data)\n game_info = GameInfo(raw_game_info.game_info)\n game_state = GameState(raw_observation)\n bot._initialize_variables()\n bot._prepare_start(client=None, player_id=1, game_info=game_info, game_data=game_data)\n bot._prepare_step(state=game_state, proto_game_info=raw_game_info)\n\n yield bot\n\n\n# Global bot object that is used in TestClass.test_position_*\nbot_object_generator = get_map_specific_bots()\n# random_bot_object: BotAI = next(bot_object_generator)\n\n# print(random_bot_object.game_info.start_locations)\n# print(random_bot_object.townhalls[0].position)\n# print(random_bot_object.enemy_start_locations)\n\n\ndef main():\n # start = (90, 100)\n # goal = (100, 114)\n # Spawn\n start = (29, 65)\n goal = (154, 114)\n # Ramp\n # start = (32, 51)\n # goal = (150, 129)\n # map_grid = np.loadtxt(\"AutomatonLE.txt\", delimiter=\"\").astype(int)\n grid = []\n with open(\"../AutomatonLE.txt\") as f:\n for line in f.readlines():\n values = [int(i) for i in list(line.strip())]\n grid.append(values)\n # print(grid)\n map_grid = np.asarray(grid)\n # print(map_grid)\n\n path = []\n with open(\"../path.txt\") as f:\n for line in f.readlines():\n x, y = line.split(\",\")\n path.append((int(x.strip()), int(y.strip())))\n print()\n # print(map_grid.shape)\n plot(map_grid, route=path, start=start, goal=goal)\n\n\ndef plot(\n grid,\n route: List[Tuple[int, int]] = None,\n start: Tuple[int, int] = None,\n goal: Tuple[int, int] = None,\n waypoints=None,\n):\n # extract x and y coordinates from route list\n x_coords = []\n y_coords = []\n if route:\n for i in range(0, len(route)):\n x = route[i][0]\n y = route[i][1]\n x_coords.append(x)\n y_coords.append(y)\n\n # plot map and path\n fig, ax = plt.subplots(figsize=(20, 20))\n ax.imshow(grid, cmap=plt.cm.Dark2)\n if start:\n ax.scatter(start[0], start[1], marker=\"x\", color=\"red\", s=200)\n if goal:\n ax.scatter(goal[0], goal[1], marker=\"x\", color=\"blue\", s=200)\n if route:\n for w in route:\n ax.scatter(w[0], w[1], marker=\"x\", color=\"orange\", s=100)\n\n if waypoints:\n for w in waypoints:\n ax.scatter(w[0], w[1], marker=\"x\", color=\"black\", s=50)\n # plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":3363,"string":"3,363"}}},{"rowIdx":357,"cells":{"max_stars_repo_path":{"kind":"string","value":"models/utils.py"},"max_stars_repo_name":{"kind":"string","value":"Curli-quan/fewshot-select"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023960"},"content":{"kind":"string","value":"from torch import nn\r\nimport random\r\nfrom functools import wraps\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass EMA():\r\n def __init__(self, beta):\r\n super().__init__()\r\n self.beta = beta\r\n\r\n def update_average(self, old, new):\r\n if old is None:\r\n return new\r\n return old * self.beta + (1 - self.beta) * new\r\n\r\n\r\ndef update_moving_average(ema_updater, ma_model, current_model):\r\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\r\n old_weight, up_weight = ma_params.data, current_params.data\r\n ma_params.data = ema_updater.update_average(old_weight, up_weight)\r\n\r\n\r\nclass RandomApply(nn.Module):\r\n def __init__(self, fn, p):\r\n super().__init__()\r\n self.fn = fn\r\n self.p = p\r\n\r\n def forward(self, x):\r\n if random.random() > self.p:\r\n return x\r\n return self.fn(x)\r\n\r\n\r\ndef default(val, def_val):\r\n return def_val if 
val is None else val\r\n\r\n\r\ndef flatten(t):\r\n return t.reshape(t.shape[0], -1)\r\n\r\n\r\ndef singleton(cache_key):\r\n def inner_fn(fn):\r\n @wraps(fn)\r\n def wrapper(self, *args, **kwargs):\r\n # print(f\"[.] Function Name: {fn.__name__}\")\r\n instance = getattr(self, cache_key)\r\n if instance is not None:\r\n return instance\r\n instance = fn(self, *args, **kwargs)\r\n setattr(self, cache_key, instance)\r\n return instance\r\n return wrapper\r\n return inner_fn\r\n\r\n\r\ndef get_module_device(module):\r\n return next(module.parameters()).device\r\n\r\n\r\ndef set_requires_grad(model, val):\r\n for p in model.parameters():\r\n p.requires_grad = val\r\n\r\n\r\n# loss fn\r\ndef loss_fn(x, y):\r\n x = F.normalize(x, dim=-1, p=2)\r\n y = F.normalize(y, dim=-1, p=2)\r\n return 2 - 2 * (x * y).sum(dim=-1)\r\n"},"size":{"kind":"number","value":1851,"string":"1,851"}}},{"rowIdx":358,"cells":{"max_stars_repo_path":{"kind":"string","value":"star_printer.py"},"max_stars_repo_name":{"kind":"string","value":"ChangyongKim0/programming_study"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2022792"},"content":{"kind":"string","value":"import logger as lg\n\n\nclass StarPrinter:\n def __init__(self, print_types):\n self.print_queue = [{\"type\": ele[0], \"length\": ele[1]}\n for ele in print_types]\n logger = lg.Logger(\"StarPrinter\")\n self.log, self.err = logger.log, logger.err\n self.log(\"StarPrinter initialized.\")\n\n def printDiamond(self, length):\n space = \" \" * (length // 2)\n star = \"*\"\n print()\n for line in range(length):\n print(space + star)\n if line < length / 2 - 1:\n space = space[0:-2]\n star += \"****\"\n else:\n space += \" \"\n star = star[0:-4]\n print()\n\n def printTriangle(self, length):\n space = \" \" * length\n star = \"*\"\n print()\n for line in range(length):\n print(space + star)\n space = space[0:-1]\n star += \"**\"\n print()\n\n def printLeftTriangle(self, length):\n star = \"*\"\n print()\n for line in range(length):\n print(star)\n star += \"**\"\n print()\n\n def addPrintQueue(self, print_type, length):\n self.print_queue.append({\"type\": print_type, \"length\": length})\n self.log(\"{} with length {} is added in print queue.\".format(\n print_type, length))\n\n def printStar(self):\n for ele in self.print_queue:\n if ele[\"type\"] == \"diamond\":\n self.printDiamond(ele[\"length\"])\n elif ele[\"type\"] == \"triangle\":\n self.printTriangle(ele[\"length\"])\n elif ele[\"type\"] == \"left_triangle\":\n self.printLeftTriangle(ele[\"length\"])\n else:\n self.err(\"Wrong print type.\")\n\n def __str__(self):\n string = \"\\n[print_queue]\\n\"\n for ele in self.print_queue:\n string += \"type: {0}; length: {1}\\n\".format(\n ele[\"type\"], ele[\"length\"])\n return string\n\n\nif __name__ == \"__main__\":\n star_printer = StarPrinter([[\"triangle\", 5], [\"left_triangle\", 3]])\n print(star_printer)\n star_printer.addPrintQueue(\"none\", 8)\n star_printer.addPrintQueue(\"diamond\", 7)\n print(star_printer)\n star_printer.printStar()\n"},"size":{"kind":"number","value":2268,"string":"2,268"}}},{"rowIdx":359,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/shapes.py"},"max_stars_repo_name":{"kind":"string","value":"AntVil/Wetter-Daten-Visualizer"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023987"},"content":{"kind":"string","value":"# this file contains all components needed to collect, structure and save the data from GADM\nimport os\nimport 
requests\nfrom zipfile import ZipFile\nfrom io import BytesIO\nimport cartopy.io.shapereader as shpreader\nfrom requests.api import get\n\n\n# constants\nSHAPES_URL = \"https://biogeo.ucdavis.edu/data/gadm3.6/shp/gadm36_DEU_shp.zip\"\nSHAPES_FOLDER = os.path.join(os.path.dirname(__file__), \"data\", \"shapes\")\nos.makedirs(SHAPES_FOLDER, exist_ok = True)\n\n\ndef download_shapes():\n \"\"\"\n this function downloads data from GADM\n \"\"\"\n\n unpacked = ZipFile(BytesIO(requests.get(SHAPES_URL).content))\n file_names = list(set([file.split(\".\")[0] for file in unpacked.namelist()]).difference({\"license\"}))\n\n # saving license\n with unpacked.open(\"license.txt\", \"r\") as read_file:\n with open(os.path.join(SHAPES_FOLDER, \"license.txt\"), \"wb\") as write_file:\n write_file.write(read_file.read())\n\n #downloading files\n for file in file_names:\n for extension in [\".shp\", \".shx\", \".dbf\"]:\n with unpacked.open(file + extension, \"r\") as read_file:\n # creating folder structure\n path = os.path.join(SHAPES_FOLDER, file)\n os.makedirs(path, exist_ok = True)\n \n # saving file\n file_name = \"shape\" + extension\n with open(os.path.join(path, file_name), \"wb\") as write_file:\n write_file.write(read_file.read())\n\n\ndef get_geometry(level=1):\n \"\"\"\n this function returns the administrative-area geometries for germany\n \"\"\"\n \n try:\n return list(\n shpreader.Reader(\n os.path.join(os.path.dirname(__file__), \"data\", \"shapes\", f\"gadm36_DEU_{level}\", \"shape\")\n ).geometries()\n )\n except:\n download_shapes()\n return get_geometry(level)\n\n\nif __name__ == \"__main__\":\n download_shapes()\n"},"size":{"kind":"number","value":1918,"string":"1,918"}}},{"rowIdx":360,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/seekret.apitest/context/response_test.py"},"max_stars_repo_name":{"kind":"string","value":"seek-ret/tavernrtl"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2023678"},"content":{"kind":"string","value":"import io\nimport json as _json\nfrom typing import Optional, Union\n\nimport pytest\nfrom requests import Response\nfrom requests.structures import CaseInsensitiveDict\n\nfrom seekret.apitest.context.response import ResponseWrapper, NullResultError\n\n\ndef make_wrapper(json=None,\n headers: Optional[Union[dict[str],\n CaseInsensitiveDict[str]]] = None):\n response = Response()\n response.raw = io.BytesIO(_json.dumps(json).encode() if json else b'')\n if headers:\n response.headers = CaseInsensitiveDict(headers)\n return ResponseWrapper(response)\n\n\nclass TestResponseWrapper:\n class TestSearch:\n def test_json_nested_value(self):\n wrapper = make_wrapper({'a': {'b': {'c': 'd'}}})\n assert {'c': 'd'} == wrapper.search('json.a.b')\n\n def test_json_array_value(self):\n wrapper = make_wrapper([1, 'b', {'c': 'd'}])\n assert 'd' == wrapper.search('json[2].c')\n\n def test_json_missing_value_causes_null_result_error(self):\n wrapper = make_wrapper({'some-key': 1})\n pytest.raises(NullResultError, wrapper.search, 'json.\"other-key\"')\n\n def test_json_value_none_causes_null_result_error(self):\n wrapper = make_wrapper({'key': None})\n pytest.raises(NullResultError, wrapper.search, 'json.key')\n\n def test_json_case_sensitive(self):\n wrapper = make_wrapper({'caseSensitiveKey': 1})\n pytest.raises(NullResultError, wrapper.search,\n 'json.casesensitivekey')\n\n def test_headers_existing_key(self):\n wrapper = make_wrapper(headers={'Some-Header': 'value'})\n assert 
wrapper.search('headers.\"Some-Header\"') == 'value'\n\n def test_headers_case_insensitive(self):\n wrapper = make_wrapper(headers={'Some-Header': 'value'})\n assert wrapper.search('headers.\"some-header\"') == 'value'\n\n def test_headers_missing_key_causes_null_result_error(self):\n wrapper = make_wrapper(headers={'Some-Header': 'value'})\n pytest.raises(NullResultError, wrapper.search,\n 'headers.\"other-header\"')\n\n def test_bad_locator_causes_null_result_error(self):\n wrapper = make_wrapper(json={'a': 1}, headers={'b': 2})\n pytest.raises(NullResultError, wrapper.search,\n 'expression.must.start.with.json.or.headers')\n\n class TestAssertSchema:\n def test_validation_success(self):\n wrapper = make_wrapper({\n 'a': 'hello!',\n 'b': 1,\n })\n wrapper.assert_schema(\"\"\"\n type: map\n mapping:\n a:\n type: str\n required: true\n b:\n type: int\n \"\"\")\n\n def test_validation_failure_causes_assertion_error(self):\n wrapper = make_wrapper({\n 'b': 1,\n })\n pytest.raises(\n AssertionError, wrapper.assert_schema, \"\"\"\n type: map\n mapping:\n a:\n type: str\n required: true\n b:\n type: int\n \"\"\")\n"},"size":{"kind":"number","value":3353,"string":"3,353"}}},{"rowIdx":361,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/protocols/BLE/ble_device.py"},"max_stars_repo_name":{"kind":"string","value":"QWERTSKIHACK/peniot"},"max_stars_count":{"kind":"number","value":143,"string":"143"},"id":{"kind":"string","value":"2023848"},"content":{"kind":"string","value":"import pexpect\n\n\nclass BLEDevice:\n \"\"\"\n Represents a BLE device.\n It uses `gatttool` to connect a BLE device.\n \"\"\"\n\n def __init__(self, address):\n self.device = None\n self.address = address\n # connect to the device specified with the given address\n self.connect()\n\n def connect(self):\n \"\"\"\n Connects to the BLE device\n \"\"\"\n print \"Connecting...\"\n # Run gatttool interactively.\n self.device = pexpect.spawn(\"gatttool -b \" + self.address + \" -I\")\n self.device.expect('\\[LE\\]>', timeout=10)\n self.device.sendline('connect')\n self.device.expect('Connection successful.*\\[LE\\]>', timeout=10)\n print \"Successfully connected!\"\n\n \"\"\"\n Updates the value of the handle\n \"\"\"\n\n def writecmd(self, handle, value):\n cmd = \"char-write-cmd \" + handle + \" \" + value\n self.device.sendline(cmd)\n print \"Wrote \" + value + \" to handle: \" + handle\n"},"size":{"kind":"number","value":979,"string":"979"}}},{"rowIdx":362,"cells":{"max_stars_repo_path":{"kind":"string","value":"keras/utils/visualize_util.py"},"max_stars_repo_name":{"kind":"string","value":"nishank974/Keras"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2023324"},"content":{"kind":"string","value":"import itertools\nfrom keras.layers.containers import Graph, Sequential\nfrom keras.layers.core import Merge\n\ntry:\n # pydot-ng is a fork of pydot that is better maintained\n import pydot_ng as pydot\nexcept ImportError:\n # fall back on pydot if necessary\n import pydot\nif not pydot.find_graphviz():\n raise RuntimeError(\"Failed to import pydot. 
You must install pydot\"\n \" and graphviz for `pydotprint` to work.\")\n\n\ndef layer_typename(layer):\n return type(layer).__module__ + \".\" + type(layer).__name__\n\n\ndef get_layer_to_name(model):\n \"\"\"Returns a dict mapping layer to their name in the model\"\"\"\n if not isinstance(model, Graph):\n return {}\n else:\n node_to_name = itertools.chain(\n model.nodes.items(), model.inputs.items(), model.outputs.items()\n )\n return {v: k for k, v in node_to_name}\n\n\nclass ModelToDot(object):\n \"\"\"\n This is a helper class which visits a keras model (Sequential or Graph) and\n returns a pydot.Graph representation.\n\n This is implemented as a class because we need to maintain various states.\n\n Use it as ```ModelToDot()(model)```\n\n Keras models can have an arbitrary number of inputs and outputs. A given\n layer can have multiple inputs but has a single output. We therefore\n explore the model by starting at its output and crawling \"up\" the tree.\n \"\"\"\n def _pydot_node_for_layer(self, layer, label):\n \"\"\"\n Returns the pydot.Node corresponding to the given layer.\n `label` specify the name of the layer (only used if the layer isn't yet\n associated with a pydot.Node)\n \"\"\"\n # Check if this already exists (will be the case for nodes that\n # serve as input to more than one layer)\n if layer in self.layer_to_pydotnode:\n node = self.layer_to_pydotnode[layer]\n else:\n layer_id = 'layer%d' % self.idgen\n self.idgen += 1\n\n label = label + \" (\" + layer_typename(layer) + \")\"\n\n if self.show_shape:\n # Build the label that will actually contain a table with the\n # input/output\n outputlabels = str(layer.output_shape)\n if hasattr(layer, 'input_shape'):\n inputlabels = str(layer.input_shape)\n elif hasattr(layer, 'input_shapes'):\n inputlabels = ', '.join(\n [str(ishape) for ishape in layer.input_shapes])\n else:\n inputlabels = ''\n label = \"%s\\n|{input:|output:}|{{%s}|{%s}}\" % (\n label, inputlabels, outputlabels)\n\n node = pydot.Node(layer_id, label=label)\n self.g.add_node(node)\n self.layer_to_pydotnode[layer] = node\n return node\n\n def _process_layer(self, layer, layer_to_name=None, connect_to=None):\n \"\"\"\n Process a layer, adding its node to the graph and creating edges to its\n outputs.\n\n `connect_to` specify where the output of the current layer will be\n connected\n `layer_to_name` is a dict mapping layer to their name in the Graph\n model. Should be {} when processing a Sequential model\n \"\"\"\n # The layer can be a container layer, in which case we can recurse\n is_graph = isinstance(layer, Graph)\n is_seq = isinstance(layer, Sequential)\n if self.recursive and (is_graph or is_seq):\n # We got a container layer, recursively transform it\n if is_graph:\n child_layers = layer.outputs.values()\n else:\n child_layers = [layer.layers[-1]]\n for l in child_layers:\n self._process_layer(l, layer_to_name=get_layer_to_name(layer),\n connect_to=connect_to)\n else:\n # This is a simple layer.\n label = layer_to_name.get(layer, '')\n layer_node = self._pydot_node_for_layer(layer, label=label)\n\n if connect_to is not None:\n self.g.add_edge(pydot.Edge(layer_node, connect_to))\n\n # Proceed upwards to the parent(s). 
Only Merge layers have more\n # than one parent\n if isinstance(layer, Merge): # Merge layer\n for l in layer.layers:\n self._process_layer(l, layer_to_name,\n connect_to=layer_node)\n elif hasattr(layer, 'previous') and layer.previous is not None:\n self._process_layer(layer.previous, layer_to_name,\n connect_to=layer_node)\n\n def __call__(self, model, recursive=True, show_shape=False,\n connect_to=None):\n self.idgen = 0\n # Maps keras layer to the pydot.Node representing them\n self.layer_to_pydotnode = {}\n self.recursive = recursive\n self.show_shape = show_shape\n\n self.g = pydot.Dot()\n self.g.set('rankdir', 'TB')\n self.g.set('concentrate', True)\n self.g.set_node_defaults(shape='record')\n\n if hasattr(model, 'outputs'):\n # Graph\n for name, l in model.outputs.items():\n self._process_layer(l, get_layer_to_name(model),\n connect_to=connect_to)\n else:\n # Sequential container\n self._process_layer(model.layers[-1], {}, connect_to=connect_to)\n\n return self.g\n\n\ndef to_graph(model, **kwargs):\n \"\"\"\n `recursive` controls whether we recursively explore container layers\n `show_shape` controls whether the shape is shown in the graph\n \"\"\"\n return ModelToDot()(model, **kwargs)\n\n\ndef plot(model, to_file='model.png', **kwargs):\n graph = to_graph(model, **kwargs)\n graph.write_png(to_file)\n"},"size":{"kind":"number","value":5854,"string":"5,854"}}},{"rowIdx":363,"cells":{"max_stars_repo_path":{"kind":"string","value":"453-Minimum_Moves_to_Equal_Array_Elements.py"},"max_stars_repo_name":{"kind":"string","value":"QuenLo/leecode"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2022886"},"content":{"kind":"string","value":"class Solution:\n def minMoves(self, nums: List[int]) -> int:\n return sum(nums)-len(nums)*min(nums)\n\n\nclass SolutionII:\n def minMoves(self, nums: List[int]) -> int:\n \n minin = float('inf')\n time = 0\n for num in nums:\n time += num\n minin = min( minin, num )\n \n return time - len(nums)*minin\n"},"size":{"kind":"number","value":366,"string":"366"}}},{"rowIdx":364,"cells":{"max_stars_repo_path":{"kind":"string","value":"dataStructures/exercises/stacks.py"},"max_stars_repo_name":{"kind":"string","value":"Ry4nW/python-wars"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2024029"},"content":{"kind":"string","value":"from collections import deque\n\nclass Stack():\n def __init__(self, items: 'list[any]', maxsize) -> None:\n self.items: list[deque] = deque(items)\n self.maxsize: int = maxsize\n self.top: int = self.get_top()\n \n def get_stack(self) -> deque:\n return self.items\n\n def push(self, item) -> None or str:\n if len(self.items) < self.maxsize:\n self.items.append(item)\t\n self.top = item\t\t\t\n else:\n return 'Max capacity reached.'\n\n def pop(self) -> any:\n if self.items: \n popped = self.items.pop()\n self.top = self.get_top()\n return popped\n return 'Stack is empty.'\n \n def is_empty(self) -> bool:\n return True if not self.items else False\n \n def get_top(self) -> any:\n if not self.is_empty():\n return self.items[-1]\n else:\n return 'Stack is empty.'\n\nstack = Stack([1, 2, 3, 4, 5], 
6)\nprint(stack.top)\nprint(stack.push(6))\nprint(stack.top)\nprint(stack.pop())\nprint(stack.top)\n"},"size":{"kind":"number","value":1054,"string":"1,054"}}},{"rowIdx":365,"cells":{"max_stars_repo_path":{"kind":"string","value":"socless/models.py"},"max_stars_repo_name":{"kind":"string","value":"A-Gray-Cat/socless_python"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2022933"},"content":{"kind":"string","value":"from dataclasses import dataclass, field\nfrom typing import Optional\n\n\n@dataclass\nclass EventTableItem:\n id: str\n investigation_id: str\n status_: str\n is_duplicate: bool\n created_at: str\n event_type: str\n playbook: Optional[str]\n details: dict\n data_types: dict\n event_meta: dict\n\n\n@dataclass\nclass DedupTableItem:\n current_investigation_id: str\n dedup_hash: str\n\n\n@dataclass\nclass MessageResponsesTableItem:\n message_id: str # PK : callback id for message responses\n await_token: str # used to start next step in step_functions\n receiver: str # step_functions step name\n fulfilled: bool # has await_token been used\n message: str # message sent to user while waiting for their response\n execution_id: str\n investigation_id: str\n datetime: str\n\n\n@dataclass\nclass PlaybookArtifacts:\n event: EventTableItem\n execution_id: str\n\n\n@dataclass\nclass PlaybookInput:\n execution_id: str\n artifacts: PlaybookArtifacts\n results: dict\n errors: dict\n"},"size":{"kind":"number","value":1018,"string":"1,018"}}},{"rowIdx":366,"cells":{"max_stars_repo_path":{"kind":"string","value":"cypherpunkpay/net/tor_client/base_tor_circuits.py"},"max_stars_repo_name":{"kind":"string","value":"prusnak/CypherpunkPay"},"max_stars_count":{"kind":"number","value":44,"string":"44"},"id":{"kind":"string","value":"2023988"},"content":{"kind":"string","value":"from abc import abstractmethod\n\n\nclass BaseTorCircuits(object):\n\n SHARED_CIRCUIT_ID = 'shared_circuit' # for requests were linkability of actions does not matter (merchant callbacks, price tickers, blockchain height, etc)\n SKIP_TOR = 'skip_tor' # for requests where the target is in the local network or Tor cannot be used for other reasons\n\n @abstractmethod\n def mark_as_broken(self, label):\n pass\n\n @abstractmethod\n def get_for(self, privacy_context):\n pass\n\n @abstractmethod\n def close(self):\n pass\n"},"size":{"kind":"number","value":549,"string":"549"}}},{"rowIdx":367,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/Python/1-100/88.MergeArray.py"},"max_stars_repo_name":{"kind":"string","value":"Peefy/PeefyLeetCode"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2023594"},"content":{"kind":"string","value":"\nclass Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: void Do not return anything, modify nums1 in-place instead.\n \"\"\"\n nums = []\n for i in range(m):\n nums.append(nums1[i])\n i = 0\n j = 0\n index = 0\n while i < m or j < n:\n if i == m:\n for k in range(j, n):\n nums1[index] = nums2[k]\n index += 1\n break\n if j == n:\n for k in range(i, m):\n nums1[index] = nums[k]\n index += 1\n break\n if nums[i] < nums2[j]:\n nums1[index] = nums[i]\n i += 1\n else:\n nums1[index] = nums2[j]\n j += 1\n index += 1\n return nums1\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.merge([1,2,3,4,0,0,0,0,0],4, [2,5,6], 3))\nelse:\n 
pass\n"},"size":{"kind":"number","value":1092,"string":"1,092"}}},{"rowIdx":368,"cells":{"max_stars_repo_path":{"kind":"string","value":"Python/count-primes.py"},"max_stars_repo_name":{"kind":"string","value":"ddyuewang/leetcode"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2023859"},"content":{"kind":"string","value":"# Time: O(n)\n# Space: O(n)\n\n# Description:\n#\n# Count the number of prime numbers less than a non-negative number, n\n#\n# Hint: The number n could be in the order of 100,000 to 5,000,000.\n\nclass Solution:\n # @param {integer} n\n # @return {integer}\n def countPrimes(self, n):\n if n <= 2:\n return 0\n \n is_prime = [True] * n\n num = n / 2\n for i in xrange(3, n, 2):\n if i * i >= n:\n break\n\n if not is_prime[i]:\n continue\n\n for j in xrange(i*i, n, 2*i):\n if not is_prime[j]:\n continue\n\n num -= 1\n is_prime[j] = False\n\n return num\n"},"size":{"kind":"number","value":713,"string":"713"}}},{"rowIdx":369,"cells":{"max_stars_repo_path":{"kind":"string","value":"python3/check_array_formation_through_concatenation.py"},"max_stars_repo_name":{"kind":"string","value":"joshiaj7/CodingChallenges"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2023897"},"content":{"kind":"string","value":"\"\"\"\nSpace : O(n)\nTime : O(n)\n\"\"\"\n\n\nclass Solution:\n def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:\n d = {}\n n = len(arr)\n\n for x in pieces:\n if x[0] not in d:\n d[x[0]] = x\n\n i = 0\n while i < n:\n if arr[i] in d:\n temp = d[arr[i]]\n for j in range(len(temp)):\n if temp[j] == arr[i]:\n i += 1\n else:\n return False\n else:\n return False\n\n return True\n"},"size":{"kind":"number","value":597,"string":"597"}}},{"rowIdx":370,"cells":{"max_stars_repo_path":{"kind":"string","value":"models/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"marcoscale98/emojinet"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2023214"},"content":{"kind":"string","value":"from models.base_lstm_user import base_lstm_user\nfrom models.base_lstm_cnn_user import base_lstm_cnn_user\nfrom models.base_lstm_subword import base_lstm_subword\nfrom models.ensemble_cnn_subword import ensemble_cnn_subword\nfrom models.base_cnn import base_cnn\nfrom models.base_lstm import base_lstm\nfrom models.vdcnn import vdcnn\n\n\nclass ModelDefinition:\n def __init__(self, func, params):\n self.params = params\n self.func = func\n\n def apply(self, values: dict):\n return self.func(*[values[param] for param in self.params])\n\n\ndef get_model(model: str) -> ModelDefinition:\n models = {\n \"base_cnn\": ModelDefinition(base_cnn, [\"vocabulary_size\", \"embedding_size\", \"max_seq_length\", \"embedding_matrix\", \"y_dictionary\"]),\n \"base_lstm\": ModelDefinition(base_lstm, [\"vocabulary_size\", \"embedding_size\", \"max_seq_length\", \"embedding_matrix\", \"y_dictionary\"]),\n \"base_lstm_user\": ModelDefinition(base_lstm_user, [\"vocabulary_size\", \"embedding_size\", \"history_size\", \"max_seq_length\", \"embedding_matrix\", \"y_dictionary\"]),\n \"base_lstm_cnn_user\": ModelDefinition(base_lstm_user, [\"vocabulary_size\", \"embedding_size\", \"history_size\", \"max_seq_length\", \"embedding_matrix\", \"y_dictionary\"]),\n \"base_lstm_subword\": ModelDefinition(base_lstm_subword, [\"vocabulary_size\", \"embedding_size\", \"max_char_length\", \"max_seq_length\", \"embedding_matrix\", \"y_dictionary\"]),\n \"ensemble_cnn_subword\": 
Rows carried in this page's embedded data payload (path · repository · stars · id · size):

(truncated row: tail of a model-registry helper with ModelDefinition entries for "ensemble_cnn_subword" and "vdcnn", ending in return models[model] · 1,759)
JUPYTER/Supervised/Feature Engineering/generate_dataset.py · Reynolds534/IASS_18_ML · 1 · 2023626 · 651
mayan/apps/mayan_statistics/dependencies.py · Syunkolee9891/Mayan-EDMS · 1 · 2023680 · 225
config.py · mottenhoff/ReMarkable_Zotero_sync · 1 · 2023380 · 693
tests/unittests/http_functions/no_return/main.py · gohar94/azure-functions-python-worker · 277 · 2023972 · 186
src/gui/window.py · Aldeshov/ADBFileExplorer · 12 · 2023530 · 8,416
adv/mega_man.py · XenoXilus/dl · 0 · 2023635 · 3,623
votes/frontend.py · estan/votes · 1 · 2022742 · 3,991
tests/test_ethosdistro_py.py · CoryKrol/ethosdistro_py · 0 · 2023909 · 916
class1/exercise10.py · SamerLabban/Network_Automation_Course · 1 · 2022688 · 479
VirSecCon 2020/old_monitor/rsa.py · 0xShad3/cybersec-writeups · 10 · 2022685 · 1,216
main.py · PaTinLei/MHFC-FSL · 0 · 2022825 · 6,884
datatoaster/datatoaster.py · abc612008/datatoaster · 0 · 2023483 · 5,289
requests/requests_user_agent.py · BoogalooLi/python_spiders · 1 · 2024020 · 438
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_sql_1.py · pbarton666/virtual_classroom · 0 · 2023927 · 4,049
ServerML/heatmap.py · SmallPlanetiOS/smallplanet_Pinball · 13 · 2023600 · 3,558
swagger_server/test/operational_controllers/test_users_with_roles_for_site.py · hedleyroos/core-access-control · 0 · 2023111 · 5,981
6 kyu/Jungersteins Math Training Room 1 How many zeros are at the end of n.py · mwk0408/codewars_solutions · 6 · 2022654 · (size lost; the payload is corrupted from this row onward)
…
(unidentified row: an SQS message wrapper for NWP files, with sqs_message_sent_timestamp, is_wanted and load_netcdf helpers plus an MD5 checksum guard · 3,720)
problem/migrations/0014_auto_20180618_1952.py · d9e7381f/onlinejudge-2.0 · 0 · 2024034 · 672
torch_inception_resnet_v2/blocks/inception/inception_resnet.py · mhconradt/inception-resnet-v2 · 9 · 2023122 · 709
app/migrations/0007_auto_20201116_0507.py · michael-huber2772/portfolio-dashboard · 0 · 2023302 · 1,266
Solutions/Problem12.py · sausage948/AoC2017 · 0 · 2023911 · 1,365
tests/speculos/test_status_word.py · aido/app-sskr · 3 · 2022839 · 1,320
camtfpc.py · MikeHallettUK/RosRobotics · 0 · 2022707 · 1,192

Viewer page: index 3 (offset 300, 100 rows per page, 129,320 rows total).
Dataset columns:

    max_stars_repo_path    string   length 4 to 182
    max_stars_repo_name    string   length 6 to 116
    max_stars_count        int64    0 to 191k
    id                     string   length 7
    content                string   length 100 to 10k
    size                   int64    100 to 10k
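The rows below can also be pulled programmatically. A minimal sketch using the Hugging Face datasets library; the dataset ID loubnabnl/starcoderdata_py_smol is taken from this page's metadata, and the split name "train" is an assumption:

# Stream a few rows and print some of the columns listed above.
# Dataset ID from the page metadata; the "train" split is assumed.
from datasets import load_dataset

ds = load_dataset("loubnabnl/starcoderdata_py_smol", split="train", streaming=True)
for row in ds.take(3):
    print(row["max_stars_repo_path"], row["max_stars_count"], row["size"])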
PSCN/data_imdbB.py
Lawlietgit/MA-GCNNs
6
2022797
import graph_tool

class Node:
    def __init__(self, node, graph, atr=0):
        self.node = node    # vertex index within its graph
        self.graph = graph  # index of the graph this node belongs to
        self.atr = atr      # node attribute (filled with the degree below)

pnode_file = open("IMDB-MULTI/IMDB-MULTI_graph_indicator.txt", "r")
pedge_file = open("REDDIT-BINARY/REDDIT-BINARY_A.txt", 'r')
label_file = open("REDDIT-BINARY/REDDIT-BINARY_graph_labels.txt", "r")
#node_atr = open("IMDB-BINARY/PTC_MR_node_labels.txt","r")

# One undirected graph per label line.
label = []
Graph = []
for lline in label_file:
    label.append(lline)
    g = graph_tool.Graph()
    g.set_directed(False)
    Graph.append(g)

l = len(label)
Nodes = {}
node_num = []
k = 1
n = 0
for i, node_line in enumerate(pnode_file):
    #node_label = node_atr.readline().strip('\n')
    if int(node_line) == k:
        Nodes[i] = Node(n, k - 1)
        n = n + 1
    else:
        # Finalize the previous graph, then start numbering the next one.
        Graph[k - 1].add_vertex(n)
        vprop_value = Graph[k - 1].new_vertex_property("int")
        Graph[k - 1].vp.label = vprop_value
        k = k + 1
        n = 0
        Nodes[i] = Node(n, k - 1)
# Finalize the last graph.
Graph[k - 1].add_vertex(n)
vprop_value = Graph[k - 1].new_vertex_property("int")
Graph[k - 1].vp.label = vprop_value
print("hello")

for i in range(len(Nodes)):
    No = Nodes[i]
    Graph[No.graph].vp.label[Graph[No.graph].vertex(No.node)] = No.atr

# Each undirected edge is listed in both directions in the *_A.txt file;
# the node-index comparison keeps only one direction.
for i, edge_line in enumerate(pedge_file):
    node1, node2 = edge_line.split(', ')
    Node1 = Nodes[int(node1) - 1]
    Node2 = Nodes[int(node2) - 1]
    if Node1.node <= Node2.node:
        Node1.atr += 1
        Node2.atr += 1
        Graph[Node1.graph].add_edge(Graph[Node1.graph].vertex(Node1.node), Graph[Node1.graph].vertex(Node2.node))

for k in range(len(Graph)):
    vprop_value = Graph[k].new_vertex_property("int")
    Graph[k].vp.label = vprop_value

for i in range(len(Nodes)):
    No = Nodes[i]
    Graph[No.graph].vp.label[Graph[No.graph].vertex(No.node)] = No.atr

f_text = open("Reddit-B/text.txt", "w")
for i in range(len(Graph)):
    file_name = "reddit_b_" + str(i)
    Graph[i].save("Reddit-B/" + file_name + ".xml.gz")
    f_text.write(file_name + ".xml.gz" + " " + label[i])

print(Graph[0])
print(Graph[len(Graph) - 1])
2,011
project/custom_session_interface.py
DanielGrams/gsevp
1
2023495
from flask import request
from flask.sessions import SecureCookieSessionInterface


class CustomSessionInterface(SecureCookieSessionInterface):
    """Prevent creating session from API requests."""

    def save_session(self, *args, **kwargs):
        if "authorization" in request.headers:
            return

        return super(CustomSessionInterface, self).save_session(*args, **kwargs)
391
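A session interface only takes effect once it is installed on the application object. A minimal usage sketch, with the import path taken from the row's file path and the app construction assumed:

from flask import Flask
from project.custom_session_interface import CustomSessionInterface  # path from the row above

app = Flask(__name__)
# Flask consults session_interface on every response; this install makes it
# skip writing a session cookie for requests carrying an Authorization header.
app.session_interface = CustomSessionInterface()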
scripts/run_notebook.py
milakis/microeconometrics
0
2022885
#!/usr/bin/env python
"""Run notebooks.

This script allows you to run the lecture notebooks. One can either run
all notebooks at once or just a single lecture. It is enough to provide
a substring for the name.

Examples
--------
>> run-notebook

    Run all lectures.

>> run-notebook -n 01

    Run lecture 01-introduction.

"""
import subprocess as sp
import glob
import os

from auxiliary import parse_arguments
from auxiliary import LECTURES_ROOT


def run_notebook(notebook):
    cmd = " jupyter nbconvert --execute {} --ExecutePreprocessor.timeout=-1".format(
        notebook
    )
    sp.check_call(cmd, shell=True)


if __name__ == "__main__":
    request = parse_arguments("Execute notebook")
    os.chdir(LECTURES_ROOT)

    for dirname in request:
        os.chdir(dirname)
        for fname in glob.glob("*.ipynb"):
            print(f"\n {os.getcwd().split('/')[-1]}\n")
            run_notebook(fname)
        os.chdir("../")
947
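The auxiliary helpers are not part of this row. A hypothetical sketch of what parse_arguments could look like, assuming it maps the optional -n substring onto the lecture directories:

# Hypothetical reconstruction for illustration only: the real helper
# lives in auxiliary.py, which this page does not show.
import argparse
import os

LECTURES_ROOT = "lectures"  # placeholder; the real constant comes from auxiliary.py


def parse_arguments(description):
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-n", "--name", default="", help="substring of a lecture name")
    args = parser.parse_args()
    # Keep every lecture directory whose name contains the given substring.
    return [d for d in sorted(os.listdir(LECTURES_ROOT)) if args.name in d]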
get_answer.py
VincentGaoHJ/Spider-Zhihu
0
2023427
# -*- coding: utf-8 -*-
"""
@Date: Created on 2019/7/26
@Author: <NAME>
@Description:
"""
import os
import csv
import time  # needed by time.perf_counter() below

from zhihu_APIs import *
from data_getter import get_data
from w3lib.html import remove_tags
from headers_pool import HEADERS_POOL

# default args
PROCESS_NUM = 4
MAX_PROCESS_NUM = 8
FLOOD_DISCHARGE_RATIO = 0.3
FLOODPLAIN_RATIO = 0.1
HEADERS_POOL = HEADERS_POOL


def load_question(topic_id):
    file_name = str(topic_id) + "_topic.csv"
    file_path = os.path.join("./data", file_name)
    data = []
    with open(file_path, encoding="utf-8-sig") as csvfile:
        csv_reader = csv.reader(csvfile)  # read the csv file with csv.reader
        for row in csv_reader:  # collect every row of the csv file
            data.append(row)
    return data


def get_answer(topic_id, topic_name):
    # Cloud-side data changes are ignored; when the step is smaller than the
    # limit, consecutive samples necessarily overlap.
    time.perf_counter()  # start timing
    zhi = ZhiHu()
    func = zhi.questions.answers
    question_list = load_question(topic_name)
    total_num = len(question_list)
    print("[get answers] ===== preparing requests {}, total questions {} =====".format(topic_name, total_num))
    i = 0
    answer_all = []
    fetch_body = []
    for question in question_list:
        i += 1
        question_id = question[0]
        question_ansnum = int(question[1])
        fetch_body.append({"identifier": question_id,
                           "query_args": ["content"],
                           "range": [0, question_ansnum]})
        # break
    print("[get answers] ===== requesting data {}, total questions {} =====".format(topic_name, total_num))
    res = get_data(fetch_body, func, process_num=PROCESS_NUM, max_process_num=MAX_PROCESS_NUM,
                   flood_discharge_ratio=FLOOD_DISCHARGE_RATIO, floodplain_ratio=FLOODPLAIN_RATIO,
                   headers_pool=HEADERS_POOL)
    # print(res)
    print("[get answers] ===== processing data {}, total questions {} =====".format(topic_name, total_num))
    i = 0
    for question_id, question_result in res.items():
        i += 1
        answer_list = question_result["data"]
        if i % 1000 == 0:
            print("[processing question {} / {}]".format(i, total_num), question_id)
        for item in answer_list:
            answer_id = item["id"]
            raw_ans = item["content"]
            question_content = item["question"]["title"]
            answer_content = remove_tags(raw_ans)
            answer_all.append((question_id, answer_id, question_content, answer_content))
    print("[get answers] ===== saving data {}, total questions {} =====".format(topic_name, total_num))
    file_name = str(topic_name) + "_answers.csv"
    file_path = os.path.join("./data", file_name)
    with open(file_path, "a", encoding="utf-8-sig", newline='') as file:
        writer = csv.writer(file)
        for item in answer_all:
            writer.writerows([item])


if __name__ == '__main__':
    topic_id = "19574423"
    topic_name = "ZhongGuoJinDaiShi"
    get_answer(topic_id, topic_name)
2,904
fltk/util/generate_docker_compose.py
tudelft-eemcs-dml/fltk-testbed-gr-5
1
2023141
import sys
import yaml
import copy

template_path = './deploy/templates'


def load_system_template():
    with open(f'{template_path}/system_stub.yml') as file:
        documents = yaml.full_load(file)
    return documents


def load_client_template(type='default'):
    with open(f'{template_path}/client_stub_{type}.yml') as file:
        documents = yaml.full_load(file)
    return documents


def generate_client(id, template: dict, world_size: int, type='default'):
    local_template = copy.deepcopy(template)
    key_name = list(local_template.keys())[0]
    container_name = f'client_{type}_{id}'
    local_template[container_name] = local_template.pop(key_name)
    for key, item in enumerate(local_template[container_name]['environment']):
        if item == 'RANK={rank}':
            local_template[container_name]['environment'][key] = item.format(rank=id)
        if item == 'WORLD_SIZE={world_size}':
            local_template[container_name]['environment'][key] = item.format(world_size=world_size)
    local_template[container_name]['ports'] = [f'{5000 + id}:5000']
    return local_template, container_name


def generate(num_clients: int):
    world_size = num_clients + 1
    system_template: dict = load_system_template()

    for key, item in enumerate(system_template['services']['fl_server']['environment']):
        if item == 'WORLD_SIZE={world_size}':
            system_template['services']['fl_server']['environment'][key] = item.format(world_size=world_size)

    for client_id in range(1, num_clients + 1):
        client_type = 'default'
        if client_id == 1:
            client_type = 'slow'
        if client_id == 2:
            client_type = 'medium'
        client_template: dict = load_client_template(type=client_type)
        client_definition, container_name = generate_client(client_id, client_template, world_size, type=client_type)
        system_template['services'].update(client_definition)

    with open(r'./docker-compose.yml', 'w') as file:
        yaml.dump(system_template, file, sort_keys=False)


if __name__ == '__main__':
    num_clients = int(sys.argv[1])
    generate(num_clients)
    print('Done')
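# A round-trip sketch of generate_client with a made-up stub, assuming it runs
# in the same file as the module above (the real templates live in
# ./deploy/templates and may differ): the single top-level key is renamed to
# client_<type>_<id> and the RANK/WORLD_SIZE placeholders are filled in.
stub = yaml.full_load("""
client_default:
  image: fltk:latest
  environment:
    - RANK={rank}
    - WORLD_SIZE={world_size}
""")
definition, name = generate_client(3, stub, 5)
print(name)                       # client_default_3
print(definition[name]['ports'])  # ['5003:5000']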
2,165
examples/host2gw_diagram.py
community-fabric/python-ipfabric-diagrams
1
2023573
""" unicast_diagram.py """ from ipfabric_diagrams import IPFDiagram, PathLookupSettings, Host2GW if __name__ == '__main__': ipf = IPFDiagram() h2g = Host2GW(startingPoint='10.241.1.203') json_data = ipf.diagram_json(h2g) settings = PathLookupSettings() png_data = ipf.diagram_png(h2g, graph_settings=settings) with open('tmp/host2gw.png', 'wb') as f: f.write(png_data) svg_data = ipf.diagram_svg(h2g) with open('tmp/host2gw.svg', 'wb') as f: f.write(svg_data) ipf.close()
528
testing/unexpected_passes_common/constants.py
chromium/chromium
14,668
2023181
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Constants for unexpected pass finders."""


class BuilderTypes(object):
    CI = 'ci'
    TRY = 'try'
263
Chapter2_Freight_Train_Formation_Plan_Problems/Column_Generation.py
gaotianze/Gurobi_Learning
0
2023540
# From Gurobi Tutorial P142 - Cutting Stock Problem
# Column Generation Tutorial - https://www.cnblogs.com/dengfaheng/p/11249879.html
import gurobipy
from gurobipy import *

TypesDemand = [3, 7, 9, 16]
QuantityDemand = [25, 30, 14, 8]
LengthUsable = 20

MainProbRelax = Model()
SubProb = Model()

# Build the master problem; each initial cutting pattern cuts only one length
# per stock pipe.
# Add variables and the objective: four variables, one per pattern: z1 z2 z3 z4
Zp = MainProbRelax.addVars(len(TypesDemand), obj=1.0, vtype=GRB.CONTINUOUS, name='z')
# Relax the master problem to continuous first, with the four initial patterns
# as the basic variables.
# Add the constraints built from the four initial patterns:
# 6*z_1 >= 25; 2*z_2 >= 30; 2*z_3 >= 14; 1*z_4 >= 8
ColumnIndex = MainProbRelax.addConstrs(
    quicksum(Zp[p] * (LengthUsable // TypesDemand[i]) for p in range(len(TypesDemand)) if p == i) >= QuantityDemand[i]
    for i in range(len(TypesDemand)))
MainProbRelax.optimize()

# Build the subproblem (pricing problem).
# Fetch the dual values.
# lambda_list = MainProbRelax.getAttr(GRB.Attr.Pi, MainProbRelax.getConstrs())
Dualsolution = MainProbRelax.getAttr(GRB.Attr.Pi, MainProbRelax.getConstrs())
# Add variables; the objective is now: max(0.166*c_1 + 0.5*c_2 + 0.5*c_3 + c_4)
Ci = SubProb.addVars(len(TypesDemand), obj=Dualsolution, vtype=GRB.INTEGER, name='c')
# Add the constraint 3c1 + 7c2 + 9c3 + 16c4 <= 20 (length of a single stock pipe)
SubProb.addConstr(quicksum(Ci[i] * TypesDemand[i] for i in range(len(TypesDemand))) <= LengthUsable)
SubProb.setAttr(GRB.Attr.ModelSense, -1)  # -1 means maximize
SubProb.optimize()

# Keep generating columns while the reduced cost is still negative
while SubProb.objval > 1:
    # Read the variable values; the new pattern, e.g. [2, 2, 0, 0]^T, is added
    # to the RMP as a new column.
    columnCoeff = SubProb.getAttr("X", SubProb.getVars())
    column = Column(columnCoeff, MainProbRelax.getConstrs())
    # Add the new variable
    MainProbRelax.addVar(obj=1.0, vtype=GRB.CONTINUOUS, name='CG', column=column)
    MainProbRelax.optimize()
    # Update the subproblem objective coefficients with the new duals
    for i in range(len(TypesDemand)):
        Ci[i].obj = ColumnIndex[i].pi
    SubProb.optimize()

# After column generation, convert the model to integer and solve.
for v in MainProbRelax.getVars():
    v.setAttr("VType", GRB.INTEGER)
MainProbRelax.optimize()

print("\nSolution:")
for v in MainProbRelax.getVars():
    if v.X != 0.0:
        print('%s used %g times' % (v.VarName, v.X))
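# The formulation being solved above, written out (a sketch; notation mine,
# not from the original tutorial). Restricted master problem over the known
# patterns P, where a_{ip} is the number of pieces of length l_i cut by
# pattern p and b_i is the demand:
#
#   min  \sum_{p in P} z_p
#   s.t. \sum_{p in P} a_{ip} z_p >= b_i   for all i,    z_p >= 0
#
# Pricing subproblem with duals \pi_i: a column with reduced cost
# 1 - \sum_i \pi_i c_i < 0 exists iff the optimum below exceeds 1, which is
# exactly the `while SubProb.objval > 1` test:
#
#   max  \sum_i \pi_i c_i
#   s.t. \sum_i l_i c_i <= L,    c_i integer >= 0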
2,005
sort/quicksort/quicksort.py
BlueRhino/algorithm-python
0
2023576
import random


def partition_arr(arr, start, end):
    """
    Partition arr between indices start and end for quicksort, using the
    element arr[end] as the pivot.
    Returns the final index of the pivot element.
    :param arr:
    :param start:
    :param end:
    :return: the index where the pivot element ends up
    """
    if start < 0 or end < start:
        raise Exception("The index is not correct.")
    arr_len = len(arr)
    if end > arr_len - 1:
        raise Exception("The end index must be less than the length of arr.")
    flag = arr[end]
    i = start - 1
    for j in range(start, end):
        if arr[j] <= flag:
            i += 1
            __exchange_value(arr, i, j)
    __exchange_value(arr, i + 1, end)
    return i + 1


def quick_sort(arr, start, end):
    if start < end:
        index = partition_arr(arr, start, end)
        quick_sort(arr, start, index - 1)
        quick_sort(arr, index + 1, end)


def quick_sort_random(arr, start, end):
    i = random.randint(start, end)
    __exchange_value(arr, i, end)
    if start < end:
        index = partition_arr(arr, start, end)
        quick_sort(arr, start, index - 1)
        quick_sort(arr, index + 1, end)


def __exchange_value(arr, index1, index2):
    arr_len = len(arr)
    if 0 <= index1 < arr_len and 0 <= index2 < arr_len:
        if index1 == index2:
            return
        tmp = arr[index1]
        arr[index1] = arr[index2]
        arr[index2] = tmp
    else:
        raise Exception("Index is not correct.")
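# Usage sketch (same module): sort a list in place over its full index range.
data = [5, 2, 9, 1, 7]
quick_sort(data, 0, len(data) - 1)
print(data)  # [1, 2, 5, 7, 9]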
1,405
leetcode/p54.py
mythnc/lab
0
2023363
from typing import List


class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        directions = (
            (0, 1),
            (1, 0),
            (0, -1),
            (-1, 0)
        )
        row = len(matrix)
        col = len(matrix[0])
        total = row * col
        count = 0
        d_index = 0
        result = [0] * total
        r = 0
        c = 0
        # sentinel outside the problem's stated value range (-100..100)
        VISITED_VALUE = 101
        while count < total:
            result[count] = matrix[r][c]
            matrix[r][c] = VISITED_VALUE
            count += 1
            if count >= total:
                break
            next_r, next_c = directions[d_index]
            while (not ((0 <= r + next_r < row)
                        and (0 <= c + next_c < col)
                        and matrix[r + next_r][c + next_c] != VISITED_VALUE)):
                d_index = (d_index + 1) % len(directions)
                next_r, next_c = directions[d_index]
            r += next_r
            c += next_c
        return result


print(Solution().spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
print(Solution().spiralOrder([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]))
1,107
examples/testProtection.py
Bill2462/AX3003P
0
2023243
""" Example that demonstrates the overcurrent protection (OVP) and overvoltage protection (OVC). This script sets the OVP level to 5V and OCP to 0.3A. Then slowely increases the voltage until the OVP trips. Next the OVP is reseted and the same procedure is repeated with the OCP. """ # This file is part of AX3003P library. # Copyright (c) 2019 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the “Software”), to deal in the # Software without restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the # following conditions: THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF # OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import sys from time import sleep import AX3003P # Get the port selection from the command line argument. if len(sys.argv) < 2: print("usage: python3 helloWorld.py [device]") print("Example: python3 helloWorld.py /dev/ttyUSB0") sys.exit(1) port = sys.argv[1] # connect to the power supply and enable output psu = AX3003P.connect(port) # Set the OVP and OCP level. print("Setting OVP threshold to 5V...") psu.setOvpTreshold(5) print("Setting OCP threshold to 0.5A") psu.setOcpTreshold(0.3) print("\n######### Testing the OVP #########") # set the current to 50mA psu.setCurrent(0.05) psu.enableOutput() # slowly increase the voltage until the OVP trips voltages = [1, 2, 4, 9] # voltages that we are going to test for voltage in voltages: # a little hack to trigger the OVP. # Normally PSU won't allow us to set voltage higher then OVP threshold. # However we can first turn the OVP off, set the voltage and turn it back on. psu.disableOvp() sleep(2) psu.setVoltage(voltage) sleep(4) psu.enableOvp() sleep(1) # delay to allow the voltage to stabilize # check if ovp is tripped if psu.ovpTripped(): status = "TRIP" else: status = "RDY" # print the status print("Voltage: " + str(voltage) + "V OVP: " + status) if status == "TRIP": break # exit if ovp tripped # reset OVP and set voltage to 5V print("Resetting the OVP...") psu.resetOvp() psu.setVoltage(5) #now we have to short the PSU output print("Short PSU output and press enter to continue") input() print("\n######### Testing the OCP #########") psu.enableOutput() # slowely increase the current until OCP trips currents = [0.1, 0.2, 0.3, 1.0] for current in currents: psu.disableOcp() sleep(2) psu.setCurrent(current) sleep(4) psu.enableOcp() sleep(1) # delay to allow the voltage to stabilize # check if ovp is tripped if psu.ocpTripped(): status = "TRIP" else: status = "RDY" # print the status print("Curent: " + str(current) + "A OCP: " + status) if status == "TRIP": break # exit if ocp tripped #disable output and disconnect psu.disableOutput() psu.resetOcp() psu.disconnect()
3,514
webscraper/Windsor/WebScraping.py
rex-lui/Hong-Kong-Mall-Shop-Directory-Web-Scraping
0
2023737
#Import necessary packages
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import datetime as dt
import configparser
import os
import json

#Configure parameters
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
mall = config['general']['mall']
shoplisturl = config['url']['shoplisturl']
fnblisturl = config['url']['fnblisturl']
shopdetailbasicurl = config['url']['shopdetailbasicurl']


#Get shop category data and export into csv
def getShopCategory():
    #Create empty DataFrame for shop category
    shopcategory = pd.DataFrame()
    for type, url in zip(['Shopping', 'Dining'], [shoplisturl, fnblisturl]):
        #Get shop category
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        for category_selected in soup.find_all('select', class_='categorySelected'):
            for cat in category_selected.find_all('option'):
                try:
                    shop_category_id = cat.get('value')
                except:
                    shop_category_id = np.nan
                try:
                    shop_category_name = cat.text.split('\r\n')[0].strip()
                except:
                    shop_category_name = np.nan
                shopcategory = shopcategory.append(
                    {
                        'type': type,
                        'shop_category_id': shop_category_id,
                        'shop_category_name': shop_category_name
                    },
                    ignore_index=True
                )
    shopcategory['update_date'] = dt.date.today()
    shopcategory['mall'] = mall
    shopcategory.drop(shopcategory[shopcategory.shop_category_name == 'All'].index, inplace=True)
    shopcategory = shopcategory.loc[:, ['mall', 'type', 'shop_category_id', 'shop_category_name', 'update_date']]
    return shopcategory


#Get shop master data and export into csv
def getShopMaster():
    shopcategory = getShopCategory()
    #Create empty DataFrames for shop master
    shoplist = pd.DataFrame()
    shoplisttc = pd.DataFrame()
    shopdetail = pd.DataFrame()
    for type, url in zip(['Shopping', 'Dining'], [shoplisturl, fnblisturl]):
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        for shop in soup.find_all('div', class_='shop'):
            try:
                shop_floor = shop.get('floorid').replace('/', '').strip()
            except:
                shop_floor = np.nan
            try:
                shop_category_id = shop.get('catid')
            except:
                shop_category_id = np.nan
            try:
                shop_category_name = shopcategory.loc[shopcategory['shop_category_id'] == shop_category_id, 'shop_category_name'].values[0]
            except:
                shop_category_name = np.nan
            for shop_body in shop.find_all('div', class_='card shop-body'):
                for shop_content in shop_body.find_all('div', class_='card-body shop-body-content'):
                    try:
                        shop_detail_link = shop_content.find(class_='card-title').find('a').get('href')
                        shoplinkid = shop_detail_link.find('&id=')
                        shop_id = shop_detail_link[shoplinkid + 4:].replace('&lang=en', '')
                    except:
                        shop_detail_link = np.nan
                        shop_id = np.nan
                    try:
                        shop_name = shop_content.find(class_='card-title').find('a').text
                    except:
                        shop_name = np.nan
                    try:
                        shop_number = shop_content.find(src=re.compile('ShopDetail_icon_shopNo')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
                    except:
                        shop_number = np.nan
                for shop_footer in shop_body.find_all('div', class_='card-footer'):
                    try:
                        if shop_footer.find(class_='shop-tag-club'):
                            loyalty_offer = 'WINDSOR CLUB Offer'
                        else:
                            loyalty_offer = np.nan
                    except:
                        loyalty_offer = np.nan
                    try:
                        if shop_footer.find(class_='shop-tag-coupon'):
                            voucher_acceptance = '1'
                        else:
                            voucher_acceptance = np.nan
                    except:
                        voucher_acceptance = np.nan
            shoplist = shoplist.append(
                {
                    'type': type,
                    'shop_id': shop_id,
                    'shop_name_en': shop_name,
                    'shop_number': shop_number,
                    'shop_floor': shop_floor,
                    'shop_category_id': shop_category_id,
                    'shop_category_name': shop_category_name,
                    'loyalty_offer': loyalty_offer,
                    'voucher_acceptance': voucher_acceptance,
                    'shop_detail_link': shop_detail_link
                },
                ignore_index=True
            )
        urltc = url.replace('en', 'tc')
        page = requests.get(urltc)
        soup = BeautifulSoup(page.content, 'html.parser')
        for shop in soup.find_all('div', class_='shop'):
            for shop_body in shop.find_all('div', class_='card shop-body'):
                for shop_content in shop_body.find_all('div', class_='card-body shop-body-content'):
                    try:
                        shop_detail_link = shop_content.find(class_='card-title').find('a').get('href')
                        shoplinkid = shop_detail_link.find('&id=')
                        shop_id = shop_detail_link[shoplinkid + 4:].replace('&lang=tc', '')
                    except:
                        shop_detail_link = np.nan
                        shop_id = np.nan
                    try:
                        shop_name_zh = shop_content.find(class_='card-title').find('a').text
                    except:
                        shop_name_zh = np.nan
                    shoplisttc = shoplisttc.append(
                        {
                            'shop_id': shop_id,
                            'shop_name_tc': shop_name_zh
                        },
                        ignore_index=True
                    )
    for shop_detail_link in shoplist['shop_detail_link']:
        shopdetailurl = shopdetailbasicurl + shop_detail_link
        page = requests.get(shopdetailurl)
        soup = BeautifulSoup(page.content, 'html.parser')
        for shop_detail in soup.find_all('div', class_='shop-detail'):
            for shop_table in shop_detail.find_all('table', class_='shop-table'):
                try:
                    opening_hours = shop_table.find(src=re.compile('ShopDetail_icon_time')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
                    opening_hours = ';'.join([opening_hour.strip() for opening_hour in opening_hours.split('\r\n')])
                except:
                    opening_hours = np.nan
                try:
                    phone = shop_table.find(src=re.compile('ShopDetail_icon_tel')).find_parent('td').find_next_sibling('td').find_next_sibling('td').text
                    phone = phone.replace(' ', '')
                except:
                    phone = np.nan
            shopdetail = shopdetail.append(
                {
                    'shop_detail_link': shop_detail_link,
                    'opening_hours': opening_hours,
                    'phone': phone
                },
                ignore_index=True
            )
    shoplist = pd.merge(shoplist, shoplisttc, on='shop_id')
    shopmaster = pd.merge(shoplist, shopdetail, on='shop_detail_link')
    shopmaster['update_date'] = dt.date.today()
    shopmaster['mall'] = mall
    shopmaster['tag'] = np.nan
    shopmaster = shopmaster.loc[:, ['mall', 'type', 'shop_id', 'shop_name_en', 'shop_name_tc', 'shop_number', 'shop_floor', 'phone', 'opening_hours', 'loyalty_offer', 'voucher_acceptance', 'shop_category_id', 'shop_category_name', 'tag', 'update_date']]
    shopmaster = shopmaster[shopmaster['shop_number'] != r'(非商店)']
    return shopmaster
8,681
meta/asttools/tests/test_remove_trivial.py
tomviner/Meta
95
2022862
'''
Created on Aug 5, 2011

@author: sean
'''
from __future__ import print_function

import unittest
import ast
from meta.asttools.mutators.remove_trivial import remove_trivial
from meta.asttools.tests import assert_ast_eq, skip_networkx
from meta.asttools.visitors.graph_visitor import GraphGen


def simple_case(self, toremove, expected):
    root = ast.parse(toremove)
    remove_trivial(root)
    expected_root = ast.parse(expected)
    assert_ast_eq(self, root, expected_root)


@skip_networkx
class Test(unittest.TestCase):

    def assertRemoved(self, toremove, expected):
        root = ast.parse(toremove)
        remove_trivial(root)
        expected = ast.parse(expected)
        assert_ast_eq(self, root, expected)

    def test_single(self):
        simple_case(self, 'a = 1', 'a = 1')

    def test_empty(self):
        simple_case(self, '', '')

    def test_simple(self):
        simple_case(self, 'a = 1; a = 2', 'pass; a = 2')

    def test_multi(self):
        simple_case(self, 'a = 1; a = 2; a = 3', 'pass; pass; a = 3')

    def test_apart(self):
        simple_case(self, 'a = 1; b = 1; a = 2', 'pass; b = 1; a = 2')

    def test_if(self):
        simple_case(self, 'a = 1\nif x: a = 2', 'a = 1\nif x: a = 2')

    def test_if2(self):
        simple_case(self, 'if x: a = 2\na = 1', 'if x: a = 2\na = 1')

    def test_if_else(self):
        simple_case(self, 'a = 1\nif x: a = 2\nelse: a = 3', 'pass\nif x: a = 2\nelse: a = 3')

    def test_if_else2(self):
        simple_case(self, 'if x: a = 2\nelse: a = 3\na = 1', 'if x: pass\nelse: pass\na = 1')

    def test_for(self):
        simple_case(self, 'a = 1\nfor x in y: a = 2', 'a = 1\nfor x in y: a = 2')

    def test_for_else(self):
        simple_case(self, 'a = 1\nfor x in y: a = 2\nelse: a = 3', 'pass\nfor x in y: a = 2\nelse: a = 3')

    def test_for_else_break(self):
        simple_case(self, 'a = 1\nfor x in y:\n break\n a = 2\nelse: a = 3',
                    'a = 1\nfor x in y:\n break\n a = 2\nelse: a = 3')

    def test_for_else_conti(self):
        simple_case(self, 'a = 1\nfor x in y:\n continue\n a = 2\nelse: a = 3',
                    'a = 1\nfor x in y:\n continue\n a = 2\nelse: a = 3')

    def test_while(self):
        simple_case(self, 'a = 1\nwhile x: a = 2', 'a = 1\nwhile x: a = 2')

    def test_while_else(self):
        simple_case(self, 'a = 1\nwhile x: a = 2\nelse: a = 3', 'pass\nwhile x: a = 2\nelse: a = 3')


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
2,975
tests/config/test_config.py
kevinbuchanjr/sagify
352
2023862
from sagify.config.config import ConfigManager, Config


def test_config(tmpdir):
    config_file = tmpdir.join('config.json')
    config_file.write("""
    {
        "image_name": "keras-app-img",
        "aws_profile": "sagemaker",
        "aws_region": "us-east-1",
        "python_version": "3.6",
        "sagify_module_dir": "keras-app-img",
        "requirements_dir": "requirements.txt"
    }
    """)
    config_manager = ConfigManager(str(config_file))
    actual_config_obj = config_manager.get_config()

    assert actual_config_obj.to_dict() == Config(
        image_name="keras-app-img",
        aws_profile="sagemaker",
        aws_region="us-east-1",
        python_version="3.6",
        sagify_module_dir="keras-app-img",
        requirements_dir="requirements.txt"
    ).to_dict()
767
custom_components/sensor.py
hombrelab/home-assistant-smartmeter-reader
0
2022719
# Copyright (c) 2021 Hombrelab <<EMAIL>>

import logging

import pytz
from dsmr_parser import obis_references as obis_ref
from dsmr_parser import telegram_specifications
from dsmr_parser.parsers import TelegramParser
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType

from . import SmartmeterDevice
from .const import (
    DOMAIN,
    UUID,
    SERVICE,
    DSMRVERSION,
    PRECISION,
    TIMEZONE,
    GAS_CONSUMPTION_NAME,
    GAS_HOURLY_CONSUMPTION_NAME,
    GAS_HOURLY_LAST_UPDATE_NAME,
    ENTITIES,
    ENTITIES_SCHEMA,
)

_LOGGER = logging.getLogger(__name__)


async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry, async_add_entities):
    """set up entities based on a config entry"""
    _version = entry.data[DSMRVERSION]
    _precision = entry.data[PRECISION]
    _timezone = pytz.timezone(entry.data[TIMEZONE])

    # Protocol version specific obis
    if _version in "4":
        _gas_obis = obis_ref.HOURLY_GAS_METER_READING
        _parser = TelegramParser(telegram_specifications.V4)
    elif _version in "5":
        _gas_obis = obis_ref.HOURLY_GAS_METER_READING
        _parser = TelegramParser(telegram_specifications.V5)
    elif _version in ("5B",):
        _gas_obis = obis_ref.BELGIUM_HOURLY_GAS_METER_READING
        _parser = TelegramParser(telegram_specifications.BELGIUM_FLUVIUS)
    else:
        _gas_obis = obis_ref.GAS_METER_READING
        _parser = TelegramParser(telegram_specifications.V2_2)

    # Define mapping for electricity mappings
    elements = ENTITIES
    elements += [
        [
            GAS_CONSUMPTION_NAME,
            'mdi:fire',
            _gas_obis
        ],
    ]

    # generate smart entities
    entities = [
        ElecticityEntity(name, icon, obis, _precision, _timezone, _parser)
        for name, icon, obis in elements
    ]

    elements = [
        [
            GAS_HOURLY_CONSUMPTION_NAME,
            'mdi:fire',
            _gas_obis
        ],
        [
            GAS_HOURLY_LAST_UPDATE_NAME,
            'mdi:update',
            _gas_obis
        ],
    ]

    # generate gas entities
    entities += [
        GasEntity(name, icon, obis, _precision, _timezone, _parser)
        for name, icon, obis in elements
    ]

    # Set up the sensor platform
    async_add_entities(entities)

    async def async_consume_service(call):
        """handle calls to the service."""
        telegram = call.data.get('telegram')
        telegram = telegram.replace(" ", "")
        telegram = telegram.replace("\\r\\n", "\r\n")

        for entity in entities:
            entity.set_consumed(telegram)

    hass.services.async_register(
        DOMAIN,
        SERVICE,
        async_consume_service,
        schema=ENTITIES_SCHEMA,
    )


class ElecticityEntity(SmartmeterDevice, RestoreEntity):
    """representation of an electricity entity"""

    def __init__(self, name, icon, obis, precision, timezone, parser):
        """initialize the electricity entity"""
        self._name = name
        self._icon = icon
        self._obis = obis

        self._element = self._name.lower().replace(" ", "_")
        self._unit = ''

        self._obis = obis
        self._precision = precision
        self._timezone = timezone
        self._parser = parser

        self._data = ''
        self._telegram = ''
        self._state = '-'
        self._attributes = {}

    async def async_added_to_hass(self):
        """run when entity is about to be added"""
        await super().async_added_to_hass()

        state = await self.async_get_last_state()
        if state:
            try:
                self._state = state.state
                self._attributes = state.attributes
                self._data = self._attributes['data']
                self._telegram = self._parser.parse(self._data)
            except Exception as err:
                _LOGGER.warning(f"could not restore {self._name}: {err}")

    def get_attribute(self, name):
        """get the attribute value if the object has it"""
        attribute = self._telegram[self._obis]
        return getattr(attribute, name, None)

    @staticmethod
    def translate_tariff(value):
        # Convert 2/1 to normal/low
        # DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
        # used for normal rate.
        if value == '0002':
            return 'normal'
        if value == '0001':
            return 'low'
        return None

    def set_consumed(self, data):
        """set the telegram for the electricity reading"""
        if data is not None:
            self._data = data
            self._telegram = self._parser.parse(self._data)

    def update(self):
        try:
            self._unit = self.get_attribute('unit')
        except:
            self._unit = ''

        try:
            value = self.get_attribute('value')
        except:
            self._state = '-'
            return

        if self.name == 'Smartmeter Power Consumption (both)':
            value = value + self._telegram[obis_ref.ELECTRICITY_USED_TARIFF_2].value
        elif self._obis == obis_ref.ELECTRICITY_ACTIVE_TARIFF:
            self._state = self.translate_tariff(value)
            return

        try:
            value = round(float(value), self._precision)
        except TypeError:
            pass

        if value is not None:
            self._state = value
        else:
            self._state = '-'

    @property
    def unique_id(self) -> str:
        """return the unique id"""
        return f"{UUID}.{self._element}"

    @property
    def name(self) -> str:
        """return the name of the entity"""
        return self._name

    @property
    def icon(self) -> str:
        """return the icon to be used for this entity"""
        return self._icon

    @property
    def unit_of_measurement(self):
        """return the unit of measurement"""
        return self._unit

    @property
    def state(self):
        """return the state of the entity"""
        return self._state

    @property
    def state_attributes(self):
        """return the state attributes"""
        return {'data': self._data}


class GasEntity(ElecticityEntity):
    """representation of a gas entity"""

    def __init__(self, name, icon, obis, precision, timezone, parser):
        """initialize the gas entity"""
        super().__init__(name, icon, obis, precision, timezone, parser)

        self._previous_state = None
        self._previous_timestamp = None

    def update(self):
        try:
            if self._name == GAS_HOURLY_CONSUMPTION_NAME:
                self._unit = f"{self.get_attribute('unit')}/h"
            elif self._name == GAS_HOURLY_LAST_UPDATE_NAME:
                self._unit = ''
        except Exception:
            self._unit = ''

        try:
            value = self.get_attribute('value')
        except:
            self._state = '-'
            return

        try:
            timestamp = self.get_attribute('datetime')
            timestamp = timestamp.astimezone(self._timezone)
        except:
            timestamp = ''

        if self._previous_state is None:
            try:
                self._previous_state = self._attributes['previous_state']
            except:
                self._previous_state = 0

        if self._previous_timestamp is None:
            try:
                self._previous_timestamp = self._attributes['previous_timestamp']
            except:
                self._previous_timestamp = ''

        # check if the timestamp for the object differs from the previous one
        if self.name == GAS_HOURLY_CONSUMPTION_NAME:
            if timestamp != self._previous_timestamp:
                try:
                    self._state = value - self._previous_state
                    #diff = value - self._previous_state
                    #timediff = timestamp - self._previous_timestamp
                    #total_seconds = timediff.total_seconds()
                    #self._state = round(float(diff) / total_seconds * 3600, self._precision)
                except:
                    self._state = 0
                self._previous_state = self._state
                self._previous_timestamp = timestamp
            else:
                self._state = 0
        else:
            self._state = timestamp.strftime('%X')

    @property
    def device_state_attributes(self):
        """return the state attributes"""
        return {'data': self._data,
                'previous_state': self._previous_state,
                'previous_timestamp': self._previous_timestamp}
8,657
b-series/b294.py
TheLurkingCat/ZeroJudge
1
2022662
a = input()
s = [int(x) for x in input().split()]
ans = 0
for date, num in enumerate(s, 1):
    ans += num * date
print(ans)
125
vaquero/collectors.py
jbn/vaquero
1
2023709
import jmespath
import random

# See also: fill_in_unknowns in transformations as a collector.


def sampling(items, p):
    for item in items:
        if random.random() < p:
            yield item


class Collector:
    def update(self, item):
        raise NotImplementedError("Collector#update(item) not implemented")

    def update_over_all(self, items):
        for item in items:
            self.update(item)

    @property
    def collected(self):
        raise NotImplementedError("Collector#collected not implemented")


class SetCollector(Collector):
    """
    Collect the set of values for jmespaths over applied items.
    """

    def __init__(self, paths):
        self._paths = {}
        self._sets = {}
        self.add_paths(paths)

    def add_paths(self, paths):
        """
        :param paths: an iterable of jmespath paths
        """
        for path in paths:
            self.add_path(path)

    def add_path(self, path):
        """
        :param path: a jmespath
        """
        self._paths[path] = jmespath.compile(path)

    def update(self, item):
        """
        Apply the paths to an item, collecting the values.

        :param item: an item to process
        """
        for path, jmes_obj in self._paths.items():
            res = jmes_obj.search(item)
            if res is not None:
                result_set = self._sets.get(path)
                if not result_set:
                    result_set = set()
                    self._sets[path] = result_set
                result_set.add(res)

    @property
    def collected(self):
        return self._sets


class GroupCollector(Collector):
    """
    Collect one item per group.
    """

    def __init__(self, group_f):
        """
        :param group_f: function which returns some key representing the group
        """
        self._group_f = group_f
        self._groups = {}

    def update(self, item):
        k = self._group_f(item)
        if k not in self._groups:
            self._groups[k] = item

    @property
    def collected(self):
        return self._groups
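# Usage sketch with toy records; the 'user.name' path is made up for the demo.
records = [{"user": {"name": "ada"}}, {"user": {"name": "lin"}}, {"user": {}}]
collector = SetCollector(["user.name"])
collector.update_over_all(records)
print(collector.collected)  # {'user.name': {'ada', 'lin'}} (set order may vary)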
2,089
alipay/aop/api/domain/SmartAutomatScene.py
articuly/alipay-sdk-python-all
0
2022782
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json

from alipay.aop.api.constant.ParamConstants import *


class SmartAutomatScene(object):

    def __init__(self):
        self._level_1 = None
        self._level_2 = None

    @property
    def level_1(self):
        return self._level_1

    @level_1.setter
    def level_1(self, value):
        self._level_1 = value

    @property
    def level_2(self):
        return self._level_2

    @level_2.setter
    def level_2(self, value):
        self._level_2 = value

    def to_alipay_dict(self):
        params = dict()
        if self.level_1:
            if hasattr(self.level_1, 'to_alipay_dict'):
                params['level_1'] = self.level_1.to_alipay_dict()
            else:
                params['level_1'] = self.level_1
        if self.level_2:
            if hasattr(self.level_2, 'to_alipay_dict'):
                params['level_2'] = self.level_2.to_alipay_dict()
            else:
                params['level_2'] = self.level_2
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = SmartAutomatScene()
        if 'level_1' in d:
            o.level_1 = d['level_1']
        if 'level_2' in d:
            o.level_2 = d['level_2']
        return o
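# Round-trip sketch; the level values are made up for illustration.
scene = SmartAutomatScene.from_alipay_dict({'level_1': 'mall', 'level_2': 'food'})
assert scene.to_alipay_dict() == {'level_1': 'mall', 'level_2': 'food'}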
1,314
ModernWarfare/XAssets/itemSources.py
Mario-Kart-Felix/Hyde
14
2023295
import logging
from typing import Any, Dict, List, TypedDict

from utility import Utility

log: logging.Logger = logging.getLogger(__name__)


class ItemSourceTable(TypedDict):
    """Structure of mp/itemsourcetable.csv"""

    marketPlaceID: int
    refType: str
    refName: str
    gameSourceID: str
    equippableIW8MP: int  # bool
    equippableWZ: int  # bool
    equippableT9: int  # bool
    equippableS4: int  # bool
    lookupType: str


class ItemSources:
    """Item Source XAssets."""

    def Compile(self: Any) -> None:
        """Compile the Item Source XAssets."""

        sources: List[Dict[str, Any]] = []

        sources = ItemSources.Table(self, sources)

        Utility.WriteFile(self, f"{self.eXAssets}/itemSources.json", sources)

        log.info(f"Compiled {len(sources):,} Item Sources")

    def Table(self: Any, sources: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Compile the mp/itemsourcetable.csv XAsset."""

        table: List[Dict[str, Any]] = Utility.ReadCSV(
            self, f"{self.iXAssets}/mp/itemsourcetable.csv", ItemSourceTable
        )

        if table is None:
            return sources

        for entry in table:
            sources.append(
                {
                    "id": entry.get("marketPlaceID"),
                    "altId": entry.get("refName"),
                    "type": self.ModernWarfare.GetLootType(entry.get("marketPlaceID")),
                    "source": entry.get("gameSourceID"),
                    "iw8mp": bool(entry.get("equippableIW8MP")),
                    "wz": bool(entry.get("equippableWZ")),
                    "t9": bool(entry.get("equippableT9")),
                    "s4": bool(entry.get("equippableS4")),
                }
            )

        return sources
1,777
v2/goalnet/connectors/telegram/main.py
DaniloZZZ/GoalNet
0
2023010
import zmq
import json
import tgflow as tgf
from tgflow.api.cli import cliAPI
from enum import Enum

from network import ConnectorNetwork
from utils__ import get_network_config
from database import DB


def _print(*args):
    print(">telegram bot>", *args)


class States(Enum):
    action = 1
    settings = 2
    login = 3
    start = 4


def bot(netconf):
    net = ConnectorNetwork(netconf,
                           appid='0',
                           name='telegram')
    db = DB()

    def handle_notif(notif):
        str_notif = json.dumps(notif)
        try:
            user_id = str(notif['user_id'])
            tgid = db.get_tg_id(user_id)
        except Exception as e:
            _print("Notif was not sent", e)
            return "FAIL"
        _print("got notif:", str_notif)
        message = "Got new notif of type %s. Content: %s" % (
            notif.get('type'), notif.get('content')
        )
        if not tgid:
            print("User id %s has no telegram log" % user_id)
            return "FAIL"
        try:
            tgf.send_raw(message, tgid)
        except Exception as e:
            _print("Notif was not sent", e)
            return "FAIL"
        return 'OK'

    net.listen_for_notif(handle_notif)

    def login_uid_1(i):
        telegram_id = i.message.chat.id
        user_id = '1'
        db.save_tg_id(user_id, telegram_id)
        return States.action, {'user_id': user_id}

    def handle_action(i, user_id=None):
        _print('inp', i)
        if not user_id:
            _print('user not logged in')
            return States.login
        text = i.text
        msg_type = 'telegram'
        try:
            msg_type, content = text.split('\\')
        except ValueError:
            content = text
        message = {
            'type': msg_type,
            'content': content,
            'user_id': user_id,
        }
        net.send(message)
        # stay silent
        return -1

    UI = {
        States.action: {
            't': 'Enter an action type and content to send',
            'b': [
                {"Settings": tgf.action(States.settings)}
            ],
            'react': tgf.action(handle_action, react_to='text')
        },
        States.settings: {
            't': 'Settings',
            'b': [
                {"Action": tgf.action(States.action)}
            ],
        },
        States.start: {
            't': 'Welcome!',
            'b': [
                {"Log in": tgf.action(States.login)},
            ]
        },
        States.login: {
            't': 'Please log in',
            'b': [
                {"Log in as 1": tgf.action(login_uid_1)}
            ],
        }
    }

    key = '<KEY>'
    tgf.configure(token=key,
                  state=States.start,
                  #apiModel=cliAPI,
                  verbose=True,
                  )
    tgf.start(UI)


def main():
    netconf = get_network_config()
    print("Starting bot")
    bot(netconf)


if __name__ == "__main__":
    main()
3,119
zamna/playlists/migrations/0007_rating_vote.py
nistalhelmuth/zamna_back_end
0
2023508
# Generated by Django 2.1.7 on 2019-05-13 00:55

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('playlists', '0006_auto_20190512_1702'),
    ]

    operations = [
        migrations.AddField(
            model_name='rating',
            name='vote',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
419
strongbox_fixity.py
rejveno/IFIscripts
0
2023192
#!/usr/bin/env python
'''
Analyses the CSV file reports from Strongbox.
Accepts an identifier input, at least the package ID, but the UUID would also
be useful. The script then finds the relevant entries, harvests the checksums
and stores them as a regular manifest.
It would make sense to also accept an existing sha512 manifest as an argparse
option so that the script can tell if they are identical.
'''
import os
import sys
import argparse
import ififuncs


def parse_args(args_):
    '''
    Parse command line arguments.
    '''
    parser = argparse.ArgumentParser(
        description='Analyses the CSV file reports from Strongbox.'
        ' Prints the output to the terminal if the -manifest option is not used.'
        ' If the -manifest option is used, just the differences, if any, will appear on screen.'
        ' Written by <NAME>.'
    )
    parser.add_argument(
        'input', help='Input directory'
    )
    parser.add_argument(
        '-id',
        help='Enter the identifier that you would like to search for. UUID/Accession/OE.'
    )
    parser.add_argument(
        '-manifest',
        help='Enter the sha512 manifest that you would like to compare against.'
    )
    parsed_args = parser.parse_args(args_)
    return parsed_args


def diff_manifests(args, strongbox_list):
    '''
    Compare the list of strongbox hashes to the original AIP manifest.
    '''
    print '\nStrongbox_fixity - IFIscripts'
    print '\nDiffing the manifests..'
    with open(args.manifest, 'r') as original_manifest:
        aip_manifest = original_manifest.read().splitlines()
    # A list of items in strongbox that differ from the AIP sha512 manifest
    strongbox_check = [item for item in strongbox_list if item not in aip_manifest]
    # A list of items in the AIP manifest that differ from the strongbox manifest
    aip_check = [item for item in aip_manifest if item not in strongbox_list]
    if len(strongbox_check) == 0:
        print 'All files in the strongbox manifest are present in your AIP manifest and the hashes validate'
    else:
        for i in strongbox_check:
            print '%s is different from the strongbox_csv to the AIP manifest' % i
    if len(aip_check) == 0:
        print 'All files in the AIP manifest are present in your strongbox manifest and the hashes validate'
    else:
        # note: the original looped over strongbox_check here, which reported
        # the wrong items; aip_check is the list being tested
        for i in aip_check:
            print '%s is different from the AIP manifest to the Strongbox manifest' % i


def find_checksums(csv_file, identifier):
    '''
    Finds the relevant entries in the CSV and prints to terminal
    '''
    csv_dict = ififuncs.extract_metadata(csv_file)
    manifest_lines = []
    for items in csv_dict:
        for x in items:
            if type(x) is dict:
                if identifier in x['path']:
                    identifier_string = "/%s/" % identifier
                    manifest_line = x['hash_code'] + ' ' + x['path'].replace(identifier_string, '')
                    manifest_lines.append(manifest_line)
    strongbox_list = sorted(manifest_lines, key=lambda x: (x[130:]))
    return strongbox_list


def main(args_):
    args = parse_args(args_)
    source = args.input
    identifier = args.id
    strongbox_list = find_checksums(source, identifier)
    if args.manifest:
        diff_manifests(args, strongbox_list)
    else:
        for i in strongbox_list:
            print i


if __name__ == '__main__':
    main(sys.argv[1:])
3,423
api/network_map.py
michahagg/domoticz-zigbee2mqtt-plugin
146
2023176
from api.command import APICommand


class NetworkMap(APICommand):
    def execute(self, params):
        self.publish_mqtt('bridge/networkmap/routes', 'graphviz')

    def handle_mqtt_message(self, topic, message):
        if topic == 'bridge/networkmap/graphviz':
            self.send_response(message)
306
02-ui/pyqt5/widget_examples.py
cccaaannn/useful_functions
0
2023891
# pip install pyqt5
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, qApp
from PyQt5.QtWidgets import QCheckBox, QLabel, QLineEdit, QPushButton, QRadioButton, QButtonGroup, QTextEdit, QFileDialog, QAction, QDesktopWidget
from PyQt5 import QtGui, QtCore, QtWidgets

import sys
import os


class app(QMainWindow):
    def __init__(self):
        super().__init__()
        self.init_variables()
        self.init_ui()

    def init_ui(self):
        """inits ui"""
        self.setWindowTitle("example window")
        # self.setGeometry(100,100,700,700)
        # self.move(100, 100)
        self.center()
        self.setFixedSize(700, 700)

        self.labels()
        self.buttons()
        self.line_edits()
        self.checkboxes()
        self.radiobuttons()
        self.text_edits()
        self.menu_bar()

        self.show()

    def init_variables(self):
        """inits class variables"""
        self.button_counter = 0
        self.img1_path = "07-pyqt5/images/img.png"
        self.file_dialog_path = ""

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def labels(self):
        """adds labels"""
        self.l1 = QLabel(self)
        self.l1.setText("label 1")
        # self.l1.move(120, 230)
        self.l1.setGeometry(120, 230, 100, 10)

        # add image
        self.l2 = QLabel(self)
        self.l2.setPixmap(QtGui.QPixmap(self.img1_path))
        self.l2.setGeometry(0, 0, 700, 200)

    def buttons(self):
        """adds buttons"""
        self.b1 = QPushButton(self)
        self.b1.setText("counter")
        self.b1.move(10, 220)
        self.b1.clicked.connect(self.on_click)

        self.b2 = QPushButton(self)
        self.b2.setText("clear")
        self.b2.move(10, 270)
        self.b2.clicked.connect(self.on_click)

        self.b3 = QPushButton(self)
        self.b3.setText("open file dialog")
        self.b3.move(10, 420)
        self.b3.clicked.connect(self.on_click)

    def line_edits(self):
        """adds line edits"""
        self.line1 = QLineEdit(self)
        self.line1.setText("")
        self.line1.move(120, 270)

    def checkboxes(self):
        """adds checkboxes"""
        self.checkbox1 = QCheckBox(self)
        self.checkbox1.setText("checkbox example")
        self.checkbox1.move(10, 320)
        self.checkbox1.setObjectName("checkbox1")
        # self.checkbox1.setDisabled(True)
        self.checkbox1.clicked.connect(self.on_click)

    def radiobuttons(self):
        """adds radiobuttons"""
        self.radiobutton1 = QRadioButton(self)
        self.radiobutton1.setText("radiobutton 1")
        self.radiobutton1.move(10, 370)
        self.radiobutton1.setObjectName("radiobutton1")
        self.radiobutton1.clicked.connect(self.on_click)

        self.radiobutton2 = QRadioButton(self)
        self.radiobutton2.setText("radiobutton 2")
        self.radiobutton2.move(120, 370)
        self.radiobutton2.setObjectName("radiobutton2")
        self.radiobutton2.clicked.connect(self.on_click)

        self.radiobutton3 = QRadioButton(self)
        self.radiobutton3.setText("radiobutton 3")
        self.radiobutton3.move(230, 370)
        self.radiobutton3.setObjectName("radiobutton3")
        self.radiobutton3.clicked.connect(self.on_click)

        # button groups
        self.button_group1 = QButtonGroup()
        self.button_group1.addButton(self.radiobutton1)
        self.button_group1.addButton(self.radiobutton2)
        self.button_group1.setObjectName("button_group1")
        # this can listen to all buttons in the group, but the group has no
        # .text() attribute, so you can't use a single listener function with
        # it; use objectName() instead
        # self.button_group1.buttonClicked.connect(self.on_click)

        self.button_group2 = QButtonGroup()
        self.button_group2.addButton(self.radiobutton3)
        self.button_group2.setObjectName("button_group2")

    def text_edits(self):
        """adds text edits"""
        self.te1 = QTextEdit(self)
        self.te1.setGeometry(350, 220, 300, 200)
        self.te1.setText("this is a biiiiiig text field")

    def menu_bar(self):
        """adds menu bar and actions under it"""
        # menu items
        self.bar = self.menuBar()
        self.file_menu = self.bar.addMenu("file")
        self.file_menu.triggered.connect(self.on_menu_click)
        self.edit_menu = self.bar.addMenu("edit")
        self.sub_menu = self.edit_menu.addMenu("sub menu")

        # actions
        self.open_file_function = QAction("open file", self)
        self.open_file_function.setShortcut("Ctrl+O")
        self.open_file_function.setObjectName("open_file_function")
        self.open_file_function.triggered.connect(self.on_click)
        self.file_menu.addAction(self.open_file_function)

        self.test_trigger = QAction("test trigger", self)
        self.test_trigger.setObjectName("test_trigger")
        self.file_menu.addAction(self.test_trigger)

        self.exit_function = QAction("exit", self)
        self.exit_function.setShortcut("Ctrl+Q")
        self.exit_function.setObjectName("exit")
        self.exit_function.triggered.connect(self.on_click)
        self.sub_menu.addAction(self.exit_function)

    def open_file(self):
        """opens file dialog"""
        file_name = QFileDialog.getOpenFileName(self, "file dialog example", self.file_dialog_path)
        print(file_name)
        if(os.path.exists(file_name[0])):
            return file_name[0]
        else:
            return None

    def on_menu_click(self, action):
        """triggers on menu clicks"""
        if(action.text() == "open file"):
            print("open file used")
        if(action.text() == "test trigger"):
            print("test trigger used")

    def on_click(self):
        """button click function for listeners"""
        sender = self.sender()

        # buttons
        if(sender.text() == "counter"):
            self.button_counter += 1
            self.l1.setText("counter is:{}".format(self.button_counter))
        elif(sender.text() == "clear"):
            self.line1.setText("")
        elif(sender.text() == "open file dialog"):
            self.open_file()

        # checkboxes
        elif(sender.objectName() == "checkbox1"):
            if(self.checkbox1.isChecked()):
                self.checkbox1.setText("checked")
            else:
                self.checkbox1.setText("not checked")

        # radiobuttons
        elif(sender.objectName() == "radiobutton1"):
            self.radiobutton1.setText("hi there")
        elif(sender.objectName() == "radiobutton2"):
            self.radiobutton2.setText("hi there")
        elif(sender.objectName() == "radiobutton3"):
            self.radiobutton3.setText("hi there")

        # menu items
        elif(sender.objectName() == "exit"):
            sys.exit()
        elif(sender.objectName() == "open_file_function"):
            self.open_file()

        print(sender.objectName())


application = QApplication(sys.argv)
a = app()
sys.exit(application.exec_())
7,238
crsbi/urls.py
kingsdigitallab/crsbi-django
1
2023681
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.static import serve
from kdl_ldap.signal_handlers import \
    register_signal_handlers as kdl_ldap_register_signal_hadlers
from mezzanine.pages.views import page

from sculpture.views.display import get_pdf

kdl_ldap_register_signal_hadlers()

admin.autodiscover()

urlpatterns = [
    url(r'^', include('sculpture.urls')),
    url(r'^accounts/', include('registration.backends.default.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^pdf/', get_pdf, name='pdf_view'),
    url(r'^search/', include('haystack.urls')),
    url(r'^media(?P<path>.*)$', serve,
        {'document_root': settings.MEDIA_ROOT}),
    url(r'^static(?P<path>.*)$', serve,
        {'document_root': settings.STATIC_ROOT}),

    # HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
    # ---------------------------------------------
    # This pattern gives us a normal ``Page`` object, so that your
    # homepage can be managed via the page tree in the admin. If you
    # use this pattern, you'll need to create a page in the page tree,
    # and specify its URL (in the Meta Data section) as "/", which
    # is the value used below in the ``{"slug": "/"}`` part. Make
    # sure to uncheck "show in navigation" when you create the page,
    # since the link to the homepage is always hard-coded into all the
    # page menus that display navigation on the site. Also note that
    # the normal rule of adding a custom template per page with the
    # template name using the page's slug doesn't apply here, since
    # we can't have a template called "/.html" - so for this case, the
    # template "pages/index.html" can be used.
    url("^$", page, {"slug": "/"}, name="home"),

    # MEZZANINE'S URLS
    # ----------------
    # ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
    # ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
    # FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
    # WILL NEVER BE MATCHED!
    # If you'd like more granular control over the patterns in
    # ``mezzanine.urls``, go right ahead and take the parts you want
    # from it, and use them directly below instead of using
    # ``mezzanine.urls``.
    url("^", include("mezzanine.urls")),
]

# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler500 = "mezzanine.core.views.server_error"

# -----------------------------------------------------------------------------
# Django Debug Toolbar URLS
# -----------------------------------------------------------------------------
try:
    if settings.DEBUG:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
except ImportError:
    pass

# -----------------------------------------------------------------------------
# Static file DEBUGGING
# -----------------------------------------------------------------------------
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    import os.path

    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL + 'images/',
                          document_root=os.path.join(settings.MEDIA_ROOT,
                                                     'images'))
3,442
roomai/RoomAILogger.py
yooyoo2004/RoomAI
0
2022809
#!/bin/python
import logging;
import sys;

project_name = "roomai";
logger = logging.getLogger(project_name);
handler = logging.StreamHandler(sys.stderr);
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s");
logger.setLevel(logging.INFO);
handler.setLevel(logging.INFO);
handler.setFormatter(formatter);
logger.addHandler(handler);


def set_level(level):
    logger.setLevel(level)
    handler.setLevel(level)


def get_logger():
    return logger


def init_logger(opts):
    global logger;
    global handler;
    global project_name;

    print opts;
    if "project_name" in opts:
        project_name = opts["project_name"];
    print "in Logger", project_name;
    logger.removeHandler(handler);
    logger = logging.getLogger(project_name);

    # set handler
    if "logfile" in opts:
        handler = logging.FileHandler(opts["logfile"]);
    else:
        handler = logging.StreamHandler(sys.stderr);

    # set formatter
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s");
    handler.setFormatter(formatter);

    ## set level (the original called a non-existent str.lowcase() and the
    ## non-existent logging.critical constant; str.lower() and
    ## logging.CRITICAL are the correct names)
    logger.setLevel(logging.INFO);
    if "notset" == opts.get("level", "").lower():
        logger.setLevel(logging.NOTSET)
    elif "debug" == opts.get("level", "").lower():
        logger.setLevel(logging.DEBUG)
    elif "info" == opts.get("level", "").lower():
        logger.setLevel(logging.INFO)
    elif "warning" == opts.get("level", "").lower():
        logger.setLevel(logging.WARNING)
    elif "error" == opts.get("level", "").lower():
        logger.setLevel(logging.ERROR)
    elif "critical" == opts.get("level", "").lower():
        logger.setLevel(logging.CRITICAL)

    logger.addHandler(handler);
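# Usage sketch (same module; the log file name is made up): route roomai logs
# to a file at DEBUG level, then fetch the reconfigured logger.
init_logger({"project_name": "roomai", "logfile": "mygame.log", "level": "debug"})
get_logger().info("logger configured")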
1,777
NER/loss/adaptive_dice_loss.py
xueshang-liulp/diaKG-code
10
2023397
# encoding: utf-8

import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional


class AdaptiveDiceLoss(nn.Module):
    """
    Dice coefficient for short, is an F1-oriented statistic used to gauge the similarity of two sets.
    Math Function:
        https://arxiv.org/abs/1911.02855.pdf
        adaptive_dice_loss(p, y) = 1 - numerator / denominator
        numerator = 2 * \sum_{i=1}^{t} (1 - p_i)**alpha * p_i * y_i + smooth
        denominator = \sum_{i=1}^{t} (1 - p_i)**alpha * p_i + \sum_{i=1}^{t} y_i + smooth
    Args:
        alpha: alpha in math function
        smooth (float, optional): smooth in math function
        square_denominator (bool, optional): [True, False], specifies whether to
            square the denominator in the loss function.
        with_logits (bool, optional): [True, False], specifies whether the input tensor is normalized by Sigmoid/Softmax funcs.
            True: the loss combines a `sigmoid` layer and the `BCELoss` in one single class.
            False: the loss contains `BCELoss`.
    Shape:
        - input: (*)
        - target: (*)
        - mask: (*) 0,1 mask for the input sequence.
        - Output: Scalar loss
    Examples:
        >>> loss = AdaptiveDiceLoss()
        >>> input = torch.randn(3, 1, requires_grad=True)
        >>> target = torch.empty(3, dtype=torch.long).random_(5)
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self,
                 alpha: float = 0.1,
                 smooth: Optional[float] = 1e-8,
                 square_denominator: Optional[bool] = False,
                 with_logits: Optional[bool] = True,
                 reduction: Optional[str] = "mean") -> None:
        super(AdaptiveDiceLoss, self).__init__()
        self.reduction = reduction
        self.with_logits = with_logits
        self.alpha = alpha
        self.smooth = smooth
        self.square_denominator = square_denominator

    def forward(self, input: Tensor, target: Tensor, mask: Optional[Tensor] = None) -> Tensor:
        flat_input = input.view(-1)
        flat_target = target.view(-1)

        if self.with_logits:
            flat_input = torch.sigmoid(flat_input)

        if mask is not None:
            mask = mask.view(-1).float()
            flat_input = flat_input * mask
            flat_target = flat_target * mask

        intersection = torch.sum((1 - flat_input) ** self.alpha * flat_input * flat_target, -1) + self.smooth
        denominator = torch.sum((1 - flat_input) ** self.alpha * flat_input) + flat_target.sum() + self.smooth
        return 1 - 2 * intersection / denominator

    def __str__(self):
        return f"Adaptive Dice Loss, smooth:{self.smooth}; alpha:{self.alpha}"
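# The loss implemented above, written out (following the docstring and
# arXiv:1911.02855; \varepsilon is `smooth`):
#
#   ADL(p, y) = 1 - (2 \sum_i (1 - p_i)^\alpha p_i y_i + \varepsilon)
#                   / (\sum_i (1 - p_i)^\alpha p_i + \sum_i y_i + \varepsilon)
#
# The (1 - p_i)^\alpha factor down-weights easy, already-confident
# predictions, analogous to the modulating factor in focal loss.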
2,793
core/fileinspector.py
domenico-suriano/SentinAir
2
2023969
#!/usr/bin/python

# Copyright 2020 Dr. <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cgi

# path where files containing data are placed
DATAPATH = "/var/www/html/data/"
## path where files containing data plots are placed
IMGPATH = "/img/"


## function to get plots images to insert in the web page
def get_plots(filename):
    csvfile = open(filename, 'r')
    hd1 = csvfile.readline()
    csvfile.close()
    hd = hd1.rstrip("\r\n")
    header = hd.split(";")
    return header


## function to build the web page in the correct format
def print_page_meas(filename, head, mcn):
    fn = filename.rstrip("txt")
    print ("Content-type: text/html\n")
    print ('<html><head>')
    print ('<title>' + "Measure page in file " + fn + " on " + mcn + '</title>')
    print ('<style type=\"text/css\"> body { background-image: url(\"/sentinair.jpg\");background-size: cover;}</style>')
    print ('</head><body>')
    print ('<p><h2><font face = \"arial\"> Here below are plots from<br>' + fn.rstrip(".") + '<br>on<br>' + mcn + '</font></h2></p>')
    print ('<table>')
    hnum = 0
    for h in head:
        if hnum == 0:
            hnum = hnum + 1
        else:
            print ('<tr><td>')
            h = h.replace('%', '')
            h = h.replace('/', '')
            print ('<img alt=\"Data plot unavailable\" src=\"' + IMGPATH + fn + h + '.png\">')
            print ('</td></tr>')
            hnum = hnum + 1
    print ('</table>')
    print ('</body></html>')


##### MAIN #########
fs = cgi.FieldStorage()
fn = DATAPATH + str(fs["fn"].value)
mn = str(fs["mn"].value)
hd = get_plots(fn)
print_page_meas(str(fs["fn"].value), hd, mn)
2,159
codenames/models.py
Schluggi/codenames
3
2023366
from . import db


class Game(db.Model):
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(255), unique=True, nullable=False)
    mode = db.Column(db.String(255), nullable=False)
    images = db.Column(db.Text, nullable=False)
    cards = db.Column(db.Text, nullable=False)
    score_red = db.Column(db.Integer)
    score_blue = db.Column(db.Integer)
    members_red = db.Column(db.Text, nullable=False, default='[]')
    members_blue = db.Column(db.Text, nullable=False, default='[]')
    start_score_red = db.Column(db.Integer)
    start_score_blue = db.Column(db.Integer)
    fields = db.relationship('Field', backref='game', lazy='dynamic')


class Field(db.Model):
    __tablename__ = 'fields'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'), nullable=False, primary_key=True)
    hidden = db.Column(db.Boolean, nullable=False, default=True)
    type = db.Column(db.String(8), nullable=False)
1,055
emails/admin.py
vasudeveloper001/mvc_python_django
0
2022703
from django.contrib import admin

# Register your models here.
from emails.models import EmailEntry

admin.site.register(EmailEntry)
133
paginas/migrations/0011_players_lobby_slug.py
igor-pontes/Dolex
0
2023684
# Generated by Django 2.1.5 on 2019-02-06 17:36

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('paginas', '0010_players_lobby_slot'),
    ]

    operations = [
        migrations.AddField(
            model_name='players_lobby',
            name='slug',
            field=models.CharField(default=None, max_length=110),
        ),
    ]
404
script_python/csvAll.py
goldleaf3i/generativeCMLgraphs
0
2023804
#!/usr/bin/python
# OPENS THE FOLDER WHERE THE SCRIPT LIVES OR, ALTERNATIVELY, argv[1].
# PARSES ALL <NAME>
# TAKES ALL THE TEXT FILES, WHICH IT TREATS AS ADJACENCY MATRICES OF A GRAPH
# INSERTS THE MATRICES FOUND INTO AN IGRAPH GRAPH
### COPIED FROM THE DROPBOX DEVELOPMENT FOLDER, TO BE REINTEGRATED INTO THE ORIGINAL PROJECT LATER - FINISHED 28/9/14
# IN PARTICULAR, ADD TO THE LIBRARY THE METRICS FOR PRINTING THE VARIOUS GRAPH CHARACTERISTICS
# TODO MOVE THE SUPPORT FUNCTIONS INTO UTILS

from sys import argv
import re
import sys
import math
from loadGraph import *
import numpy as Math
import os
import glob
from multiprocessing import Process

mylabelschema = 'office.xml'

#for i in matrix:
#    M.append([int(j) for j in i.split(',')[:-1] + [i.split(',')[-1].split('\\')[0]]])


def parseEverything(direct):
    global mylabelschema
    for filename in glob.glob(direct + "/*.xml"):
        #try :
        print("opening file ", filename)
        loadXML(filename, mylabelschema)
        #except Exception as e:
        #    print(str(e))
        #    print("cannot process ", filename)
        #    exit()
    p = []
    i = 0
    for directories in glob.glob(direct + "/*/"):
        #p.append(Process(target=parseEverything, args=(directories,)))
        parseEverything(directories)
        #p[i].start()
        i += 1
        print("opening folder ", directories)
    #for j in range(i-1):
    #    p[j].join()
    return True


def plotAdiacency(filename):
    myfile = open(filename)
    # initialize the data structure
    matrix = []
    for line in myfile:
        matrix.append([int(i) for i in line.split(',')])
    myfile.close()
    topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
    graph = topologicalmap.graph
    print(".".join(filename.split(".")[:-1]) + ".png")
    print(graph.vs["label"])
    #exit()
    vertex_shape = ['rect' if i == 'C' or i == 'H' or i == 'L' or i == 'E' or i == 'N' or i == 'Q' else 'circle' for i in graph.vs["label"]]
    #print(vertex_shape)
    #exit()
    plot(graph, ".".join(filename.split(".")[:-1]) + ".png", vertex_label_size=0, vertex_shape=vertex_shape, bbox=(700, 700), layout='kk')


# An earlier, commented-out variant of the two functions above (kept from the
# original source):
# def parseEverything(direct):
#     for filename in glob.glob(direct + "/*"):
#         print("opening file ", filename)
#         plotAdiacency(filename)
#     for directories in glob.glob(direct + "/*/"):
#         parseEverything(directories)
#         print("opening folder ", directories)
#     return True
#
# def plotAdiacency(filename):
#     myfile = open(filename)
#     # initialize the data structure
#     matrix = []
#     for line in myfile:
#         matrix.append([int(i) for i in line.split(',')])
#     myfile.close()
#     topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
#     graph = topologicalmap.graph
#     print(".".join(filename.split(".")[:-1]) + ".png")
#     vertex_shape = ['rect' if i == 'C' or i == 'H' or i == 'E' else 'circle' for i in graph.vs["label"]]
#     plot(graph, ".".join(filename.split(".")[:-1]) + ".png", vertex_label_size=0, vertex_shape=vertex_shape)


def evaluateGraphs(direct, myformat=None):
    # computes all the igraph metrics and then prints them
    graphStats = dict()
    metrics = ['nodes', 'R', 'C', 'path_len', 'diameter', 'density', 'articulation_points', 'betweenness',
               'mu_betweenness', 'scaled_betweenness', 'mu_scaled_betweenness', 'Rbetweenness', 'mu_Rbetweenness',
               'Cbetweenness', 'mu_Cbetweenness', 'closeness', 'mu_closeness', 'Rcloseness', 'mu_Rcloseness',
               'Ccloseness', 'mu_Ccloseness', 'eig', 'mu_eig', 'Reig',
               'mu_Reig', 'Ceig', 'mu_Ceig']
    for filename in glob.glob(direct + "/*.txt"):
        #try :
        print("opening file ", filename)
        graphStats[filename] = analyzeGraph(filename, myformat)
        #except Exception as e:
        #    print(str(e))
        #    print("cannot process ", filename)
        #    exit()
    data = aggrateMetrics(graphStats, metrics)
    if data:
        text_file = open(direct + "/aggregate_graph_data.log", "w")
        text_file.write(str(data))
        text_file.close()
    for directories in glob.glob(direct + "/*/"):
        evaluateGraphs(directories, myformat=myformat)
        print("opening folder ", directories)
    return True


def analyzeGraph(filename, myformat='adjacency'):
    # format: 'adjacency' is the 0/1 matrix, values separated by "," and rows
    # terminated by ";" (DEFAULT)
    # the 'matlab' format is instead the one matlab uses to write matrices
    myfile = open(filename)
    # initialize the data structure
    matrix = []
    for line in myfile:
        print(line)
        if myformat == 'matlab':
            line = line.replace('[', '')
            line = line.replace(']', '')
            line = line.replace(';', '')
            print(line)
        matrix.append([int(i) for i in line.split(',')])
    myfile.close()
    topologicalmap = importFromMatlabJava2012FormatToIgraph(matrix)
    g = topologicalmap.graph
    Cs = g.vs.select(RC_label='C')
    Rs = g.vs.select(RC_label='R')
    indexC = [i.index for i in Cs]
    indexR = [i.index for i in Rs]
    data = dict()
    # number of nodes
    data['nodes'] = len(g.vs())
    # number of R
    data['R'] = len(indexR)
    # number of C
    data['C'] = len(indexC)
    # average path length
    data['path_len'] = g.average_path_length()
    # diameter
    data['diameter'] = g.diameter()
    # average degree (density)
    data['density'] = g.density()
    # articulation points: how many there are
    data['articulation_points'] = len(g.articulation_points())
    # betweenness
    betweenness = g.betweenness()
    data['betweenness'] = betweenness
    # mean betweenness
    data['mu_betweenness'] = avg(betweenness)
    # scaled betweenness
    scaled_b = [float(i) / (float(len(betweenness) - 1)) / (float(len(betweenness)) - 2) for i in betweenness]
    data['scaled_betweenness'] = scaled_b
    # mean scaled betweenness
    data['mu_scaled_betweenness'] = avg(scaled_b)
    # scaled betweenness, R only
    data['Rbetweenness'] = selectLabelArray(scaled_b, indexR)
    # average scaled betweenness, R only
    print(data['Rbetweenness'])
    data['mu_Rbetweenness'] = avg(data['Rbetweenness'])
    # scaled betweenness, C only
    data['Cbetweenness'] = selectLabelArray(scaled_b, indexC)
    # average scaled betweenness, C only
    data['mu_Cbetweenness'] = avg(data['Cbetweenness'])
    # closeness
    closeness = g.closeness()
    data['closeness'] = closeness
    # average closeness
    data['mu_closeness'] = avg(closeness)
    # closeness, R only
    data['Rcloseness'] = selectLabelArray(closeness, indexR)
    # avg closeness, R only
    data['mu_Rcloseness'] = avg(data['Rcloseness'])
    # closeness, C only
    data['Ccloseness'] = selectLabelArray(closeness, indexC)
    # avg closeness, C only
    data['mu_Ccloseness'] = avg(data['Ccloseness'])
    # eigenvector centrality
    eigenvec = g.eigenvector_centrality()
    data['eig'] = eigenvec
    # mean eig
    data['mu_eig'] = avg(eigenvec)
    # eigenvector centrality, R only
    data['Reig'] = selectLabelArray(eigenvec, indexR)
    # mean eigenvector centrality, R only
    data['mu_Reig'] = avg(data['Reig'])
    # eigenvector centrality, C only
    data['Ceig'] = selectLabelArray(eigenvec, indexC)
    # mean eigenvector centrality, C only
    data['mu_Ceig'] = avg(data['Ceig'])
    #print(".".join(filename.split(".")[:-1]) + ".png")
    #plot(graph, ".".join(filename.split(".")[:-1]) + ".png")
    stringa = str()
    for i in data.keys():
        stringa += str(i) + ":\n"
        stringa += str(data[i]) + "\n"
    text_file = open(".".join(filename.split(".")[:-1]) + "_aggregate_data.log", "w")
    text_file.write(str(stringa))
    text_file.close()
    return data


def selectLabelArray(array, indexes):
    # returns the elements of array whose index is contained in indexes
    tmp = []
    for i in indexes:
        tmp.append(array[i])
    return tmp


def averageLabel(array, indexes):
    # returns the mean of the elements of array whose index is contained in indexes
    tmp = []
    for i in indexes:
        tmp.append(array[i])
    return sum(tmp) / float(len(indexes))


def avg(array):
    return sum(array) / float(len(array))


def aggrateMetrics(dictionary, list_of_metrics):
    # for now, no aggregate statistics are computed over arrays
    # takes an array of arrays and recomputes everything
    mydict = dict()
    # initialize the variables
    for i in list_of_metrics:
        mydict[i] = variable(i)
    # for every graph, walk the dictionary and feed the variables
    for i in dictionary.keys():
        for j in dictionary[i].keys():
            if type(dictionary[i][j]) is list:
                # for now, no aggregate statistics over arrays.
                pass
            else:
                mydict[j].add(dictionary[i][j])
    ret_str = str()
    for i in list_of_metrics:
        if mydict[i].n > 0:
            ret_str += mydict[i].printVar()
    return ret_str


# recursively opens all the TXT files it finds. Uses the current folder unless
# an alternative start folder is specified
current = os.getcwd()
#try:
#    current = argv[1]
#except:
#    print("you did not specify the current folder")

#print("start parsing folder ", current)
#parseEverything(current)
#print("done!")

count = 0
btypename = 'zoffice.xml'
#btypename = 'zoffice.xml'
for filename in glob.glob(current + "/*.xml"):
    count += 1
    print(filename)
    # LOADXML loads the TOPOLOGICAL files. LOADXML2 loads standard XML files.
    if not btypename in filename:
        matrix = loadXML2(filename, btypename)
        Math.savetxt("graph_" + str(count) + ".csv", matrix, fmt='%s', delimiter=",")
print("done")
9,925
tests/utils/test_time.py
SatelCreative/toolip
0
2023696
from datetime import datetime, timezone

import pytz

from toolip.utils.time import make_time_aware, now, now_epoch, now_epoch_ms

def test_now():
    assert now().tzinfo == timezone.utc

def test_now_epoch():
    now = datetime.now(timezone.utc).timestamp()
    assert now_epoch() == int(now)

def test_now_epoch_ms():
    now = datetime.now(timezone.utc).timestamp() * 1000
    assert now_epoch_ms() == int(now)

def test_make_time_aware():
    dtime = datetime.now()
    assert dtime.tzinfo != pytz.utc
    assert make_time_aware(dtime).tzinfo == pytz.utc
563
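The four tests above pin down the behaviour of toolip's time helpers without showing them. A minimal sketch of what toolip.utils.time could look like, inferred purely from the assertions (hypothetical, not the actual toolip source):

# Hypothetical reimplementation inferred from the tests above; not the real toolip code.
from datetime import datetime, timezone

import pytz

def now() -> datetime:
    # UTC-aware current time
    return datetime.now(timezone.utc)

def now_epoch() -> int:
    # whole seconds since the Unix epoch
    return int(now().timestamp())

def now_epoch_ms() -> int:
    # whole milliseconds since the Unix epoch
    return int(now().timestamp() * 1000)

def make_time_aware(dtime: datetime) -> datetime:
    # attach the UTC timezone to a naive datetime
    return pytz.utc.localize(dtime)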
app/robot/types.py
mogenson/tubers
1
2022621
from dataclasses import dataclass
from enum import Enum
from struct import unpack

from .packet import Packet

@dataclass
class Bumper:
    left: bool
    right: bool

    @classmethod
    def from_packet(cls, packet: Packet):
        return Bumper(packet.payload[4] & 0x80 != 0, packet.payload[4] & 0x40 != 0)

@dataclass
class Color:
    WHITE = 0
    BLACK = 1
    RED = 2
    GREEN = 3
    BLUE = 4
    ORANGE = 5
    YELLOW = 6
    MAGENTA = 7
    NONE = 15
    ANY = -1

    colors: list[int]

    @classmethod
    def from_packet(cls, packet: Packet):
        return Color([c >> i & 0xF for c in packet.payload for i in range(4, -1, -4)])

@dataclass
class Light:
    DARKER = 4
    RIGHT_BRIGHTER = 5
    LEFT_BRIGHTER = 6
    LIGHTER = 7

    state: int
    left: int = 0
    right: int = 0

    @classmethod
    def from_packet(cls, packet: Packet):
        return Light(
            packet.payload[4],
            unpack(">H", packet.payload[5:7])[0],
            unpack(">H", packet.payload[7:9])[0],
        )

@dataclass
class Touch:
    front_left: bool
    front_right: bool
    back_right: bool
    back_left: bool

    @classmethod
    def from_packet(cls, packet: Packet):
        return Touch(
            packet.payload[4] & 0x80 != 0,
            packet.payload[4] & 0x40 != 0,
            packet.payload[4] & 0x20 != 0,
            packet.payload[4] & 0x10 != 0,
        )

def note(note: str, A4=440) -> float:
    """Convert a note name into frequency in hertz: eg. 'C#5'"""
    notes = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    octave = int(note[-1])
    step = notes.index(note[0:-1])
    step += ((octave - 1) * 12) + 1
    return A4 * 2 ** ((step - 46) / 12)

class Marker(Enum):
    UP = 0
    DOWN = 1
    ERASE = 2

class Animation(Enum):
    OFF = 0
    ON = 1
    BLINK = 2
    SPIN = 3

class ColorSensors(Enum):
    SENSORS_0_TO_7 = 0
    SENSORS_8_TO_15 = 1
    SENSORS_16_TO_23 = 2
    SENSORS_24_TO_31 = 3

class ColorLighting(Enum):
    OFF = 0
    RED = 1
    GREEN = 2
    BLUE = 3
    ALL = 4

class ColorFormat(Enum):
    ADC_COUNTS = 0
    MILLIVOLTS = 1

class ModulationType(Enum):
    DISABLED = 0
    VOLUME = 1
    PULSE_WIDTH = 2
    FREQUENCY = 3
2,238
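The note() helper above maps a pitch name to its equal-tempered frequency relative to A4. A quick worked check of the formula:

# Worked examples of the note() arithmetic (illustration only):
# 'A4': step = 9 + (4 - 1) * 12 + 1 = 46, so 440 * 2**((46 - 46) / 12) = 440.0 Hz
# 'C#5': step = 1 + (5 - 1) * 12 + 1 = 50, so 440 * 2**(4 / 12) ~= 554.37 Hz
assert note("A4") == 440.0
assert round(note("C#5"), 2) == 554.37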
split_data.py
smtnkc/gcn4epi
0
2023632
import os
import random
import pickle as pkl
import argparse

from sklearn.model_selection import train_test_split

from prepare_data import getTuples

def trainTestSplit(cell_line, cross_cell_line, id_dict, cross_begin_id, label_rate, seed):

    def getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed):
        """
        Returns ID portions for train, test, validation split.

        Label rate is the number of labeled nodes (x) that are used
        for training divided by the total number of nodes in dataset.

        Example: Label rate = 0.1
            10% labeled training (x)
            60% unlabeled training (ux)
            10% validation (vx)
            20% test (tx)  !!! 20% of the same or cross cell-line !!!

        allx = x + ux + vx
        """
        idx = list(id_dict.values())[0:cross_begin_id]  # do not include cross cell-line elements
        idx_allx, idx_tx = train_test_split(idx, test_size=0.2, random_state=seed)
        idx_x_vx, idx_ux = train_test_split(idx_allx, test_size=1 - (label_rate * 2 / 0.8), random_state=seed)
        idx_x, idx_vx = train_test_split(idx_x_vx, test_size=0.5, random_state=seed)

        if cross_begin_id == len(id_dict):
            # No cross cell-line specified. Use same cell-line for testing.
            print('SAME CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
                  .format(len(idx_x), len(idx_vx), len(idx_tx), cell_line, len(idx_ux)))
        else:
            # Use cross cell-line for testing. Overwrite idx_tx.
            cross_idx = list(id_dict.values())[cross_begin_id:]
            _, idx_tx = train_test_split(cross_idx, test_size=0.2, random_state=seed)
            print('CROSS CELL-LINE TESTING:\n {} labeled training \n {} validation \n {} test ({}) \n{} unlabeled training'
                  .format(len(idx_x), len(idx_vx), len(idx_tx), cross_cell_line, len(idx_ux)))

        return idx_x, idx_ux, idx_vx, idx_tx

    # TRAIN / TEST / VALIDATION SPLIT
    idx_x, idx_ux, idx_vx, idx_tx = getIdPortions(cell_line, cross_cell_line, id_dict, cross_begin_id, seed)
    print('Writing index files for train/test/validation split...')

    # NOTE: relies on the module-level `args` parsed in __main__
    if (args.cross_cell_line != None) and (args.cross_cell_line != args.cell_line):
        dump_dir = 'data/{}/'.format(cell_line + '_' + cross_cell_line)
    else:
        dump_dir = 'data/{}/'.format(cell_line)

    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    lr = '{:.2f}'.format(label_rate).split('.')[1]

    idx_x_file = open('{}/x_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_x, idx_x_file)
    idx_x_file.close()

    idx_ux_file = open('{}/ux_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_ux, idx_ux_file)
    idx_ux_file.close()

    idx_vx_file = open('{}/vx_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_vx, idx_vx_file)
    idx_vx_file.close()

    idx_tx_file = open('{}/tx_{}.index'.format(dump_dir, lr), "wb")
    pkl.dump(idx_tx, idx_tx_file)
    idx_tx_file.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='gcn4epi')
    parser.add_argument('--cell_line', default='GM12878', type=str)
    parser.add_argument('--cross_cell_line', default=None, type=str)  # set to run cross cell-line testing
    parser.add_argument('--k_mer', default=5, type=int)
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--label_rate', default=0.2, type=float)  # [0.2, 0.1, 0.05]
    parser.add_argument('--frag_len', default=200, type=int)  # set 0 to disable fragmentation and use full sequences
    args = parser.parse_args()

    random.seed(args.seed)

    # requires successful run of prepare_gcn_data.py
    _, id_dict, cross_begin_id = getTuples(args.cell_line, args.cross_cell_line, args.k_mer)
    trainTestSplit(args.cell_line, args.cross_cell_line, id_dict, cross_begin_id, args.label_rate, args.seed)
3,949
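The nested test_size expressions in getIdPortions encode the docstring's fractions. A worked check for label_rate = 0.1 (illustration only, not part of the script):

# Split arithmetic for label_rate = 0.1, expressed as fractions of all nodes.
label_rate = 0.1
allx = 0.8                               # fraction left after the 20% test split
x_vx = allx * (label_rate * 2 / 0.8)     # 0.8 * 0.25 = 0.2 of all nodes
x = vx = x_vx / 2                        # 0.1 labeled train, 0.1 validation
ux = allx - x_vx                         # 0.6 unlabeled train
assert abs(x - 0.1) < 1e-9 and abs(ux - 0.6) < 1e-9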
muk_autovacuum/__init__.py
Yousif-Mobark/odoo11_cutom
0
2023387
###################################################################################
#
# Copyright (C) 2018 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################

from odoo import api, SUPERUSER_ID

from . import models

def _get_value(env, model):
    model_model = env['ir.model']
    model_fields = env['ir.model.fields']
    model = model_model.search([('model', '=', model)], limit=1)
    if model.exists():
        field_domain = [
            ('model_id', '=', model.id),
            ('ttype', '=', 'datetime'),
            ('name', '=', 'create_date')]
        field = model_fields.search(field_domain, limit=1)
        return model, field
    return None

def _init_default_rules(cr, registry):
    env = api.Environment(cr, SUPERUSER_ID, {})
    rule = env['muk_autovacuum.rules']
    values = _get_value(env, 'mail.message')
    if values:
        rule.create({
            'name': "Delete Message Attachments after 6 Months",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'months',
            'time': 6,
            'only_attachments': True})
        rule.create({
            'name': "Delete Messages after 1 Year",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'years',
            'time': 1})
    values = _get_value(env, 'ir.logging')
    if values:
        rule.create({
            'name': "Delete Logs after 2 Weeks",
            'model': values[0].id,
            'active': False,
            'state': 'time',
            'time_field': values[1].id,
            'time_type': 'weeks',
            'time': 2,
            'protect_starred': False})
2,528
src/tvl/transforms.py
hyperfraise/tvl
0
2023356
"""Functions for transforming image data stored in PyTorch tensors. This module is necessary since most of the transformations provided by the `torchvision` package are applicable for PIL.Image images only. Since tvl may load video frames on the GPU, we want to be able to take the computation to the data rather than moving the images to and from main memory. As an additional benefit, these functions are defined such that they also work in batched mode, which is especially useful for videos. """ import math from typing import Sequence import torch from torch.nn.functional import interpolate from torchgeometry import warp_affine def normalise(tensor, mean, stddev, inplace=False): """Normalise the image with channel-wise mean and standard deviation. Args: tensor (torch.Tensor): The image tensor to be normalised. mean (Sequence of float): Means for each channel. stddev (Sequence of float): Standard deviations for each channel. inplace (bool): Perform normalisation in-place. Returns: Tensor: The normalised image tensor. """ mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None] stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None] if inplace: tensor.sub_(mean) else: tensor = tensor.sub(mean) tensor.div_(stddev) return tensor def denormalise(tensor, mean, stddev, inplace=False): """Denormalise the image with channel-wise mean and standard deviation. Args: tensor (torch.Tensor): The image tensor to be denormalised. mean (Sequence of float): Means for each channel. stddev (Sequence of float): Standard deviations for each channel. inplace (bool): Perform denormalisation in-place. Returns: Tensor: The denormalised image tensor. """ mean = torch.as_tensor(mean, device=tensor.device)[..., :, None, None] stddev = torch.as_tensor(stddev, device=tensor.device)[..., :, None, None] if inplace: return tensor.mul_(stddev).add_(mean) else: return torch.addcmul(mean, tensor, stddev) def resize(tensor, size, mode='bilinear'): """Resize the image. Args: tensor (torch.Tensor): The image tensor to be resized. size (tuple of int): Size of the resized image (height, width). mode (str): The pixel sampling interpolation mode to be used. Returns: Tensor: The resized image tensor. """ assert len(size) == 2 # If the tensor is already the desired size, return it immediately. if tensor.shape[-2] == size[0] and tensor.shape[-1] == size[1]: return tensor if not tensor.is_floating_point(): dtype = tensor.dtype tensor = tensor.to(torch.float32) tensor = resize(tensor, size, mode) return tensor.to(dtype) out_shape = (*tensor.shape[:-2], *size) if tensor.ndimension() < 3: raise Exception('tensor must be at least 2D') elif tensor.ndimension() == 3: tensor = tensor.unsqueeze(0) elif tensor.ndimension() > 4: tensor = tensor.view(-1, *tensor.shape[-3:]) align_corners = None if mode in {'linear', 'bilinear', 'trilinear'}: align_corners = False resized = interpolate(tensor, size=size, mode=mode, align_corners=align_corners) return resized.view(*out_shape) def crop(tensor, t, l, h, w, padding_mode='constant', fill=0): """Crop the image, padding out-of-bounds regions. Args: tensor (torch.Tensor): The image tensor to be cropped. t (int): Top pixel coordinate. l (int): Left pixel coordinate. h (int): Height of the cropped image. w (int): Width of the cropped image. padding_mode (str): Padding mode (currently "constant" is the only valid option). fill (float): Fill value to use with constant padding. Returns: Tensor: The cropped image tensor. 
""" # If the crop region is wholly within the image, simply narrow the tensor. if t >= 0 and l >= 0 and t + h <= tensor.size(-2) and l + w <= tensor.size(-1): return tensor[..., t:t+h, l:l+w] if padding_mode == 'constant': result = torch.full((*tensor.size()[:-2], h, w), fill, device=tensor.device, dtype=tensor.dtype) else: raise Exception('crop only supports "constant" padding currently.') sx1 = l sy1 = t sx2 = l + w sy2 = t + h dx1 = 0 dy1 = 0 if sx1 < 0: dx1 = -sx1 w += sx1 sx1 = 0 if sy1 < 0: dy1 = -sy1 h += sy1 sy1 = 0 if sx2 >= tensor.size(-1): w -= sx2 - tensor.size(-1) if sy2 >= tensor.size(-2): h -= sy2 - tensor.size(-2) # Copy the in-bounds sub-area of the crop region into the result tensor. if h > 0 and w > 0: src = tensor.narrow(-2, sy1, h).narrow(-1, sx1, w) dst = result.narrow(-2, dy1, h).narrow(-1, dx1, w) dst.copy_(src) return result def flip(tensor, horizontal=False, vertical=False): """Flip the image. Args: tensor (torch.Tensor): The image tensor to be flipped. horizontal: Flip horizontally. vertical: Flip vertically. Returns: Tensor: The flipped image tensor. """ if horizontal == True: tensor = tensor.flip(-1) if vertical == True: tensor = tensor.flip(-2) return tensor def affine(tensor, matrix): """Apply an affine transformation to the image. Args: tensor (torch.Tensor): The image tensor to be warped. matrix (torch.Tensor): The 2x3 affine transformation matrix. Returns: Tensor: The warped image. """ is_unbatched = tensor.ndimension() == 3 if is_unbatched: tensor = tensor.unsqueeze(0) warped = warp_affine(tensor, matrix, tensor.size()[-2:]) if is_unbatched: warped = warped.squeeze(0) return warped def rotate(tensor, degrees): """Rotate the image anti-clockwise about the centre. Args: tensor (torch.Tensor): The image tensor to be rotated. degrees (float): The angle through which to rotate. Returns: Tensor: The rotated image tensor. """ rads = math.radians(degrees) h, w = tensor.size()[-2:] c = math.cos(rads) s = math.sin(rads) x = (w - 1) / 2 y = (h - 1) / 2 # Transformation matrix for clockwise rotation about the centre of the image. matrix = torch.tensor([[ [ c, s, -c * x - s * y + x], [-s, c, s * x - c * y + y], ]], dtype=torch.float32, device=tensor.device) return affine(tensor, matrix) def fit(tensor, size, fit_mode='cover', resize_mode='bilinear', *, fill=0): """Fit the image within the given spatial dimensions. Args: tensor (torch.Tensor): The image tensor to be fit. size (tuple of int): Size of the output (height, width). fit_mode (str): 'fill', 'contain', or 'cover'. These behave in the same way as CSS's `object-fit` property. fill (float): padding value (only applicable in 'contain' mode). Returns: Tensor: The resized image tensor. """ # Modes are named after CSS object-fit values. 
assert fit_mode in {'fill', 'contain', 'cover'} if fit_mode == 'fill': return resize(tensor, size, mode=resize_mode) elif fit_mode == 'contain': ih, iw = tensor.shape[-2:] k = min(size[-1] / iw, size[-2] / ih) oh = round(k * ih) ow = round(k * iw) resized = resize(tensor, (oh, ow), mode=resize_mode) result = tensor.new_full((*tensor.size()[:-2], *size), fill) y_off = (size[-2] - oh) // 2 x_off = (size[-1] - ow) // 2 result[..., y_off:y_off + oh, x_off:x_off + ow] = resized return result elif fit_mode == 'cover': ih, iw = tensor.shape[-2:] k = max(size[-1] / iw, size[-2] / ih) oh = round(k * ih) ow = round(k * iw) resized = resize(tensor, (oh, ow), mode=resize_mode) y_trim = (oh - size[-2]) // 2 x_trim = (ow - size[-1]) // 2 result = crop(resized, y_trim, x_trim, size[-2], size[-1]) return result raise Exception('This code should not be reached.')
8,316
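A minimal usage sketch of the fit() function above, assuming a 3x480x640 CHW frame; the tensor values and target size are illustrative only:

# Hypothetical usage of fit(); shapes follow from the arithmetic above.
import torch
frame = torch.zeros(3, 480, 640)                # CHW input
print(fit(frame, (224, 224), 'fill').shape)     # (3, 224, 224), aspect ratio distorted
print(fit(frame, (224, 224), 'contain').shape)  # (3, 224, 224), letterboxed with the fill value
print(fit(frame, (224, 224), 'cover').shape)    # (3, 224, 224), overflow centre-cropped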
TE-1/PL-1/OSD/2. Socket(py)/server.py
Adityajn/College-Codes
1
2023780
import socket
import sys

s = socket.socket()
# host = socket.gethostname()
port = 28901  # port between 1024 and 49151
s.bind((sys.argv[1], port))
s.listen(4)
c, addr = s.accept()
print("Connected to:", addr)
f1 = open(sys.argv[2], "rb")  # open file in binary mode so each chunk is bytes
chunk = f1.read(1024)  # read 1024 bytes
while chunk:
    c.send(chunk)  # send read bytes
    chunk = f1.read(1024)  # read next 1024 bytes
f1.close()
c.close()
388
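The server above streams a file to the first client that connects; the lesson does not include the client side. A minimal counterpart sketch (host, port, and output filename are assumptions):

# Hypothetical client for the file server above; not part of the original lesson files.
import socket

c = socket.socket()
c.connect(("127.0.0.1", 28901))  # same port the server binds
with open("received.txt", "wb") as out:
    while True:
        chunk = c.recv(1024)     # mirror the server's 1024-byte chunks
        if not chunk:
            break                # server closed the socket: transfer complete
        out.write(chunk)
c.close()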
master_django/intensity/register/context_processors.py
kripken/intensityengine
31
2023204
# Copyright 2010 <NAME> ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.

from intensity.models import UserAccount
import intensity.conf as intensity_conf

def account(request):
    '''
    A context processor that provides 'my_account', the Intensity Engine
    account info for a user, and shows messages for that account
    '''
    ret = {
        'my_account': request.account if request.user.is_authenticated() else None,
        'message': request.session.get('message'),
    }
    request.session['message'] = None
    return ret

def toplevel(request):
    ''' Gives a redirect URL for the toplevel '''
    return {
        'toplevel_root': intensity_conf.get('Sites', 'toplevel_root'),
    }
795
manifest/mixins.py
ozgurgunes/django-manifest
0
2023580
# -*- coding: utf-8 -*-
""" Manifest View Mixins """

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.generic import FormView, View

from manifest import decorators, defaults
from manifest.utils import get_protocol

class MessageMixin:
    """
    View mixin adding messages to response.
    """

    success_message = ""
    error_message = ""
    extra_context = None

    def set_success_message(self, message):
        if defaults.MANIFEST_USE_MESSAGES:
            messages.success(self.request, message, fail_silently=True)

    def set_error_message(self, message):
        if defaults.MANIFEST_USE_MESSAGES:
            messages.error(self.request, message, fail_silently=True)

class SendMailMixin:
    """
    Mixin that sends an email to the given recipients.
    """

    from_email = None
    email_subject_template_name = None
    email_message_template_name = None
    email_html_template_name = None

    def create_email(self, context, recipient):
        if not self.email_subject_template_name:
            raise ImproperlyConfigured(
                "No template name for subject. "
                "Provide a email_subject_template_name."
            )
        if not self.email_message_template_name:
            raise ImproperlyConfigured(
                "No template name for message. "
                "Provide a email_message_template_name."
            )
        subject = "".join(
            render_to_string(
                self.email_subject_template_name, context
            ).splitlines()
        )
        message = render_to_string(self.email_message_template_name, context)
        return EmailMultiAlternatives(
            subject, message, self.from_email, [recipient]
        )

    def send_mail(self, recipient, opts):
        """
        Send a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        context = {
            "protocol": get_protocol(),
            "site": Site.objects.get_current(),
        }
        context.update(opts)
        email = self.create_email(context, recipient)
        if self.email_html_template_name is not None:
            html_email = render_to_string(
                self.email_html_template_name, context
            )
            email.attach_alternative(html_email, "text/html")
        return email.send()

class SendActivationMailMixin(SendMailMixin):
    def send_activation_mail(self, user):
        context = {
            "user": user,
            "activation_days": defaults.MANIFEST_ACTIVATION_DAYS,
            "activation_key": user.activation_key,
        }
        self.send_mail(user.email, context)

class EmailChangeMixin(SendMailMixin):
    email_subject_template_name_old = (
        "manifest/emails/confirmation_email_subject_old.txt"
    )
    email_message_template_name_old = (
        "manifest/emails/confirmation_email_message_old.txt"
    )
    email_html_template_name_old = None
    email_subject_template_name_new = (
        "manifest/emails/confirmation_email_subject_new.txt"
    )
    email_message_template_name_new = (
        "manifest/emails/confirmation_email_message_new.txt"
    )
    email_html_template_name_new = None

    def send_confirmation_mail(self, user):
        context = {
            "user": user,
            "new_email": user.email_unconfirmed,
            "confirmation_key": user.email_confirmation_key,
        }
        self.email_subject_template_name = self.email_subject_template_name_old
        self.email_message_template_name = self.email_message_template_name_old
        self.email_html_template_name = self.email_html_template_name_old
        self.send_mail(user.email, context)
        self.email_subject_template_name = self.email_subject_template_name_new
        self.email_message_template_name = self.email_message_template_name_new
        self.email_html_template_name = self.email_html_template_name_new
        self.send_mail(user.email_unconfirmed, context)

class SecureRequiredMixin(View):
    """
    Mixin that switches URL from http to https if
    ``MANIFEST_USE_HTTPS`` setting is ``True``.
    """

    @method_decorator(decorators.secure_required)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

class LoginRequiredMixin(View):
    """
    Mixin that redirects user to login form if not authenticated yet.
    """

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)

# pylint: disable=bad-continuation
class UserFormMixin(
    FormView, SecureRequiredMixin, LoginRequiredMixin, MessageMixin
):
    """
    Mixin that sets forms user argument to ``request.user``.
    """

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["user"] = self.request.user
        return kwargs
5,201
scripts/size_msgs_test.py
UCY-LINC-LAB/Self-Stabilization-Edge-Simulator
3
2022934
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import io
import base64
import os
import sys
import argparse

# See https://matplotlib.org/3.1.0/users/dflt_style_changes.html
plt.style.use('seaborn-ticks')
mpl.rcParams['grid.color'] = 'grey'
mpl.rcParams['grid.linestyle'] = ':'
mpl.rcParams['grid.linewidth'] = 0.5
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['font.size'] = 15
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.titlesize'] = 'large'

def build_graph(f, export):
    if export:
        f.savefig(export, format='png')
        return
    img = io.BytesIO()
    f.set_size_inches(11.7, 8.27)
    f.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    return graph_url
    # return 'data:image/png;base64,{}'.format(graph_url)

def load_data(file, period):
    data = []
    last_time = 0
    partial = [0., 0., 0., 0.]
    with open(file, 'r') as fp:
        for line in fp:
            line = line.strip()
            if len(line) == 0:
                continue
            if line.startswith('time'):
                continue
            toks = line.split(',')
            t = int(toks[0])
            control_count = int(toks[1])
            control_size = int(toks[2])
            data_count = int(toks[5])
            data_size = int(toks[6])
            control_size *= (1000 / period)
            data_size *= (1000 / period)
            partial[0] += control_count
            partial[1] += control_size
            partial[2] += data_count
            partial[3] += data_size
            if t - last_time > period:
                last_time = t
                data.append([t, partial[0], partial[1] / 1024, partial[2], partial[3] / 1024])
                partial = [0., 0., 0., 0.]
    return np.array(data)

def compute_graph2(data):
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 12), sharex=False)
    controlColor = 'xkcd:bright blue'
    dataColor = 'xkcd:light orange'
    # Time is in ms...
    df_time = data[:, 0] / 1000
    df_control_msgs_count = data[:, 1]
    df_control_msgs_size = data[:, 2] / 1024
    df_data_msgs_count = data[:, 3]
    df_data_msgs_size = data[:, 4] / 1024
    ax1.fill_between(x=df_time, y1=df_data_msgs_size, y2=0, color=dataColor, alpha=1, label="Data Plane")
    ax1.plot(df_time, df_data_msgs_size, color=dataColor, marker='o', markersize=2, alpha=1, linewidth=1)
    ax1.fill_between(x=df_time, y1=df_control_msgs_size, y2=0, color=controlColor, alpha=0.55, label="Control Plane")
    ax1.plot(df_time, df_control_msgs_size, color=controlColor, marker='D', markersize=2, alpha=0.85, linewidth=1)
    ax1.legend()
    # ax1.set_title('Traffic Transmitted')
    ax1.set_ylabel('Network Traffic (MB/s)')
    ax1.set_xlabel('Time (s)')
    ax1.grid()
    # Now to MBs
    # df['control_msgs_sz'] /= 1024
    # df['data_msgs_sz'] /= 1024
    ax2.plot(df_time, df_data_msgs_size.cumsum(), color=dataColor, alpha=1, label="Data Plane")
    ax2.plot(df_time, df_control_msgs_size.cumsum(), color=controlColor, alpha=1, label="Control Plane")
    ax2.legend()
    ax2.grid()
    ax2.set_ylabel('Total Network Traffic (MB)')
    ax2.set_xlabel('Time (s)')
    return fig

if __name__ == '__main__':
    root = os.getenv('RESULTS_ROOT', "../results/small")
    scenario = os.getenv('SCENARIO', "all_failures")
    experiments = os.listdir(os.path.join(os.path.abspath(root), scenario))
    print("Existing experiments: " + str(experiments))
    experiment = experiments[0]
    print("Using experiment: " + str(experiment))
    file = "stats/network/msgs.csv"
    # In ms
    period = 200
    path = os.path.join(root, scenario, experiment, file)
    data = load_data(path, period)
    fig = compute_graph2(data)
    plt.show()
    # build_graph(fig, export=None)
3,864
src/azure-cli/azure/cli/command_modules/cognitiveservices/tests/latest/test_network_rules.py
xaliciayang/azure-cli
7
2023413
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

import unittest

from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer

class CognitiveServicesNetworkRulesTests(ScenarioTest):
    @ResourceGroupPreparer()
    def test_cognitiveservices_network_rules(self, resource_group):
        sname = self.create_random_name(prefix='cs_cli_test_', length=16)
        customdomain = self.create_random_name(prefix='csclitest', length=16)
        self.kwargs.update({
            'sname': sname,
            'vnetname': sname,
            'kind': 'Face',
            'sku': 'S0',
            'location': 'westus',
            'customdomain': customdomain,
        })

        self.cmd('network vnet create --resource-group {rg} --name {vnetname}')
        subnet1 = self.cmd('network vnet subnet create --resource-group {rg} --name default'
                           ' --vnet-name {vnetname} --address-prefixes 10.0.0.0/24').get_output_in_json()
        subnet2 = self.cmd('network vnet subnet create --resource-group {rg} --name subnet'
                           ' --vnet-name {vnetname} --address-prefixes 10.0.1.0/24').get_output_in_json()

        self.cmd('az cognitiveservices account create -n {sname} -g {rg} --kind {kind} --sku {sku} -l {location}'
                 ' --custom-domain {customdomain} --yes',
                 checks=[self.check('name', '{sname}'),
                         self.check('location', '{location}'),
                         self.check('sku.name', '{sku}'),
                         self.check('properties.provisioningState', 'Succeeded')])

        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.58.3"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 1)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet ' + subnet1['id'])
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 1)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])

        self.cmd('az cognitiveservices account network-rule add -n {sname} -g {rg} --subnet '
                 + subnet2['name'] + ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 2)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.58.3")
        self.assertEqual(rules['ipRules'][1]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.58.3"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 1)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['ipRules'][0]['value'], "172.16.17.32/24")
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --ip-address "172.16.17.32/24"')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 2)
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet1['id'])
        self.assertEqual(rules['virtualNetworkRules'][1]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet ' + subnet1['id'])
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 1)
        self.assertEqual(rules['virtualNetworkRules'][0]['id'], subnet2['id'])

        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet '
                 + subnet2['name'] + ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        # Remove something that doesn't exist in the rules
        self.cmd('az cognitiveservices account network-rule remove -n {sname} -g {rg} --subnet '
                 + subnet2['name'] + ' --vnet-name {vnetname}')
        rules = self.cmd('az cognitiveservices account network-rule list -n {sname} -g {rg}').get_output_in_json()
        self.assertEqual(len(rules['ipRules']), 0)
        self.assertEqual(len(rules['virtualNetworkRules']), 0)

        # delete the cognitive services account
        ret = self.cmd('az cognitiveservices account delete -n {sname} -g {rg}')
        self.assertEqual(ret.exit_code, 0)

if __name__ == '__main__':
    unittest.main()
6,825
nbcelltests/define.py
timkpaine/nbcelltests
52
2022802
# *****************************************************************************
#
# Copyright (c) 2019, the nbcelltests authors.
#
# This file is part of the nbcelltests library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum

class LintType(Enum):
    LINES_PER_CELL = "lines_per_cell"
    CELLS_PER_NOTEBOOK = "cells_per_notebook"
    FUNCTION_DEFINITIONS = "function_definitions"
    CLASS_DEFINITIONS = "class_definitions"
    LINTER = "linter"
    KERNELSPEC = "kernelspec"
    MAGICS = "magics"

class TestType(Enum):
    CELL_COVERAGE = "cell_coverage"
    CELL_TEST = "cell_test"

class LintMessage(object):
    def __init__(self, cell, message, type, passed=False):
        self.cell = cell
        self.message = message
        self.type = type
        self.passed = passed

    def __repr__(self):
        ret = "PASSED: " if self.passed else "FAILED: "
        ret += self.message
        ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
        return ret

    def to_html(self):
        ret = (
            '<span style="color: green;">PASSED&nbsp;</span>'
            if self.passed
            else '<span style="color: red;">FAILED&nbsp;</span>'
        )
        ret += self.message
        ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
        return ret

class TestMessage(object):
    def __init__(self, cell, message, type, passed=0):
        self.cell = cell
        self.message = message
        self.type = type
        self.passed = passed

    def __repr__(self):
        ret = (
            "PASSED: "
            if self.passed > 0
            else "FAILED: "
            if self.passed < 0
            else "NOT RUN: "
        )
        ret += self.message
        ret += " (Cell %d)" % self.cell if self.cell > 0 else ""
        return ret

    def to_html(self):
        ret = (
            '<span style="color: green;">PASSED&nbsp;</span>'
            if self.passed
            else '<span style="color: red;">FAILED&nbsp;</span>'
        )
        ret += self.message
        ret += "(Cell %d)" % self.cell if self.cell > 0 else ""
        return ret
2,196
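A quick illustration of the message classes above; the cell number and message text are made up:

# Illustrative only; values are invented for the demo.
msg = LintMessage(cell=2, message="too many lines in cell", type=LintType.LINES_PER_CELL)
print(msg)          # FAILED: too many lines in cell (Cell 2)
msg.passed = True
print(msg)          # PASSED: too many lines in cell (Cell 2)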
embeddings/clean_text.py
onai/code-ecosystem-analyzer
0
2022976
'''
'''
import json
import os
import string
import sys

import emoji

def remove_emoji(text):
    return emoji.get_emoji_regexp().sub(u'', text)

def clean_text(the_text):
    lower = the_text.lower().split()
    cleaned = ' '.join(lower)
    trans_dict = {}
    for key in string.punctuation:
        if key == "'":
            trans_dict[key] = ''
        else:
            trans_dict[key] = ' '
    text_punct = str.maketrans(trans_dict)
    text_low = cleaned.lower()
    text_toks = text_low.translate(text_punct).split()
    return text_toks

if __name__ == '__main__':
    dirname = sys.argv[1]
    dest = sys.argv[2]
    count = 0
    reply_count = 0
    for root, dirs, files in os.walk(dirname):
        for filename in files:
            print(filename)
            full_path = os.path.join(root, filename)
            dest_path = os.path.join(dest, filename)
            cmts = []
            with open(full_path) as handle:
                for new_line in handle:
                    the_payload = json.loads(new_line)
                    the_text = ''
                    if the_payload['kind'] == 'youtube#commentThread':
                        the_text = the_payload['snippet']['topLevelComment']['snippet']['textOriginal']
                    elif the_payload['kind'] == 'youtube#comment':
                        the_text = the_payload['snippet']['textOriginal']
                    cleaned_toks = clean_text(the_text)
                    the_payload['cleaned_tokens'] = cleaned_toks
                    cmts.append(the_payload)
            with open(dest_path, 'a') as handle:
                for cmt in cmts:
                    handle.write(json.dumps(cmt))
                    handle.write('\n')
1,781
unused/mlat/dim1/stringABEdge.py
yoongun/topological-edge-modes-of-mechanical-lattice
1
2023695
import numpy as np
from numpy import linalg as la
from typing import List, Tuple
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd

class StringABLatticeEdge:
    """
    Reference:
        "A study of topological effects in 1D and 2D mechanical lattices"
        <NAME> (2018), et. al. from 'Journal of the Mechanics and Physics of Solids',
        Volume 117, Aug 2018, 22-36,
        https://www.sciencedirect.com/science/article/abs/pii/S0022509618301820
    """

    def __init__(self, k: List[float], m: List[float], precision: float = .01) -> None:
        """
        Represents dynamic system of 1 dimensional mechanical lattice.
        e.g.) ABABAB...

        :param k: Spring constants (2)
        :param m: Mass (2)
        :param precision: Precision for wavenumber q
        """
        if len(k) != len(m):
            raise ValueError(
                f"The length of k={len(k)} and m={len(m)} does not match.")
        self.k = k
        self.M = np.diag(m)
        self.qs = np.arange(-np.pi, np.pi, precision)

    def H(self, q):
        """
        Hamiltonian

        :return: Hamiltonian defined given k and q
        """
        k = self.k
        Q = np.exp(1.j * q)
        return np.array([[k[0] + k[1], -k[0] - k[1] * Q.conj()],
                         [-k[0] - k[1] * Q, k[0] + k[1]]])

    def dispersion(self) -> List[Tuple[float, float]]:
        """
        Calculate the dispersion relation

        :return: List of angular frequency omega for each q (wavenumber)
            and its eigenvectors
        """
        M_inv = la.inv(self.M)
        eigenvals = []
        eigenvecs = []
        for q in self.qs:
            eigen_val, eigen_vec = self._min_eigen(M_inv.dot(self.H(q)))
            eigenvals.append(eigen_val)
            eigenvecs.append(eigen_vec)
        ws = np.sqrt(np.array(eigenvals).real)
        evs = np.array(eigenvecs)
        return ws, evs

    def _min_eigen(self, mat: np.ndarray) -> Tuple[float, float]:
        """
        Return eigenvalue, eigenvector pair of minimum eigenvalue.

        :return: eigenvalue, eigenvector
        """
        eigenvals, eigenvecs = la.eig(mat)
        min_idx = np.argsort(eigenvals)
        return eigenvals[min_idx], eigenvecs[min_idx]

    def beta(self) -> float:
        """
        Calculate varying contrast beta with given spring constants

        :return: Varying contrast beta
        """
        k = self.k
        return (k[0] - k[1]) / (k[0] + k[1])

    def animate(self, q: float, N: int, mode: int, *, fps: int = 30, s: int = 3):
        """
        :param q: Wavenumber to animate [-pi, pi]
        :param N: Number of unit cells
        :param mode: Mode to animate (0 for acoustic, 1 for optical)
        :param fps: (Optional) Frame per second (/s) (default: 30 /s)
        :param s: (Optional) Animation duration (s) (default: 3 s)
        """
        ws, evs = self.dispersion()
        # Parameters
        idx = min(range(len(self.qs)), key=lambda i: abs(self.qs[i] - q))
        w = ws[idx, mode]  # /s
        # Construct frames
        frames = []
        for t in range(int(s * fps)):
            dt = t / fps
            dphase = dt * w * 2 * np.pi
            y = []
            for i in range(N):
                y.append(evs[idx, mode, 0] * np.exp(1.j * (q * i + dphase)))
                y.append(evs[idx, mode, 1] * np.exp(1.j * (q * i + dphase)))
            y = np.array(y)
            frames.append(
                go.Frame(data=[go.Scatter(y=y.real, line_shape='spline')]))
        # Figure components
        start_button = dict(
            label="Play",
            method="animate",
            args=[
                None, {
                    "frame": {"duration": 1000 / fps, "redraw": False},
                    "fromcurrent": True,
                    "transition": {"duration": 100}
                }])
        pause_button = dict(
            label="Pause",
            method="animate",
            args=[
                [None], {
                    "frame": {"duration": 0, "redraw": False},
                    "mode": "immediate",
                    "transition": {"duration": 0}
                }])
        # Plot
        fig = go.Figure(
            data=frames[0].data,
            layout=go.Layout(
                title="Dispersion relation animation",
                yaxis=dict(range=[-1., 1.], autorange=False),
                updatemenus=[
                    dict(
                        type="buttons",
                        buttons=[start_button, pause_button])
                ]
            ),
            frames=frames[1:])
        fig.show()

    def plot_dispersion_relation(self):
        ws, _ = self.dispersion()
        w0 = ws[:, 0]
        w1 = ws[:, 1]
        ws = np.append(w0, w1)
        x = np.append(self.qs, self.qs)
        y = ws
        index = np.append(np.repeat(0, len(self.qs)), np.repeat(1, len(self.qs)))
        df = pd.DataFrame({
            "q": x,
            "w": y,
            "index": index,
        })
        fig = px.line(df, x="q", y="w", color='index')
        fig.show()
5,196
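A short usage sketch of the lattice class above; the spring constants and masses are illustrative values, not taken from the referenced paper:

# Hypothetical usage; k and m chosen only for illustration.
lattice = StringABLatticeEdge(k=[1.0, 2.0], m=[1.0, 1.0])
print(lattice.beta())               # (1 - 2) / (1 + 2) = -1/3, the varying contrast
lattice.plot_dispersion_relation()  # acoustic and optical branches over q in [-pi, pi)
lattice.animate(q=np.pi / 2, N=10, mode=0)  # travelling acoustic mode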
Lesson 4/website_alive/make_request.py
arechesk/PythonHW
0
2023763
import requests

OK = requests.codes.ok

def request(url):
    r = requests.get(url)
    return r
100
ddi_search_engine/Bio/dbdefs/embl.py
dbmi-pitt/DIKB-Evidence-analytics
3
2023274
# Copyright 2002 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.

from Bio.config.DBRegistry import CGIDB, DBGroup
from _support import *

embl_xembl_cgi = CGIDB(
    name="embl-xembl-cgi",
    doc="Query XEMBL for EMBL sequence data in XML format.",
    cgi="http://www.ebi.ac.uk/cgi-bin/xembl/XEMBL.pl",
    url="http://www.ebi.ac.uk/xembl/",
    delay=5.0,
    params=[("format", "Bsml")],
    key="id",
    failure_cases=[(has_str("NOT EXIST"), "id does not exist")],
)

embl_dbfetch_cgi = CGIDB(
    name="embl-dbfetch-cgi",
    cgi="http://www.ebi.ac.uk/cgi-bin/dbfetch",
    url="http://www.ebi.ac.uk/cgi-bin/dbfetch",
    doc="dbfetch provides EMBL, Genbank, and SWALL sequences",
    delay=5.0,
    params=[("db", "embl"),
            ("style", "raw"),
            ("format", "embl"),
            ],
    key="id",
    failure_cases=[(has_str("not found in database"), "id does not exist")]
)

embl_ebi_cgi = CGIDB(
    name="embl-ebi-cgi",
    cgi="http://www.ebi.ac.uk/cgi-bin/emblfetch",
    url="http://www.ebi.ac.uk/cgi-bin/emblfetch",
    doc="Retrieve many kinds of sequences from EBI",
    delay=5.0,
    params=[("db", "EMBL"),
            ("format", "default"),  # also Fasta, bsml, agave available
            ("style", "raw")
            ],
    key="id",
    failure_cases=[(blank_expr, "No results returned")]
)

embl = DBGroup(
    name="embl",
    behavior="serial",
    ## cache="XXX"
)
embl.add(embl_dbfetch_cgi)
embl.add(embl_ebi_cgi)

embl_xml = DBGroup(
    name="embl-xml",
    behavior="serial")

embl_fast = DBGroup(
    name="embl-fast",
    behavior="concurrent",
)
embl_fast.add(embl_dbfetch_cgi)
embl_fast.add(embl_ebi_cgi)
1,844
reverse_proxy/proxies/admin.py
optimor/reverse-proxy
0
2023991
from django.contrib import admin
from jet.admin import CompactInline

from .models import ProxySite, ProxyRewrite, ProxyHeader
from .forms import ProxySiteForm

class ProxyRewriteInline(CompactInline):
    model = ProxyRewrite
    extra = 1
    fieldsets = (
        (
            None,
            {
                "fields": ("from_regex", "to_regex"),
                "description": "A list of tuples in the style (from, to) where from "
                "must be a valid regex expression and to a valid URL. If "
                "request.get_full_path matches the from expression the "
                "request will be redirected to to with a status code 302. "
                "Match groups can be used to pass parts from the from "
                "URL to the to URL using numbered groups.",
            },
        ),
    )

class ProxyHeaderInline(CompactInline):
    model = ProxyHeader
    extra = 1
    fieldsets = (
        (
            None,
            {
                "fields": ("header_name", "header_value"),
                "description": "A list of tuples in the style (key, value) where key "
                "must be a valid HEADER and value a valid header value.",
            },
        ),
    )

@admin.register(ProxySite)
class ProxySiteAdmin(admin.ModelAdmin):
    list_display = (
        "name",
        "upstream",
        "subdomain_name",
        "subdomain_full_url",
        "add_remote_user",
        "default_content_type",
        "retries",
    )
    fieldsets = (
        (None, {"fields": ("name", "upstream", "thumbnail")}),
        (
            "Subdomain",
            {
                "fields": ("subdomain_name", "subdomain_full_url"),
                "description": "Specify those to setup a proxy that redirects based on "
                "the subdomain of the current URL",
            },
        ),
        ("Extra", {"fields": ("add_remote_user", "default_content_type", "retries")}),
    )
    form = ProxySiteForm
    inlines = (ProxyRewriteInline, ProxyHeaderInline)
2,024
preprocess.py
Cyna298/hifi-gan
0
2023535
import glob
import os
from pathlib import Path

from tqdm import tqdm
import numpy as np

from TTS.utils.audio import AudioProcessor

def preprocess_wav_files(out_path, config, ap):
    os.makedirs(os.path.join(out_path, "quant"), exist_ok=True)
    os.makedirs(os.path.join(out_path, "mel"), exist_ok=True)
    wav_files = find_wav_files(config.data_path)
    for path in tqdm(wav_files):
        wav_name = Path(path).stem
        quant_path = os.path.join(out_path, "quant", wav_name + ".npy")
        mel_path = os.path.join(out_path, "mel", wav_name + ".npy")
        y = ap.load_wav(path)
        mel = ap.melspectrogram(y)
        np.save(mel_path, mel)
        if isinstance(config.mode, int):
            quant = (
                ap.mulaw_encode(y, qc=config.mode)
                if config.mulaw
                else ap.quantize(y, bits=config.mode)
            )
            np.save(quant_path, quant)

def find_wav_files(data_path):
    wav_paths = glob.glob(os.path.join(data_path, "**", "*.wav"), recursive=True)
    return wav_paths
1,051
setup.py
NineteenPeriod/django-bulk-update-or-create
0
2023217
#!/usr/bin/env python3
from setuptools import setup

setup(
    name='django-bulk-update-or-create',
)
103
artap/tests/test_benchmark_robust.py
tamasorosz/artap
5
2023499
import unittest

from ..individual import Individual
from ..benchmark_robust import Synthetic1D, Synthetic2D, Synthetic5D, Synthetic10D

class TestSynthetic1D(unittest.TestCase):
    def test_synthetic1d(self):
        test = Synthetic1D()
        self.assertAlmostEqual(test.evaluate(Individual([11.0]))[0], 3.23, 3)
        self.assertAlmostEqual(test.evaluate(Individual([1.6]))[0], 3.205, 2)

class TestSynthetic2D(unittest.TestCase):
    def test_synthetic2d(self):
        test = Synthetic2D()
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0]))[0], 1.21112, 4)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0]))[0], 1.00096, 4)

class TestSynthetic5D(unittest.TestCase):
    def test_synthetic5d(self):
        test = Synthetic5D()
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.200000000, 4)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.000, 4)
        self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0]))[0], .7)
        self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0]))[0], .75)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2, 5)
        self.assertAlmostEqual(test.evaluate(Individual([5.0, 2.0, 9.6, 7.3, 8.6]))[0], 1.0)
        self.assertAlmostEqual(test.evaluate(Individual([7.5, 8.0, 9.0, 3.2, 4.6]))[0], .6, 4)
        self.assertAlmostEqual(test.evaluate(Individual([5.7, 9.3, 2.2, 8.4, 7.1]))[0], .5)
        self.assertAlmostEqual(test.evaluate(Individual([5.5, 7.2, 5.8, 2.3, 4.5]))[0], .2, 4)
        self.assertAlmostEqual(test.evaluate(Individual([4.7, 3.2, 5.5, 7.1, 3.3]))[0], 0.4)
        self.assertAlmostEqual(test.evaluate(Individual([9.7, 8.4, 0.6, 3.2, 8.5]))[0], 0.1)

class TestSynthetic10D(unittest.TestCase):
    def test_synthetic10d(self):
        test = Synthetic10D()
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0],
                               1.200000000, 4)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0],
                               1.000, 4)
        self.assertAlmostEqual(test.evaluate(Individual([10., 1.0, 6.0, 7.0, 8.0, 1.0, 1.0, 6.0, 7.0, 8.0]))[0], 0.7)
        self.assertAlmostEqual(test.evaluate(Individual([1.0, 3.0, 8.0, 9.5, 2.0, 1.0, 3.0, 8.0, 9.5, 2.0]))[0], 0.75)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 1.0, 3.0, 2.0, 5.0, 3.0, 1.0, 3.0, 2.0, 5.0]))[0], 1.0)
        self.assertAlmostEqual(test.evaluate(Individual([3.0, 4.0, 1.3, 5.0, 5.0, 3.0, 4.0, 1.3, 5.0, 5.0]))[0], 1.2)
2,815
tests/data/write_data.py
lzmch/framequery
66
2023577
from __future__ import print_function, division, absolute_import

import os.path

import pandas as pd

df = pd.DataFrame({
    'g': [0, 0, 0, 1, 1, 2],
    'i': [1, 2, 3, 4, 5, 6],
    'f': [7.0, 8.0, 9.0, 0.0, 1.0, 2.0],
})

df.to_csv(
    os.path.join(os.path.dirname(__file__), 'test.csv'),
    sep=';', index=False,
)
326
projects/crawl_taobao_goods_migrate/model/result.py
kingking888/crawler-pyspider
1
2022702
from crawl_taobao_goods_migrate.model.task import Task
from pyspider.core.model.mongo_base import *
from pyspider.helper.date import Date

class Result(ResultBase):
    def __init__(self):
        super(Result, self).__init__()

    def find_by_goods_id(self, goods_id):
        """
        Look up a goods item in the goods image collection.
        :param goods_id:
        :return:
        """
        return self.find_one({"taskid": Task.get_task_id_goods_image(goods_id)})

    def find_complete_goods(self, goods_id):
        """
        Look the goods up in both the goods image and goods details collections
        and return the record with the more recent update time; if two records
        are found, one of the goods has already been delisted.
        :param goods_id:
        :return:
        """
        image_goods = self.find_one({"taskid": Task.get_task_id_goods_image(goods_id)})
        detail_goods = self.find_one({"taskid": Task.get_task_id_goods_detail(goods_id)})
        img_result = image_goods.get('result') if image_goods else "1970-01-01"
        detail_result = detail_goods.get('result') if detail_goods else "1970-01-01"
        img_date = img_result.get('update_time') if isinstance(img_result, dict) else "1970-01-01"
        detail_date = detail_result.get('update_time') if isinstance(detail_result, dict) else "1970-01-01"
        if img_date is None:
            img_date = "1970-01-01"
        if detail_date is None:
            detail_date = "1970-01-01"
        return detail_goods if Date(img_date) < Date(detail_date) else image_goods

    def find_all_goods(self, shop_id=''):
        """
        Query all goods in the goods collection; if shop_id is given, return
        all goods under that shop_id, otherwise return every goods item.
        :param shop_id: shop ID
        :return:
        """
        builder = {
            'goods_id': {'$exists': 'true'},
        }
        if shop_id:
            builder['shop_id'] = shop_id
        return self.find(builder)

    def find_all_shop_goods(self, shop_list: list):
        """
        Get the goods IDs of all the given shops.
        :param shop_list: str list
        :return:
        """
        builder = {
            "goods_id": {"$exists": 'true'}
        }
        if shop_list:
            shop_list = [str(item) for item in shop_list]
            builder["shop_id"] = {"$in": shop_list}
        return self.find(builder)

    def find_filter_goods(self, shop_ids: list, update_time=0):
        """
        Filtered query over the goods data.
        :param shop_ids: int list
        :param update_time: if given, fetch goods whose update time is no earlier than it
        :return:
        """
        builder = {
            'result.goods_id': {'$exists': 'true'},
        }
        if shop_ids:
            shop_ids = [int(item) for item in shop_ids]
            builder['result.shop_id'] = {"$in": shop_ids}
        if update_time > 0:
            builder['updatetime'] = {"$gte": update_time}
        return self.find(builder)

    def find_all_shop_id(self):
        """
        Get all shop IDs.
        :return:
        """
        return self.find({
            'result.shop_id': {'$exists': 'true'},
            'result.shop_url': {'$exists': 'true'},
            'result.banner_imgs': {'$exists': 'true'},
        })

    def find_shop_by_id(self, shop_id):
        """
        Look up shop details in the shop details collection.
        :param shop_id:
        :return:
        """
        return self.find_one({"taskid": Task.get_task_id_shop_details(shop_id)})

    def update_shop_crawled_status(self, shop_id, status):
        """
        Update the crawled status of a shop.
        :param shop_id:
        :param status:
        :return:
        """
        return self.update_many({'taskid': Task.get_task_id_shop_details(shop_id)},
                                {"$set": {"result.crawled": status}})

    def insert_or_update_goods(self, doc):
        """
        Insert or update a Tmall goods item.
        :param doc:
        :return:
        """
        goods_id = doc.get("goods_id", "")
        goods_name = doc.get("goods_name", "")
        shop_id = doc.get("shop_id", "")
        update_time = doc.get("update_time", 0)
        if goods_id:
            re = self.find_one({"goods_id": goods_id})
            if re:
                return self.update(
                    {'goods_id': goods_id},
                    {"$set": {"goods_id": goods_id, "goods_name": goods_name,
                              "shop_id": shop_id, "update_time": update_time}})
            else:
                return self.insert(doc)
        else:
            return self.insert(doc)
4,288
src/Tokenize.py
ttrung149/turquoise
2
2023117
#!/usr/bin/env python
# -----------------------------------------------------------------------------
#  Turquoise - VHDL linter and compilation toolchain
#  Copyright (c) 2020-2021: Turquoise team
#
#  File name: Tokenize.py
#
#  Description: Implementation of tokenizer class
#
# -----------------------------------------------------------------------------

from pyVHDLParser.Token.Parser import Tokenizer
from pyVHDLParser.Blocks import TokenToBlockParser
from pyVHDLParser.Base import ParserException

class Tokenize():
    def __init__(self, filename=None):
        self._filename = filename

    def get_token_stream(self):
        with open(self._filename, 'r') as handle:
            content = handle.read()
        stream = Tokenizer.GetVHDLTokenizer(content)
        return stream

    def get_token_iter(self):
        stream = self.get_token_stream()
        token_iter = iter(stream)
        return token_iter
925
python/oneflow/framework/docstr/unbind.py
L-Net-1992/oneflow
1
2023764
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow from oneflow.framework.docstr.utils import add_docstr add_docstr( oneflow.unbind, """ This function is equivalent to PyTorch's unbind function. Removes a tensor dimension. Returns a tuple of all slices along a given dimension, already without it. Args: x(Tensor): the tensor to unbind dim(int): dimension to remove For example: .. code-block:: python >>> import oneflow as flow >>> x = flow.tensor(range(12)).reshape([3,4]) >>> flow.unbind(x) (tensor([0, 1, 2, 3], dtype=oneflow.int64), tensor([4, 5, 6, 7], dtype=oneflow.int64), tensor([ 8, 9, 10, 11], dtype=oneflow.int64)) >>> flow.unbind(x, 1) (tensor([0, 4, 8], dtype=oneflow.int64), tensor([1, 5, 9], dtype=oneflow.int64), tensor([ 2, 6, 10], dtype=oneflow.int64), tensor([ 3, 7, 11], dtype=oneflow.int64)) """, )
1,513
examples/routes/resequence_multiple_stops.py
route4me/route4me-python-sdk
10
2023170
# -*- coding: utf-8 -*-
import argparse
import json

from route4me import Route4Me

def load_json(filename):
    with open(filename, 'rt') as datafile:
        data = json.load(datafile)
    return data

def main(args):
    r4m = Route4Me(args.api_key)
    route_data = load_json(args.route_data_filename)
    route = r4m.route
    print(f'Route ID: {args.route_id}')
    print("Addresses to be Re-sequenced")
    for address in route_data['addresses']:
        print(f'Address Sequence: {address["sequence_no"]:6} - '
              f'Route Destination ID: {address["route_destination_id"]:9}')
    print(f"After Re-sequencing the Route {args.route_id}")
    response_data = route.resequence_multiple_stops(args.route_id, route_data)
    for address in response_data['addresses']:
        print(f'Address Sequence: {address["sequence_no"]:6} - '
              f'Route Destination ID: {address["route_destination_id"]:9} - Address: {address["address"]} ')

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Resequence a Route')
    parser.add_argument('--api_key', dest='api_key', help='Route4Me API KEY',
                        type=str, required=True)
    parser.add_argument('--route_id', dest='route_id', help='Route ID',
                        type=str, required=True)
    parser.add_argument('--route_data_filename', dest='route_data_filename',
                        help='JSON file name with Route Addresses ID and Sequence',
                        type=str, required=True)
    args = parser.parse_args()
    main(args)
1,595
src/plot_automaton.py
BurnySc2/rust-python-pyo3-test
1
2023359
import sys
import os
import lzma
import pickle
from typing import Tuple, List, Iterable

import numpy as np

np.set_printoptions(threshold=sys.maxsize)

import matplotlib.pyplot as plt

from sc2.game_data import GameData
from sc2.game_info import GameInfo
from sc2.game_state import GameState
from sc2.bot_ai import BotAI

def get_map_specific_bots() -> Iterable[BotAI]:
    folder = os.path.dirname(__file__)
    subfolder_name = "pickle_data"
    pickle_folder_path = os.path.join(folder, subfolder_name)
    files = os.listdir(pickle_folder_path)
    for file in (f for f in files if f.endswith(".xz")):
        with lzma.open(os.path.join(folder, subfolder_name, file), "rb") as f:
            raw_game_data, raw_game_info, raw_observation = pickle.load(f)

        # Build fresh bot object, and load the pickle'd data into the bot object
        bot = BotAI()
        game_data = GameData(raw_game_data.data)
        game_info = GameInfo(raw_game_info.game_info)
        game_state = GameState(raw_observation)
        bot._initialize_variables()
        bot._prepare_start(client=None, player_id=1, game_info=game_info, game_data=game_data)
        bot._prepare_step(state=game_state, proto_game_info=raw_game_info)
        yield bot

# Global bot object that is used in TestClass.test_position_*
bot_object_generator = get_map_specific_bots()
# random_bot_object: BotAI = next(bot_object_generator)
# print(random_bot_object.game_info.start_locations)
# print(random_bot_object.townhalls[0].position)
# print(random_bot_object.enemy_start_locations)

def main():
    # start = (90, 100)
    # goal = (100, 114)

    # Spawn
    start = (29, 65)
    goal = (154, 114)

    # Ramp
    # start = (32, 51)
    # goal = (150, 129)

    # map_grid = np.loadtxt("AutomatonLE.txt", delimiter="").astype(int)
    grid = []
    with open("../AutomatonLE.txt") as f:
        for line in f.readlines():
            values = [int(i) for i in list(line.strip())]
            grid.append(values)
    map_grid = np.asarray(grid)

    path = []
    with open("../path.txt") as f:
        for line in f.readlines():
            x, y = line.split(",")
            path.append((int(x.strip()), int(y.strip())))
    print()
    plot(map_grid, route=path, start=start, goal=goal)

def plot(
    grid,
    route: List[Tuple[int, int]] = None,
    start: Tuple[int, int] = None,
    goal: Tuple[int, int] = None,
    waypoints=None,
):
    # extract x and y coordinates from route list
    x_coords = []
    y_coords = []
    if route:
        for i in range(0, len(route)):
            x = route[i][0]
            y = route[i][1]
            x_coords.append(x)
            y_coords.append(y)

    # plot map and path
    fig, ax = plt.subplots(figsize=(20, 20))
    ax.imshow(grid, cmap=plt.cm.Dark2)
    if start:
        ax.scatter(start[0], start[1], marker="x", color="red", s=200)
    if goal:
        ax.scatter(goal[0], goal[1], marker="x", color="blue", s=200)
    if route:
        for w in route:
            ax.scatter(w[0], w[1], marker="x", color="orange", s=100)
    if waypoints:
        for w in waypoints:
            ax.scatter(w[0], w[1], marker="x", color="black", s=50)
    # plt.gca().invert_xaxis()
    plt.gca().invert_yaxis()
    plt.show()

if __name__ == "__main__":
    main()
3,363
models/utils.py
Curli-quan/fewshot-select
0
2023960
import random
from functools import wraps

from torch import nn
import torch.nn.functional as F


class EMA():
    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new


def update_moving_average(ema_updater, ma_model, current_model):
    for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
        old_weight, up_weight = ma_params.data, current_params.data
        ma_params.data = ema_updater.update_average(old_weight, up_weight)


class RandomApply(nn.Module):
    def __init__(self, fn, p):
        super().__init__()
        self.fn = fn
        self.p = p

    def forward(self, x):
        if random.random() > self.p:
            return x
        return self.fn(x)


def default(val, def_val):
    return def_val if val is None else val


def flatten(t):
    return t.reshape(t.shape[0], -1)


def singleton(cache_key):
    def inner_fn(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            # print(f"[.] Function Name: {fn.__name__}")
            instance = getattr(self, cache_key)
            if instance is not None:
                return instance
            instance = fn(self, *args, **kwargs)
            setattr(self, cache_key, instance)
            return instance
        return wrapper
    return inner_fn


def get_module_device(module):
    return next(module.parameters()).device


def set_requires_grad(model, val):
    for p in model.parameters():
        p.requires_grad = val


# loss fn
def loss_fn(x, y):
    x = F.normalize(x, dim=-1, p=2)
    y = F.normalize(y, dim=-1, p=2)
    return 2 - 2 * (x * y).sum(dim=-1)
1,851
star_printer.py
ChangyongKim0/programming_study
1
2022792
import logger as lg


class StarPrinter:
    def __init__(self, print_types):
        self.print_queue = [{"type": ele[0], "length": ele[1]}
                            for ele in print_types]
        logger = lg.Logger("StarPrinter")
        self.log, self.err = logger.log, logger.err
        self.log("StarPrinter initialized.")

    def printDiamond(self, length):
        space = " " * (length // 2)
        star = "*"
        print()
        for line in range(length):
            print(space + star)
            if line < length / 2 - 1:
                space = space[0:-2]
                star += "****"
            else:
                space += " "
                star = star[0:-4]
        print()

    def printTriangle(self, length):
        space = " " * length
        star = "*"
        print()
        for line in range(length):
            print(space + star)
            space = space[0:-1]
            star += "**"
        print()

    def printLeftTriangle(self, length):
        star = "*"
        print()
        for line in range(length):
            print(star)
            star += "**"
        print()

    def addPrintQueue(self, print_type, length):
        self.print_queue.append({"type": print_type, "length": length})
        self.log("{} with length {} is added in print queue.".format(
            print_type, length))

    def printStar(self):
        for ele in self.print_queue:
            if ele["type"] == "diamond":
                self.printDiamond(ele["length"])
            elif ele["type"] == "triangle":
                self.printTriangle(ele["length"])
            elif ele["type"] == "left_triangle":
                self.printLeftTriangle(ele["length"])
            else:
                self.err("Wrong print type.")

    def __str__(self):
        string = "<StarPrinter>\n[print_queue]\n"
        for ele in self.print_queue:
            string += "type: {0}; length: {1}\n".format(
                ele["type"], ele["length"])
        return string


if __name__ == "__main__":
    star_printer = StarPrinter([["triangle", 5], ["left_triangle", 3]])
    print(star_printer)
    star_printer.addPrintQueue("none", 8)
    star_printer.addPrintQueue("diamond", 7)
    print(star_printer)
    star_printer.printStar()
2,268
src/shapes.py
AntVil/Wetter-Daten-Visualizer
0
2023987
# this file contains all components needed to collect, structure and save the data from GADM
import os
import requests
from zipfile import ZipFile
from io import BytesIO
import cartopy.io.shapereader as shpreader

# constants
SHAPES_URL = "https://biogeo.ucdavis.edu/data/gadm3.6/shp/gadm36_DEU_shp.zip"
SHAPES_FOLDER = os.path.join(os.path.dirname(__file__), "data", "shapes")

os.makedirs(SHAPES_FOLDER, exist_ok=True)


def download_shapes():
    """
    this function downloads data from GADM
    """
    unpacked = ZipFile(BytesIO(requests.get(SHAPES_URL).content))
    file_names = list(set([file.split(".")[0] for file in unpacked.namelist()]).difference({"license"}))

    # saving license
    with unpacked.open("license.txt", "r") as read_file:
        with open(os.path.join(SHAPES_FOLDER, "license.txt"), "wb") as write_file:
            write_file.write(read_file.read())

    # downloading files
    for file in file_names:
        for extension in [".shp", ".shx", ".dbf"]:
            with unpacked.open(file + extension, "r") as read_file:
                # creating folder structure
                path = os.path.join(SHAPES_FOLDER, file)
                os.makedirs(path, exist_ok=True)

                # saving file
                file_name = "shape" + extension
                with open(os.path.join(path, file_name), "wb") as write_file:
                    write_file.write(read_file.read())


def get_geometry(level=1):
    """
    this function returns the administrative-area geometries for germany
    """
    try:
        return list(
            shpreader.Reader(
                os.path.join(os.path.dirname(__file__), "data", "shapes", f"gadm36_DEU_{level}", "shape")
            ).geometries()
        )
    except Exception:
        download_shapes()
        return get_geometry(level)


if __name__ == "__main__":
    download_shapes()
1,918
tests/seekret.apitest/context/response_test.py
seek-ret/tavernrtl
4
2023678
import io
import json as _json
from typing import Optional, Union

import pytest
from requests import Response
from requests.structures import CaseInsensitiveDict

from seekret.apitest.context.response import ResponseWrapper, NullResultError


def make_wrapper(json=None,
                 headers: Optional[Union[dict[str], CaseInsensitiveDict[str]]] = None):
    response = Response()
    response.raw = io.BytesIO(_json.dumps(json).encode() if json else b'')
    if headers:
        response.headers = CaseInsensitiveDict(headers)
    return ResponseWrapper(response)


class TestResponseWrapper:
    class TestSearch:
        def test_json_nested_value(self):
            wrapper = make_wrapper({'a': {'b': {'c': 'd'}}})
            assert {'c': 'd'} == wrapper.search('json.a.b')

        def test_json_array_value(self):
            wrapper = make_wrapper([1, 'b', {'c': 'd'}])
            assert 'd' == wrapper.search('json[2].c')

        def test_json_missing_value_causes_null_result_error(self):
            wrapper = make_wrapper({'some-key': 1})
            pytest.raises(NullResultError, wrapper.search, 'json."other-key"')

        def test_json_value_none_causes_null_result_error(self):
            wrapper = make_wrapper({'key': None})
            pytest.raises(NullResultError, wrapper.search, 'json.key')

        def test_json_case_sensitive(self):
            wrapper = make_wrapper({'caseSensitiveKey': 1})
            pytest.raises(NullResultError, wrapper.search, 'json.casesensitivekey')

        def test_headers_existing_key(self):
            wrapper = make_wrapper(headers={'Some-Header': 'value'})
            assert wrapper.search('headers."Some-Header"') == 'value'

        def test_headers_case_insensitive(self):
            wrapper = make_wrapper(headers={'Some-Header': 'value'})
            assert wrapper.search('headers."some-header"') == 'value'

        def test_headers_missing_key_causes_null_result_error(self):
            wrapper = make_wrapper(headers={'Some-Header': 'value'})
            pytest.raises(NullResultError, wrapper.search, 'headers."other-header"')

        def test_bad_locator_causes_null_result_error(self):
            wrapper = make_wrapper(json={'a': 1}, headers={'b': 2})
            pytest.raises(NullResultError, wrapper.search,
                          'expression.must.start.with.json.or.headers')

    class TestAssertSchema:
        def test_validation_success(self):
            wrapper = make_wrapper({
                'a': 'hello!',
                'b': 1,
            })
            wrapper.assert_schema("""
                type: map
                mapping:
                    a:
                        type: str
                        required: true
                    b:
                        type: int
                """)

        def test_validation_failure_causes_assertion_error(self):
            wrapper = make_wrapper({
                'b': 1,
            })
            pytest.raises(
                AssertionError, wrapper.assert_schema, """
                type: map
                mapping:
                    a:
                        type: str
                        required: true
                    b:
                        type: int
                """)
3,353
src/protocols/BLE/ble_device.py
QWERTSKIHACK/peniot
143
2023848
import pexpect


class BLEDevice:
    """
    Represents a BLE device.
    It uses `gatttool` to connect a BLE device.
    """

    def __init__(self, address):
        self.device = None
        self.address = address
        # connect to the device specified with the given address
        self.connect()

    def connect(self):
        """
        Connects to the BLE device
        """
        print("Connecting...")
        # Run gatttool interactively.
        self.device = pexpect.spawn("gatttool -b " + self.address + " -I")
        self.device.expect(r'\[LE\]>', timeout=10)
        self.device.sendline('connect')
        self.device.expect(r'Connection successful.*\[LE\]>', timeout=10)
        print("Successfully connected!")

    def writecmd(self, handle, value):
        """
        Updates the value of the handle
        """
        cmd = "char-write-cmd " + handle + " " + value
        self.device.sendline(cmd)
        print("Wrote " + value + " to handle: " + handle)
979
keras/utils/visualize_util.py
nishank974/Keras
2
2023324
import itertools

from keras.layers.containers import Graph, Sequential
from keras.layers.core import Merge

try:
    # pydot-ng is a fork of pydot that is better maintained
    import pydot_ng as pydot
except ImportError:
    # fall back on pydot if necessary
    import pydot

if not pydot.find_graphviz():
    raise RuntimeError("Failed to import pydot. You must install pydot"
                       " and graphviz for `pydotprint` to work.")


def layer_typename(layer):
    return type(layer).__module__ + "." + type(layer).__name__


def get_layer_to_name(model):
    """Returns a dict mapping layer to their name in the model"""
    if not isinstance(model, Graph):
        return {}
    else:
        node_to_name = itertools.chain(
            model.nodes.items(), model.inputs.items(), model.outputs.items()
        )
        return {v: k for k, v in node_to_name}


class ModelToDot(object):
    """
    This is a helper class which visits a keras model (Sequential or Graph)
    and returns a pydot.Graph representation.

    This is implemented as a class because we need to maintain various states.

    Use it as ```ModelToDot()(model)```

    Keras models can have an arbitrary number of inputs and outputs. A given
    layer can have multiple inputs but has a single output. We therefore
    explore the model by starting at its output and crawling "up" the tree.
    """

    def _pydot_node_for_layer(self, layer, label):
        """
        Returns the pydot.Node corresponding to the given layer.
        `label` specify the name of the layer (only used if the layer isn't
        yet associated with a pydot.Node)
        """
        # Check if this already exists (will be the case for nodes that
        # serve as input to more than one layer)
        if layer in self.layer_to_pydotnode:
            node = self.layer_to_pydotnode[layer]
        else:
            layer_id = 'layer%d' % self.idgen
            self.idgen += 1

            label = label + " (" + layer_typename(layer) + ")"

            if self.show_shape:
                # Build the label that will actually contain a table with the
                # input/output
                outputlabels = str(layer.output_shape)
                if hasattr(layer, 'input_shape'):
                    inputlabels = str(layer.input_shape)
                elif hasattr(layer, 'input_shapes'):
                    inputlabels = ', '.join(
                        [str(ishape) for ishape in layer.input_shapes])
                else:
                    inputlabels = ''
                label = "%s\n|{input:|output:}|{{%s}|{%s}}" % (
                    label, inputlabels, outputlabels)

            node = pydot.Node(layer_id, label=label)
            self.g.add_node(node)
            self.layer_to_pydotnode[layer] = node
        return node

    def _process_layer(self, layer, layer_to_name=None, connect_to=None):
        """
        Process a layer, adding its node to the graph and creating edges to
        its outputs.

        `connect_to` specify where the output of the current layer will be
        connected
        `layer_to_name` is a dict mapping layer to their name in the Graph
        model. Should be {} when processing a Sequential model
        """
        # The layer can be a container layer, in which case we can recurse
        is_graph = isinstance(layer, Graph)
        is_seq = isinstance(layer, Sequential)
        if self.recursive and (is_graph or is_seq):
            # We got a container layer, recursively transform it
            if is_graph:
                child_layers = layer.outputs.values()
            else:
                child_layers = [layer.layers[-1]]
            for l in child_layers:
                self._process_layer(l, layer_to_name=get_layer_to_name(layer),
                                    connect_to=connect_to)
        else:
            # This is a simple layer.
            label = layer_to_name.get(layer, '')
            layer_node = self._pydot_node_for_layer(layer, label=label)

            if connect_to is not None:
                self.g.add_edge(pydot.Edge(layer_node, connect_to))

            # Proceed upwards to the parent(s). Only Merge layers have more
            # than one parent
            if isinstance(layer, Merge):  # Merge layer
                for l in layer.layers:
                    self._process_layer(l, layer_to_name,
                                        connect_to=layer_node)
            elif hasattr(layer, 'previous') and layer.previous is not None:
                self._process_layer(layer.previous, layer_to_name,
                                    connect_to=layer_node)

    def __call__(self, model, recursive=True, show_shape=False,
                 connect_to=None):
        self.idgen = 0
        # Maps keras layer to the pydot.Node representing them
        self.layer_to_pydotnode = {}
        self.recursive = recursive
        self.show_shape = show_shape

        self.g = pydot.Dot()
        self.g.set('rankdir', 'TB')
        self.g.set('concentrate', True)
        self.g.set_node_defaults(shape='record')

        if hasattr(model, 'outputs'):
            # Graph
            for name, l in model.outputs.items():
                self._process_layer(l, get_layer_to_name(model),
                                    connect_to=connect_to)
        else:
            # Sequential container
            self._process_layer(model.layers[-1], {}, connect_to=connect_to)
        return self.g


def to_graph(model, **kwargs):
    """
    `recursive` controls whether we recursively explore container layers
    `show_shape` controls whether the shape is shown in the graph
    """
    return ModelToDot()(model, **kwargs)


def plot(model, to_file='model.png', **kwargs):
    graph = to_graph(model, **kwargs)
    graph.write_png(to_file)
5,854
453-Minimum_Moves_to_Equal_Array_Elements.py
QuenLo/leecode
6
2022886
from typing import List


class Solution:
    def minMoves(self, nums: List[int]) -> int:
        # Incrementing n-1 elements is equivalent to decrementing one element,
        # so the answer is sum(nums) - len(nums) * min(nums).
        return sum(nums) - len(nums) * min(nums)


class SolutionII:
    def minMoves(self, nums: List[int]) -> int:
        minin = float('inf')
        time = 0
        for num in nums:
            time += num
            minin = min(minin, num)
        return time - len(nums) * minin
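A quick sanity check of the identity above (values worked out by hand, not part of the original solution):

# [1, 2, 3]: increment the two smallest each move:
# [1,2,3] -> [2,3,3] -> [3,4,3] -> [4,4,4], i.e. 3 moves,
# and sum(nums) - len(nums) * min(nums) = 6 - 3*1 = 3.
assert Solution().minMoves([1, 2, 3]) == 3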
366
dataStructures/exercises/stacks.py
Ry4nW/python-wars
1
2024029
from collections import deque


class Stack():
    def __init__(self, items: 'list[any]', maxsize) -> None:
        self.items: deque = deque(items)
        self.maxsize: int = maxsize
        self.top: int = self.get_top()

    def get_stack(self) -> deque:
        return self.items

    def push(self, item) -> None or str:
        if len(self.items) < self.maxsize:
            self.items.append(item)
            self.top = item
        else:
            return 'Max capacity reached.'

    def pop(self) -> any:
        if self.items:
            popped = self.items.pop()
            self.top = self.get_top()
            return popped
        return 'Stack is empty.'

    def is_empty(self) -> bool:
        return not self.items

    def get_top(self) -> any:
        if not self.is_empty():
            return self.items[-1]
        return 'Stack is empty.'


stack = Stack([1, 2, 3, 4, 5], 6)
print(stack.top)
print(stack.push(6))
print(stack.top)
print(stack.pop())
print(stack.top)
1,054
socless/models.py
A-Gray-Cat/socless_python
4
2022933
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class EventTableItem:
    id: str
    investigation_id: str
    status_: str
    is_duplicate: bool
    created_at: str
    event_type: str
    playbook: Optional[str]
    details: dict
    data_types: dict
    event_meta: dict


@dataclass
class DedupTableItem:
    current_investigation_id: str
    dedup_hash: str


@dataclass
class MessageResponsesTableItem:
    message_id: str    # PK : callback id for message responses
    await_token: str   # used to start next step in step_functions
    receiver: str      # step_functions step name
    fulfilled: bool    # has await_token been used
    message: str       # message sent to user while waiting for their response
    execution_id: str
    investigation_id: str
    datetime: str


@dataclass
class PlaybookArtifacts:
    event: EventTableItem
    execution_id: str


@dataclass
class PlaybookInput:
    execution_id: str
    artifacts: PlaybookArtifacts
    results: dict
    errors: dict
1,018
cypherpunkpay/net/tor_client/base_tor_circuits.py
prusnak/CypherpunkPay
44
2023988
from abc import abstractmethod


class BaseTorCircuits(object):
    # for requests where linkability of actions does not matter
    # (merchant callbacks, price tickers, blockchain height, etc)
    SHARED_CIRCUIT_ID = 'shared_circuit'

    # for requests where the target is in the local network or Tor cannot be used for other reasons
    SKIP_TOR = 'skip_tor'

    @abstractmethod
    def mark_as_broken(self, label):
        pass

    @abstractmethod
    def get_for(self, privacy_context):
        pass

    @abstractmethod
    def close(self):
        pass
549
src/Python/1-100/88.MergeArray.py
Peefy/PeefyLeetCode
2
2023594
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        nums = []
        for i in range(m):
            nums.append(nums1[i])
        i = 0
        j = 0
        index = 0
        while i < m or j < n:
            if i == m:
                for k in range(j, n):
                    nums1[index] = nums2[k]
                    index += 1
                break
            if j == n:
                for k in range(i, m):
                    nums1[index] = nums[k]
                    index += 1
                break
            if nums[i] < nums2[j]:
                nums1[index] = nums[i]
                i += 1
            else:
                nums1[index] = nums2[j]
                j += 1
            index += 1
        return nums1


if __name__ == '__main__':
    solution = Solution()
    print(solution.merge([1, 2, 3, 4, 0, 0, 0, 0, 0], 4, [2, 5, 6], 3))
1,092
Python/count-primes.py
ddyuewang/leetcode
4
2023859
# Time:  O(n)
# Space: O(n)

# Description:
#
# Count the number of prime numbers less than a non-negative number, n
#
# Hint: The number n could be in the order of 100,000 to 5,000,000.


class Solution:
    # @param {integer} n
    # @return {integer}
    def countPrimes(self, n):
        if n <= 2:
            return 0

        is_prime = [True] * n
        num = n // 2  # counts 2 plus the odd numbers 3, 5, ... below n
        for i in range(3, n, 2):
            if i * i >= n:
                break
            if not is_prime[i]:
                continue
            for j in range(i * i, n, 2 * i):
                if not is_prime[j]:
                    continue
                num -= 1
                is_prime[j] = False
        return num
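A quick check of the sieve above (values worked out by hand, not from the source): the primes below 10 are 2, 3, 5 and 7.

# the only odd composite below 10 is 9, which the sieve crosses off
assert Solution().countPrimes(10) == 4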
713
python3/check_array_formation_through_concatenation.py
joshiaj7/CodingChallenges
1
2023897
""" Space : O(n) Time : O(n) """ class Solution: def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool: d = {} n = len(arr) for x in pieces: if x[0] not in d: d[x[0]] = x i = 0 while i < n: if arr[i] in d: temp = d[arr[i]] for j in range(len(temp)): if temp[j] == arr[i]: i += 1 else: return False else: return False return True
597
models/__init__.py
marcoscale98/emojinet
0
2023214
from models.base_lstm_user import base_lstm_user
from models.base_lstm_cnn_user import base_lstm_cnn_user
from models.base_lstm_subword import base_lstm_subword
from models.ensemble_cnn_subword import ensemble_cnn_subword
from models.base_cnn import base_cnn
from models.base_lstm import base_lstm
from models.vdcnn import vdcnn


class ModelDefinition:
    def __init__(self, func, params):
        self.params = params
        self.func = func

    def apply(self, values: dict):
        return self.func(*[values[param] for param in self.params])


def get_model(model: str) -> ModelDefinition:
    models = {
        "base_cnn": ModelDefinition(base_cnn, ["vocabulary_size", "embedding_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        "base_lstm": ModelDefinition(base_lstm, ["vocabulary_size", "embedding_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        "base_lstm_user": ModelDefinition(base_lstm_user, ["vocabulary_size", "embedding_size", "history_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        # note: this entry originally pointed at base_lstm_user, although
        # base_lstm_cnn_user is imported above and is the function this key names
        "base_lstm_cnn_user": ModelDefinition(base_lstm_cnn_user, ["vocabulary_size", "embedding_size", "history_size", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        "base_lstm_subword": ModelDefinition(base_lstm_subword, ["vocabulary_size", "embedding_size", "max_char_length", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        "ensemble_cnn_subword": ModelDefinition(ensemble_cnn_subword, ["vocabulary_size", "embedding_size", "max_char_length", "max_seq_length", "embedding_matrix", "y_dictionary"]),
        "vdcnn": ModelDefinition(vdcnn, ["num_classes", "depth", "sequence_length", "shortcut", "pool_type", "sorted", "use_bias"]),
    }
    return models[model]
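A minimal sketch of how ModelDefinition.apply wires named values into the model factory; the values below are hypothetical placeholders, not from the repository:

# definition = get_model("base_cnn")
# model = definition.apply({
#     "vocabulary_size": 10000,          # hypothetical
#     "embedding_size": 300,             # hypothetical
#     "max_seq_length": 50,              # hypothetical
#     "embedding_matrix": embedding_matrix,  # assumed precomputed elsewhere
#     "y_dictionary": y_dictionary,          # assumed precomputed elsewhere
# })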
1,759
JUPYTER/Supervised/Feature Engineering/generate_dataset.py
Reynolds534/IASS_18_ML
1
2023626
import numpy as np


def generate_dataset(n_features):
    if n_features < 6:
        print('Please enter a number of features strictly bigger than 5')
        return None, None

    target = np.random.uniform(0, 10, 100)

    X1 = target**2 - target + np.random.uniform(0, 25, 100)
    X2 = target + np.random.uniform(0, 15, 100)
    X3 = target + target**2 + np.random.uniform(0, 50, 100)
    X4 = X3 + np.random.uniform(0, 5, 100)
    X5 = X1 + X2 + X3

    random_state = np.random.RandomState(0)
    X = np.array([X1, X2, X3, X4, X5]).T
    X = np.c_[X, random_state.randn(100, (n_features - 5))]
    Z = X[:, np.random.permutation(X.shape[1])]
    return Z, target
651
mayan/apps/mayan_statistics/dependencies.py
Syunkolee9891/Mayan-EDMS
1
2023680
from __future__ import unicode_literals

from mayan.apps.dependencies.classes import JavaScriptDependency

JavaScriptDependency(
    module=__name__, name='chart.js', static_folder='statistics',
    version_string='=2.7.2'
)
225
config.py
mottenhoff/ReMarkable_Zotero_sync
1
2023380
def config():
    return {
        # Zotero
        "path_to_local_zotero_storage": "<path to your local zotero storage>",

        # ReMarkable
        # Get the authentication code from https://my.remarkable.com/connect/desktop
        # The auth code is only necessary on the first run; you can remove the
        # code afterwards.
        "reMarkable_auth_code": "",

        # If you want to sync to a folder called papers at ./papers on your
        # reMarkable, then use only "papers" as reMarkable_folder_name
        "reMarkable_folder_name": "",

        # Monitor
        "check_log_every_n_minutes": 5,
        "wait_for_n_seconds_idle": 60,
    }
693
tests/unittests/http_functions/no_return/main.py
gohar94/azure-functions-python-worker
277
2023972
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import logging

logger = logging.getLogger('test')


def main(req):
    logger.error('hi')
186
src/gui/window.py
Aldeshov/ADBFileExplorer
12
2023530
# ADB File Explorer `tool`
# Copyright (C) 2022 <NAME> <EMAIL>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QInputDialog, QMenuBar, QMessageBox

from core.configurations import Resources
from core.main import Adb
from core.managers import Global
from data.models import MessageData, MessageType
from data.repositories import DeviceRepository
from gui.explorer import MainExplorer
from gui.help import About
from gui.others.notification import NotificationCenter
from helpers.tools import AsyncRepositoryWorker


class MenuBar(QMenuBar):
    CONNECT_WORKER_ID = 100
    DISCONNECT_WORKER_ID = 101

    def __init__(self, parent):
        super(MenuBar, self).__init__(parent)

        self.about = About()
        self.file_menu = self.addMenu('&File')
        self.help_menu = self.addMenu('&Help')

        connect_action = QAction(QIcon(Resources.icon_link), '&Connect', self)
        connect_action.setShortcut('Alt+C')
        connect_action.triggered.connect(self.connect_device)
        self.file_menu.addAction(connect_action)

        disconnect_action = QAction(QIcon(Resources.icon_no_link), '&Disconnect', self)
        disconnect_action.setShortcut('Alt+X')
        disconnect_action.triggered.connect(self.disconnect)
        self.file_menu.addAction(disconnect_action)

        devices_action = QAction(QIcon(Resources.icon_phone), '&Show devices', self)
        devices_action.setShortcut('Alt+D')
        devices_action.triggered.connect(Global().communicate.devices.emit)
        self.file_menu.addAction(devices_action)

        exit_action = QAction('&Exit', self)
        exit_action.setShortcut('Alt+Q')
        exit_action.triggered.connect(qApp.quit)
        self.file_menu.addAction(exit_action)

        about_action = QAction('About', self)
        about_action.triggered.connect(self.about.show)
        self.help_menu.addAction(about_action)

    def disconnect(self):
        worker = AsyncRepositoryWorker(
            worker_id=self.DISCONNECT_WORKER_ID,
            name="Disconnecting",
            repository_method=DeviceRepository.disconnect,
            response_callback=self.__async_response_disconnect,
            arguments=()
        )
        if Adb.worker().work(worker):
            Global().communicate.notification.emit(
                MessageData(
                    title='Disconnect',
                    body="Disconnecting from devices, please wait",
                    message_type=MessageType.LOADING_MESSAGE,
                    message_catcher=worker.set_loading_widget
                )
            )
            Global().communicate.status_bar.emit(f'Operation: {worker.name}... Please wait.', 3000)
            worker.start()

    def connect_device(self):
        text, ok = QInputDialog.getText(self, 'Connect Device', 'Enter device IP:')
        Global().communicate.status_bar.emit('Operation: Connecting canceled.', 3000)

        if ok and text:
            worker = AsyncRepositoryWorker(
                worker_id=self.CONNECT_WORKER_ID,
                name="Connecting to device",
                repository_method=DeviceRepository.connect,
                arguments=(str(text),),
                response_callback=self.__async_response_connect
            )
            if Adb.worker().work(worker):
                Global().communicate.notification.emit(
                    MessageData(
                        title='Connect',
                        body="Connecting to device via IP, please wait",
                        message_type=MessageType.LOADING_MESSAGE,
                        message_catcher=worker.set_loading_widget
                    )
                )
                Global().communicate.status_bar.emit(f'Operation: {worker.name}... Please wait.', 3000)
                worker.start()

    @staticmethod
    def __async_response_disconnect(data, error):
        if data:
            Global().communicate.devices.emit()
            Global().communicate.notification.emit(
                MessageData(
                    title="Disconnect",
                    timeout=15000,
                    body=data
                )
            )
        if error:
            Global().communicate.devices.emit()
            Global().communicate.notification.emit(
                MessageData(
                    timeout=15000,
                    title="Disconnect",
                    body=f"<span style='color: red; font-weight: 600'>{error}</span>"
                )
            )
        Global().communicate.status_bar.emit('Operation: Disconnecting finished.', 3000)

    @staticmethod
    def __async_response_connect(data, error):
        if data:
            if Adb.CORE == Adb.PYTHON_ADB_SHELL:
                Global().communicate.files.emit()
            elif Adb.CORE == Adb.EXTERNAL_TOOL_ADB:
                Global().communicate.devices.emit()
            Global().communicate.notification.emit(
                MessageData(title="Connecting to device", timeout=15000, body=data)
            )
        if error:
            Global().communicate.devices.emit()
            Global().communicate.notification.emit(
                MessageData(
                    timeout=15000,
                    title="Connect to device",
                    body=f"<span style='color: red; font-weight: 600'>{error}</span>"
                )
            )
        Global().communicate.status_bar.emit('Operation: Connecting to device finished.', 3000)


class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setMenuBar(MenuBar(self))
        self.setCentralWidget(MainExplorer(self))
        self.resize(640, 480)
        self.setMinimumSize(480, 360)
        self.setWindowTitle('ADB File Explorer')
        self.setWindowIcon(QIcon(Resources.icon_logo))

        # Show Devices Widget
        Global().communicate.devices.emit()

        # Connect to Global class to use it anywhere
        Global().communicate.status_bar.connect(self.statusBar().showMessage)

        # Important to add last to stay on top!
        self.notification_center = NotificationCenter(self)
        Global().communicate.notification.connect(self.notify)

        # Welcome notification texts
        welcome_title = "Welcome to ADBFileExplorer!"
        welcome_body = f"Here you can see the list of your connected adb devices. Click one of them to see files.<br/>" \
                       f"Current selected core: <strong>{Adb.current_core()}</strong><br/>" \
                       f"To change it <code style='color: blue'>adb.set_core()</code> in <code>app.py</code>"

        Global().communicate.status_bar.emit('Ready', 5000)
        Global().communicate.notification.emit(MessageData(title=welcome_title, body=welcome_body, timeout=30000))

    def notify(self, data: MessageData):
        message = self.notification_center.append_notification(
            title=data.title,
            body=data.body,
            timeout=data.timeout,
            message_type=data.message_type
        )
        if data.message_catcher:
            data.message_catcher(message)

    def closeEvent(self, event):
        if Adb.CORE == Adb.EXTERNAL_TOOL_ADB:
            reply = QMessageBox.question(
                self, 'ADB Server', "Do you want to kill adb server?",
                QMessageBox.Yes | QMessageBox.No, QMessageBox.No
            )
            if reply == QMessageBox.Yes:
                Adb.stop()
        elif Adb.CORE == Adb.PYTHON_ADB_SHELL:
            Adb.stop()
        event.accept()

    # This helps the toast maintain the place after window get resized
    def resizeEvent(self, e):
        if self.notification_center:
            self.notification_center.update_position()
        return super().resizeEvent(e)
8,416
adv/mega_man.py
XenoXilus/dl
0
2023635
import sys

from core.advbase import *
from module.bleed import Bleed, mBleed
from module.x_alt import X_alt


def module():
    return Mega_Man


class Skill_Ammo(Skill):
    def __init__(self, name=None, acts=None):
        super().__init__(name, acts)
        self.c_ammo = 0

    @property
    def ammo(self):
        return self.ac.conf.ammo

    @property
    def cost(self):
        return self.ac.conf.cost

    def check(self):
        if self._static.silence == 1:
            return False
        return self.c_ammo >= self.cost

    @allow_acl
    def check_full(self):
        if self._static.silence == 1:
            return False
        return self.c_ammo >= self.ammo

    def charge_ammo(self, ammo):
        self.c_ammo = min(self.ammo, self.c_ammo + ammo)


class Mega_Man(Adv):
    comment = '16 hits leaf shield (max 32 hits)'
    conf = {}
    conf['slots.d'] = 'Gala_Mars'
    conf['slots.a'] = ['Primal_Crisis', 'Levins_Champion']
    conf['acl'] = """
        `dragon, s=4
        `s3, not buff(s3)
        `s4
        if bleed_stack >= 3
            `s2, c_x(metalblade) or c_x(default)
            `s1, c_x(metalblade)
        else
            `s1, c_x(default) and s1.check_full()
        end
    """
    conf['coabs'] = ['Blade', 'Marth', 'Dagger2']
    conf['share'] = ['Karl']

    # conf['dragonform'] = {
    #     'act': 'c5-s',
    #     'dx1.dmg': 1.20,
    #     'dx1.startup': 10 / 60.0,  # c1 frames
    #     'dx1.hit': 3,
    #     'dx2.dmg': 1.20,
    #     'dx2.startup': 13 / 60.0 - 0.03333,  # = 0.18333666666666666667, c2 frames
    #     'dx2.hit': 3,
    #     'dx3.dmg': 1.20,
    #     'dx3.startup': 14 / 60.0 - 0.03333,  # = 0.20000333333333333333, c3 frames
    #     'dx3.hit': 3,
    #     'dx4.dmg': 1.20,
    #     'dx4.startup': 14 / 60.0,  # c4 frames
    #     'dx4.hit': 3,
    #     'dx5.dmg': 1.20,
    #     'dx5.startup': 14 / 60.0,  # c5 frames
    #     'dx5.recovery': 23 / 60.0,  # recovery
    #     'dx5.hit': 3,
    #     'ds.dmg': 6.00,
    #     'ds.recovery': 113 / 60,  # skill frames
    #     'ds.hit': 5,
    #     'dodge.startup': 45 / 60.0,  # dodge frames
    # }

    # def ds_proc(self):
    #     return self.dmg_make('ds', self.dragonform.conf.ds.dmg, 's')

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.a_s_dict['s1'] = Skill_Ammo('s1')
        self.a_s_dict['s2'] = Skill_Ammo('s2')

    def prerun(self):
        self.leaf = 2  # number of hits per leaf rotation
        self.s1.charge_ammo(2000)
        self.s2.charge_ammo(4000)

    @property
    def skills(self):
        return self.s3, self.s4

    def hitattr_make(self, name, base, group, aseq, attr, onhit=None):
        ammo = attr.get('ammo', 0)
        if ammo > 0:
            for s in (self.s1, self.s2):
                s.charge_ammo(ammo)
        elif ammo < 0:
            s = self.s1 if group == 'metalblade' else self.s2
            s.charge_ammo(ammo)
            if s.c_ammo <= 0:
                self.current_x = 'default'
        if ammo != 0:
            log('ammo', name, ammo, ' '.join(f'{s.c_ammo}/{s.ammo}' for s in (self.s1, self.s2)))
        super().hitattr_make(name, base, group, aseq, attr, onhit=None)

    def s1_proc(self, e):
        if self.current_x != 'metalblade':
            self.current_x = 'metalblade'
        else:
            self.current_x = 'default'

    def s2_proc(self, e):
        if self.current_x != 'leafshield':
            self.current_x = 'leafshield'
        else:
            self.current_x = 'default'


if __name__ == '__main__':
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
3,623
votes/frontend.py
estan/votes
1
2022742
from argparse import ArgumentParser
from sys import argv

from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.wamp.types import SessionDetails
from autobahn.wamp.types import CloseDetails
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow
import qt5reactor

from votes.ui.votes_window_ui import Ui_VotesWindow


class VotesSession(QObject, ApplicationSession):
    """Votes WAMP application session.

    Simply bridges the Autobahn join and leave signals to Qt signals.
    """

    joinedSession = pyqtSignal(SessionDetails)
    leftSession = pyqtSignal(CloseDetails)

    def __init__(self, config=None, parent=None):
        QObject.__init__(self, parent)
        ApplicationSession.__init__(self, config)

    def onJoin(self, details):
        self.joinedSession.emit(details)

    def onLeave(self, details):
        self.leftSession.emit(details)


class VotesWindow(QMainWindow, Ui_VotesWindow):
    """Main window of the votes demo."""

    closed = pyqtSignal()  # Emitted when window is closed.

    def __init__(self, url, realm, parent=None):
        super(VotesWindow, self).__init__(parent)
        self.setupUi(self)
        self.url = url
        self.realm = realm
        self.session = None
        self.votes = {
            'Banana': self.bananaVotes,
            'Chocolate': self.chocolateVotes,
            'Lemon': self.lemonVotes
        }

        # Factory method for ApplicationRunner.run(..)
        def make(config):
            self.session = VotesSession(config)
            self.session.joinedSession.connect(self.onJoinedSession)
            self.session.leftSession.connect(self.onLeftSession)
            return self.session

        runner = ApplicationRunner(url, realm)
        runner.run(make, start_reactor=False)

    def onJoinedSession(self):
        self.setEnabled(True)
        self.session.subscribe(self.onVoteMessage,
                               u'io.crossbar.demo.vote.onvote')
        self.session.subscribe(self.onResetMessage,
                               u'io.crossbar.demo.vote.onreset')
        self.statusBar().showMessage('Connected to realm {} at {}'
                                     .format(self.realm, self.url))

    def onLeftSession(self):
        print('leave')

    def onVoteMessage(self, result):
        self.votes[result[u'subject']].setText(str(result[u'votes']))

    def onResetMessage(self):
        self.bananaVotes.setText('0')
        self.chocolateVotes.setText('0')
        self.lemonVotes.setText('0')

    def closeEvent(self, event):
        self.session.leave()
        self.closed.emit()
        event.accept()

    @pyqtSlot()
    def on_resetButton_clicked(self):
        self.session.call(u'io.crossbar.demo.vote.reset')

    @pyqtSlot()
    def on_bananaButton_clicked(self):
        self.session.call(u'io.crossbar.demo.vote.vote', 'Banana')

    @pyqtSlot()
    def on_chocolateButton_clicked(self):
        self.session.call(u'io.crossbar.demo.vote.vote', 'Chocolate')

    @pyqtSlot()
    def on_lemonButton_clicked(self):
        self.session.call(u'io.crossbar.demo.vote.vote', 'Lemon')


def main():
    parser = ArgumentParser(description='PyQt version of Crossbar Gauges demo.')
    # `type=unicode` in the original is a Python 2 remnant; str is the
    # Python 3 equivalent
    parser.add_argument('--url', type=str, default=u'ws://127.0.0.1:8080/ws',
                        metavar='<url>',
                        help='WAMP router URL (default: ws://127.0.0.1:8080/ws).')
    args = parser.parse_args()

    app = QApplication(argv)
    qt5reactor.install()

    from twisted.internet import reactor

    def quit():
        if reactor.threadpool is not None:
            reactor.threadpool.stop()
        app.quit()

    window = VotesWindow(args.url, u'crossbardemo')
    window.closed.connect(quit)
    window.show()

    reactor.run()


if __name__ == '__main__':
    main()
3,991
tests/test_ethosdistro_py.py
CoryKrol/ethosdistro_py
0
2023909
import json

import aiohttp
import pytest
from aioresponses import aioresponses

from ethosdistro_py import EthosAPI

NOT_ALL_KEYS_PRESENT = "All keys should be in the response"
CONTENT_HEADERS = {"Content-Type": "text/html"}


@pytest.mark.asyncio
async def test_get_panel(get_panel_keys, get_panel_response):
    """Tests an API call to get block count data for a panel_id"""
    session = aiohttp.ClientSession()
    ethosapi = EthosAPI(session=session)

    assert ethosapi.panel_id_set() is True

    with aioresponses() as m:
        m.get(
            "http://test.ethosdistro.com/?json=yes",
            status=200,
            body=json.dumps(get_panel_response),
            headers=CONTENT_HEADERS,
        )
        result = await ethosapi.async_get_panel()

    assert isinstance(result, dict)
    assert set(get_panel_keys).issubset(result.keys()), NOT_ALL_KEYS_PRESENT
    await session.close()
916
class1/exercise10.py
SamerLabban/Network_Automation_Course
1
2022688
from ciscoconfparse import CiscoConfParse

# open the cisco file and store it in a variable
cisco_cfg = CiscoConfParse("cisco_ipsec.txt")

# search for any line in our confparse object (cisco_cfg) that begins with
# the words "crypto map CRYPTO" and has no child line containing "AES"
intf = cisco_cfg.find_objects_wo_child(parentspec=r"^crypto map CRYPTO",
                                       childspec=r"AES")

# Get all children (children and grandchildren) elements
for i in intf:
    print(i)
    for child in i.all_children:
        print(child.text)
    print("\n")
479
VirSecCon 2020/old_monitor/rsa.py
0xShad3/cybersec-writeups
10
2022685
import gmpy

# Hastad's broadcast attack: the same message was encrypted with e = 3 under
# three different moduli, so CRT-combining the ciphertexts gives m**3 over
# the integers, and an exact integer cube root recovers m.
e = 3
n1 = 7156756869076785933541721538001332468058823716463367176522928415602207483494410804148006276542112924303341451770810669016327730854877940615498537882480613
n2 = 11836621785229749981615163446485056779734671669107550651518896061047640407932488359788368655821120768954153926193557467079978964149306743349885823110789383
n3 = 7860042756393802290666610238184735974292004010562137537294207072770895340863879606654646472733984175066809691749398560891393841950513254137326295011918329
c1 = 816151508695124692025633485671582530587173533405103918082547285368266333808269829205740958345854863854731967136976590635352281190694769505260562565301138
c2 = 8998140232866629819387815907247927277743959734393727442896220493056828525538465067439667506161727590154084150282859497318610746474659806170461730118307571
c3 = 3488305941609131204120284497226034329328885177230154449259214328225710811259179072441462596230940261693534332200171468394304414412261146069175272094960414

N = n1 * n2 * n3
N1 = N // n1
N2 = N // n2
N3 = N // n3
u1 = gmpy.invert(N1, n1)
u2 = gmpy.invert(N2, n2)
u3 = gmpy.invert(N3, n3)
M = (c1 * u1 * N1 + c2 * u2 * N2 + c3 * u3 * N3) % N
m = gmpy.root(M, e)[0]
# the original was Python 2 (`.decode("hex")`); bytes.fromhex is the
# Python 3 equivalent
print(bytes.fromhex(hex(m)[2:]).decode())
1,216
main.py
PaTinLei/MHFC-FSL
0
2022825
import math
import os
from copy import deepcopy

import numpy as np
import scipy.io as scio
import scipy.sparse
import sklearn
import torch
import torch.nn as nn
from scipy.linalg import svd
from sklearn.linear_model import ElasticNet
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import config
from models.HyperG import HyperG
from datasets import CategoriesSampler, DataSet
from utils import get_embedding, mean_confidence_interval, setup_seed


def initial_embed(reduce, d):
    reduce = reduce.lower()
    assert reduce in ['isomap', 'itsa', 'mds', 'lle', 'se', 'pca', 'none']
    if reduce == 'isomap':
        from sklearn.manifold import Isomap
        embed = Isomap(n_components=d)
    elif reduce == 'itsa':
        from sklearn.manifold import LocallyLinearEmbedding
        embed = LocallyLinearEmbedding(n_components=d, n_neighbors=5, method='ltsa')
    elif reduce == 'mds':
        from sklearn.manifold import MDS
        embed = MDS(n_components=d, metric=False)
    elif reduce == 'lle':
        from sklearn.manifold import LocallyLinearEmbedding
        embed = LocallyLinearEmbedding(n_components=d, n_neighbors=5, eigen_solver='dense')
    elif reduce == 'se':
        from sklearn.manifold import SpectralEmbedding
        embed = SpectralEmbedding(n_components=d)
    elif reduce == 'pca':
        from sklearn.decomposition import PCA
        embed = PCA(n_components=d, random_state=0)
    return embed


def test(args):
    setup_seed(23)
    import warnings
    warnings.filterwarnings('ignore')
    if args.dataset == 'miniimagenet':
        num_classes = 64
    elif args.dataset == 'tieredimagenet':
        num_classes = 351
    elif args.dataset == 'cifar':
        num_classes = 64
    elif args.dataset == 'fc100':
        num_classes = 60

    if args.resume is not None:
        from models.resnet12 import resnet12
        model = resnet12(num_classes).to(args.device)
        state_dict = torch.load(args.resume)
        model.load_state_dict(state_dict)

        from models.r_resnet12 import r_resnet12
        r_model = r_resnet12(num_classes).to(args.device)
        r_state_dict = torch.load(args.r_resume)
        r_model.load_state_dict(r_state_dict)

    model.to(args.device)
    model.eval()
    r_model.to(args.device)
    r_model.eval()

    if args.dataset == 'miniimagenet':
        data_root = os.path.join(args.folder, '/home/wfliu/xdd_xr/LaplacianShot-master-org/LaplacianShot-master/data/')
    elif args.dataset == 'tieredimagenet':
        data_root = '/home/tieredimagenet'
    elif args.dataset == 'cifar':
        data_root = '/home/cifar'
    elif args.dataset == 'fc100':
        data_root = '/home/fc100'
    else:
        print("error!!!!!!!!!!")

    hyperG = HyperG(num_class=args.num_test_ways, step=args.step,
                    reduce=args.embed, d=args.dim)

    dataset = DataSet(data_root, 'test', args.img_size)
    sampler = CategoriesSampler(dataset.label, args.num_batches,
                                args.num_test_ways,
                                (args.num_shots, 15, args.unlabel))
    testloader = DataLoader(dataset, batch_sampler=sampler,
                            shuffle=False, num_workers=0, pin_memory=True)
    k = args.num_shots * args.num_test_ways
    loader = tqdm(testloader, ncols=0)
    if args.unlabel == 0:
        iterations = 22
    else:
        iterations = args.unlabel + 2 + 5
    acc_list = [[] for _ in range(iterations)]
    acc_list_task = [[] for _ in range(iterations)]
    acc_list_softmax = [[] for _ in range(iterations)]
    for data, indicator in loader:
        targets = torch.arange(args.num_test_ways).repeat(args.num_shots + 15 + args.unlabel).long()[
            indicator[:args.num_test_ways * (args.num_shots + 15 + args.unlabel)] != 0]
        data = data[indicator != 0].to(args.device)
        data_r = get_embedding(r_model, data, args.device)
        data_x = get_embedding(model, data, args.device)

        if args.dim != 512:
            if args.unlabel != 0:
                data_train1 = np.concatenate(
                    (data_r[:k],
                     data_r[k + 15 * args.num_test_ways:k + 15 * args.num_test_ways + args.unlabel * args.num_test_ways]),
                    axis=0)
                data_train2 = np.concatenate(
                    (data_x[:k],
                     data_x[k + 15 * args.num_test_ways:k + 15 * args.num_test_ways + args.unlabel * args.num_test_ways]),
                    axis=0)
                data_train = np.concatenate((data_train1, data_train2), axis=0)
                embed_data = initial_embed(args.embed, args.dim)
                embed_fit = embed_data.fit(data_train)
                data_r = embed_data.transform(data_r[:k + 15 * args.num_test_ways + args.unlabel * args.num_test_ways])
                data_x = embed_data.transform(data_x[:k + 15 * args.num_test_ways + args.unlabel * args.num_test_ways])
            else:
                data_train1 = np.concatenate((data_r[:k], data_r[k:k + 15 * args.num_test_ways]), axis=0)
                data_train2 = np.concatenate((data_x[:k], data_x[k:k + 15 * args.num_test_ways]), axis=0)
                data_train = np.concatenate((data_train1, data_train2), axis=0)
                embed_data = initial_embed(args.embed, args.dim)
                embed_fit = embed_data.fit(data_train)
                data_r = embed_data.transform(data_train1)
                data_x = embed_data.transform(data_train2)

        data_r_concat = np.concatenate((data_r, data_x), axis=1)
        train_targets = targets[:k]
        test_targets = targets[k:k + 15 * args.num_test_ways]
        train_embeddings_task = data_r_concat[:k]
        test_embeddings_task = data_r_concat[k:k + 15 * args.num_test_ways]
        if args.unlabel != 0:
            unlabel_embeddings_task = data_r_concat[k + 15 * args.num_test_ways:k + 15 * args.num_test_ways + args.unlabel * args.num_test_ways]
        else:
            unlabel_embeddings_task = None

        hyperG.fit(train_embeddings_task, train_targets)
        acc = hyperG.predict(test_embeddings_task, unlabel_embeddings_task,
                             True, test_targets, args.eta)
        for i in range(len(acc)):
            acc_list[i].append(acc[i])
    cal_accuracy(acc_list)


def cal_accuracy(acc_list_task):
    mean_list_task = []
    ci_list_task = []
    for item in acc_list_task:
        mean, ci = mean_confidence_interval(item)
        mean_list_task.append(mean)
        ci_list_task.append(ci)
    print("Test Acc Mean_task{}".format(
        ' '.join([str(i * 100)[:6] for i in mean_list_task])))
    print("Test Acc ci_task{}".format(
        ' '.join([str(i * 100)[:6] for i in ci_list_task])))


def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(args)
    if args.mode == 'test':
        test(args)
    else:
        raise NameError


if __name__ == '__main__':
    args = config()
    main(args)
6,884
datatoaster/datatoaster.py
abc612008/datatoaster
0
2023483
import collections

""" constants """
XValue = lambda _: ""
Single = lambda _: ""


class DataSet:
    def NumberOfAppearance(self, key_function):
        self.number_of_appearance = True
        if key_function is XValue:
            self.single_dict = True

        def yfunc(li):
            os_list = {}
            for i in li:
                key = key_function(i)
                os_list[key] = os_list.get(key, 0) + 1
            return os_list
        return yfunc

    def Percentage(self, key_function):
        self.percentage = True
        if key_function is XValue:
            self.single_dict = True

        def yfunc(li):
            os_list = {}
            for i in li:
                key = key_function(i)
                os_list[key] = os_list.get(key, 0) + 1
            return os_list
        return yfunc

    def PercentageWithinGroup(self, key_function):
        self.percentage_within_group = True
        if key_function is XValue:
            self.single_dict = True

        def yfunc(li):
            os_list = {}
            for i in li:
                key = key_function(i)
                os_list[key] = os_list.get(key, 0) + 1
            return os_list
        return yfunc

    def __init__(self, raw_data):
        self.raw_data = raw_data
        self.x_function = None
        self.y_function = None
        self.number_of_appearance = False
        self.percentage = False
        self.percentage_within_group = False
        self.single_dict = False
        self.constraints = []
        self.pre_constraints = []
        self.single = False
        self.order_key = None

    def set_x(self, func):
        if not callable(func):
            raise ValueError("Expect the argument to be a function.")
        self.x_function = func
        return self

    def set_y(self, param):
        if not callable(param):
            raise ValueError("Expect the argument to be a function.")
        self.y_function = param
        return self

    def add_constraint(self, constraint, is_pre=False):
        if not callable(constraint):
            raise ValueError("Expect the argument to be a function.")
        if is_pre:
            self.pre_constraints.append(constraint)
        else:
            self.constraints.append(constraint)
        return self

    def set_single(self, param):
        self.single = param
        return self

    def ordered_by(self, order_key):
        if not callable(order_key):
            raise ValueError("Expect the argument to be a function.")
        self.order_key = order_key
        return self

    def get_result(self):
        def process_result(result):
            if self.single_dict:
                for key in result.keys():
                    result[key] = result[key][""]
            if self.single:
                if len(result) != 1:
                    raise ValueError("Single mode set while there are more than one result. "
                                     "Results: " + str(result))
                return next(iter(result.values()))
            else:
                if self.order_key is not None:
                    return collections.OrderedDict(sorted(result.items(), key=self.order_key))
                else:
                    return result

        if self.x_function is None:  # x_function should not be None
            raise ValueError("set_x not called when calling get_result")

        filtered_data = []  # data that passed all constraints
        number_of_valid_data = 0  # save the total unfiltered number for percentage
        all_appearance = {}  # save the unfiltered number per group for percentage_within_group

        for item in self.raw_data:
            pass_constraints = True
            for pre_constraint in self.pre_constraints:  # pre constraints
                if not pre_constraint(item):
                    pass_constraints = False
                    break
            if not pass_constraints:
                continue
            number_of_valid_data += 1

            for constraint in self.constraints:  # constraints
                if not constraint(item):
                    pass_constraints = False
                    break
            if pass_constraints:
                filtered_data.append(item)
            if self.percentage_within_group:  # for percentage within group
                key = self.x_function(item)
                all_appearance[key] = all_appearance.get(key, 0) + 1

        # handle y_function
        if self.y_function:
            values = {}
            for item in filtered_data:
                key = self.x_function(item)
                if key in values:
                    values[key].append(item)
                else:
                    values[key] = [item]
            for key, value in values.items():
                values[key] = self.y_function(value)
                if self.percentage:
                    for k in values[key].keys():
                        values[key][k] /= number_of_valid_data
                elif self.percentage_within_group:
                    for k in values[key].keys():
                        values[key][k] /= all_appearance[key]
            return process_result(values)
        raise ValueError("set_y not called when calling get_result")
5,289
requests/requests_user_agent.py
BoogalooLi/python_spiders
1
2024020
import requests

# Define the request URL
# url = 'https://www.lmonkey.com'
url = 'https://www.xicidaili.com/nn'

# Define the request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'
}

# Send the GET request
res = requests.get(url=url, headers=headers)

# Get the response status code
code = res.status_code
print(code)  # 503 means the server refused the request

# On success, write the response body to a file
if code == 200:
    with open('./test.html', 'w') as fp:
        fp.write(res.text)
438
dkr-py310/docker-student-portal-310/course_files/begin_advanced/py_sql_1.py
pbarton666/virtual_classroom
0
2023927
# py_sql_1.py
# import the python/sqlite3 connector;
# ...there are others for postgresql, mysql, etc.
import sqlite3

# create a connection object (on other RMDBs you'd provide credentials, too)
conn = sqlite3.connect('mydb')

# creates a cursor object
curs = conn.cursor()

# SQL is case-insensitive, but most people use CAPS for keywords
# Here, we get rid of the table 'dogs' (IF EXISTS prevents a crash)
cmd = "DROP TABLE IF EXISTS dogs"
curs.execute(cmd)  # this runs the SQL command
conn.commit()      # ... and this locks in the changes

# Build a new table's metadata (framework)
cmd = """CREATE TABLE dogs
         (name CHAR(10), toy CHAR(10), weight INT(4))"""
print(cmd)
curs.execute(cmd)

# add a row
cmd = "INSERT INTO dogs ('name', 'toy', 'weight') VALUES (?, ?, ?)"
vals = ('Fang', 'bone', 90)
curs.execute(cmd, vals)

# get some results
cmd = "SELECT * from {}".format('dogs')
print(cmd)
curs.execute(cmd)
result = curs.fetchall()
print(result)

# ... and print them out (if there are any)
if result:
    print("congrats, you've got some dawgs")
    for row in result:
        name, toy, weight = row
        print(name, toy, weight)

# Here's an alternative way to insert rows
curs.executemany('INSERT INTO dogs VALUES(?,?,?)',
                 [('Biscuit', 'towel', '70'),
                  ('Snoopy', 'squirrel', '60')
                  ])

# It may make sense to create names for the table and its columns
cols = ('name', 'toy', 'weight')
tname = 'dogs'
val_tuple = ("Fluffy", "sock", "25")
cmd = "INSERT INTO {} {} VALUES (?, ?, ?) ".format(tname, cols)
curs.execute(cmd, val_tuple)
print()


# with names we can simply recycle them
def print_rows():
    "a utility function you may want to keep"
    cmd = "SELECT * from {}".format(tname)
    print(cmd)
    curs.execute(cmd)
    result = curs.fetchall()
    if result:
        for r in result:
            nice_output = ''
            for label, res in zip(cols, r):
                nice_output += "{:>10} = {:<10}".format(label, res)
            print(nice_output)


print_rows()

# Getting column names from the database
curs.execute(cmd)
for ix, name in enumerate(curs.description):
    print("column {} is called {}".format(ix, name[0]))

# Figure out how many rows in the table
cmd = "SELECT COUNT(*) FROM {}".format(tname)
curs.execute(cmd)
result = curs.fetchone()
number_of_rows, = result
print("Awesome, we've captured {} rows.".format(number_of_rows))
print()

# Retrieving information
#
# Ask for everything:
curs.execute('SELECT * FROM dogs')

# You can get however many results using fetchone(), fetchall() or fetchmany()
curs.execute('SELECT * FROM dogs')
while True:
    row = curs.fetchone()
    if not row:
        break
    print(row)
print('*' * 20)

curs.execute('SELECT * FROM dogs')
while True:
    row = curs.fetchmany(2)
    if not row:
        break
    print(row)
print('*' * 20)

# You can make queries as complex/fancy as you want
cmd = 'SELECT name, weight FROM dogs WHERE weight >= 60'
print(cmd)
curs.execute(cmd)
print(curs.fetchall())

# ... and order the results
cmd = 'SELECT name, weight FROM dogs WHERE weight >= 60 ORDER BY name'
print(cmd)
curs.execute(cmd)
print_rows()
for row in curs.fetchall():
    print(row)
print(curs.fetchall())

# updates
print()
cmd = "UPDATE {} SET weight=? WHERE name='Snoopy'".format(tname)
weight = (666,)
curs.execute(cmd, weight)
cmd = "SELECT * FROM {} WHERE name='Snoopy'".format(tname)
print(cmd)
curs.execute(cmd)
result = curs.fetchone()
print(result)

# deletions
cmd = "DELETE FROM {} WHERE toy = ? ".format(tname)
toy = ('sock',)
curs.execute(cmd, toy)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()

cmd = "DELETE FROM {} WHERE toy LIKE ?".format(tname)
toy_selector = ('%el',)
curs.execute(cmd, toy_selector)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()

cmd = "DELETE FROM {}".format(tname)
curs.execute(cmd)
cmd = "SELECT * FROM {}".format(tname)
curs.execute(cmd)
print_rows()
4,049
ServerML/heatmap.py
SmallPlanetiOS/smallplanet_Pinball
13
2023600
from __future__ import division

from PIL import Image
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, array_to_img
import sys
import imageio

import train
import model
import images


def ExportAnimatedHeatmapForAllImages(outputPath):
    images = []

    savedTrainingRunNumber = train.trainingRunNumber
    maxTrainingRun = train.ConfirmTrainingNumber()
    for runNumber in range(0, maxTrainingRun):
        images.append(imageio.imread(train.HeatmapPath(runNumber)))

    # add a few more at the end so there is a pause before it loops
    images.append(imageio.imread(train.HeatmapPath(maxTrainingRun - 1)))
    images.append(imageio.imread(train.HeatmapPath(maxTrainingRun - 1)))
    images.append(imageio.imread(train.HeatmapPath(maxTrainingRun - 1)))
    images.append(imageio.imread(train.HeatmapPath(maxTrainingRun - 1)))

    imageio.mimsave(outputPath, images, duration=0.5)

    train.trainingRunNumber = savedTrainingRunNumber


def ExportHeatmapForModel(runNumber, outputPath):
    # 0. Load the base image
    baseImg = Image.open('resources/heatmap_base.jpg', 'r')
    img_w, img_h = baseImg.size
    basePix = baseImg.load()

    # 1. Load the ball image
    ballImg = Image.open('resources/heatmap_ball.png', 'r')
    ball_w, ball_h = ballImg.size

    # 2. Create the scratch image
    scratchImg = Image.new('RGB', (img_w, img_h), (255, 255, 255, 255))

    # 3. Create the heat map
    heatmapImg = Image.new('RGB', (img_w // 2, img_h), (255, 255, 255, 255))
    heatmapPix = heatmapImg.load()

    # 4. load the model
    cnn_model = model.cnn_model()
    cnn_model.load_weights(train.ModelWeightsPath(runNumber + 1))

    # 5. prepare a numpy img to send to our model
    scratchNP = np.zeros((1, img_h, img_w, 3), dtype='float32')

    print("Generating heatmap:")
    for x in range(0, img_w // 2):
        sys.stdout.write('.')
        sys.stdout.flush()
        for y in range(0, img_h):
            scratchImg.paste(baseImg, (0, 0))
            scratchImg.paste(ballImg, (x - ball_w // 2, y - ball_h // 2), ballImg)
            scratchImg.paste(ballImg, (x - ball_w // 2 + img_w // 2 + 5, y - ball_h // 2), ballImg)

            np.copyto(scratchNP[0], img_to_array(scratchImg))

            predictions = cnn_model.predict(scratchNP)
            pred_left = predictions[0][0]
            pred_right = predictions[0][1]

            # heatmapPix[x,y] = (int(basePix[x,y][0] * 0.4 + pred_left*153.0), int(basePix[x,y][1] * 0.4 + pred_right*153.0), 0)
            heatmapPix[x, y] = (int(pred_left * 255.0), int(pred_right * 255.0), 0)
    print('done')

    heatmapImg = heatmapImg.resize((heatmapImg.size[0] * 6, heatmapImg.size[1] * 6), Image.ANTIALIAS)

    # overlay the run number on the image
    r = int(runNumber)
    x = heatmapImg.size[0]
    while r >= 0:
        n = r % 10
        r = r // 10

        numImg = Image.open('resources/num{}.png'.format(n), 'r')
        x -= numImg.size[0]

        heatmapImg.paste(numImg, (x, heatmapImg.size[1] - numImg.size[1]), numImg)
        heatmapImg.paste(numImg, (x, heatmapImg.size[1] - numImg.size[1]), numImg)
        heatmapImg.paste(numImg, (x, heatmapImg.size[1] - numImg.size[1]), numImg)

        if r == 0:
            break

    heatmapImg.save(outputPath)


# maxTrainingRun = train.ConfirmTrainingNumber()
# for i in range(0, maxTrainingRun-1):
#     ExportHeatmapForModel(i, 'heatmap_{}.png'.format(i))
3,558
swagger_server/test/operational_controllers/test_users_with_roles_for_site.py
hedleyroos/core-access-control
0
2023111
import json
import uuid

from ge_core_shared import db_actions, decorators

from project.settings import API_KEY_HEADER
from swagger_server.test import BaseTestCase, db_create_entry
from swagger_server.models.domain import Domain
from swagger_server.models.domain_role import DomainRole
from swagger_server.models.role import Role
from swagger_server.models.site import Site
from swagger_server.models.site_create import SiteCreate
from swagger_server.models.site_role import SiteRole
from swagger_server.models.user_domain_role import UserDomainRole
from swagger_server.models.user_site_role import UserSiteRole

ROLES = [
    {
        "label": ("%s" % uuid.uuid1())[:30],
        "description": "Role to view"
    },
    {
        "label": ("%s" % uuid.uuid1())[:30],
        "description": "Role to create",
    },
    {
        "label": ("%s" % uuid.uuid1())[:30],
        "description": "Role to update"
    },
    {
        "label": ("%s" % uuid.uuid1())[:30],
        "description": "Role to delete",
    }
]


class TestUsersWithRolesForSite(BaseTestCase):

    @decorators.db_exception
    def setUp(self):
        super().setUp()
        # Parent Domain
        self.domain_parent_data = {
            "name": ("%s" % uuid.uuid1())[:30],
            "description": "The Root Domain",
        }
        self.domain_parent_model = db_actions.crud(
            model="Domain",
            api_model=Domain,
            data=self.domain_parent_data,
            action="create"
        )
        # Child Domain
        self.domain_child_data = {
            "name": ("%s" % uuid.uuid1())[:30],
            "description": "The Child Domain",
            "parent_id": self.domain_parent_model.id
        }
        self.domain_child_model = db_actions.crud(
            model="Domain",
            api_model=Domain,
            data=self.domain_child_data,
            action="create"
        )
        # Site Child
        self.site_data = {
            "name": ("%s" % uuid.uuid1())[:30],
            "domain_id": self.domain_child_model.id,
            "description": "A Site",
            "client_id": 1,
            "is_active": True,
        }
        self.site = db_create_entry(
            model="Site",
            data=self.site_data,
        )
        # Create some roles.
        self.roles = []
        for role in ROLES:
            role_model = db_actions.crud(
                model="Role",
                api_model=Role,
                data=role,
                action="create"
            )
            self.roles.append(role_model)

        # Some users as well.
        self.user_id_1 = "%s" % uuid.uuid1()
        self.user_id_2 = "%s" % uuid.uuid1()
        for role in self.roles:
            domain_role_data = {
                "domain_id": self.domain_parent_model.id,
                "role_id": role.id,
                "grant_implicitly": "view" in role.description
            }
            db_actions.crud(
                model="DomainRole",
                api_model=DomainRole,
                data=domain_role_data,
                action="create"
            )
            if not domain_role_data["grant_implicitly"]:
                user_domain_role_data = {
                    "user_id": self.user_id_1,
                    "domain_id": self.domain_parent_model.id,
                    "role_id": role.id
                }
                db_actions.crud(
                    model="UserDomainRole",
                    api_model=UserDomainRole,
                    data=user_domain_role_data,
                    action="create"
                )
            domain_role_data = {
                "domain_id": self.domain_child_model.id,
                "role_id": role.id,
                "grant_implicitly": "view" in role.description
            }
            db_actions.crud(
                model="DomainRole",
                api_model=DomainRole,
                data=domain_role_data,
                action="create"
            )
            if "create" in role.description:
                user_domain_role_data = {
                    "user_id": self.user_id_2,
                    "domain_id": self.domain_child_model.id,
                    "role_id": role.id
                }
                db_actions.crud(
                    model="UserDomainRole",
                    api_model=UserDomainRole,
                    data=user_domain_role_data,
                    action="create"
                )
            site_role_data = {
                "site_id": self.site.id,
                "role_id": role.id,
                "grant_implicitly": "view" in role.description
            }
            db_actions.crud(
                model="SiteRole",
                api_model=SiteRole,
                data=site_role_data,
                action="create"
            )
            if "update" in role.description:
                user_site_role_data = {
                    "user_id": self.user_id_2,
                    "site_id": self.site.id,
                    "role_id": role.id
                }
                db_actions.crud(
                    model="UserSiteRole",
                    api_model=UserSiteRole,
                    data=user_site_role_data,
                    action="create"
                )
        self.headers = {API_KEY_HEADER: "test-api-key"}

    def test_get_users_with_roles_for_site(self):
        """Test case for get_users_with_roles_for_site
        """
        response = self.client.open(
            "/api/v1/ops/users_with_roles_for_site/{site_id}".format(
                site_id=self.site.id
            ),
            method='GET',
            headers=self.headers)
        r_data = json.loads(response.data)
        self.assertEquals(len(r_data), 2)
        for user in r_data:
            self.assertEquals(
                len(user["role_ids"]),
                3 if user["user_id"] == self.user_id_1 else 2
            )


if __name__ == '__main__':
    import unittest
    unittest.main()
5,981
6 kyu/Jungersteins Math Training Room 1 How many zeros are at the end of n.py
mwk0408/codewars_solutions
6
2022654
def count_zeros_n_double_fact(n):
    # Odd double factorials contain no factor of 2, hence no trailing zeros.
    if n % 2 != 0:
        return 0
    # For even n, n!! = 2^(n/2) * (n/2)!, so trailing zeros equal the number
    # of factors of 5 in (n/2)!, i.e. the sum of n // (2 * 5^k) over k >= 1.
    multiply = 10
    total = 0
    while multiply <= n:  # '<=' so inputs like n = 10 or n = 50 count their own term
        total += n // multiply
        multiply *= 5
    return total
180
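The closed form above leans on n!! = 2^(n/2) · (n/2)! for even n, so the trailing-zero count is Σ n // (2·5^k). A brute-force cross-check of that identity, which is a sketch of mine rather than part of the kata solution (the helper name is invented):

def count_zeros_brute_force(n):
    # Multiply n!! out directly and count trailing zeros of the decimal string.
    product = 1
    for k in range(n, 1, -2):
        product *= k
    digits = str(product)
    return len(digits) - len(digits.rstrip('0'))


for n in range(2, 200, 2):
    assert count_zeros_n_double_fact(n) == count_zeros_brute_force(n), n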
django_private_chat2/admin.py
sidarun88/django_private_chat2
150
2022778
# -*- coding: utf-8 -*-
from django.contrib.admin import ModelAdmin, site

from .models import MessageModel, DialogsModel


class MessageModelAdmin(ModelAdmin):
    readonly_fields = ('created', 'modified',)
    search_fields = ('id', 'text', 'sender__pk', 'recipient__pk')
    list_display = ('id', 'sender', 'recipient', 'text', 'file', 'read')
    list_display_links = ('id',)
    list_filter = ('sender', 'recipient')
    date_hierarchy = 'created'


class DialogsModelAdmin(ModelAdmin):
    readonly_fields = ('created', 'modified',)
    search_fields = ('id', 'user1__pk', 'user2__pk')
    list_display = ('id', 'user1', 'user2')
    list_display_links = ('id',)
    date_hierarchy = 'created'


site.register(DialogsModel, DialogsModelAdmin)
site.register(MessageModel, MessageModelAdmin)
796
cam-server/camera.py
ckauth/swissless
0
2022875
from picamera import PiCamera
from time import sleep

camera = PiCamera()
camera.rotation = 180
camera.start_preview()
for x in range(30):  # capture for 1 min: 30 shots, 2 s apart (was Python 2 'xrange')
    sleep(2)
    camera.capture('images/image.jpg')
camera.stop_preview()
245
tools/Vitis-AI-Library/graph_runner/test/yolov4-tiny.py
hito0512/Vitis-AI
848
2023288
#!/usr/bin/python3
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import hashlib

import numpy as np
import vart
import xir


def md5(np_array):
    hash_md5 = hashlib.md5()
    hash_md5.update(np_array)
    return hash_md5.hexdigest()


g = xir.Graph.deserialize(
    '/workspace/aisw/debug_models/tensorflow-yolov4-tiny-master/tensorflow-yolov4-tiny-master/compile/yolov4-tiny/yolov4-tiny.xmodel'
)

# Dissection of subgraphs.
subgraphs = g.get_root_subgraph().toposort_child_subgraph()
dpu_subgraph1 = subgraphs[2]
print("dpu_subgraph1 = " + dpu_subgraph1.get_name())  # must be subgraph_detector/yolo-v4-tiny/Conv/Conv2D
dpu_subgraph2 = subgraphs[4]
print("dpu_subgraph2 = " + dpu_subgraph2.get_name())  # must be subgraph_detector/yolo-v4-tiny/Conv_3/Conv2D
dpu_subgraph3 = subgraphs[6]
print("dpu_subgraph3 = " + dpu_subgraph3.get_name())  # must be subgraph_detector/yolo-v4-tiny/Conv_10/Conv2D
dpu_subgraph4 = subgraphs[8]
print("dpu_subgraph4 = " + dpu_subgraph4.get_name())  # must be subgraph_detector/yolo-v4-tiny/Conv_11/Conv2D

### start to run first DPU subgraph 'subgraph_detector/yolo-v4-tiny/Conv/Conv2D'
input1 = np.fromfile(
    '/scratch/models/cache/golden/74/32192dbe8b0cacdf99c2112732324b',
    dtype='int8')
print("md5(input1)={}".format(md5(input1)))  # 7432192dbe8b0cacdf99c2112732324b
input1 = input1.reshape([1, 416, 416, 3])
output1 = np.zeros(
    [1, 104, 104, 64], dtype='int8'
)  # it would be better to use fix point, convenient for comparing.
dpu_1 = vart.Runner.create_runner(dpu_subgraph1, "run")
job1 = dpu_1.execute_async([input1], [output1])
dpu_1.wait(job1)
print("md5(output1)={}".format(md5(output1)))  # a47ffd19dbae3b7185f48198e024736a

### start to run second DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_3/Conv2D
### note this subgraph needs two inputs.
# copy is important, otherwise we see errors like 'ndarray is not C-contiguous'
input2_0 = output1[:, :, :, 32:64].copy()
print("md5(input2_0)={}".format(md5(input2_0)))  # aa55fc2bfef038563e5a031dbddebee9
input2_1 = output1  # dpu2 needs two inputs
output2 = np.zeros([1, 52, 52, 128], dtype='int8')
dpu_2 = vart.Runner.create_runner(dpu_subgraph2, "run")
job2 = dpu_2.execute_async([input2_0, input2_1], [output2])
dpu_2.wait(job2)
print("md5(output2)={}".format(md5(output2)))  # 1866755506ebdb54c7f766fd530e1cc3

### start to run 3rd DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_10/Conv2D
### similar to the second subgraph.
input3_0 = output2[:, :, :, 64:128].copy()
print("md5(input3_0)={}".format(md5(input3_0)))  # 9fe461a5deb61f09210bb4ac415ec8b7
input3_1 = output2  # dpu3 needs two inputs
output3 = np.zeros([1, 26, 26, 256], dtype='int8')
dpu_3 = vart.Runner.create_runner(dpu_subgraph3, "run")
print("dpu_3.get_input_tensors()={}".format(dpu_3.get_input_tensors()))
# note: the input tensors do not have a stable order; we must be careful to
# match the order of inputs.
job3 = dpu_3.execute_async([input3_1, input3_0], [output3])
dpu_3.wait(job3)
print("md5(output3)={}".format(md5(output3)))  # 4efe5a9bf47ce2bd861632ec1a535b34

### start to run 4th DPU subgraph subgraph_detector/yolo-v4-tiny/Conv_11/Conv2D
input4_0 = output3[:, :, :, 128:256].copy()
print("md5(input4_0)={}".format(md5(input4_0)))  # b4eb64306980a99f951ae2396edc08e4
input4_1 = output3  # dpu4 needs two inputs
output4 = np.zeros([1, 26, 26, 255], dtype='int8')
dpu_4 = vart.Runner.create_runner(dpu_subgraph4, "run")
print("dpu_4.get_input_tensors()={}".format(dpu_4.get_input_tensors()))
# note: the input tensors do not have a stable order; we must be careful to
# match the order of inputs.
job4 = dpu_4.execute_async([input4_1, input4_0], [output4])
dpu_4.wait(job4)
print("md5(output4)={}".format(md5(output4)))  # 17eb158cbb6c978bb75445c3002998fb
4,549
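Why the .copy() calls in the script above matter: slicing the channel axis of an NHWC array yields a strided view, and runtimes that require C-contiguous buffers reject it. A standalone numpy illustration (only the shape mirrors output1; nothing else from the script is assumed):

import numpy as np

a = np.zeros([1, 104, 104, 64], dtype='int8')
view = a[:, :, :, 32:64]             # a strided view into a's buffer
print(view.flags['C_CONTIGUOUS'])    # False -> 'ndarray is not C-contiguous' errors downstream
owned = view.copy()                  # materialise a contiguous copy
print(owned.flags['C_CONTIGUOUS'])   # True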
google/cloud/osconfig/agentendpoint/v1beta/osconfig-agentendpoint-v1beta-py/google/cloud/osconfig/agentendpoint_v1beta/types/__init__.py
googleapis/googleapis-gen
7
2023993
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .agentendpoint import (
    ReceiveTaskNotificationRequest,
    ReceiveTaskNotificationResponse,
    RegisterAgentRequest,
    RegisterAgentResponse,
    ReportTaskCompleteRequest,
    ReportTaskCompleteResponse,
    ReportTaskProgressRequest,
    ReportTaskProgressResponse,
    StartNextTaskRequest,
    StartNextTaskResponse,
)
from .guest_policies import (
    AptRepository,
    EffectiveGuestPolicy,
    GooRepository,
    LookupEffectiveGuestPolicyRequest,
    Package,
    PackageRepository,
    SoftwareRecipe,
    YumRepository,
    ZypperRepository,
    DesiredState,
)
from .patch_jobs import (
    AptSettings,
    ExecStep,
    ExecStepConfig,
    GcsObject,
    GooSettings,
    PatchConfig,
    RetryStrategy,
    WindowsUpdateSettings,
    YumSettings,
    ZypperSettings,
)
from .tasks import (
    ApplyPatchesTask,
    ApplyPatchesTaskOutput,
    ApplyPatchesTaskProgress,
    ExecStepTask,
    ExecStepTaskOutput,
    ExecStepTaskProgress,
    Task,
    TaskDirective,
    TaskType,
)

__all__ = (
    'ReceiveTaskNotificationRequest',
    'ReceiveTaskNotificationResponse',
    'RegisterAgentRequest',
    'RegisterAgentResponse',
    'ReportTaskCompleteRequest',
    'ReportTaskCompleteResponse',
    'ReportTaskProgressRequest',
    'ReportTaskProgressResponse',
    'StartNextTaskRequest',
    'StartNextTaskResponse',
    'AptRepository',
    'EffectiveGuestPolicy',
    'GooRepository',
    'LookupEffectiveGuestPolicyRequest',
    'Package',
    'PackageRepository',
    'SoftwareRecipe',
    'YumRepository',
    'ZypperRepository',
    'DesiredState',
    'AptSettings',
    'ExecStep',
    'ExecStepConfig',
    'GcsObject',
    'GooSettings',
    'PatchConfig',
    'RetryStrategy',
    'WindowsUpdateSettings',
    'YumSettings',
    'ZypperSettings',
    'ApplyPatchesTask',
    'ApplyPatchesTaskOutput',
    'ApplyPatchesTaskProgress',
    'ExecStepTask',
    'ExecStepTaskOutput',
    'ExecStepTaskProgress',
    'Task',
    'TaskDirective',
    'TaskType',
)
2,601
ee/clickhouse/sql/sessions/average_per_period.py
avoajaugochukwu/posthog
7,409
2022639
AVERAGE_PER_PERIOD_SQL = """
    SELECT AVG(session_duration_seconds) as total, {interval}(timestamp) as day_start
    FROM ({sessions})
    GROUP BY {interval}(timestamp)
"""
214
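The template above is filled in with a ClickHouse interval function and an inner sessions query. A hedged usage sketch (both values are illustrative placeholders, not taken from the PostHog codebase):

query = AVERAGE_PER_PERIOD_SQL.format(
    interval="toStartOfDay",  # any ClickHouse date-bucketing function, e.g. toStartOfWeek
    sessions="SELECT session_duration_seconds, timestamp FROM sessions",  # hypothetical subquery
)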
metoffice_ec2/message.py
tomwhite/metoffice_ec2
1
2023774
import hashlib
import io
import json
import os
from typing import Dict, List

import boto3
import pandas as pd
import xarray as xr


class MetOfficeMessage:
    def __init__(self, sqs_message: Dict):
        """
        Args:
            sqs_message: An AWS Simple Queue Service message.
        """
        body_json_string = sqs_message['Body']
        _check_md5(body_json_string, sqs_message['MD5OfBody'])
        body_dict = json.loads(body_json_string)
        self.message = json.loads(body_dict['Message'])
        self.sqs_message = sqs_message

    def sqs_message_sent_timestamp(self) -> pd.Timestamp:
        """Returns the time the message was sent to the queue."""
        attributes = self.sqs_message['Attributes']
        sent_timestamp = float(attributes['SentTimestamp']) / 1000
        return pd.Timestamp.fromtimestamp(sent_timestamp)

    def sqs_approx_receive_count(self) -> int:
        """Returns the approx number of times a message has been received
        from the queue but not deleted."""
        attributes = self.sqs_message['Attributes']
        return int(attributes['ApproximateReceiveCount'])

    def is_multi_level(self):
        return 'height' in self.message and ' ' in self.message['height']

    def is_wanted(self, nwp_params: List[str], max_receive_count: int = 10) -> bool:
        """Returns True if this message describes an NWP we want.

        Args:
            nwp_params: The Numerical Weather Prediction parameters we want.
            max_receive_count: If this message has been received more than
                `max_receive_count` times, then we don't want this message.
        """
        var_name = self.message['name']
        is_multi_level = self.is_multi_level()
        approx_receive_count = self.sqs_approx_receive_count()
        return (
            var_name in nwp_params
            and is_multi_level
            and approx_receive_count < max_receive_count)

    def source_url(self) -> str:
        source_bucket = self.message['bucket']
        source_key = self.message['key']
        return os.path.join(source_bucket, source_key)

    def load_netcdf(self) -> xr.Dataset:
        boto_s3 = boto3.client('s3')
        get_obj_response = boto_s3.get_object(
            Bucket=self.message['bucket'], Key=self.message['key'])
        netcdf_bytes = get_obj_response['Body'].read()
        netcdf_bytes_io = io.BytesIO(netcdf_bytes)
        return xr.open_dataset(netcdf_bytes_io, engine='h5netcdf')

    def object_size_mb(self) -> float:
        return self.message['object_size'] / 1E6

    def __repr__(self) -> str:
        string = ''
        string += 'var_name={}; '.format(self.message['name'])
        string += 'is_multi_level={}; '.format(self.is_multi_level())
        string += 'object_size={:,.1f} MB; '.format(self.object_size_mb())
        string += 'model={}; '.format(self.message['model'])
        string += 'SQS_message_sent_timestamp={}; '.format(
            self.sqs_message_sent_timestamp())
        string += 'forecast_reference_time={}; '.format(
            self.message['forecast_reference_time'])
        string += 'created_time={}; '.format(self.message['created_time'])
        string += 'time={}; '.format(self.message['time'])
        string += 'source_url={}; '.format(self.source_url())
        string += 'SQS_approx_receive_count={}; '.format(
            self.sqs_approx_receive_count())
        string += 'SQS_message_ID={}'.format(self.sqs_message['MessageId'])
        return string


def _check_md5(text: str, md5_of_body: str):
    text = text.encode('utf-8')
    md5 = hashlib.md5(text)
    if md5.hexdigest() != md5_of_body:
        raise RuntimeError('MD5 checksum does not match!')
3,720
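A quick standalone exercise of the _check_md5 guard above, with an invented stand-in body (real SQS bodies carry the Met Office notification JSON):

import hashlib

body = '{"Message": "{}"}'
_check_md5(body, hashlib.md5(body.encode('utf-8')).hexdigest())  # digests match: returns silently
try:
    _check_md5(body, '0' * 32)  # deliberately wrong digest
except RuntimeError as err:
    print(err)  # MD5 checksum does not match!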
problem/migrations/0014_auto_20180618_1952.py
d9e7381f/onlinejudge-2.0
0
2024034
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-06-18 11:52
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('problem', '0013_auto_20180406_1533'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='problem',
            name='vote_downs',
        ),
        migrations.RemoveField(
            model_name='problem',
            name='vote_ups',
        ),
        migrations.AddField(
            model_name='problem',
            name='vote_rank_score',
            field=models.FloatField(default=0.0),
        ),
    ]
672
torch_inception_resnet_v2/blocks/inception/inception_resnet.py
mhconradt/inception-resnet-v2
9
2023122
from torch import nn

from torch_inception_resnet_v2.utils.concurrent import Concurrent

"""
Defines the base of an inception ResNet block.
"""


class InceptionResNetBlock(nn.Module):
    def __init__(self, scale, combination: nn.Module, *branches: nn.Module):
        super().__init__()
        self.scale = scale
        self.combination = combination
        self.branches = Concurrent()
        for branch in branches:
            self.branches.append(branch)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        output = self.branches(x)
        output = self.combination(output)
        # Scaled residual connection: add a damped block output back onto the input.
        output = self.scale * output + x
        return self.activation(output)
709
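For intuition, a self-contained sketch of the same pattern in plain torch: the Concurrent container is approximated by concatenating branch outputs along the channel axis, and the 1x1 'combination' conv and channel widths are invented for the example, not taken from the repo:

import torch
from torch import nn


class TinyResNetBlock(nn.Module):
    def __init__(self, channels=32, scale=0.2):
        super().__init__()
        self.branch_a = nn.Conv2d(channels, 16, kernel_size=1)
        self.branch_b = nn.Conv2d(channels, 16, kernel_size=3, padding=1)
        self.combination = nn.Conv2d(32, channels, kernel_size=1)  # maps concat back to input width
        self.scale = scale
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        out = torch.cat([self.branch_a(x), self.branch_b(x)], dim=1)
        out = self.combination(out)
        return self.activation(self.scale * out + x)  # scaled residual, as in the block above


x = torch.randn(1, 32, 17, 17)
print(TinyResNetBlock()(x).shape)  # torch.Size([1, 32, 17, 17])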
app/migrations/0007_auto_20201116_0507.py
michael-huber2772/portfolio-dashboard
0
2023302
# Generated by Django 3.1.3 on 2020-11-16 12:07

import datetime

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0006_auto_20201031_1437'),
    ]

    operations = [
        migrations.CreateModel(
            name='MTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
            ],
        ),
        migrations.AlterField(
            model_name='productprice',
            name='start_date',
            field=models.DateTimeField(default=datetime.datetime(2020, 11, 16, 5, 7, 53, 23971), null=True),
        ),
        migrations.CreateModel(
            name='RawMaterial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('tag', models.ManyToManyField(to='app.MTag')),
            ],
        ),
        migrations.AddField(
            model_name='product',
            name='raw_material',
            field=models.ManyToManyField(to='app.RawMaterial'),
        ),
    ]
1,266
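One note on the migration above: default=datetime.datetime(2020, 11, 16, ...) freezes the instant makemigrations ran, which is rarely what a start_date wants. If the intent was "now, per row", the model field would normally take a callable; a hedged sketch of that fix (the model definition is reconstructed for illustration, not taken from the repo):

from django.db import models
from django.utils import timezone


class ProductPrice(models.Model):
    # Pass the callable itself (no parentheses) so Django evaluates it at each save.
    start_date = models.DateTimeField(default=timezone.now, null=True)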
Solutions/Problem12.py
sausage948/AoC2017
0
2023911
import re

# ------Input----- #
answer1 = 0
answer2 = 0

print("Input the adjacency list. Calculation will start on the first empty line.")

inputList = []
while True:
    inputString = input("")
    if inputString == "":
        break
    inputList.append(inputString)


def parseString(string):
    splitUp = re.split(' <-> |, ', string)
    neighborsList = []
    for n in splitUp[1:]:
        if n != '':
            neighborsList.append(int(n))
    return [int(splitUp[0]), neighborsList, False]


adjacencyList = list(map(parseString, inputList))
numberOfNodes = len(adjacencyList)
numberOfDiscoveredNodes = 0
numberOfGroups = 0

# ------Parts 1 & 2------ #


def DFS(vertex):
    adjacencyList[vertex][2] = True
    for neighbor in adjacencyList[vertex][1]:
        if not wasDiscovered(neighbor):
            DFS(neighbor)


def wasDiscovered(vertex):
    return adjacencyList[vertex][2]


while numberOfDiscoveredNodes < numberOfNodes:
    indexFirstUndiscovered = list(map(wasDiscovered, range(numberOfNodes))).index(0)
    DFS(indexFirstUndiscovered)
    numberOfDiscoveredNodes = sum(list(map(wasDiscovered, range(numberOfNodes))))
    numberOfGroups += 1
    if indexFirstUndiscovered == 0:
        # Part 1: size of the group containing program 0 (found on the first pass).
        answer1 = numberOfDiscoveredNodes
    # Part 2: total number of groups; the final assignment after the loop wins.
    answer2 = numberOfGroups

# ------Output----- #
print("Answer 1: " + str(answer1))
print("Answer 2: " + str(answer2))
1,365
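A caveat on the solution above: DFS recurses once per undiscovered neighbour, so a long chain in the input can exceed CPython's default recursion limit (1000 frames). A hedged sketch of the usual escape hatch (the limit value is a guess sized to the puzzle's roughly 2000 programs):

import sys

sys.setrecursionlimit(10_000)  # raise before the DFS loop if the input graph is deeply chained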
tests/speculos/test_status_word.py
aido/app-sskr
3
2022839
from pathlib import Path
from typing import List, Dict, Any, Tuple
import re

from sskr_client.exception import DeviceException

SW_RE = re.compile(r"""(?x)
    \#                                  # character '#'
    define                              # string 'define'
    \s+                                 # spaces
    (?P<identifier>SW(?:_[A-Z0-9]+)*)   # identifier (e.g. 'SW_OK')
    \s+                                 # spaces
    0x(?P<sw>[a-fA-F0-9]{4})            # 4 bytes status word
""")


def parse_sw(path: Path) -> List[Tuple[str, int]]:
    if not path.is_file():
        raise FileNotFoundError(f"Can't find file: '{path}'")

    sw_h: str = path.read_text()

    return [(identifier, int(sw, base=16))
            for identifier, sw in SW_RE.findall(sw_h) if sw != "9000"]


def test_status_word(sw_h_path):
    expected_status_words: List[Tuple[str, int]] = parse_sw(sw_h_path)
    status_words: Dict[int, Any] = DeviceException.exc
    assert len(expected_status_words) == len(status_words), (
        f"{expected_status_words} doesn't match {status_words}")

    # just keep status words
    expected_status_words = [sw for (identifier, sw) in expected_status_words]

    for sw in status_words.keys():
        assert sw in expected_status_words, f"{status_words[sw]}({hex(sw)}) not found in sw.h!"
1,320
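For reference, a line of the kind SW_RE is written to match (the sample is invented; the real definitions live in the app's sw.h):

sample = "#define SW_WRONG_P1P2 0x6B00"
print(SW_RE.findall(sample))  # [('SW_WRONG_P1P2', '6B00')]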
camtfpc.py
MikeHallettUK/RosRobotics
0
2022707
#!/usr/bin/env python3
# tf_pc_cam subscribes to the PC2 topic from the RealSense camera, transforms it
# into the odom frame, and republishes on /points2.

import rospy
import tf2_ros
from tf2_sensor_msgs.tf2_sensor_msgs import PointCloud2, do_transform_cloud  # to support PointCloud2

rospy.init_node("tf_pc_cam")

tf_pub = rospy.Publisher("points2", PointCloud2, queue_size=10)
tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(2))
tf_listener = tf2_ros.TransformListener(tf_buffer)
rospy.sleep(0.2)  # let tf_buffer fill up a bit ...


def pc_cb(msg):
    cantran = tf_buffer.can_transform("odom", msg.header.frame_id, msg.header.stamp,
                                      rospy.Duration(0.1))
    if cantran:
        trans = tf_buffer.lookup_transform("odom", msg.header.frame_id, msg.header.stamp,
                                           rospy.Duration(0.1))
        cloud_out = do_transform_cloud(msg, trans)
        tf_pub.publish(cloud_out)


print("Starting do_transform_cloud from /camera/depth/color/points v1")
rospy.Subscriber("/camera/depth/color/points", PointCloud2, pc_cb,
                 queue_size=1, buff_size=2**24)
rospy.spin()
1,192